from pandas_datareader import data as web
import pandas as pd
import datetime as dt
import numpy as np
import requests
import yfinance as yf  # required by PriceReader.readData / readUSData
http_header = {
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.75 Safari/537.36",
"X-Requested-With": "XMLHttpRequest"
}
class PriceReader:
def __init__(self, brTickerList, usTickerList, startDate='2018-01-01'):
self.brTickerList = brTickerList
self.usTickerList = usTickerList
self.startDate = startDate
self.fillDate = dt.datetime.today().strftime('%m-%d-%Y')
self.df = pd.DataFrame(columns=['Date'])
def load(self):
# Read BR market data
        if (self.brTickerList is not None) and (len(self.brTickerList) > 0):
            self.df = self.readData(self.brTickerList, self.startDate).reset_index()
            self.df.columns = self.df.columns.str.replace(r'\.SA', '', regex=True)
# Read US Market data
        if (self.usTickerList is not None) and (len(self.usTickerList) > 0):
self.df = self.df.merge(self.readUSData(self.usTickerList, self.startDate).reset_index(), how='outer', on='Date')
self.df = self.df.set_index('Date').sort_index()
# self.df.to_csv('debug.csv', sep='\t')
indexList = ['^BVSP', '^GSPC', 'BRLUSD=X']
self.brlIndex = self.readUSData(indexList, self.startDate).reset_index()
self.brlIndex.rename(columns={'^BVSP':'IBOV', '^GSPC':'S&P500', 'BRLUSD=X':'USD'}, inplace=True)
self.brlIndex = self.brlIndex.set_index('Date')
# display(self.brlIndex)
def setFillDate(self, date):
self.fillDate = date
def fillCurrentValue(self, row):
row['PRICE'] = self.getCurrentValue(row['SYMBOL'], self.fillDate)
return row
    def readData(self, code, startDate='2018-01-01'):
        # Fetch daily closing prices for B3 tickers (Yahoo Finance uses the '.SA' suffix)
        s = ' '.join(c + '.SA' for c in code)
        tks = yf.Tickers(s)
        dfs = tks.history(start=startDate)[['Close']]
        dfs.columns = dfs.columns.droplevel()
        return dfs
    def readUSData(self, code, startDate='2018-01-01'):
        # Fetch daily closing prices for US tickers (no suffix needed on Yahoo Finance)
        s = ' '.join(code)
        tks = yf.Tickers(s)
        dfs = tks.history(start=startDate)[['Close']]
        dfs.columns = dfs.columns.droplevel()
        return dfs
def getHistory(self, code, start='2018-01-01'):
return self.df.loc[start:][code]
def getCurrentValue(self, code, date=None):
        if date is None:
return self.df.iloc[-1][code]
available, date = self.checkLastAvailable(self.df, date, code)
if available:
return self.df.loc[date][code]
return self.df.iloc[0][code]
def getIndexHistory(self, code, end):
ret = self.brlIndex.loc[:end][code]
return ret.dropna()
def getIndexCurrentValue(self, code, date=None):
        if date is None:
return self.brlIndex.iloc[-1][code]
available,date = self.checkLastAvailable(self.brlIndex, date, code)
if available:
return self.brlIndex.loc[date][code]
return self.brlIndex.iloc[0][code]
    def checkLastAvailable(self, dtframe, lookupDate, field):
        # Walk backwards, one day at a time, until a row with a non-NaN value for 'field' is found
        date = pd.to_datetime(lookupDate)
        day = pd.Timedelta(1, unit='d')
        while (date not in dtframe.index) or pd.isna(dtframe.loc[date][field]):
            date = date - day
            if date < dtframe.index[0]:
                return False, 0
        return True, date
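# Minimal usage sketch (illustrative only, not part of the original project; the tickers
# below are placeholder assumptions):
def _price_reader_demo():
    reader = PriceReader(brTickerList=['PETR4', 'VALE3'], usTickerList=['AAPL'])
    reader.load()
    print(reader.getCurrentValue('PETR4'))       # most recent close for a BR ticker
    print(reader.getIndexCurrentValue('IBOV'))   # most recent IBOV level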
# -------------------------------------------------------------------------------------------------
class ADVFN_Page:
urlDict = [{ 'url': 'https://br.advfn.com/bolsa-de-valores/bovespa/{}/dividendos/historico-de-proventos', 'index': 5 },
{ 'url': 'https://br.advfn.com/bolsa-de-valores/bovespa/{}/dividendos', 'index': 6}]
def read(self, ticker):
res = pd.DataFrame()
for attempt in range(2):
url = self.urlDict[attempt]['url'].format(ticker)
r = requests.get(url, headers=http_header)
# print(url, urlDict[attempt]['index'])
            try:
                rawTable = pd.read_html(r.text, thousands='.', decimal=',')[self.urlDict[attempt]['index']]
                # display(rawTable)
                if len(rawTable.columns) < 5:
                    # unexpected layout: force a retry with the next URL
                    raise ValueError('unexpected dividend table layout')
            except Exception:
                continue
res = rawTable
if ('Mês de Referência' in res.columns):
res.rename(columns={'Mês de Referência':'Tipo do Provento'}, inplace=True)
res['Tipo do Provento'] = 'Dividendo'
res.rename(columns={'Tipo do Provento':'OPERATION', 'Data-Com':'DATE', 'Pagamento':'PAYDATE', 'Valor':'PRICE', 'Dividend Yield':'YIELD'}, inplace=True)
break
return res
class Fundamentus_Page:
urlDict = [ { 'url': 'https://www.fundamentus.com.br/proventos.php?papel={}&tipo=2', 'index': 0 },
{ 'url': 'https://www.fundamentus.com.br/fii_proventos.php?papel={}&tipo=2', 'index': 0}]
def read(self, ticker):
res = pd.DataFrame()
# if (ticker != 'SMLS3'):
# return res
for attempt in range(2):
url = self.urlDict[attempt]['url'].format(ticker)
r = requests.get(url, headers=http_header)
# print(url, self.urlDict[attempt]['index'])
            try:
                rawTable = pd.read_html(r.text, thousands='.', decimal=',')[self.urlDict[attempt]['index']]
                # print(rawTable)
                if len(rawTable.columns) < 4:
                    # unexpected layout: force a retry with the next URL
                    raise ValueError('unexpected dividend table layout')
            except Exception:
                continue
res = rawTable
if('Por quantas ações' in res.columns):
res['Valor'] /= res['Por quantas ações']
if ('Última Data Com' in res.columns):
res.rename(columns={'Última Data Com':'Data'}, inplace=True)
res.rename(columns={'Tipo':'OPERATION', 'Data':'DATE', 'Data de Pagamento':'PAYDATE', 'Valor':'PRICE'}, inplace=True)
break
# print(res)
return res
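# Minimal usage sketch (illustrative only; 'ITSA4' is a placeholder ticker and the column
# names are the ones produced by the renames inside read() above):
def _dividend_scraper_demo():
    table = Fundamentus_Page().read('ITSA4')
    if table.empty:
        table = ADVFN_Page().read('ITSA4')   # fall back to the ADVFN layout
    print(table.head())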
class DividendReader:
def __init__(self, dataFrame, startDate='2018-01-01'):
self.brTickerList = dataFrame[dataFrame['TYPE'] == 'Ação']['SYMBOL'].unique()
self.usTickerList = dataFrame[dataFrame['TYPE'] == 'STOCK']['SYMBOL'].unique()
self.fiiList = dataFrame[dataFrame['TYPE'] == 'FII']['SYMBOL'].unique()
self.startDate=startDate
self.df = pd.DataFrame(columns=['SYMBOL', 'PRICE', 'PAYDATE'])
    # NOTE: Python keeps only the last definition of __init__, so the constructor above
    # (taking a DataFrame) is shadowed by this one and is effectively dead code.
    def __init__(self, brTickers, fiiTickers, usTickers, startDate='2018-01-01'):
self.brTickerList = brTickers
self.usTickerList = usTickers
self.fiiList = fiiTickers
self.startDate = startDate
self.df = pd.DataFrame(columns=['SYMBOL', 'DATE','PRICE', 'PAYDATE'])
    def load(self):
        frames = []
        if self.brTickerList is not None and len(self.brTickerList) > 0:
            frames.append(self.loadData(self.brTickerList))
        if self.fiiList is not None and len(self.fiiList) > 0:
            frames.append(self.loadData(self.fiiList))
        if self.usTickerList is not None and len(self.usTickerList) > 0:
            frames.append(self.loadData(self.usTickerList))
        if frames:
            # DataFrame.append was removed in pandas 2.0; concat is the equivalent
            self.df = pd.concat([self.df] + frames)
        if not self.df.empty:
            self.df = self.df.sort_values(by=['DATE', 'SYMBOL'])
            self.df = self.df[self.df['DATE'] >= self.startDate]
            self.df.set_index('DATE', inplace=True)
            self.df = self.df[['SYMBOL', 'PRICE', 'PAYDATE']]
        # print(self.df.tail(5))
def loadData(self, paperList):
        tb = pd.DataFrame()
        # API: pandas.DataFrame
# -------------------------------------------------------------------------------------------------
# -*- coding: utf-8 -*-
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import pytest
import re
from numpy import nan as NA
import numpy as np
from numpy.random import randint
from pandas.compat import range, u
import pandas.compat as compat
from pandas import Index, Series, DataFrame, isna, MultiIndex, notna
from pandas.util.testing import assert_series_equal
import pandas.util.testing as tm
import pandas.core.strings as strings
class TestStringMethods(object):
def test_api(self):
# GH 6106, GH 9322
assert Series.str is strings.StringMethods
assert isinstance(Series(['']).str, strings.StringMethods)
# GH 9184
invalid = Series([1])
with tm.assert_raises_regex(AttributeError,
"only use .str accessor"):
invalid.str
assert not hasattr(invalid, 'str')
def test_iter(self):
# GH3638
strs = 'google', 'wikimedia', 'wikipedia', 'wikitravel'
ds = Series(strs)
for s in ds.str:
# iter must yield a Series
assert isinstance(s, Series)
# indices of each yielded Series should be equal to the index of
# the original Series
tm.assert_index_equal(s.index, ds.index)
for el in s:
# each element of the series is either a basestring/str or nan
assert isinstance(el, compat.string_types) or isna(el)
# desired behavior is to iterate until everything would be nan on the
# next iter so make sure the last element of the iterator was 'l' in
# this case since 'wikitravel' is the longest string
assert s.dropna().values.item() == 'l'
def test_iter_empty(self):
ds = Series([], dtype=object)
i, s = 100, 1
for i, s in enumerate(ds.str):
pass
# nothing to iterate over so nothing defined values should remain
# unchanged
assert i == 100
assert s == 1
def test_iter_single_element(self):
ds = Series(['a'])
for i, s in enumerate(ds.str):
pass
assert not i
assert_series_equal(ds, s)
def test_iter_object_try_string(self):
ds = Series([slice(None, randint(10), randint(10, 20)) for _ in range(
4)])
i, s = 100, 'h'
for i, s in enumerate(ds.str):
pass
assert i == 100
assert s == 'h'
def test_cat(self):
one = np.array(['a', 'a', 'b', 'b', 'c', NA], dtype=np.object_)
two = np.array(['a', NA, 'b', 'd', 'foo', NA], dtype=np.object_)
# single array
result = strings.str_cat(one)
exp = 'aabbc'
assert result == exp
result = strings.str_cat(one, na_rep='NA')
exp = 'aabbcNA'
assert result == exp
result = strings.str_cat(one, na_rep='-')
exp = 'aabbc-'
assert result == exp
result = strings.str_cat(one, sep='_', na_rep='NA')
exp = 'a_a_b_b_c_NA'
assert result == exp
result = strings.str_cat(two, sep='-')
exp = 'a-b-d-foo'
assert result == exp
# Multiple arrays
result = strings.str_cat(one, [two], na_rep='NA')
exp = np.array(['aa', 'aNA', 'bb', 'bd', 'cfoo', 'NANA'],
dtype=np.object_)
tm.assert_numpy_array_equal(result, exp)
result = strings.str_cat(one, two)
exp = np.array(['aa', NA, 'bb', 'bd', 'cfoo', NA], dtype=np.object_)
tm.assert_almost_equal(result, exp)
def test_count(self):
values = np.array(['foo', 'foofoo', NA, 'foooofooofommmfoo'],
dtype=np.object_)
result = strings.str_count(values, 'f[o]+')
exp = np.array([1, 2, NA, 4])
tm.assert_numpy_array_equal(result, exp)
result = Series(values).str.count('f[o]+')
exp = Series([1, 2, NA, 4])
assert isinstance(result, Series)
tm.assert_series_equal(result, exp)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_count(mixed, 'a')
xp = np.array([1, NA, 0, NA, NA, 0, NA, NA, NA])
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.count('a')
xp = Series([1, NA, 0, NA, NA, 0, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = [u('foo'), u('foofoo'), NA, u('foooofooofommmfoo')]
result = strings.str_count(values, 'f[o]+')
exp = np.array([1, 2, NA, 4])
tm.assert_numpy_array_equal(result, exp)
result = Series(values).str.count('f[o]+')
exp = Series([1, 2, NA, 4])
assert isinstance(result, Series)
tm.assert_series_equal(result, exp)
def test_contains(self):
values = np.array(['foo', NA, 'fooommm__foo',
'mmm_', 'foommm[_]+bar'], dtype=np.object_)
pat = 'mmm[_]+'
result = strings.str_contains(values, pat)
expected = np.array([False, NA, True, True, False], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
result = strings.str_contains(values, pat, regex=False)
expected = np.array([False, NA, False, False, True], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
values = ['foo', 'xyz', 'fooommm__foo', 'mmm_']
result = strings.str_contains(values, pat)
expected = np.array([False, False, True, True])
assert result.dtype == np.bool_
tm.assert_numpy_array_equal(result, expected)
# case insensitive using regex
values = ['Foo', 'xYz', 'fOOomMm__fOo', 'MMM_']
result = strings.str_contains(values, 'FOO|mmm', case=False)
expected = np.array([True, False, True, True])
tm.assert_numpy_array_equal(result, expected)
# case insensitive without regex
result = strings.str_contains(values, 'foo', regex=False, case=False)
expected = np.array([True, False, True, False])
tm.assert_numpy_array_equal(result, expected)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_contains(mixed, 'o')
xp = np.array([False, NA, False, NA, NA, True, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.contains('o')
xp = Series([False, NA, False, NA, NA, True, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = np.array([u'foo', NA, u'fooommm__foo', u'mmm_'],
dtype=np.object_)
pat = 'mmm[_]+'
result = strings.str_contains(values, pat)
expected = np.array([False, np.nan, True, True], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
result = strings.str_contains(values, pat, na=False)
expected = np.array([False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
values = np.array(['foo', 'xyz', 'fooommm__foo', 'mmm_'],
dtype=np.object_)
result = strings.str_contains(values, pat)
expected = np.array([False, False, True, True])
assert result.dtype == np.bool_
tm.assert_numpy_array_equal(result, expected)
# na
values = Series(['om', 'foo', np.nan])
res = values.str.contains('foo', na="foo")
assert res.loc[2] == "foo"
def test_startswith(self):
values = Series(['om', NA, 'foo_nom', 'nom', 'bar_foo', NA, 'foo'])
result = values.str.startswith('foo')
exp = Series([False, NA, True, False, False, NA, True])
tm.assert_series_equal(result, exp)
# mixed
mixed = np.array(['a', NA, 'b', True, datetime.today(),
'foo', None, 1, 2.], dtype=np.object_)
rs = strings.str_startswith(mixed, 'f')
xp = np.array([False, NA, False, NA, NA, True, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.startswith('f')
assert isinstance(rs, Series)
xp = Series([False, NA, False, NA, NA, True, NA, NA, NA])
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('foo_nom'), u('nom'), u('bar_foo'), NA,
u('foo')])
result = values.str.startswith('foo')
exp = Series([False, NA, True, False, False, NA, True])
tm.assert_series_equal(result, exp)
result = values.str.startswith('foo', na=True)
tm.assert_series_equal(result, exp.fillna(True).astype(bool))
def test_endswith(self):
values = Series(['om', NA, 'foo_nom', 'nom', 'bar_foo', NA, 'foo'])
result = values.str.endswith('foo')
exp = Series([False, NA, False, False, True, NA, True])
tm.assert_series_equal(result, exp)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_endswith(mixed, 'f')
xp = np.array([False, NA, False, NA, NA, False, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.endswith('f')
xp = Series([False, NA, False, NA, NA, False, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('foo_nom'), u('nom'), u('bar_foo'), NA,
u('foo')])
result = values.str.endswith('foo')
exp = Series([False, NA, False, False, True, NA, True])
tm.assert_series_equal(result, exp)
result = values.str.endswith('foo', na=False)
tm.assert_series_equal(result, exp.fillna(False).astype(bool))
def test_title(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.title()
exp = Series(["Foo", "Bar", NA, "Blah", "Blurg"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "blah", None,
1, 2.])
mixed = mixed.str.title()
exp = Series(["Foo", NA, "Bar", NA, NA, "Blah", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.title()
exp = Series([u("Foo"), NA, u("Bar"), u("Blurg")])
tm.assert_series_equal(results, exp)
def test_lower_upper(self):
values = Series(['om', NA, 'nom', 'nom'])
result = values.str.upper()
exp = Series(['OM', NA, 'NOM', 'NOM'])
tm.assert_series_equal(result, exp)
result = result.str.lower()
tm.assert_series_equal(result, values)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'foo', None, 1,
2.])
mixed = mixed.str.upper()
rs = Series(mixed).str.lower()
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('nom'), u('nom')])
result = values.str.upper()
exp = Series([u('OM'), NA, u('NOM'), u('NOM')])
tm.assert_series_equal(result, exp)
result = result.str.lower()
tm.assert_series_equal(result, values)
def test_capitalize(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.capitalize()
exp = Series(["Foo", "Bar", NA, "Blah", "Blurg"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "blah", None,
1, 2.])
mixed = mixed.str.capitalize()
exp = Series(["Foo", NA, "Bar", NA, NA, "Blah", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.capitalize()
exp = Series([u("Foo"), NA, u("Bar"), u("Blurg")])
tm.assert_series_equal(results, exp)
def test_swapcase(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.swapcase()
exp = Series(["foo", "bar", NA, "bLAH", "BLURG"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "Blah", None,
1, 2.])
mixed = mixed.str.swapcase()
exp = Series(["foo", NA, "BAR", NA, NA, "bLAH", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.swapcase()
exp = Series([u("foo"), NA, u("BAR"), u("bLURG")])
tm.assert_series_equal(results, exp)
def test_casemethods(self):
values = ['aaa', 'bbb', 'CCC', 'Dddd', 'eEEE']
s = Series(values)
assert s.str.lower().tolist() == [v.lower() for v in values]
assert s.str.upper().tolist() == [v.upper() for v in values]
assert s.str.title().tolist() == [v.title() for v in values]
assert s.str.capitalize().tolist() == [v.capitalize() for v in values]
assert s.str.swapcase().tolist() == [v.swapcase() for v in values]
def test_replace(self):
values = Series(['fooBAD__barBAD', NA])
result = values.str.replace('BAD[_]*', '')
exp = Series(['foobar', NA])
tm.assert_series_equal(result, exp)
result = values.str.replace('BAD[_]*', '', n=1)
exp = Series(['foobarBAD', NA])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['aBAD', NA, 'bBAD', True, datetime.today(), 'fooBAD',
None, 1, 2.])
rs = Series(mixed).str.replace('BAD[_]*', '')
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA])
result = values.str.replace('BAD[_]*', '')
exp = Series([u('foobar'), NA])
tm.assert_series_equal(result, exp)
result = values.str.replace('BAD[_]*', '', n=1)
exp = Series([u('foobarBAD'), NA])
tm.assert_series_equal(result, exp)
# flags + unicode
values = Series([b"abcd,\xc3\xa0".decode("utf-8")])
exp = Series([b"abcd, \xc3\xa0".decode("utf-8")])
result = values.str.replace(r"(?<=\w),(?=\w)", ", ", flags=re.UNICODE)
tm.assert_series_equal(result, exp)
# GH 13438
for klass in (Series, Index):
for repl in (None, 3, {'a': 'b'}):
for data in (['a', 'b', None], ['a', 'b', 'c', 'ad']):
values = klass(data)
pytest.raises(TypeError, values.str.replace, 'a', repl)
def test_replace_callable(self):
# GH 15055
values = Series(['fooBAD__barBAD', NA])
# test with callable
repl = lambda m: m.group(0).swapcase()
result = values.str.replace('[a-z][A-Z]{2}', repl, n=2)
exp = Series(['foObaD__baRbaD', NA])
tm.assert_series_equal(result, exp)
# test with wrong number of arguments, raising an error
if compat.PY2:
p_err = r'takes (no|(exactly|at (least|most)) ?\d+) arguments?'
else:
p_err = (r'((takes)|(missing)) (?(2)from \d+ to )?\d+ '
r'(?(3)required )positional arguments?')
repl = lambda: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
repl = lambda m, x: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
repl = lambda m, x, y=None: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
# test regex named groups
values = Series(['Foo Bar Baz', NA])
pat = r"(?P<first>\w+) (?P<middle>\w+) (?P<last>\w+)"
repl = lambda m: m.group('middle').swapcase()
result = values.str.replace(pat, repl)
exp = Series(['bAR', NA])
tm.assert_series_equal(result, exp)
def test_replace_compiled_regex(self):
# GH 15446
values = Series(['fooBAD__barBAD', NA])
# test with compiled regex
pat = re.compile(r'BAD[_]*')
result = values.str.replace(pat, '')
exp = Series(['foobar', NA])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['aBAD', NA, 'bBAD', True, datetime.today(), 'fooBAD',
None, 1, 2.])
rs = Series(mixed).str.replace(pat, '')
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA])
result = values.str.replace(pat, '')
exp = Series([u('foobar'), NA])
tm.assert_series_equal(result, exp)
result = values.str.replace(pat, '', n=1)
exp = Series([u('foobarBAD'), NA])
tm.assert_series_equal(result, exp)
# flags + unicode
values = Series([b"abcd,\xc3\xa0".decode("utf-8")])
exp = Series([b"abcd, \xc3\xa0".decode("utf-8")])
pat = re.compile(r"(?<=\w),(?=\w)", flags=re.UNICODE)
result = values.str.replace(pat, ", ")
tm.assert_series_equal(result, exp)
# case and flags provided to str.replace will have no effect
# and will produce warnings
values = Series(['fooBAD__barBAD__bad', NA])
pat = re.compile(r'BAD[_]*')
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', flags=re.IGNORECASE)
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', case=False)
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', case=True)
# test with callable
values = Series(['fooBAD__barBAD', NA])
repl = lambda m: m.group(0).swapcase()
pat = re.compile('[a-z][A-Z]{2}')
result = values.str.replace(pat, repl, n=2)
exp = Series(['foObaD__baRbaD', NA])
tm.assert_series_equal(result, exp)
def test_repeat(self):
values = Series(['a', 'b', NA, 'c', NA, 'd'])
result = values.str.repeat(3)
exp = Series(['aaa', 'bbb', NA, 'ccc', NA, 'ddd'])
tm.assert_series_equal(result, exp)
result = values.str.repeat([1, 2, 3, 4, 5, 6])
exp = Series(['a', 'bb', NA, 'cccc', NA, 'dddddd'])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'foo', None, 1,
2.])
rs = Series(mixed).str.repeat(3)
xp = Series(['aaa', NA, 'bbb', NA, NA, 'foofoofoo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('a'), u('b'), NA, u('c'), NA, u('d')])
result = values.str.repeat(3)
exp = Series([u('aaa'), u('bbb'), NA, u('ccc'), NA, u('ddd')])
tm.assert_series_equal(result, exp)
result = values.str.repeat([1, 2, 3, 4, 5, 6])
exp = Series([u('a'), u('bb'), NA, u('cccc'), NA, u('dddddd')])
tm.assert_series_equal(result, exp)
def test_match(self):
# New match behavior introduced in 0.13
values = Series(['fooBAD__barBAD', NA, 'foo'])
result = values.str.match('.*(BAD[_]+).*(BAD)')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
values = Series(['fooBAD__barBAD', NA, 'foo'])
result = values.str.match('.*BAD[_]+.*BAD')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
# test passing as_indexer still works but is ignored
values = Series(['fooBAD__barBAD', NA, 'foo'])
exp = Series([True, NA, False])
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*BAD[_]+.*BAD', as_indexer=True)
tm.assert_series_equal(result, exp)
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*BAD[_]+.*BAD', as_indexer=False)
tm.assert_series_equal(result, exp)
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*(BAD[_]+).*(BAD)', as_indexer=True)
tm.assert_series_equal(result, exp)
pytest.raises(ValueError, values.str.match, '.*(BAD[_]+).*(BAD)',
as_indexer=False)
# mixed
mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.match('.*(BAD[_]+).*(BAD)')
xp = Series([True, NA, True, NA, NA, False, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo')])
result = values.str.match('.*(BAD[_]+).*(BAD)')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
# na GH #6609
res = Series(['a', 0, np.nan]).str.match('a', na=False)
exp = Series([True, False, False])
assert_series_equal(exp, res)
res = Series(['a', 0, np.nan]).str.match('a')
exp = Series([True, np.nan, np.nan])
assert_series_equal(exp, res)
def test_extract_expand_None(self):
values = Series(['fooBAD__barBAD', NA, 'foo'])
with tm.assert_produces_warning(FutureWarning):
values.str.extract('.*(BAD[_]+).*(BAD)', expand=None)
def test_extract_expand_unspecified(self):
values = Series(['fooBAD__barBAD', NA, 'foo'])
with tm.assert_produces_warning(FutureWarning):
values.str.extract('.*(BAD[_]+).*(BAD)')
def test_extract_expand_False(self):
# Contains tests like those in test_match and some others.
values = Series(['fooBAD__barBAD', NA, 'foo'])
er = [NA, NA] # empty row
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=False)
exp = DataFrame([['BAD__', 'BAD'], er, er])
tm.assert_frame_equal(result, exp)
# mixed
mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.extract('.*(BAD[_]+).*(BAD)', expand=False)
exp = DataFrame([['BAD_', 'BAD'], er, ['BAD_', 'BAD'], er, er, er, er,
er, er])
tm.assert_frame_equal(rs, exp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo')])
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=False)
exp = DataFrame([[u('BAD__'), u('BAD')], er, er])
tm.assert_frame_equal(result, exp)
# GH9980
# Index only works with one regex group since
# multi-group would expand to a frame
idx = Index(['A1', 'A2', 'A3', 'A4', 'B5'])
with tm.assert_raises_regex(ValueError, "supported"):
idx.str.extract('([AB])([123])', expand=False)
# these should work for both Series and Index
for klass in [Series, Index]:
# no groups
s_or_idx = klass(['A1', 'B2', 'C3'])
f = lambda: s_or_idx.str.extract('[ABC][123]', expand=False)
pytest.raises(ValueError, f)
# only non-capturing groups
f = lambda: s_or_idx.str.extract('(?:[AB]).*', expand=False)
pytest.raises(ValueError, f)
# single group renames series/index properly
s_or_idx = klass(['A1', 'A2'])
result = s_or_idx.str.extract(r'(?P<uno>A)\d', expand=False)
assert result.name == 'uno'
exp = klass(['A', 'A'], name='uno')
if klass == Series:
tm.assert_series_equal(result, exp)
else:
tm.assert_index_equal(result, exp)
s = Series(['A1', 'B2', 'C3'])
# one group, no matches
result = s.str.extract('(_)', expand=False)
exp = Series([NA, NA, NA], dtype=object)
tm.assert_series_equal(result, exp)
# two groups, no matches
result = s.str.extract('(_)(_)', expand=False)
exp = DataFrame([[NA, NA], [NA, NA], [NA, NA]], dtype=object)
tm.assert_frame_equal(result, exp)
# one group, some matches
result = s.str.extract('([AB])[123]', expand=False)
exp = Series(['A', 'B', NA])
tm.assert_series_equal(result, exp)
# two groups, some matches
result = s.str.extract('([AB])([123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one named group
result = s.str.extract('(?P<letter>[AB])', expand=False)
exp = Series(['A', 'B', NA], name='letter')
tm.assert_series_equal(result, exp)
# two named groups
result = s.str.extract('(?P<letter>[AB])(?P<number>[123])',
expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]],
columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# mix named and unnamed groups
result = s.str.extract('([AB])(?P<number>[123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]],
columns=[0, 'number'])
tm.assert_frame_equal(result, exp)
# one normal group, one non-capturing group
result = s.str.extract('([AB])(?:[123])', expand=False)
exp = Series(['A', 'B', NA])
tm.assert_series_equal(result, exp)
# two normal groups, one non-capturing group
result = Series(['A11', 'B22', 'C33']).str.extract(
'([AB])([123])(?:[123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one optional group followed by one normal group
result = Series(['A1', 'B2', '3']).str.extract(
'(?P<letter>[AB])?(?P<number>[123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, '3']],
columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# one normal group followed by one optional group
result = Series(['A1', 'B2', 'C']).str.extract(
'(?P<letter>[ABC])(?P<number>[123])?', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], ['C', NA]],
columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# GH6348
# not passing index to the extractor
def check_index(index):
data = ['A1', 'B2', 'C']
index = index[:len(data)]
s = Series(data, index=index)
result = s.str.extract(r'(\d)', expand=False)
exp = Series(['1', '2', NA], index=index)
tm.assert_series_equal(result, exp)
result = Series(data, index=index).str.extract(
r'(?P<letter>\D)(?P<number>\d)?', expand=False)
e_list = [
['A', '1'],
['B', '2'],
['C', NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'], index=index)
tm.assert_frame_equal(result, exp)
i_funs = [
tm.makeStringIndex, tm.makeUnicodeIndex, tm.makeIntIndex,
tm.makeDateIndex, tm.makePeriodIndex, tm.makeRangeIndex
]
for index in i_funs:
check_index(index())
# single_series_name_is_preserved.
s = Series(['a3', 'b3', 'c2'], name='bob')
r = s.str.extract(r'(?P<sue>[a-z])', expand=False)
e = Series(['a', 'b', 'c'], name='sue')
tm.assert_series_equal(r, e)
assert r.name == e.name
def test_extract_expand_True(self):
# Contains tests like those in test_match and some others.
values = Series(['fooBAD__barBAD', NA, 'foo'])
er = [NA, NA] # empty row
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=True)
exp = DataFrame([['BAD__', 'BAD'], er, er])
tm.assert_frame_equal(result, exp)
# mixed
mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.extract('.*(BAD[_]+).*(BAD)', expand=True)
exp = DataFrame([['BAD_', 'BAD'], er, ['BAD_', 'BAD'], er, er,
er, er, er, er])
tm.assert_frame_equal(rs, exp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo')])
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=True)
exp = DataFrame([[u('BAD__'), u('BAD')], er, er])
tm.assert_frame_equal(result, exp)
# these should work for both Series and Index
for klass in [Series, Index]:
# no groups
s_or_idx = klass(['A1', 'B2', 'C3'])
f = lambda: s_or_idx.str.extract('[ABC][123]', expand=True)
pytest.raises(ValueError, f)
# only non-capturing groups
f = lambda: s_or_idx.str.extract('(?:[AB]).*', expand=True)
pytest.raises(ValueError, f)
# single group renames series/index properly
s_or_idx = klass(['A1', 'A2'])
result_df = s_or_idx.str.extract(r'(?P<uno>A)\d', expand=True)
assert isinstance(result_df, DataFrame)
result_series = result_df['uno']
assert_series_equal(result_series, Series(['A', 'A'], name='uno'))
def test_extract_series(self):
# extract should give the same result whether or not the
# series has a name.
for series_name in None, "series_name":
s = Series(['A1', 'B2', 'C3'], name=series_name)
# one group, no matches
result = s.str.extract('(_)', expand=True)
exp = DataFrame([NA, NA, NA], dtype=object)
tm.assert_frame_equal(result, exp)
# two groups, no matches
result = s.str.extract('(_)(_)', expand=True)
exp = DataFrame([[NA, NA], [NA, NA], [NA, NA]], dtype=object)
tm.assert_frame_equal(result, exp)
# one group, some matches
result = s.str.extract('([AB])[123]', expand=True)
exp = DataFrame(['A', 'B', NA])
tm.assert_frame_equal(result, exp)
# two groups, some matches
result = s.str.extract('([AB])([123])', expand=True)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one named group
result = s.str.extract('(?P<letter>[AB])', expand=True)
exp = DataFrame({"letter": ['A', 'B', NA]})
tm.assert_frame_equal(result, exp)
# two named groups
result = s.str.extract(
'(?P<letter>[AB])(?P<number>[123])',
expand=True)
e_list = [
['A', '1'],
['B', '2'],
[NA, NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# mix named and unnamed groups
result = s.str.extract('([AB])(?P<number>[123])', expand=True)
exp = DataFrame(e_list, columns=[0, 'number'])
tm.assert_frame_equal(result, exp)
# one normal group, one non-capturing group
result = s.str.extract('([AB])(?:[123])', expand=True)
exp = DataFrame(['A', 'B', NA])
tm.assert_frame_equal(result, exp)
def test_extract_optional_groups(self):
# two normal groups, one non-capturing group
result = Series(['A11', 'B22', 'C33']).str.extract(
'([AB])([123])(?:[123])', expand=True)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one optional group followed by one normal group
result = Series(['A1', 'B2', '3']).str.extract(
'(?P<letter>[AB])?(?P<number>[123])', expand=True)
e_list = [
['A', '1'],
['B', '2'],
[NA, '3']
]
exp = DataFrame(e_list, columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# one normal group followed by one optional group
result = Series(['A1', 'B2', 'C']).str.extract(
'(?P<letter>[ABC])(?P<number>[123])?', expand=True)
e_list = [
['A', '1'],
['B', '2'],
['C', NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# GH6348
# not passing index to the extractor
def check_index(index):
data = ['A1', 'B2', 'C']
index = index[:len(data)]
result = Series(data, index=index).str.extract(
r'(\d)', expand=True)
exp = DataFrame(['1', '2', NA], index=index)
tm.assert_frame_equal(result, exp)
result = Series(data, index=index).str.extract(
r'(?P<letter>\D)(?P<number>\d)?', expand=True)
e_list = [
['A', '1'],
['B', '2'],
['C', NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'], index=index)
tm.assert_frame_equal(result, exp)
i_funs = [
tm.makeStringIndex, tm.makeUnicodeIndex, tm.makeIntIndex,
tm.makeDateIndex, tm.makePeriodIndex, tm.makeRangeIndex
]
for index in i_funs:
check_index(index())
def test_extract_single_group_returns_frame(self):
# GH11386 extract should always return DataFrame, even when
# there is only one group. Prior to v0.18.0, extract returned
# Series when there was only one group in the regex.
s = Series(['a3', 'b3', 'c2'], name='series_name')
r = s.str.extract(r'(?P<letter>[a-z])', expand=True)
e = DataFrame({"letter": ['a', 'b', 'c']})
tm.assert_frame_equal(r, e)
def test_extractall(self):
subject_list = [
'<EMAIL>',
'<EMAIL>',
'<EMAIL>',
'<EMAIL> some text <EMAIL>',
'<EMAIL> some text c@d.<EMAIL> and <EMAIL>',
np.nan,
"",
]
expected_tuples = [
("dave", "google", "com"),
("tdhock5", "gmail", "com"),
("maudelaperriere", "gmail", "com"),
("rob", "gmail", "com"), ("steve", "gmail", "com"),
("a", "b", "com"), ("c", "d", "com"), ("e", "f", "com"),
]
named_pattern = r"""
(?P<user>[a-z0-9]+)
@
(?P<domain>[a-z]+)
\.
(?P<tld>[a-z]{2,4})
"""
expected_columns = ["user", "domain", "tld"]
S = Series(subject_list)
# extractall should return a DataFrame with one row for each
# match, indexed by the subject from which the match came.
expected_index = MultiIndex.from_tuples([
(0, 0),
(1, 0),
(2, 0),
(3, 0),
(3, 1),
(4, 0),
(4, 1),
(4, 2),
], names=(None, "match"))
expected_df = DataFrame(
expected_tuples, expected_index, expected_columns)
computed_df = S.str.extractall(named_pattern, re.VERBOSE)
tm.assert_frame_equal(computed_df, expected_df)
# The index of the input Series should be used to construct
# the index of the output DataFrame:
series_index = MultiIndex.from_tuples([
("single", "Dave"),
("single", "Toby"),
("single", "Maude"),
("multiple", "robAndSteve"),
("multiple", "abcdef"),
("none", "missing"),
("none", "empty"),
])
Si = Series(subject_list, series_index)
expected_index = MultiIndex.from_tuples([
("single", "Dave", 0),
("single", "Toby", 0),
("single", "Maude", 0),
("multiple", "robAndSteve", 0),
("multiple", "robAndSteve", 1),
("multiple", "abcdef", 0),
("multiple", "abcdef", 1),
("multiple", "abcdef", 2),
], names=(None, None, "match"))
expected_df = DataFrame(
expected_tuples, expected_index, expected_columns)
computed_df = Si.str.extractall(named_pattern, re.VERBOSE)
tm.assert_frame_equal(computed_df, expected_df)
# MultiIndexed subject with names.
Sn = Series(subject_list, series_index)
Sn.index.names = ("matches", "description")
expected_index.names = ("matches", "description", "match")
expected_df = DataFrame(
expected_tuples, expected_index, expected_columns)
computed_df = Sn.str.extractall(named_pattern, re.VERBOSE)
tm.assert_frame_equal(computed_df, expected_df)
# optional groups.
subject_list = ['', 'A1', '32']
named_pattern = '(?P<letter>[AB])?(?P<number>[123])'
computed_df = Series(subject_list).str.extractall(named_pattern)
expected_index = MultiIndex.from_tuples([
(1, 0),
(2, 0),
(2, 1),
], names=(None, "match"))
expected_df = DataFrame([
('A', '1'),
(NA, '3'),
(NA, '2'),
], expected_index, columns=['letter', 'number'])
tm.assert_frame_equal(computed_df, expected_df)
# only one of two groups has a name.
pattern = '([AB])?(?P<number>[123])'
computed_df = Series(subject_list).str.extractall(pattern)
expected_df = DataFrame([
('A', '1'),
(NA, '3'),
(NA, '2'),
], expected_index, columns=[0, 'number'])
tm.assert_frame_equal(computed_df, expected_df)
def test_extractall_single_group(self):
# extractall(one named group) returns DataFrame with one named
# column.
s = Series(['a3', 'b3', 'd4c2'], name='series_name')
r = s.str.extractall(r'(?P<letter>[a-z])')
i = MultiIndex.from_tuples([
(0, 0),
(1, 0),
(2, 0),
(2, 1),
], names=(None, "match"))
e = DataFrame({"letter": ['a', 'b', 'd', 'c']}, i)
tm.assert_frame_equal(r, e)
# extractall(one un-named group) returns DataFrame with one
# un-named column.
r = s.str.extractall(r'([a-z])')
e = DataFrame(['a', 'b', 'd', 'c'], i)
tm.assert_frame_equal(r, e)
def test_extractall_single_group_with_quantifier(self):
# extractall(one un-named group with quantifier) returns
# DataFrame with one un-named column (GH13382).
s = Series(['ab3', 'abc3', 'd4cd2'], name='series_name')
r = s.str.extractall(r'([a-z]+)')
i = MultiIndex.from_tuples([
(0, 0),
(1, 0),
(2, 0),
(2, 1),
], names=(None, "match"))
e = DataFrame(['ab', 'abc', 'd', 'cd'], i)
tm.assert_frame_equal(r, e)
def test_extractall_no_matches(self):
s = Series(['a3', 'b3', 'd4c2'], name='series_name')
# one un-named group.
r = s.str.extractall('(z)')
e = DataFrame(columns=[0])
tm.assert_frame_equal(r, e)
# two un-named groups.
r = s.str.extractall('(z)(z)')
e = DataFrame(columns=[0, 1])
tm.assert_frame_equal(r, e)
# one named group.
r = s.str.extractall('(?P<first>z)')
e = DataFrame(columns=["first"])
tm.assert_frame_equal(r, e)
# two named groups.
r = s.str.extractall('(?P<first>z)(?P<second>z)')
e = DataFrame(columns=["first", "second"])
tm.assert_frame_equal(r, e)
# one named, one un-named.
r = s.str.extractall('(z)(?P<second>z)')
e = DataFrame(columns=[0,
"second"])
tm.assert_frame_equal(r, e)
def test_extractall_stringindex(self):
s = Series(["a1a2", "b1", "c1"], name='xxx')
res = s.str.extractall(r"[ab](?P<digit>\d)")
exp_idx = MultiIndex.from_tuples([(0, 0), (0, 1), (1, 0)],
names=[None, 'match'])
exp = DataFrame({'digit': ["1", "2", "1"]}, index=exp_idx)
tm.assert_frame_equal(res, exp)
# index should return the same result as the default index without name
# thus index.name doesn't affect to the result
for idx in [Index(["a1a2", "b1", "c1"]),
Index(["a1a2", "b1", "c1"], name='xxx')]:
res = idx.str.extractall(r"[ab](?P<digit>\d)")
tm.assert_frame_equal(res, exp)
s = Series(["a1a2", "b1", "c1"], name='s_name',
index=Index(["XX", "yy", "zz"], name='idx_name'))
res = s.str.extractall(r"[ab](?P<digit>\d)")
exp_idx = MultiIndex.from_tuples([("XX", 0), ("XX", 1), ("yy", 0)],
names=["idx_name", 'match'])
exp = DataFrame({'digit': ["1", "2", "1"]}, index=exp_idx)
tm.assert_frame_equal(res, exp)
def test_extractall_errors(self):
# Does not make sense to use extractall with a regex that has
# no capture groups. (it returns DataFrame with one column for
# each capture group)
s = Series(['a3', 'b3', 'd4c2'], name='series_name')
with tm.assert_raises_regex(ValueError, "no capture groups"):
s.str.extractall(r'[a-z]')
def test_extract_index_one_two_groups(self):
s = Series(['a3', 'b3', 'd4c2'], index=["A3", "B3", "D4"],
name='series_name')
r = s.index.str.extract(r'([A-Z])', expand=True)
e = DataFrame(['A', "B", "D"])
tm.assert_frame_equal(r, e)
# Prior to v0.18.0, index.str.extract(regex with one group)
# returned Index. With more than one group, extract raised an
# error (GH9980). Now extract always returns DataFrame.
r = s.index.str.extract(
r'(?P<letter>[A-Z])(?P<digit>[0-9])', expand=True)
e_list = [
("A", "3"),
("B", "3"),
("D", "4"),
]
e = DataFrame(e_list, columns=["letter", "digit"])
tm.assert_frame_equal(r, e)
def test_extractall_same_as_extract(self):
s = Series(['a3', 'b3', 'c2'], name='series_name')
pattern_two_noname = r'([a-z])([0-9])'
extract_two_noname = s.str.extract(pattern_two_noname, expand=True)
has_multi_index = s.str.extractall(pattern_two_noname)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_noname, no_multi_index)
pattern_two_named = r'(?P<letter>[a-z])(?P<digit>[0-9])'
extract_two_named = s.str.extract(pattern_two_named, expand=True)
has_multi_index = s.str.extractall(pattern_two_named)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_named, no_multi_index)
pattern_one_named = r'(?P<group_name>[a-z])'
extract_one_named = s.str.extract(pattern_one_named, expand=True)
has_multi_index = s.str.extractall(pattern_one_named)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_named, no_multi_index)
pattern_one_noname = r'([a-z])'
extract_one_noname = s.str.extract(pattern_one_noname, expand=True)
has_multi_index = s.str.extractall(pattern_one_noname)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_noname, no_multi_index)
def test_extractall_same_as_extract_subject_index(self):
# same as above tests, but s has an MultiIndex.
i = MultiIndex.from_tuples([
("A", "first"),
("B", "second"),
("C", "third"),
], names=("capital", "ordinal"))
s = Series(['a3', 'b3', 'c2'], i, name='series_name')
pattern_two_noname = r'([a-z])([0-9])'
extract_two_noname = s.str.extract(pattern_two_noname, expand=True)
has_match_index = s.str.extractall(pattern_two_noname)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_noname, no_match_index)
pattern_two_named = r'(?P<letter>[a-z])(?P<digit>[0-9])'
extract_two_named = s.str.extract(pattern_two_named, expand=True)
has_match_index = s.str.extractall(pattern_two_named)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_named, no_match_index)
pattern_one_named = r'(?P<group_name>[a-z])'
extract_one_named = s.str.extract(pattern_one_named, expand=True)
has_match_index = s.str.extractall(pattern_one_named)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_named, no_match_index)
pattern_one_noname = r'([a-z])'
extract_one_noname = s.str.extract(pattern_one_noname, expand=True)
has_match_index = s.str.extractall(pattern_one_noname)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_noname, no_match_index)
def test_empty_str_methods(self):
empty_str = empty = Series(dtype=object)
empty_int = Series(dtype=int)
empty_bool = Series(dtype=bool)
empty_bytes = Series(dtype=object)
# GH7241
# (extract) on empty series
tm.assert_series_equal(empty_str, empty.str.cat(empty))
assert '' == empty.str.cat()
tm.assert_series_equal(empty_str, empty.str.title())
tm.assert_series_equal(empty_int, empty.str.count('a'))
tm.assert_series_equal(empty_bool, empty.str.contains('a'))
tm.assert_series_equal(empty_bool, empty.str.startswith('a'))
tm.assert_series_equal(empty_bool, empty.str.endswith('a'))
tm.assert_series_equal(empty_str, empty.str.lower())
tm.assert_series_equal(empty_str, empty.str.upper())
tm.assert_series_equal(empty_str, empty.str.replace('a', 'b'))
tm.assert_series_equal(empty_str, empty.str.repeat(3))
tm.assert_series_equal(empty_bool, empty.str.match('^a'))
tm.assert_frame_equal(
DataFrame(columns=[0], dtype=str),
empty.str.extract('()', expand=True))
tm.assert_frame_equal(
DataFrame(columns=[0, 1], dtype=str),
empty.str.extract('()()', expand=True))
tm.assert_series_equal(
empty_str,
empty.str.extract('()', expand=False))
tm.assert_frame_equal(
DataFrame(columns=[0, 1], dtype=str),
empty.str.extract('()()', expand=False))
tm.assert_frame_equal(DataFrame(dtype=str), empty.str.get_dummies())
tm.assert_series_equal(empty_str, empty_str.str.join(''))
tm.assert_series_equal(empty_int, empty.str.len())
tm.assert_series_equal(empty_str, empty_str.str.findall('a'))
tm.assert_series_equal(empty_int, empty.str.find('a'))
tm.assert_series_equal(empty_int, empty.str.rfind('a'))
tm.assert_series_equal(empty_str, empty.str.pad(42))
tm.assert_series_equal(empty_str, empty.str.center(42))
tm.assert_series_equal(empty_str, empty.str.split('a'))
tm.assert_series_equal(empty_str, empty.str.rsplit('a'))
tm.assert_series_equal(empty_str,
empty.str.partition('a', expand=False))
tm.assert_series_equal(empty_str,
empty.str.rpartition('a', expand=False))
tm.assert_series_equal(empty_str, empty.str.slice(stop=1))
tm.assert_series_equal(empty_str, empty.str.slice(step=1))
tm.assert_series_equal(empty_str, empty.str.strip())
tm.assert_series_equal(empty_str, empty.str.lstrip())
tm.assert_series_equal(empty_str, empty.str.rstrip())
tm.assert_series_equal(empty_str, empty.str.wrap(42))
tm.assert_series_equal(empty_str, empty.str.get(0))
tm.assert_series_equal(empty_str, empty_bytes.str.decode('ascii'))
tm.assert_series_equal(empty_bytes, empty.str.encode('ascii'))
tm.assert_series_equal(empty_str, empty.str.isalnum())
tm.assert_series_equal(empty_str, empty.str.isalpha())
tm.assert_series_equal(empty_str, empty.str.isdigit())
tm.assert_series_equal(empty_str, empty.str.isspace())
tm.assert_series_equal(empty_str, empty.str.islower())
tm.assert_series_equal(empty_str, empty.str.isupper())
tm.assert_series_equal(empty_str, empty.str.istitle())
tm.assert_series_equal(empty_str, empty.str.isnumeric())
tm.assert_series_equal(empty_str, empty.str.isdecimal())
tm.assert_series_equal(empty_str, empty.str.capitalize())
tm.assert_series_equal(empty_str, empty.str.swapcase())
tm.assert_series_equal(empty_str, empty.str.normalize('NFC'))
if compat.PY3:
table = str.maketrans('a', 'b')
else:
import string
table = string.maketrans('a', 'b')
tm.assert_series_equal(empty_str, empty.str.translate(table))
def test_empty_str_methods_to_frame(self):
empty = Series(dtype=str)
empty_df = DataFrame([])
tm.assert_frame_equal(empty_df, empty.str.partition('a'))
tm.assert_frame_equal(empty_df, empty.str.rpartition('a'))
def test_ismethods(self):
values = ['A', 'b', 'Xy', '4', '3A', '', 'TT', '55', '-', ' ']
str_s = Series(values)
alnum_e = [True, True, True, True, True, False, True, True, False,
False]
alpha_e = [True, True, True, False, False, False, True, False, False,
False]
digit_e = [False, False, False, True, False, False, False, True, False,
False]
# TODO: unused
num_e = [False, False, False, True, False, False, # noqa
False, True, False, False]
space_e = [False, False, False, False, False, False, False, False,
False, True]
lower_e = [False, True, False, False, False, False, False, False,
False, False]
upper_e = [True, False, False, False, True, False, True, False, False,
False]
title_e = [True, False, True, False, True, False, False, False, False,
False]
tm.assert_series_equal(str_s.str.isalnum(), Series(alnum_e))
tm.assert_series_equal(str_s.str.isalpha(), Series(alpha_e))
tm.assert_series_equal(str_s.str.isdigit(), Series(digit_e))
tm.assert_series_equal(str_s.str.isspace(), Series(space_e))
tm.assert_series_equal(str_s.str.islower(), Series(lower_e))
tm.assert_series_equal(str_s.str.isupper(), Series(upper_e))
tm.assert_series_equal(str_s.str.istitle(), Series(title_e))
assert str_s.str.isalnum().tolist() == [v.isalnum() for v in values]
assert str_s.str.isalpha().tolist() == [v.isalpha() for v in values]
assert str_s.str.isdigit().tolist() == [v.isdigit() for v in values]
assert str_s.str.isspace().tolist() == [v.isspace() for v in values]
assert str_s.str.islower().tolist() == [v.islower() for v in values]
assert str_s.str.isupper().tolist() == [v.isupper() for v in values]
assert str_s.str.istitle().tolist() == [v.istitle() for v in values]
def test_isnumeric(self):
# 0x00bc: ¼ VULGAR FRACTION ONE QUARTER
# 0x2605: ★ not number
# 0x1378: ፸ ETHIOPIC NUMBER SEVENTY
        # 0xFF13: ３ Em 3
        values = ['A', '3', u'¼', u'★', u'፸', u'３', 'four']
s = Series(values)
numeric_e = [False, True, True, False, True, True, False]
decimal_e = [False, True, False, False, False, True, False]
tm.assert_series_equal(s.str.isnumeric(), Series(numeric_e))
tm.assert_series_equal(s.str.isdecimal(), Series(decimal_e))
        unicodes = [u'A', u'3', u'¼', u'★', u'፸', u'３', u'four']
assert s.str.isnumeric().tolist() == [v.isnumeric() for v in unicodes]
assert s.str.isdecimal().tolist() == [v.isdecimal() for v in unicodes]
        values = ['A', np.nan, u'¼', u'★', np.nan, u'３', 'four']
s = Series(values)
numeric_e = [False, np.nan, True, False, np.nan, True, False]
decimal_e = [False, np.nan, False, False, np.nan, True, False]
tm.assert_series_equal(s.str.isnumeric(), Series(numeric_e))
tm.assert_series_equal(s.str.isdecimal(), Series(decimal_e))
def test_get_dummies(self):
s = Series(['a|b', 'a|c', np.nan])
result = s.str.get_dummies('|')
expected = DataFrame([[1, 1, 0], [1, 0, 1], [0, 0, 0]],
columns=list('abc'))
tm.assert_frame_equal(result, expected)
s = Series(['a;b', 'a', 7])
result = s.str.get_dummies(';')
expected = DataFrame([[0, 1, 1], [0, 1, 0], [1, 0, 0]],
columns=list('7ab'))
tm.assert_frame_equal(result, expected)
# GH9980, GH8028
idx = Index(['a|b', 'a|c', 'b|c'])
result = idx.str.get_dummies('|')
expected = MultiIndex.from_tuples([(1, 1, 0), (1, 0, 1),
(0, 1, 1)], names=('a', 'b', 'c'))
tm.assert_index_equal(result, expected)
def test_get_dummies_with_name_dummy(self):
# GH 12180
# Dummies named 'name' should work as expected
s = Series(['a', 'b,name', 'b'])
result = s.str.get_dummies(',')
expected = DataFrame([[1, 0, 0], [0, 1, 1], [0, 1, 0]],
columns=['a', 'b', 'name'])
tm.assert_frame_equal(result, expected)
idx = Index(['a|b', 'name|c', 'b|name'])
result = idx.str.get_dummies('|')
expected = MultiIndex.from_tuples([(1, 1, 0, 0), (0, 0, 1, 1),
(0, 1, 0, 1)],
names=('a', 'b', 'c', 'name'))
tm.assert_index_equal(result, expected)
def test_join(self):
values = Series(['a_b_c', 'c_d_e', np.nan, 'f_g_h'])
result = values.str.split('_').str.join('_')
tm.assert_series_equal(values, result)
# mixed
mixed = Series(['a_b', NA, 'asdf_cas_asdf', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.split('_').str.join('_')
xp = Series(['a_b', NA, 'asdf_cas_asdf', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('a_b_c'), u('c_d_e'), np.nan, u('f_g_h')])
result = values.str.split('_').str.join('_')
tm.assert_series_equal(values, result)
def test_len(self):
values = Series(['foo', 'fooo', 'fooooo', np.nan, 'fooooooo'])
result = values.str.len()
exp = values.map(lambda x: len(x) if notna(x) else NA)
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a_b', NA, 'asdf_cas_asdf', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.len()
xp = Series([3, NA, 13, NA, NA, 3, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('foo'), u('fooo'), u('fooooo'), np.nan, u(
'fooooooo')])
result = values.str.len()
exp = values.map(lambda x: len(x) if notna(x) else NA)
tm.assert_series_equal(result, exp)
def test_findall(self):
values = Series(['fooBAD__barBAD', NA, 'foo', 'BAD'])
result = values.str.findall('BAD[_]*')
exp = Series([['BAD__', 'BAD'], NA, [], ['BAD']])
tm.assert_almost_equal(result, exp)
# mixed
mixed = Series(['fooBAD__barBAD', NA, 'foo', True, datetime.today(),
'BAD', None, 1, 2.])
rs = Series(mixed).str.findall('BAD[_]*')
xp = Series([['BAD__', 'BAD'], NA, [], NA, NA, ['BAD'], NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo'), u('BAD')])
result = values.str.findall('BAD[_]*')
exp = Series([[u('BAD__'), u('BAD')], NA, [], [u('BAD')]])
        tm.assert_almost_equal(result, exp)
        # API: pandas.util.testing.assert_almost_equal
# -------------------------------------------------------------------------------------------------
import yfinance as yahoo
import pandas as pd, numpy as np
import ssl
import urllib.request
try:
_create_unverified_https_context = ssl._create_unverified_context
except AttributeError:
# Legacy Python that doesn't verify HTTPS certificates by default
pass
else:
# Handle target environment that doesn't support HTTPS verification
ssl._create_default_https_context = _create_unverified_https_context
#from pytickersymbols import PyTickerSymbols #https://pypi.org/project/pytickersymbols/
# Functions to get info about each market and its current stock tickers.
# Markets to operate in: USA (Nasdaq & S&P 500), England, China, Japan, Canada, Brazil, Australia.
# Each handler returns a list of metrics that the main script uses
# to build the respective portfolio.
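# Illustrative sketch (not part of the original project) showing how a main script might
# unpack the metrics list that each market handler below returns; the names mirror the
# order used when building `result` inside the handlers.
def _unpack_market_metrics(result):
    (df, riskfree, pct, riskpct, mean, mean_rf, std, numerator, downside_risk,
     noa, weigths, observations, mean_returns, cov, alpha, rf,
     num_portfolios, Upbound) = result
    return {'prices': df, 'mean_returns': mean_returns, 'cov': cov, 'risk_free': rf}
# Example: metrics = _unpack_market_metrics(GSPC())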
def GSPC():
USA = pd.read_html("https://topforeignstocks.com/indices/components-of-the-sp-500-index/")[0]
USA = list(USA.Ticker.values)
freeRisk = '^GSPC'
df = yahoo.download(USA,period="1y")["Adj Close"].fillna(method="ffill")
pct = df.pct_change()#.dropna()
mean = pd.DataFrame(pct.mean(),columns=['Mean'],index=pct.columns)
riskpct = mean.mean()
mean_rf = mean - riskpct.mean()
std = pd.DataFrame(pct.std(),columns=['Std'],index=pct.columns)
sharpe_ratio = pd.DataFrame(mean_rf['Mean']/(std['Std']), columns=['SharpeRatio'],index=pct.columns)
orderedsharpe = sharpe_ratio.sort_values('SharpeRatio', axis=0, ascending=False)
lista = list(orderedsharpe.head(50).index.values)
df = yahoo.download(lista,period="1y",interval="60m")["Adj Close"].fillna(method="ffill")
riskfree = yahoo.download(freeRisk, period="1y",interval="60m")['Adj Close'].fillna(method='ffill')
pct = df.pct_change().dropna() #(how='all')
riskpct = riskfree.pct_change().dropna()
mean = pd.DataFrame(pct.mean(),columns=['Mean'],index=pct.columns)
mean_rf = mean - riskpct.mean()
std = pd.DataFrame(pct.std(),columns=['Std'],index=pct.columns)
numerator = pct.sub(riskpct,axis=0)
downside_risk = ((numerator[numerator<0].fillna(0))**2).mean()
noa = len(df.columns)
weigths = np.random.random(noa)
weigths /= np.sum(weigths)
observations = len(df.index)
mean_returns = df.pct_change().mean()
cov = df.pct_change().cov()
alpha = 0.1
rf = riskpct.mean()
num_portfolios = 1000
Upbound = 0.06
result = [df,riskfree,pct,riskpct,mean,mean_rf,std,numerator,downside_risk,noa,weigths\
,observations,mean_returns,cov,alpha,rf,num_portfolios,Upbound]
return result
def Cedears():
comafi = pd.read_html('https://www.comafi.com.ar/2254-CEDEAR-SHARES.note.aspx')[0]
    # sort alphabetically by BYMA symbol
    comafi = comafi.sort_values('Símbolo BYMA', axis=0, ascending=True)
    comafi.index = range(len(comafi))  # reset the index after sorting
    cells = list(comafi['Símbolo BYMA'].values)
    # e.g. cells.index('AAPL') gives the position of a given ticker within the list
cedears = [c + '.BA' for c in cells]
volume = yahoo.download(cedears,period="80d")['Volume'].fillna(method='ffill')
votal = pd.DataFrame(index=volume.index)
votal['totav'] = volume.T.sum()
percentage = volume.div(votal['totav'], axis=0)
ordered = pd.DataFrame(percentage.sum().T,columns=['percentage'],index=percentage.columns)
    ordered = ordered / ordered.sum()  # normalize so the volume shares sum to 100%
orderedalph = ordered.sort_values('percentage',axis=0,ascending=False)
liquid = orderedalph.cumsum()
listado = list(liquid.head(50).index.values)
listado = [i.replace('.BA','') for i in listado]
lista = []
for i in range(len(listado)):
lista.append(comafi['Ticker en Mercado de Origen'][cells.index(f'{listado[i]}')])
freeRisk = '^GSPC'
df = yahoo.download(lista,period="1y")["Adj Close"].fillna(method="ffill")
pct = df.pct_change()#.dropna()
mean = pd.DataFrame(pct.mean(),columns=['Mean'],index=pct.columns)
riskpct = mean.mean()
mean_rf = mean - riskpct.mean()
std = pd.DataFrame(pct.std(),columns=['Std'],index=pct.columns)
sharpe_ratio = pd.DataFrame(mean_rf['Mean']/(std['Std']), columns=['SharpeRatio'],index=pct.columns)
orderedsharpe = sharpe_ratio.sort_values('SharpeRatio', axis=0, ascending=False)
lista = list(orderedsharpe.head(50).index.values)
df = yahoo.download(lista,period="1y",interval="60m")["Adj Close"].fillna(method="ffill")
riskfree = yahoo.download(freeRisk, period="1y",interval="60m")['Adj Close'].fillna(method='ffill')
pct = df.pct_change().dropna() #(how='all')
riskpct = riskfree.pct_change().dropna()
mean = pd.DataFrame(pct.mean(),columns=['Mean'],index=pct.columns)
mean_rf = mean - riskpct.mean()
std = pd.DataFrame(pct.std(),columns=['Std'],index=pct.columns)
numerator = pct.sub(riskpct,axis=0)
downside_risk = ((numerator[numerator<0].fillna(0))**2).mean()
noa = len(df.columns)
weigths = np.random.random(noa)
weigths /= np.sum(weigths)
observations = len(df.index)
mean_returns = df.pct_change().mean()
cov = df.pct_change().cov()
alpha = 0.1
rf = riskpct.mean()
num_portfolios = 1000
Upbound = 0.06
result = [df,riskfree,pct,riskpct,mean,mean_rf,std,numerator,downside_risk,noa,weigths\
,observations,mean_returns,cov,alpha,rf,num_portfolios,Upbound]
return result
def NIKKEI():
nikkei = pd.read_html("https://topforeignstocks.com/indices/the-components-of-the-nikkei-225-index/")[0]
nikkei['tickets'] = [t + '.T' for t in nikkei.Code.astype(str)]
nikkei = list(nikkei.tickets.values)
freeRisk = '^N225'
df = yahoo.download(nikkei,period="1y")["Adj Close"].fillna(method="ffill")
pct = df.pct_change()#.dropna()
mean = pd.DataFrame(pct.mean(),columns=['Mean'],index=pct.columns)
riskpct = mean.mean()
mean_rf = mean - riskpct.mean()
std = pd.DataFrame(pct.std(),columns=['Std'],index=pct.columns)
sharpe_ratio = pd.DataFrame(mean_rf['Mean']/(std['Std']), columns=['SharpeRatio'],index=pct.columns)
orderedsharpe = sharpe_ratio.sort_values('SharpeRatio', axis=0, ascending=False)
lista = list(orderedsharpe.head(50).index.values)
df = yahoo.download(lista,period="1y",interval="60m")["Adj Close"].fillna(method="ffill")
riskfree = yahoo.download(freeRisk, period="1y",interval="60m")['Adj Close'].fillna(method='ffill')
pct = df.pct_change().dropna() #(how='all')
riskpct = riskfree.pct_change().dropna()
mean = pd.DataFrame(pct.mean(),columns=['Mean'],index=pct.columns)
mean_rf = mean - riskpct.mean()
std = pd.DataFrame(pct.std(),columns=['Std'],index=pct.columns)
numerator = pct.sub(riskpct,axis=0)
downside_risk = ((numerator[numerator<0].fillna(0))**2).mean()
noa = len(df.columns)
weigths = np.random.random(noa)
weigths /= np.sum(weigths)
observations = len(df.index)
mean_returns = df.pct_change().mean()
cov = df.pct_change().cov()
alpha = 0.1
rf = riskpct.mean()
num_portfolios = 1000
Upbound = 0.06
result = [df,riskfree,pct,riskpct,mean,mean_rf,std,numerator,downside_risk,noa,weigths\
,observations,mean_returns,cov,alpha,rf,num_portfolios,Upbound]
return result
def Shanghai():
china = pd.read_html('https://tradingeconomics.com/china/stock-market')[1]
shanghai = [i + '.SS' for i in list(china['Unnamed: 0'].astype(str))]
freeRisk = "000001.SS"
df = yahoo.download(shanghai,period="1y")["Adj Close"].fillna(method="ffill")
pct = df.pct_change()#.dropna()
mean = pd.DataFrame(pct.mean(),columns=['Mean'],index=pct.columns)
riskpct = mean.mean()
mean_rf = mean - riskpct.mean()
std = pd.DataFrame(pct.std(),columns=['Std'],index=pct.columns)
sharpe_ratio = pd.DataFrame(mean_rf['Mean']/(std['Std']), columns=['SharpeRatio'],index=pct.columns)
orderedsharpe = sharpe_ratio.sort_values('SharpeRatio', axis=0, ascending=False)
lista = list(orderedsharpe.head(50).index.values)
df = yahoo.download(lista,period="1y",interval="60m")["Adj Close"].fillna(method="ffill")
riskfree = yahoo.download(freeRisk, period="1y",interval="60m")['Adj Close'].fillna(method='ffill')
pct = df.pct_change().dropna() #(how='all')
riskpct = riskfree.pct_change().dropna()
mean = pd.DataFrame(pct.mean(),columns=['Mean'],index=pct.columns)
mean_rf = mean - riskpct.mean()
std = pd.DataFrame(pct.std(),columns=['Std'],index=pct.columns)
numerator = pct.sub(riskpct,axis=0)
downside_risk = ((numerator[numerator<0].fillna(0))**2).mean()
noa = len(df.columns)
weigths = np.random.random(noa)
weigths /= np.sum(weigths)
observations = len(df.index)
mean_returns = df.pct_change().mean()
cov = df.pct_change().cov()
alpha = 0.1
rf = riskpct.mean()
num_portfolios = 1000
Upbound = 0.06
result = [df,riskfree,pct,riskpct,mean,mean_rf,std,numerator,downside_risk,noa,weigths\
,observations,mean_returns,cov,alpha,rf,num_portfolios,Upbound]
return result
def BOVESPA():
bovespa = pd.read_html("https://topforeignstocks.com/indices/components-of-the-bovespa-index/")[0]
bovespa = list(bovespa.Ticker.values)
freeRisk = '^BVSP'
df = yahoo.download(bovespa,period="1y")["Adj Close"].fillna(method="ffill")
pct = df.pct_change()#.dropna()
mean = pd.DataFrame(pct.mean(),columns=['Mean'],index=pct.columns)
riskpct = mean.mean()
mean_rf = mean - riskpct.mean()
std = pd.DataFrame(pct.std(),columns=['Std'],index=pct.columns)
sharpe_ratio = pd.DataFrame(mean_rf['Mean']/(std['Std']), columns=['SharpeRatio'],index=pct.columns)
orderedsharpe = sharpe_ratio.sort_values('SharpeRatio', axis=0, ascending=False)
lista = list(orderedsharpe.head(50).index.values)
df = yahoo.download(lista,period="1y",interval="60m")["Adj Close"].fillna(method="ffill")
riskfree = yahoo.download(freeRisk, period="1y",interval="60m")['Adj Close'].fillna(method='ffill')
pct = df.pct_change().dropna() #(how='all')
riskpct = riskfree.pct_change().dropna()
mean = pd.DataFrame(pct.mean(),columns=['Mean'],index=pct.columns)
mean_rf = mean - riskpct.mean()
std = pd.DataFrame(pct.std(),columns=['Std'],index=pct.columns)
numerator = pct.sub(riskpct,axis=0)
downside_risk = ((numerator[numerator<0].fillna(0))**2).mean()
noa = len(df.columns)
weigths = np.random.random(noa)
weigths /= np.sum(weigths)
observations = len(df.index)
mean_returns = df.pct_change().mean()
cov = df.pct_change().cov()
alpha = 0.1
rf = riskpct.mean()
num_portfolios = 1000
Upbound = 0.06
result = [df,riskfree,pct,riskpct,mean,mean_rf,std,numerator,downside_risk,noa,weigths\
,observations,mean_returns,cov,alpha,rf,num_portfolios,Upbound]
return result
def CANADA():
canada = pd.read_html("https://topforeignstocks.com/indices/the-components-of-the-sptsx-composite-index/")[0]
canada = list(canada.Ticker.values)
freeRisk = '^GSPTSE'
df = yahoo.download(canada,period="1y")["Adj Close"].fillna(method="ffill")
pct = df.pct_change()#.dropna()
mean = pd.DataFrame(pct.mean(),columns=['Mean'],index=pct.columns)
riskpct = mean.mean()
mean_rf = mean - riskpct.mean()
std = pd.DataFrame(pct.std(),columns=['Std'],index=pct.columns)
sharpe_ratio = pd.DataFrame(mean_rf['Mean']/(std['Std']), columns=['SharpeRatio'],index=pct.columns)
orderedsharpe = sharpe_ratio.sort_values('SharpeRatio', axis=0, ascending=False)
lista = list(orderedsharpe.head(50).index.values)
df = yahoo.download(lista,period="1y",interval="60m")["Adj Close"].fillna(method="ffill")
riskfree = yahoo.download(freeRisk, period="1y",interval="60m")['Adj Close'].fillna(method='ffill')
pct = df.pct_change().dropna() #(how='all')
riskpct = riskfree.pct_change().dropna()
mean = pd.DataFrame(pct.mean(),columns=['Mean'],index=pct.columns)
mean_rf = mean - riskpct.mean()
std = pd.DataFrame(pct.std(),columns=['Std'],index=pct.columns)
numerator = pct.sub(riskpct,axis=0)
downside_risk = ((numerator[numerator<0].fillna(0))**2).mean()
noa = len(df.columns)
weigths = np.random.random(noa)
weigths /= np.sum(weigths)
observations = len(df.index)
mean_returns = df.pct_change().mean()
cov = df.pct_change().cov()
alpha = 0.1
rf = riskpct.mean()
num_portfolios = 1000
Upbound = 0.06
result = [df,riskfree,pct,riskpct,mean,mean_rf,std,numerator,downside_risk,noa,weigths\
,observations,mean_returns,cov,alpha,rf,num_portfolios,Upbound]
return result
def FTSE():
england = pd.read_html("https://topforeignstocks.com/indices/components-of-the-ftse-100-index/")[0]
england = list(england.Ticker.values)
freeRisk = '^FTSE'
df = yahoo.download(england,period="1y")["Adj Close"].fillna(method="ffill")
pct = df.pct_change()#.dropna()
mean = pd.DataFrame(pct.mean(),columns=['Mean'],index=pct.columns)
riskpct = mean.mean()
mean_rf = mean - riskpct.mean()
std = pd.DataFrame(pct.std(),columns=['Std'],index=pct.columns)
sharpe_ratio =
|
pd.DataFrame(mean_rf['Mean']/(std['Std']), columns=['SharpeRatio'],index=pct.columns)
|
pandas.DataFrame
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Helper module for performing MTSL GWAS inside of MTMLSEM model.
Note that this has little to do with MTMLSEM, it merely fits the classical LMM
model of the kind:
Y = X B + E + U,
where Y and X are deterministic data matrices, B is a matrix of regression
coefficients, E and U are matrices random matrices with U being the random
effect matrix, that takes genetic kinship between individuals into an account.
"""
from itertools import combinations, product
from tqdm import tqdm
import pandas as pd
import numpy as np
from utils import translate_names, unique_mapping
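# Not part of the original module: a tiny numpy sketch of the fixed-effect part of the
# model above (Y = X B + E), with the random effect U ignored, just to make the notation
# concrete; all data here are synthetic.
def _lmm_fixed_effect_sketch():
    rng = np.random.default_rng(1)
    X = rng.normal(size=(100, 3))                   # deterministic design matrix
    B = np.array([[1.0], [2.0], [0.5]])             # regression coefficients
    Y = X @ B + 0.1 * rng.normal(size=(100, 1))     # E plays the role of residual noise
    B_hat, *_ = np.linalg.lstsq(X, Y, rcond=None)   # least-squares estimate of B
    return B_hat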
def gwas_lmm(Model, y: list[str], phenos, genes, desc='', init_args=None,
fit_args=None, dropna=True, verbose=True):
"""
Multi-trait single-locus GWAS via linear (possibly mixed) model.
Parameters
----------
Model : class
semopy class.
y : list[str]
List of phenotype names.
phenos : pd.DataFrame
Phenotypes + possibly other variables.
genes : pd.DataFrame
Genotypes/SNPs.
desc : str, optional
Extra model description. The default is ''.
init_args : dict, optional
Extra arguments for Model constructor. The default is None.
fit_args : dict, optional
Extra arguments for Model fit method (e.g., k). The default is None.
dropna : bool, optional
If True, then NaN rows are dropped for each gene test. The default is
True.
Returns
-------
pd.DataFrame
GWAS results.
"""
if init_args is None:
init_args = dict()
if fit_args is None:
fit_args = dict()
res = list()
desc= desc + '\n{} ~ snp'.format(', '.join(y))
for a, b in combinations(y, 2):
desc += f'\n{a} ~~ {b}'
m = Model(desc, **init_args)
phenos = phenos.copy()
it = genes.iteritems()
if verbose:
it = tqdm(list(it))
for name, gene in it:
chr, pos = translate_names(name)
phenos['snp'] = gene.values
        if dropna:
            data = phenos.dropna()
        else:
            data = phenos
try:
r = m.fit(data, clean_slate=True, **fit_args)
if type(r) is not tuple:
succ = r.success
fun = r.fun
else:
succ = r[0].success & r[1].success
fun = r[1].fun
except np.linalg.LinAlgError:
succ = False
if not succ:
t = [name, chr, pos, float('nan')] + [1.0] * len(y)
t += [float('nan')] * len(y)
res.append(t)
else:
ins = m.inspect()
ins = ins[(ins['rval'] == 'snp') & (ins['op'] == '~')]
pvals = list()
ests = list()
for _, row in ins.iterrows():
pvals.append(row['p-value'])
ests.append(row['Estimate'])
res.append([name, chr, pos, fun] + pvals + ests)
cols = ['SNP', 'chr', 'pos'] + [f'{p}_p-value' for p in y] + [f'{p}_b'
for p in y]
return pd.DataFrame(res, columns=cols)
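# Hypothetical usage of gwas_lmm (the model class, data frames and kinship matrix below
# are assumptions for illustration, not objects defined in this file):
#
#     from semopy import ModelEffects
#     results = gwas_lmm(ModelEffects, y=['trait1', 'trait2'],
#                        phenos=pheno_df, genes=snp_df,
#                        fit_args={'k': kinship_matrix})
#     top_hits = results.sort_values('trait1_p-value').head()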
def gwas_w(lt):
gs, lt = lt
mod, y, phenos, genes, desc, init_args, fit_args = lt
    return gwas_lmm(mod, y, phenos, genes[gs], desc, init_args, fit_args,
                    verbose=False)
def gwas(Model, y: list[str], phenos, genes, desc='', init_args=None,
fit_args=None, num_processes=-1, chunk_size=1000, verbose=True):
"""
Multi-trait single-locus GWAS with multiprocessing support.
Parameters
----------
Model : class
semopy class.
y : list[str]
List of phenotype names.
phenos : pd.DataFrame
Phenotypes + possibly other variables.
genes : pd.DataFrame
Genotypes/SNPs.
desc : str, optional
Extra model description. The default is ''.
init_args : dict, optional
Extra arguments for Model constructor. The default is None.
fit_args : dict, optional
Extra arguments for Model fit method (e.g., k). The default is None.
    num_processes : int, optional
        Number of processes to run. If -1, then it is set to the number of
        available CPU cores minus 1. "None" is the same as 1. The default is
        -1.
chunk_size : int, optional
Number of SNPs to be sent onto a single process. The default is 1000.
verbose : bool, optional
If False, then no progress bar will be printed. The default is True.
Returns
-------
pd.DataFrame
GWAS results.
"""
from multiprocessing import Pool, cpu_count
from tqdm.contrib.concurrent import process_map
if num_processes == -1:
num_processes = cpu_count() - 1
if num_processes in (None, 0, 1):
return gwas_lmm(Model, y, phenos, genes, desc, init_args, fit_args,
verbose=verbose)
# We rule out duplicate SNPs to ease the computational burden:
unique = unique_mapping(genes)
genes = genes[list(unique.keys())]
result = None
lt = list(genes.columns)
lt2 = [lt[i:i+chunk_size] for i in range(0, len(lt), chunk_size)]
lt = (Model, y, phenos, genes, desc, init_args, fit_args)
    prod = product(lt2, [lt])
if not verbose:
with Pool(num_processes) as p:
for t in p.map(gwas_w, prod):
if result is None:
result = t
else:
result = pd.concat([result, t])
else:
for t in process_map(gwas_w, list(prod)):
if result is None:
result = t
else:
result =
|
pd.concat([result, t])
|
pandas.concat
|
from data import load_data_gse
from contextlib import closing
import os
import shutil
import numpy as np
import pandas as pd
import urllib.request as request
def processing_gse96058(clinical):
assert isinstance(clinical, pd.DataFrame), 'Invalid clinical type. It should be a pandas data frame.'
# cleaning clinical markers
clinical = clinical.replace({'NA': None, 'na': None})
del clinical['scan-b_external_id']
clinical['instrument_model'] = clinical['instrument_model'].replace({
'HiSeq 2000': 0, 'NextSeq 500': 1})
lymph_dummies = pd.get_dummies(clinical['lymph_node_group'])
lymph_dummies.columns = ['lymph_node_group_' + c for c in lymph_dummies.columns]
clinical =
|
pd.concat([clinical, lymph_dummies], axis=1)
|
pandas.concat
|
# gather
import pandas as pd
import io
import time
import zipfile
import zlib
import urllib.request
urllib.request.urlretrieve('http://geoportal1-ons.opendata.arcgis.com/datasets/48d0b49ff7ec4ad0a4f7f8188f6143e8_3.zip',
'constituencies_super_generalised_shapefile.zip')
with zipfile.ZipFile('constituencies_super_generalised_shapefile.zip', 'r') as zip_ref:
zip_ref.extractall('constituencies_super_generalised_shapefile')
petition_list = pd.read_csv(
'https://petition.parliament.uk/archived/petitions.csv?parliament=1&state=published')
url_list = petition_list['URL'].tolist()
count, start = 0, time.time()
signatures, mp, errors = [], [], []
for petition_url in url_list:
try:
response = pd.read_json(petition_url + '.json')
response = pd.DataFrame.from_dict(response.iloc[0, 0], orient='index')
created_at = response.loc['created_at', 0]
response = pd.DataFrame.from_dict(
response.loc['signatures_by_constituency', 0])
response['created'] = created_at
signatures.extend(
response[['ons_code', 'signature_count', 'created']].values.tolist())
mp.extend(
response[['ons_code', 'name', 'mp']].values.tolist())
except:
errors.append(petition_url)
count += 1
if count % 250 == 0:
print('{} files reached in {}s'.format(count, time.time() - start))
if len(errors) != 0:
print(errors)
signatures = pd.DataFrame(
signatures, columns=['ons_code', 'signature_count', 'date'])
signatures['date'] = pd.to_datetime(signatures['date'])
signatures = signatures.set_index('date').groupby(
    [pd.Grouper(freq='M'), 'ons_code']).sum().reset_index().sort_values(['ons_code', 'date'])
signatures['date'] = signatures.date.dt.to_period('M')
mp = pd.DataFrame(mp, columns=['ons_code', 'constituency', 'mp']).drop_duplicates(
'ons_code', keep='last')
mp = mp.replace('Ynys M?n', 'Ynys Mon')
population = pd.read_excel(
'http://data.parliament.uk/resources/constituencystatistics/Population-by-age.xlsx', 'Data')
population = population[['ONSConstID', 'RegionName', 'PopTotalConstNum']].rename(
columns={'ONSConstID': 'ons_code', 'RegionName': 'region', 'PopTotalConstNum': 'population'})
eu =
|
pd.read_excel(
'https://secondreading.parliament.uk/wp-content/uploads/2017/02/eureferendum_constitunecy.xlsx', 'DATA')
|
pandas.read_excel
|
#definition of add_dataset that creates the meta-dataset
import pandas as pd
from pandas.core.dtypes.common import is_numeric_dtype
from scipy.stats import pearsonr
from sklearn.model_selection import train_test_split
from supervised.automl import AutoML
import os
import pandas as pd
from sklearn.preprocessing import LabelEncoder
import numpy as np
rootdir = os.path.dirname(__file__)
results_dir = rootdir + '/results/'
dataset_dir = rootdir + '/datasets_list_final/'
datasets_to_add_dir = rootdir + '/datasets_list_toadd/'
algorithm_list = ['Linear', 'Random Forest', 'Decision Tree', 'Neural Network']
def encode_y(y):
le = LabelEncoder()
le.fit(y)
y_enc = le.transform(y)
return y_enc
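# Quick illustration (not in the original script): encode_y(pd.Series(['cat', 'dog', 'cat']))
# returns array([0, 1, 0]) -- each distinct label is mapped to an integer code.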
def compute_max_corr(df):
y = encode_y(df[df.columns[-1]])
y =
|
pd.Series(y)
|
pandas.Series
|
import pandas as pd
import numpy as np
import seaborn as sb
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression, LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import BaggingClassifier, AdaBoostClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.cluster import KMeans
from sklearn.naive_bayes import GaussianNB
#--------------------------------------------------- READING THE DATASET -------------------------------------
data =
|
pd.read_csv("Loan_Train.csv")
|
pandas.read_csv
|
"""Tests for Resource harvesting methods."""
from typing import Any, Dict, List
import numpy as np
import pandas as pd
import pytest
from pudl.metadata.classes import Package, Resource, RESOURCE_METADATA
from pudl.metadata.helpers import most_frequent
# ---- Helpers ---- #
def _assert_frame_equal(a: pd.DataFrame, b: pd.DataFrame, **kwargs: Any) -> None:
"""Assert dataframes are equal, printing a useful error if not."""
try:
pd.testing.assert_frame_equal(a, b, **kwargs)
except AssertionError as error:
msg = "\n\n".join(["Dataframes are not equal.", str(error), str(a), str(b)])
raise AssertionError(msg)
# ---- Unit tests ---- #
def test_all_resources_valid() -> None:
"""All resources in metadata pass validation tests."""
Package.from_resource_ids(RESOURCE_METADATA)
STANDARD: Dict[str, Any] = {
"name": "r",
"harvest": {"harvest": False},
"schema": {
"fields": [
{"name": "i", "type": "integer", "harvest": {"aggregate": most_frequent}},
{"name": "j", "type": "integer", "harvest": {"aggregate": most_frequent}},
{"name": "x", "type": "integer", "harvest": {"aggregate": most_frequent}},
{"name": "y", "type": "integer", "harvest": {"aggregate": most_frequent}}
],
"primary_key": ["i", "j"]
},
}
HARVEST: Dict[str, Any] = {**STANDARD, "harvest": {"harvest": True}}
def test_resource_ignores_input_with_different_name() -> None:
"""Standard resources ignore input dataframes not named the same as themselves."""
dfs = {0:
|
pd.DataFrame([{"i": 1, "j": 1, "x": 1, "y": 1}])
|
pandas.DataFrame
|
import labtool_ex2
from labtool_ex2.dtype import UfloatArray
import pandas as pd
import numpy as np
import uncertainties.unumpy as unp
import inspect
import sys
test_list = list(range(10)) + [4, np.nan] # type: ignore
test_uarray = unp.uarray(test_list, [0.2]*len(test_list))
def test1():
print("\nTest 1\n")
ufloatarray = UfloatArray(test_list)
print(ufloatarray)
def test2():
print("\nTest 2\n")
series = pd.Series(test_uarray, dtype="ufloat")
print(series)
def test3():
print("\nTest 3\n")
df_uarray = pd.DataFrame({"ufloats": UfloatArray(test_uarray), "ints": range(len(test_uarray)), })
print([(x.nominal_value, x.std_dev, x.tag) for x in test_uarray])
print(df_uarray.dtypes)
print(type(df_uarray["ufloats"]))
def test4():
print("\nTest 4\n")
print(type(pd.Series(UfloatArray(test_uarray)).dtype))
def test5():
print("\nTest 5\n")
df_uarray = pd.DataFrame({"ufloats": test_uarray, "ints": range(len(test_uarray))})
print(df_uarray.dtypes)
def test6():
print("\nTest 6\n")
series = pd.Series(test_uarray, name="u", dtype="ufloat")
print(series.u.s)
def test7():
print("\nTest 7\n")
ints = range(len(test_uarray))
df_uarray = pd.DataFrame({"ufloats": test_uarray, "ints": ints, "strings": [chr(num**2) for num in ints]})
print(f"normal\n{df_uarray}\n")
print(f"n\n{df_uarray.u.n}\n")
print(f"s\n{df_uarray.u.s}\n")
print(f"dtypes\n{df_uarray.dtypes}\n")
print(f"dtypes n\n{df_uarray.u.n.dtypes}\n")
def test8():
print("\nTest 8\n")
ints = range(len(test_uarray))
df_uarray = pd.DataFrame({"ufloats": test_uarray, "ints": ints, "strings": [chr(num**2) for num in ints]})
df_4 = df_uarray*4
print(df_uarray)
print((df_uarray*4).iloc[1:-1, :].u.sep)
print(type(df_4.iloc[0,0]))
def test9():
print("\nTest 9\n")
ints = range(len(test_uarray))
df_uarray = pd.DataFrame({"ufloats": test_uarray, "ints": ints, "strings": [chr(num**2) for num in ints]})
print(df_uarray.u.sep)
print(df_uarray.u.sep.u.com)
print(f"\nfor comparison:\n{df_uarray}")
def test10():
print("\nTest 10\n")
df =
|
pd.read_csv("test_csv.csv")
|
pandas.read_csv
|
import numpy as np
import scipy.sparse as sp
import torch
import torch.nn.functional as F
import sys
import csv
import pandas as pd
from tqdm import tqdm, trange
from itertools import permutations
from sklearn.metrics import f1_score, precision_score, recall_score
from sklearn.model_selection import train_test_split
# import networkx as nx
N_TOTAL_PAPERS = 24251
N_TOTAL_AUTHORS = 42614
N_TOTAL_NODES = N_TOTAL_PAPERS + N_TOTAL_AUTHORS
def encode_onehot(labels):
classes = set(labels)
classes_dict = {c: np.identity(len(classes))[i, :] for i, c in
enumerate(classes)}
labels_onehot = np.array(list(map(classes_dict.get, labels)),
dtype=np.int32)
return labels_onehot
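# Illustrative note (not in the original file): encode_onehot(['a', 'b', 'a']) yields a
# (3, 2) array with one row per label and one column per distinct class.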
def load_reference_edges(path="dataset/"):
print('Loading edge list...')
reference_links = np.load("../edge_and_weight_01.npy")
# reference_links = np.vstack([reference_links, np.fliplr(reference_links)])
# reference_links = pd.DataFrame(reference_links).drop_duplicates().values
reference_edge_weight = np.expand_dims(reference_links[:, -1], 1)
reference_edge_type = np.zeros((reference_links.shape[0], 1), dtype = int)
# pd.DataFrame(reference_links, columns=['src', 'dst', 'weight']).to_csv(path + "reference_edgelist.csv", index=False)
reference_links = reference_links[:, :-1]
return reference_links, reference_edge_weight, reference_edge_type
def count_citation(path="dataset/"):
print("Running citation counting...")
referenced = pd.read_csv(path + "paper_reference.csv").values[:, -1]
return pd.Series(referenced).value_counts()
def load_edges(path="dataset/"):
print('Loading edge list...')
reference_links = np.load(path + "reference_paper.npy")
reference_links = np.vstack([reference_links, np.fliplr(reference_links)])
reference_links = pd.DataFrame(reference_links).drop_duplicates().values
reference_edge_weight = np.ones((reference_links.shape[0], 1), dtype = float)
    ########## tune reference_edge weight here
reference_edge_weight = reference_edge_weight
#########################################################
reference_edge_type = np.zeros((reference_links.shape[0], 1), dtype = int)
author_paper_links = pd.read_csv(path + "author_paper_all_with_year.csv").values[:, 0:-1]
author_paper_links[:, 0] += N_TOTAL_PAPERS
author_paper_links = np.vstack([author_paper_links, np.fliplr(author_paper_links)])
# author_paper_edges = np.hstack([author_paper_links, np.ones((author_paper_links.shape[0], 1))])
# author_paper_edges = np.hstack([author_paper_links, np.load(path + "author_paper_edge_weight.npy")]) # 1/k
author_paper_edges = np.hstack([author_paper_links, np.ones((author_paper_links.shape[0], 1))])
author_paper_edges = pd.DataFrame(author_paper_edges, columns=['i', 'j', 'w']).drop_duplicates(subset=['i', 'j']).values
author_paper_links = author_paper_edges[:, 0:-1]
# author_paper_edge_weight = np.ones((author_paper_links.shape[0], 1))
# author_paper_edge_weight = np.expand_dims(author_paper_edges[:, -1], 1) / author_paper_edges[:, -1].mean()
author_paper_edge_weight = np.expand_dims(author_paper_edges[:, -1], axis=-1)
    ########## tune author_paper_edge weight here
author_paper_edge_weight = author_paper_edge_weight
#######################################################################
author_paper_edge_type = np.ones((author_paper_links.shape[0], 1), dtype = int)
coauthor_links = np.load(path + "coauthor.npy").astype(int) + N_TOTAL_PAPERS
coauthor_links = np.vstack([coauthor_links, np.fliplr(coauthor_links)])
coauthor_edges = pd.DataFrame(coauthor_links).value_counts()
coauthor_links = np.asarray(list(coauthor_edges.index))
# coauthor_edge_weight = np.ones((coauthor_links.shape[0], 1))
# coauthor_edge_weight = np.expand_dims(np.asarray(list(coauthor_edges.values)), 1) / coauthor_edges.values.mean()
coauthor_edge_weight = 1 / (1 + np.exp(-0.5 * np.expand_dims(np.asarray(list(coauthor_edges.values)), 1)))
    ########## tune coauthor_edge weight here
coauthor_edge_weight = coauthor_edge_weight
#######################################################################
coauthor_edge_type = 2 * np.ones((coauthor_links.shape[0], 1), dtype = int)
# same_author_links = np.load(path + "paper_same_author.npy")
# same_author_links = np.vstack([same_author_links, np.fliplr(same_author_links)])
# same_author_links = pd.DataFrame(same_author_links).drop_duplicates().values
# same_author_edge_type = 3 * np.ones((same_author_links.shape[0], 1), dtype = int)
edges_unordered = np.vstack([reference_links, author_paper_links, coauthor_links])
edges_weight = np.vstack([reference_edge_weight, author_paper_edge_weight, coauthor_edge_weight])
# pd.DataFrame(np.hstack([edges_unordered, edges_weight]), columns=['src', 'dst', 'weight']).to_csv(path + "edgelist.csv", index=False)
edges_type = np.vstack([reference_edge_type, author_paper_edge_type, coauthor_edge_type])
return edges_unordered, edges_weight, edges_type
def load_data(path="dataset/", training=False):
"""Load citation network dataset (cora only for now)"""
# print('Loading dataset...')
# build graph
edges, edge_weight, edge_type = load_edges()
# print(edges.shape, edge_weight.shape, edge_type.shape)
adj = sp.coo_matrix((edge_weight[:, 0], (edges[:, 0], edges[:, 1])),
shape=(N_TOTAL_NODES, N_TOTAL_NODES),
dtype=np.float32)
# build symmetric adjacency matrix
adj = adj + adj.T.multiply(adj.T > adj) - adj.multiply(adj.T > adj)
adj = normalize(adj + sp.eye(adj.shape[0]))
# g = nx.Graph(adj)
# pr = nx.pagerank(g, alpha=0.9)
# pr_list = []
# for i in pr.values():
# pr_list.append(i)
# pr_list = pr_list / max(pr.values())
# print(np.shape(pd.unique(edge_type[:, 0]))[0])
paper_label = np.load(path + "paper_label.npy")
labels = encode_onehot(paper_label[:, -1])
idx_train, idx_val, _, _ = train_test_split(np.arange(len(paper_label)), labels, test_size=0.05, random_state=1)
idx_test = np.array(range(len(paper_label), N_TOTAL_PAPERS))
# features = np.load(path + "vgae_embedding.npy")
# features = np.zeros((N_TOTAL_NODES, 14))
features = np.zeros((N_TOTAL_NODES, 13))
# features = np.zeros((N_TOTAL_NODES, 10))
# features[:len(paper_label), :10] = labels
# features[idx_val, :10] = np.zeros((len(idx_val), 10))
    ########### all ones
    # features = np.ones((N_TOTAL_NODES, 128))
    ##### random numbers
# features = np.random.random((N_TOTAL_NODES, 128))
#n2v
# features = np.load(path + 'N2V_128d_3t.npy')
# features = sp.csr_matrix(features, dtype=np.float32)
    # --------------------author and unlabelled paper feature-------------
# features[:, :10] = np.load(path + "features_v2.npy")
# features[features>1] = 1
# --------------------------------------------------------------------
# -------------------labelled paper feature---------------------------
features[:len(paper_label), :10] = labels
# --------------------------------------------------------------------
# features[len(paper_label):N_TOTAL_PAPERS, :10] = np.load(path + "unlabel_features.npy")*0.5
# features[N_TOTAL_PAPERS:, :10] = np.load(path + "author_features.npy") * 0.3
if not training:
features[:len(paper_label), -3] = 1
features[len(paper_label):N_TOTAL_PAPERS, -2] = 1
features[N_TOTAL_PAPERS:, -1] = 1
# features[len(paper_label):, -1] = 1
else:
features[idx_val, :10] = np.zeros((len(idx_val), 10))
# features[:, -4] = pr_list
features[idx_train, -3] = 1
features[idx_val, -2] = 1
features[len(paper_label):N_TOTAL_PAPERS, -2] = 1
features[N_TOTAL_PAPERS:, -1] = 1
# pd.DataFrame(features).to_csv(path + "new_features.csv")
# publication_year = pd.read_csv(path + "author_paper_all_with_year.csv").drop_duplicates(subset=["paper_id"]).values[:, -1]
# extra_features = pd.read_csv(path + "node_extra_features.csv").values
# features = np.hstack([extra_features, encode_onehot(publication_year)])
# features = np.load("../N2V_256d_{}t.npy".format(np.shape(pd.unique(edge_type[:, 0]))[0]))
rows, cols = np.arange(N_TOTAL_NODES), np.arange(N_TOTAL_NODES)
# features = sp.csr_matrix((np.ones((N_TOTAL_NODES, )), (rows, cols)),
# shape=(N_TOTAL_NODES, N_TOTAL_NODES), dtype=np.float32)
# features = sparse_mx_to_torch_sparse_tensor(features)
class_tot = np.sum(labels, axis = 0)
loss_coef = torch.from_numpy(np.mean(class_tot) / class_tot).float()
# features = normalize(features)
# adj = normalize(adj + sp.eye(adj.shape[0]))
features = torch.FloatTensor(features)
# features = torch.FloatTensor(np.array(features.todense()))
labels = torch.LongTensor(np.where(labels)[1])
adj = sparse_mx_to_torch_sparse_tensor(adj)
idx_train = torch.LongTensor(idx_train)
idx_val = torch.LongTensor(idx_val)
idx_test = torch.LongTensor(idx_test)
return adj, features, labels, idx_train, idx_val, idx_test, loss_coef
def normalize(mx):
"""Row-normalize sparse matrix"""
rowsum = np.array(mx.sum(1))
r_inv = np.power(rowsum, -1).flatten()
r_inv[np.isinf(r_inv)] = 0.
r_mat_inv = sp.diags(r_inv)
mx = r_mat_inv.dot(mx)
return mx
def accuracy(output, labels):
preds = output.max(1)[1].type_as(labels)
correct = preds.eq(labels).double()
correct = correct.sum()
return correct / len(labels)
def sparse_mx_to_torch_sparse_tensor(sparse_mx):
"""Convert a scipy sparse matrix to a torch sparse tensor."""
sparse_mx = sparse_mx.tocoo().astype(np.float32)
indices = torch.from_numpy(
np.vstack((sparse_mx.row, sparse_mx.col)).astype(np.int64))
values = torch.from_numpy(sparse_mx.data)
shape = torch.Size(sparse_mx.shape)
return torch.sparse.FloatTensor(indices, values, shape)
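# Minimal, illustrative check of the two helpers above (synthetic matrix, not part of the
# training pipeline):
def _sparse_helpers_example():
    m = sp.coo_matrix(np.array([[1.0, 1.0], [0.0, 2.0]]))
    row_norm = normalize(m)                          # each row now sums to 1
    return sparse_mx_to_torch_sparse_tensor(row_norm)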
def get_paper_label(test_output, given_labels):
result = np.zeros((24251, ))
result[:len(given_labels)] = given_labels
preds = test_output.max(1)[1]
result[len(given_labels):] = preds[len(given_labels):]
return result
def create_csv_from_result(result, submission_version='0'):
author_paper_dic = np.load('author_paper_dic.npy',allow_pickle=True).item() # dictionary like {94: [25, 21083]}
# transform to an "author with label" version
author_label_dic = {} # dictionary like {0: [0, 1, 5]}
for key in author_paper_dic:
for index in author_paper_dic[key]:
if key not in author_label_dic:
author_label_dic[key] = [int(result[index])]
else:
if int(result[index]) not in author_label_dic[key]:
author_label_dic[key].append(int(result[index]))
unfiltered_submission_name = 'submission/unfiltered_submission_'+submission_version+'.csv'
    f = open(unfiltered_submission_name, 'w', encoding='utf-8', newline='')
csv_writer = csv.writer(f)
csv_writer.writerow(["author_id","labels"])
for key in author_label_dic:
csv_writer.writerow([key,' '.join([str(x) for x in author_label_dic[key]])])
f.close()
def filter_csv(submission_version='0'):
test_set = pd.read_csv("dataset/authors_to_pred.csv")
test_authors = test_set.values.reshape((37066, ))
unfiltered_submission_name = 'submission/unfiltered_submission_'+submission_version+'.csv'
submission_name = 'submission/submission_'+submission_version+'.csv'
submit =
|
pd.read_csv(unfiltered_submission_name)
|
pandas.read_csv
|
import pandas as pd
import math
import matplotlib.pyplot as plt
import seaborn as sn
import matplotlib.patches as mpatches
from LoopVm_plot import *
from natsort import index_natsorted, order_by_index
#sn.set_context("paper", font_scale = 2)
#single VM Only - (NO Comparisons)
def latency_equilibrium(df, title):
fig, ax = plt.subplots()
ax.set_title("Migration Mechanism - Deviation relative to Latency Equilibrium " + title)
ax.scatter(df['INDEX'], df['delta_lat_real_end'], label = 'Latency delta as migration completed')
ax.plot(df['INDEX'], df['delta_lat_real_end'])
ax.scatter(df['INDEX'], df['delta_lat_init'], label = 'Latency delta as migration started')
ax.plot(df['INDEX'], df['delta_lat_init'])
#ax.scatter(df['INDEX'], df['lat_source_init'], label = 'Latency to the source as migration started')
#ax.plot(df['INDEX'], df['lat_source_init'])
#ax.scatter(df['INDEX'], df['lat_target_init'], label = 'Latency to the target as migration started')
#ax.plot(df['INDEX'], df['lat_target_init'])
#ax.scatter(df['INDEX'], df['lat_source_end'], label = 'Latency to the source as migration completed')
#ax.plot(df['INDEX'], df['lat_source_end'])
#ax.scatter(df['INDEX'], df['lat_target_end'], label = 'Latency to the target as migration completed')
#ax.plot(df['INDEX'], df['lat_target_end'])
#ax.axhline(0,color= 'black')
#i=0
#for id_lte, idtrip in zip(df['id_edge_origin'],df['TripID']):
# ax.annotate("S_ID_LTE: "+str(id_lte) + "\nTripID: " + str(idtrip), (df['INDEX'].values[i], df['lat_source_init'].values[i]))
# ax.annotate("S_ID_LTE: "+str(id_lte) + "\nTripID: " + str(idtrip), (df['INDEX'].values[i], df['lat_source_end'].values[i]))
# i = i + 1
#i=0
#for id_lte, idtrip in zip(df['id_edge_target'],df['TripID']):
# ax.annotate("T_ID_LTE: "+str(id_lte) + "\nTripID: " + str(idtrip), (df['INDEX'].values[i], df['lat_target_init'].values[i]))
# ax.annotate("T_ID_LTE: "+str(id_lte) + "\nTripID: " + str(idtrip), (df['INDEX'].values[i], df['lat_target_end'].values[i]))
# i = i + 1
#i=0
#for id_lte_o, id_lte_d in zip(df['id_edge_origin'],df['id_edge_target']):
# ax.annotate(""+str(id_lte_o) + "->" + str(id_lte_d), (df['INDEX'].values[i], df['delta_lat_init'].values[i]))
# ax.annotate(""+str(id_lte_o) + "->" + str(id_lte_d), (df['INDEX'].values[i], df['delta_lat_real_end'].values[i]))
# i = i + 1
ax.set_xlabel('Migrations Performed')
ax.set_ylabel('Delta Latency in ms')
ax.legend()
return 1
#single VM Only - (NO Comparisons)
def distance_equilibrium(df, title):
fig, ax = plt.subplots()
ax.set_title("Migration Mechanism - Deviation relative to Distance Equilibrium " + title)
ax.scatter(df['INDEX'], df['delta_dist_real_end'], label = 'Distance to equilibrium as migration completed')
ax.plot(df['INDEX'], df['delta_dist_real_end'])
ax.scatter(df['INDEX'], df['delta_dist_init'], label = 'Distance to equilibrium as migration started')
ax.plot(df['INDEX'], df['delta_dist_init'])
ax.scatter(df['INDEX'], df['dist_source_init'], label = 'Distance to the source as migration started')
ax.plot(df['INDEX'], df['dist_source_init'])
ax.scatter(df['INDEX'], df['dist_target_init'], label = 'Distance to the target as migration started')
ax.plot(df['INDEX'], df['dist_target_init'])
ax.scatter(df['INDEX'], df['dist_source_end'], label = 'Distance to the source as migration finished')
ax.plot(df['INDEX'], df['dist_source_end'])
ax.scatter(df['INDEX'], df['dist_target_end'], label = 'Distance to the target as migration finished')
ax.plot(df['INDEX'], df['dist_target_end'])
ax.axhline(0,color= 'black')
i=0
for id_lte, idtrip in zip(df['id_edge_origin'],df['TripID']):
ax.annotate("S_ID_LTE: "+str(id_lte) + "\nTripID: " + str(idtrip), (df['INDEX'].values[i], df['dist_source_init'].values[i]))
ax.annotate("S_ID_LTE: "+str(id_lte) + "\nTripID: " + str(idtrip), (df['INDEX'].values[i], df['dist_source_end'].values[i]))
i = i + 1
i=0
for id_lte, idtrip in zip(df['id_edge_target'],df['TripID']):
ax.annotate("T_ID_LTE: "+str(id_lte) + "\nTripID: " + str(idtrip), (df['INDEX'].values[i], df['dist_target_init'].values[i]))
ax.annotate("T_ID_LTE: "+str(id_lte) + "\nTripID: " + str(idtrip), (df['INDEX'].values[i], df['dist_target_end'].values[i]))
i = i + 1
i=0
for id_lte_o, id_lte_d in zip(df['id_edge_origin'],df['id_edge_target']):
ax.annotate(""+str(id_lte_o) + "->" + str(id_lte_d), (df['INDEX'].values[i], df['delta_dist_init'].values[i]))
ax.annotate(""+str(id_lte_o) + "->" + str(id_lte_d), (df['INDEX'].values[i], df['delta_dist_real_end'].values[i]))
i = i + 1
ax.set_xlabel('Migrations Performed')
ax.set_ylabel('Delta Distance in m')
ax.legend()
return 1
#single VM Only - Single Plot + Compare Plot (OLD WAY)
def client_latency(df, new_fig, fig, ax, label):
if (new_fig == 1):
fig, ax = plt.subplots()
ax.set_title("Client Latency Evolution by Trip ")
ax.plot(df.index, df['Latency'], label = 'Latency to the station ' + label)
df = df.drop_duplicates(subset=['TripID'])
for i in range(len(df.index)):
latency = df.at[df.index[i], 'Latency']
tripid = df.at[df.index[i], 'TripID']
ax.annotate("TripID: " + str(tripid), (df.index[i], latency))
ax.scatter(df.index[i], latency, color='blue')
ax.set_xlabel('Path coordinates')
ax.set_ylabel('Latency in milliseconds')
ax.legend()
return fig, ax
#single VM Only - Single Plot + Compare Plot (OLD WAY)
def client_distance(df, new_fig, fig, ax, label):
if (new_fig == 1):
fig, ax = plt.subplots()
ax.set_title("Evolution of Client Distance to Origin by Trip ")
ax.plot(df.index, df['Distance'], label = 'Distance to the station ' + label)
df = df.drop_duplicates(subset=['TripID'])
for i in range(len(df.index)):
distance = df.at[df.index[i], 'Distance']
tripid = df.at[df.index[i], 'TripID']
ax.annotate("TripID: " + str(tripid), (df.index[i], distance))
ax.scatter(df.index[i], distance, color='blue')
ax.set_xlabel('Path coordinates')
ax.set_ylabel('Distance in meters')
ax.legend()
return fig, ax
####
#single VM - Single Plot + Compare Plot
def prep_n_mig(plot_dict, df_lst_files):
df_aux_list = list()
for Class, df in zip(plot_dict['Classes'], df_lst_files):
dfaux = Vm_groupby(df, ['TripID'], {'Mig_ID':'count'})
dfaux.rename(columns={'TripID':'TripID', 'Mig_ID':'Number of Migrations'},inplace=True)
dfaux.insert(0, 'Class', Class)
df_aux_list.append(dfaux)
dfconcat = pd.concat(df_aux_list)
return dfconcat
def n_mig(df):
fig, ax = plt.subplots()
ax.set_title("Number of Migrations by Trip ")
sn.barplot(x='TripID', y='Number of Migrations', hue='Class', palette=['C0','C1','C2', 'C3', 'C4', 'C5', 'C6', 'C7', 'C8', 'C9'], data=df, ax=ax)
ax.set_xlabel('Trips')
ax.set_ylabel('Number of migrations')
ax.legend()
return 1
####
####
#single VM - Single Plot + Compare Plot
def prep_migtime(plot_dict, df_lst_files):
df_aux_list = list()
for Class, df in zip(plot_dict['Classes'], df_lst_files):
dfaux = Vm_groupby(df, ['TripID'], {'Mt_real':'sum', 'triptime': 'first'})
dfaux.insert(0, 'Class', Class)
df_aux_list.append(dfaux)
dfconcat = pd.concat(df_aux_list)
return dfconcat
def migtime(df):
fig, ax = plt.subplots()
ax.set_title("Time Spent Migrating vs Trip Time ")
ax.scatter(df['TripID'], df['triptime'], label = 'Total Trip Time', color='black')
sn.barplot(x='TripID', y='Mt_real', hue='Class', palette=['C0','C1','C2', 'C3', 'C4', 'C5', 'C6', 'C7', 'C8', 'C9'], data=df, ax=ax)
ax.set_xlabel('Trips')
ax.set_ylabel('Time in Seconds')
ax.legend()
return 1
####
####
#single VM - Single Plot + Compare Plot
def prep_downtime(plot_dict, df_lst_files):
df_aux_list = list()
for Class, df in zip(plot_dict['Classes'], df_lst_files):
dfaux = Vm_groupby(df, ['TripID'], {'DT_real':'sum'})
dfaux.insert(0, 'Class', Class)
df_aux_list.append(dfaux)
dfconcat = pd.concat(df_aux_list)
return dfconcat
def downtime(df):
fig, ax = plt.subplots()
ax.set_title("Downtime by Trip")
sn.barplot(x='TripID', y='DT_real', hue='Class', palette=['C0','C1','C2', 'C3', 'C4', 'C5', 'C6', 'C7', 'C8', 'C9'], data=df, ax=ax)
ax.set_xlabel('Trips')
ax.set_ylabel('Time in milliseconds')
ax.legend()
return 1
####
####
#single VM - Single Plot + Compare Plot
def prep_mte_vs_mtr(plot_dict, df_lst_files):
df_aux_list = list()
i = 0
for Class, df in zip(plot_dict['Classes'], df_lst_files):
dfaux = df[df['TripID'].values == df['TripID'].values[0]]
dfaux = Vm_groupby(dfaux, ['TripID'], {'Mt_est':'first', 'Mt_real':'first'})
dfaux.insert(0, 'Class', 'Estimate Migration Time') # + Class
df_aux_list.append(dfaux)
i = i + 1
        if (i == 1): break  # remove this break if the migration estimate method is to be compared
dfconcat = pd.concat(df_aux_list)
return dfconcat
def mte_vs_mtr(df):
fig, ax = plt.subplots(1,2)
fig.suptitle("Migration Time Estimate vs Real Migration Time")
sn.barplot(x='TripID', y='Mt_real', data=df, ax=ax[0])
sn.barplot(x='TripID', y='Mt_est', hue='Class', palette=['C1','C2', 'C3', 'C4', 'C5', 'C6', 'C7', 'C8', 'C9'], data=df, ax=ax[1])
leg1 = mpatches.Patch(color='#1f77b4', label='Real Migration Time')
ax[0].legend(handles=[leg1])
ax[0].set_ylim([0, 60])
ax[1].legend()
ax[1].set_ylim([0, 60])
for i in range(2):
ax[i].set_xlabel('Single Machine')
ax[i].set_ylabel('Time in Seconds')
ax[i].set_xticks([])
return 1
####
####
#single VM - Single Plot + Compare Plot
def prep_transferred_data(plot_dict, df_lst_files):
df_aux_list = list()
for Class, df in zip(plot_dict['Classes'], df_lst_files):
dfaux = Vm_groupby(df, ['TripID'], {'transferred_dataGB':'sum'})
dfaux.insert(0, 'Class', Class)
df_aux_list.append(dfaux)
dfconcat =
|
pd.concat(df_aux_list)
|
pandas.concat
|
# Pandas and numpy for data manipulation
import pandas as pd
import numpy as np
np.random.seed(42)
import pickle
import sys
import configargparse
# Matplotlib and seaborn for plotting
#import matplotlib.pyplot as plt
#%matplotlib inline
print('importing')
#import matplotlib
#matplotlib.rcParams['font.size'] = 16
#matplotlib.rcParams['figure.figsize'] = (9, 9)
print('starting scipy')
# Scipy helper functions
from scipy.stats import percentileofscore
from scipy import stats
import scipy.stats as st
print('done with scipy')
# Standard ML Models for comparison
import sklearn
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import ElasticNet
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.svm import SVR
from sklearn import svm
from sklearn.ensemble import RandomForestClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.naive_bayes import GaussianNB
import xgboost
from xgboost import XGBClassifier
# Splitting data into training/testing
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import scale
print('still importing')
from sklearn.datasets import make_circles
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import f1_score
from sklearn.metrics import cohen_kappa_score
from sklearn.metrics import roc_auc_score
from sklearn.metrics import confusion_matrix
from matplotlib import pyplot
from sklearn.metrics import roc_curve, auc, roc_auc_score
from sklearn.preprocessing import label_binarize
from functools import reduce
# Metrics
from sklearn.metrics import mean_squared_error, mean_absolute_error, median_absolute_error
# Distributions
import scipy
import pandas as pd
import numpy as np
import pymc3 as pm
print('ok done importing')
if __name__ == '__main__':
    if len(sys.argv) < 10:
        print("Usage: python OperonMulti.py -f prediction_file -o operon_output -t threshold -g gff_file -d gff_delimiter\nNumber of arguments is " + str(len(sys.argv)))
        sys.exit(1)
p = configargparse.ArgParser(description='given the six predictions from OperonSEQer and a threshold, this script strings together multigene operons')
p.add('-f', required=True, help='six input predictions',dest='Opreds')
p.add('-o', required=True, help='output name',dest='out')
p.add('-t', required=False, help='threshold for call', dest='thr', type=int, choices=range(1,7))
    p.add('-g', required=False, help='gff file for stringing together operons (no header)', dest='gff')
    p.add('-d', required=False, help='gff file delimiter (default as tab) - tab, space or comma', dest='deli')
#print('we made it in')
##this is the format of the gff file:
#0 Chromosome ena gene 190 255 . + . b0001
#1 Chromosome ena gene 337 2799 . + . b0002
args=p.parse_args()
preds=pd.read_csv(args.Opreds, sep='\t')
if args.thr:
if args.gff:
print('creating thresholded operon file')
else:
print('ERROR - if you want to string together operons, you need to provide a gff file')
sys.exit(1)
threshdict={1:'one',2:'two',3:'three',4:'four',5:'five',6:'six'}
thresh=threshdict[args.thr]
operons = preds[["SysName1", "SysName2", thresh]]
operons.columns=['Gene1','Gene2','pred']
if args.deli:
if args.deli=='tab':
gff=pd.read_csv('/home/rkrishn/projects/CERES/Raga/Genome/Escherichia_coli_str_k_12_substr_mg1655.ASM584v2.37_lite.txt', sep='\t', header=None)
elif args.deli=='comma':
gff=
|
pd.read_csv('/home/rkrishn/projects/CERES/Raga/Genome/Escherichia_coli_str_k_12_substr_mg1655.ASM584v2.37_lite.txt', sep=',', header=None)
|
pandas.read_csv
|
# standard libraries
import pandas as pd
# dash and plotly
import dash
from dash import dcc
from dash import html
from dash.dependencies import State, Input, Output
import dash_bootstrap_components as dbc
from dash import dash_table
# css for pictograms
FONT_AWESOME = "https://cdnjs.cloudflare.com/ajax/libs/font-awesome/4.7.0/css/font-awesome.min.css"
app = dash.Dash(
__name__,
external_stylesheets=[dbc.themes.BOOTSTRAP, FONT_AWESOME],
meta_tags=[
{"name": "viewport", "content": "width=device-width, initial-scale=1.0"}
],
)
app.title = "Research based Stock Trading Strategies"
# This is for gunicorn
server = app.server
# Side panel
# side panel header
portfolio_dropdown_text = html.P(
id="header-strategy",
children=["Trading", html.Br(), " Strategy"],
)
# button group for trading strategies
button_group = html.Div(
[
dbc.RadioItems(
id="radios",
inputClassName="btn-check",
labelClassName="btn btn-outline-primary",
labelCheckedClassName="active",
options=[
{"label": "Piotroski F-Score", "value": 'f_score'},
{"label": "PEAD", "value": 'pead'},
{"label": "Momentum", "value": 'momentum'},
{"label": "G-Score", "value": 'g_score'},
{"label": "Accruals Anatomy", "value": 'accruals'},
{"label": "Betting against Beta", "value": 'beta'},
{"label": "Equity Pairs", "value": 'pairs'},
],
value='f_score',
),
html.Div(id="output"),
],
className="radio-group",
)
info = html.Div(
id='info',
children=[
dcc.Markdown(
"All strategies are performed on companies based in the US handing in annual data to the [SEC](https://www.sec.gov/dera/data/financial-statement-data-sets.html)."
)
]
)
# bringing the side panel together
side_panel_layout = html.Div(
id="panel-side",
children=[
portfolio_dropdown_text,
button_group,
info,
],
)
# main panel
explanation = html.Div(
children=[
dcc.Markdown(
children="",
id='explanation-text'
)
],
className="six columns",
id='explanation',
)
stocks_header = html.Div(
dcc.Markdown(
"# Investments"
),
id='stocks-header'
)
long_header = html.Div(
dcc.Markdown(
"Long"
),
id='long-header'
)
long_stocks = html.Div(
id='long-stocks',
style={
'margin-top': '-30px'
}
)
short_header = html.Div(
dcc.Markdown(
"Short"
),
id='short-header'
)
short_stocks = html.Div(
id='short-stocks',
style={
'margin-top': '-30px'
}
)
long_short = html.Div(
[
stocks_header,
long_header,
long_stocks,
short_header,
short_stocks
],
className="six columns",
id='long-short',
)
book = html.Div([
explanation,
long_short
],
className='row',
id='book-text'
)
# bringing the main panel together
main_panel_layout = html.Div(
id="panel-upper-lower",
style={
'background-image': 'url("assets/book2.png")',
'background-repeat': 'no-repeat',
'background-position': 'center',
'background-size': '90%'
},
children=[
book
],
)
# bringing everything together, creating store-divs for used data
root_layout = html.Div(
id="root",
children=[
dcc.Store(
id="store-data",
data={}
),
dcc.Store(
id="store-backtests-stats",
data={}
),
dcc.Store(
id="store-backtests-prices",
data={}
),
dcc.Store(
id="store-backtests-weights",
data={}
),
side_panel_layout,
main_panel_layout,
],
)
# creating the app
app.layout = root_layout
def create_signal_df(df, signal):
"""
:param signal: Long or Short Signal
:return: Returns a DataFrame with all the stocks that have the given signal in df.
"""
# filter for signal
df = df[df.loc[:, 'Signal'] == signal]
    # cut the index into 8 groups, one per display column
    df['Header'] = pd.qcut(df.index, 8, labels=False)
    # create a DataFrame with these 8 columns
list_dict = {}
for i in range(8):
list_nr = df.loc[:, 'Stock'][df.loc[:, 'Header'] == i].to_list()
list_dict[i] = list_nr
df_signal = pd.DataFrame.from_dict(list_dict, orient='index').T
return df_signal
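# Illustrative call of create_signal_df with synthetic data (tickers and signals below are
# made up, not output of the real strategies):
#
#     demo = pd.DataFrame({'Stock': ['T%02d' % i for i in range(16)],
#                          'Signal': ['Long', 'Short'] * 8})
#     long_table = create_signal_df(demo, 'Long')   # long candidates spread over 8 columns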
# Callbacks
@app.callback(
[
Output('explanation-text', 'children'),
Output('long-stocks', 'children'),
Output('short-stocks', 'children')
],
[
Input('radios', 'value')
]
)
def create_explanation(strategy):
"""
    :return: the explanation text and the long/short stock tables for the selected
        trading strategy
"""
long_df = pd.DataFrame()
short_df =
|
pd.DataFrame()
|
pandas.DataFrame
|
from pandas._config import get_option
from pandas.plotting._matplotlib.boxplot import (
BoxPlot, boxplot, boxplot_frame, boxplot_frame_groupby)
from pandas.plotting._matplotlib.converter import deregister, register
from pandas.plotting._matplotlib.core import (
AreaPlot, BarhPlot, BarPlot, HexBinPlot, LinePlot, PiePlot, ScatterPlot)
from pandas.plotting._matplotlib.hist import (
HistPlot, KdePlot, hist_frame, hist_series)
from pandas.plotting._matplotlib.misc import (
andrews_curves, autocorrelation_plot, bootstrap_plot, lag_plot,
parallel_coordinates, radviz, scatter_matrix)
from pandas.plotting._matplotlib.timeseries import tsplot
from pandas.plotting._matplotlib.tools import table
if
|
get_option("plotting.matplotlib.register_converters")
|
pandas._config.get_option
|
#General
from datetime import datetime
from collections import Counter
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import lines
import seaborn as sns
from seaborn import displot
from seaborn import lineplot
import statsmodels.api as sm
import talib as ta
from statsmodels.stats.proportion import *
from math import sqrt
import math
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import MinMaxScaler
from multiprocessing import Process
from scipy.spatial.distance import pdist, squareform
from itertools import compress
from plotnine import *
import talib as ta
date = pd.date_range(start = '2020-01-1 00:00:00', end = '2021-06-10 00:00:00', freq='min')
### DATA PREFERENCE ###
def chose_timeframe(timeframe):
if (timeframe == 'daily'):
df = pd.read_csv('/Users/manu/Quant Finance/OakTree & Lion/Binance_BTCUSDT_daily.csv')
df = df[0:1270]
df.info()
df.index = df.date
df.index = pd.to_datetime(df.index)
df['returns'] = df.close.pct_change()
df.info()
return df
elif (timeframe == 'hourly'):
df = pd.read_csv('/Users/manu/Quant Finance/OakTree & Lion/Binance_BTCUSDT_hourly.csv')
df = df[0:4806]
df.info()
df.index = df.date
df.index = pd.to_datetime(df.index)
df['returns'] = df.close.pct_change()
df.info()
return df
elif (timeframe == 'minute'):
df = pd.read_csv('/Users/manu/Quant Finance/OakTree & Lion/Binance_BTCUSDT_minute.csv')
df = df[0:752458]
df.info()
df.index = df.date
df.index =
|
pd.to_datetime(df.index)
|
pandas.to_datetime
|
# -*- coding: utf-8 -*-
import pdb, importlib, inspect, time, datetime, json
# from PyFin.api import advanceDateByCalendar
# from data.polymerize import DBPolymerize
from data.storage_engine import StorageEngine
import time
import pandas as pd
import numpy as np
from datetime import timedelta, datetime
from valuation_estimation import factor_valuation_estimation
from vision.db.signletion_engine import get_fin_consolidated_statements_pit, get_fundamentals, query
from vision.table.industry_daily import IndustryDaily
from vision.table.fin_cash_flow import FinCashFlow
from vision.table.fin_balance import FinBalance
from vision.table.fin_income import FinIncome
from vision.table.fin_indicator import FinIndicator
from vision.table.fin_indicator_ttm import FinIndicatorTTM
from vision.table.fin_income_ttm import FinIncomeTTM
from vision.table.fin_cash_flow_ttm import FinCashFlowTTM
from vision.db.signletion_engine import *
from vision.table.valuation import Valuation
from vision.table.industry import Industry
from vision.table.stk_daily_price import SkDailyPrice
from data.sqlengine import sqlEngine
from utilities.sync_util import SyncUtil
# pd.set_option('display.max_columns', None)
# pd.set_option('display.max_rows', None)
# from ultron.cluster.invoke.cache_data import cache_data
class CalcEngine(object):
def __init__(self, name, url, methods=[
{'packet': 'valuation_estimation.factor_valuation_estimation', 'class': 'FactorValuationEstimation'}]):
self._name = name
self._methods = methods
self._url = url
def get_trade_date(self, trade_date, n, days=365):
"""
        Get the date n years before the given trade date, constrained to a trading day;
        if that date is not a trading day, step back to the most recent one.
        :param days:
        :param trade_date: current trading day
:param n:
:return:
"""
syn_util = SyncUtil()
trade_date_sets = syn_util.get_all_trades('001002', '19900101', trade_date)
trade_date_sets = trade_date_sets['TRADEDATE'].values
time_array = datetime.strptime(str(trade_date), "%Y%m%d")
time_array = time_array - timedelta(days=days) * n
date_time = int(datetime.strftime(time_array, "%Y%m%d"))
if str(date_time) < min(trade_date_sets):
# print('date_time %s is out of trade_date_sets' % date_time)
return str(date_time)
else:
while str(date_time) not in trade_date_sets:
date_time = date_time - 1
# print('trade_date pre %s year %s' % (n, date_time))
return str(date_time)
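    # Hypothetical usage (the date and constructor arguments are assumptions):
    #   engine = CalcEngine('valuation', db_url)
    #   one_year_back = engine.get_trade_date('20200409', 1)  # nearest trading day about a year earlier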
def _func_sets(self, method):
        # filter out private and protected functions
return list(filter(lambda x: not x.startswith('_') and callable(getattr(method, x)), dir(method)))
def loading_data(self, trade_date):
"""
        Fetch the base data.
        For the given trading day, fetch the base data of all stocks on that day.
        :param trade_date: trading day
:return:
"""
time_array = datetime.strptime(trade_date, "%Y-%m-%d")
trade_date = datetime.strftime(time_array, '%Y%m%d')
engine = sqlEngine()
trade_date_pre = self.get_trade_date(trade_date, 1, days=30)
trade_date_1y = self.get_trade_date(trade_date, 1)
trade_date_3y = self.get_trade_date(trade_date, 3)
trade_date_4y = self.get_trade_date(trade_date, 4)
trade_date_5y = self.get_trade_date(trade_date, 5)
# report data
columns = ['COMPCODE', 'PUBLISHDATE', 'ENDDATE', 'symbol', 'company_id', 'trade_date']
balance_report = engine.fetch_fundamentals_pit_extend_company_id(FinBalance,
[FinBalance.total_assets,
], dates=[trade_date])
if len(balance_report) <= 0 or balance_report is None:
balance_report = pd.DataFrame({'security_code': [], 'total_assets': []})
for column in columns:
if column in list(balance_report.keys()):
balance_report = balance_report.drop(column, axis=1)
balance_report = balance_report.rename(columns={
            'total_assets': 'total_assets_report',  # total assets
})
# valuation_report_sets = pd.merge(indicator_sets, balance_report, how='outer', on='security_code')
# MRQ data
cash_flow_mrq = engine.fetch_fundamentals_pit_extend_company_id(FinCashFlow,
[FinCashFlow.cash_and_equivalents_at_end,
], dates=[trade_date])
if len(cash_flow_mrq) <= 0 or cash_flow_mrq is None:
cash_flow_mrq = pd.DataFrame({'security_code': [], 'cash_and_equivalents_at_end': []})
for column in columns:
if column in list(cash_flow_mrq.keys()):
cash_flow_mrq = cash_flow_mrq.drop(column, axis=1)
cash_flow_mrq = cash_flow_mrq.rename(columns={
            'cash_and_equivalents_at_end': 'cash_and_equivalents_at_end',  # cash and cash equivalents at period end
})
balance_mrq = engine.fetch_fundamentals_pit_extend_company_id(FinBalance,
                                                                       [FinBalance.longterm_loan,  # long-term loan
                                                                        FinBalance.total_assets,  # total assets
                                                                        FinBalance.shortterm_loan,  # short-term loan
                                                                        FinBalance.equities_parent_company_owners,
                                                                        # total equity attributable to owners of the parent company
], dates=[trade_date])
if len(balance_mrq) <= 0 or balance_mrq is None:
balance_mrq = pd.DataFrame(
{'security_code': [], 'longterm_loan': [], 'total_assets': [], 'shortterm_loan': [],
'equities_parent_company_owners': []})
for column in columns:
if column in list(balance_mrq.keys()):
balance_mrq = balance_mrq.drop(column, axis=1)
balance_mrq = balance_mrq.rename(columns={
            'shortterm_loan': 'shortterm_loan',  # short-term loan
            'longterm_loan': 'longterm_loan',  # long-term loan
            'total_assets': 'total_assets',  # total assets
            'equities_parent_company_owners': 'equities_parent_company_owners',  # total equity attributable to owners of the parent company
})
valuation_mrq = pd.merge(cash_flow_mrq, balance_mrq, on='security_code')
indicator_sets = engine.fetch_fundamentals_pit_extend_company_id(FinIndicator,
[FinIndicator.np_cut,
], dates=[trade_date])
for col in columns:
if col in list(indicator_sets.keys()):
indicator_sets = indicator_sets.drop(col, axis=1)
# indicator_sets = indicator_sets.rename(columns={'EBIT': 'ebit_mrq'})
valuation_mrq = pd.merge(indicator_sets, valuation_mrq, how='outer', on='security_code')
income_sets = engine.fetch_fundamentals_pit_extend_company_id(FinIncome,
                                                                      [FinIncome.income_tax,  # income tax
], dates=[trade_date])
for col in columns:
if col in list(income_sets.keys()):
income_sets = income_sets.drop(col, axis=1)
valuation_mrq = pd.merge(income_sets, valuation_mrq, how='outer', on='security_code')
cash_flow_sets = engine.fetch_fundamentals_pit_extend_company_id(FinCashFlow,
[FinCashFlow.fixed_assets_depreciation,
                                                                         # depreciation of fixed assets
                                                                         FinCashFlow.intangible_assets_amortization,
                                                                         # amortisation of intangible assets
                                                                         FinCashFlow.fix_intan_other_asset_acqui_cash,
                                                                         # cash paid to acquire fixed assets, intangible assets and other ...
                                                                         FinCashFlow.defferred_expense_amortization,
                                                                         # amortisation of long-term deferred expenses
                                                                         FinCashFlow.borrowing_repayment,  # cash paid to repay borrowings
                                                                         FinCashFlow.cash_from_borrowing,  # cash received from borrowings
                                                                         FinCashFlow.cash_from_bonds_issue,
                                                                         # cash received from bond issuance
], dates=[trade_date])
for col in columns:
if col in list(cash_flow_sets.keys()):
cash_flow_sets = cash_flow_sets.drop(col, axis=1)
valuation_mrq = pd.merge(cash_flow_sets, valuation_mrq, how='outer', on='security_code')
balance_sets = engine.fetch_fundamentals_pit_extend_company_id(FinBalance,
[FinBalance.shortterm_loan,
                                                                        FinBalance.total_current_assets,  # total current assets
                                                                        FinBalance.total_current_liability,  # total current liabilities
], dates=[trade_date])
for col in columns:
if col in list(balance_sets.keys()):
balance_sets = balance_sets.drop(col, axis=1)
valuation_mrq = pd.merge(balance_sets, valuation_mrq, how='outer', on='security_code')
balance_sets_pre = engine.fetch_fundamentals_pit_extend_company_id(FinBalance,
                                                                           [FinBalance.total_current_assets,  # total current assets
                                                                            FinBalance.total_current_liability,
                                                                            # total current liabilities
], dates=[trade_date_pre])
for col in columns:
if col in list(balance_sets_pre.keys()):
balance_sets_pre = balance_sets_pre.drop(col, axis=1)
balance_sets_pre = balance_sets_pre.rename(columns={
'total_current_assets': 'total_current_assets_pre',
'total_current_liability': 'total_current_liability_pre',
})
valuation_mrq = pd.merge(balance_sets_pre, valuation_mrq, how='outer', on='security_code')
# TTM data
        # merge total market cap into the TTM data
cash_flow_ttm_sets = engine.fetch_fundamentals_pit_extend_company_id(FinCashFlowTTM,
[FinCashFlowTTM.net_operate_cash_flow,
], dates=[trade_date])
        if cash_flow_ttm_sets is None or len(cash_flow_ttm_sets) <= 0:
cash_flow_ttm_sets = pd.DataFrame({'security_code': [], 'net_operate_cash_flow': []})
for column in columns:
if column in list(cash_flow_ttm_sets.keys()):
cash_flow_ttm_sets = cash_flow_ttm_sets.drop(column, axis=1)
cash_flow_ttm_sets = cash_flow_ttm_sets.rename(columns={
            'net_operate_cash_flow': 'net_operate_cash_flow',  # net cash flow from operating activities
})
indicator_ttm_sets = engine.fetch_fundamentals_pit_extend_company_id(FinIndicatorTTM,
[FinIndicatorTTM.np_cut,
], dates=[trade_date_1y])
        if indicator_ttm_sets is None or len(indicator_ttm_sets) <= 0:
indicator_ttm_sets = pd.DataFrame({'security_code': [], 'np_cut': []})
for column in columns:
if column in list(indicator_ttm_sets.keys()):
indicator_ttm_sets = indicator_ttm_sets.drop(column, axis=1)
income_ttm_sets = engine.fetch_fundamentals_pit_extend_company_id(FinIncomeTTM,
[FinIncomeTTM.net_profit,
FinIncomeTTM.np_parent_company_owners,
FinIncomeTTM.total_operating_revenue,
FinIncomeTTM.operating_revenue,
FinIncomeTTM.total_profit,
], dates=[trade_date])
        if income_ttm_sets is None or len(income_ttm_sets) <= 0:
income_ttm_sets = pd.DataFrame(
{'security_code': [], 'net_profit': [], 'np_parent_company_owners': [], 'total_operating_revenue': [],
'operating_revenue': [], 'total_profit': []})
for column in columns:
if column in list(income_ttm_sets.keys()):
income_ttm_sets = income_ttm_sets.drop(column, axis=1)
income_ttm_sets = income_ttm_sets.rename(columns={
            'total_profit': 'total_profit',  # total profit (TTM)
            'net_profit': 'net_profit',  # net profit
            'np_parent_company_owners': 'np_parent_company_owners',  # net profit attributable to owners of the parent company
            'total_operating_revenue': 'total_operating_revenue',  # total operating revenue
            'operating_revenue': 'operating_revenue',  # operating revenue
})
income_ttm_sets_3 = engine.fetch_fundamentals_pit_extend_company_id(FinIncomeTTM,
[FinIncomeTTM.np_parent_company_owners,
], dates=[trade_date_3y])
        if income_ttm_sets_3 is None or len(income_ttm_sets_3) <= 0:
income_ttm_sets_3 = pd.DataFrame({'security_code': [], 'np_parent_company_owners': []})
for column in columns:
if column in list(income_ttm_sets_3.keys()):
income_ttm_sets_3 = income_ttm_sets_3.drop(column, axis=1)
income_ttm_sets_3 = income_ttm_sets_3.rename(columns={
            'np_parent_company_owners': 'np_parent_company_owners_3',  # net profit attributable to owners of the parent company
})
income_ttm_sets_5 = engine.fetch_fundamentals_pit_extend_company_id(FinIncomeTTM,
[FinIncomeTTM.np_parent_company_owners,
], dates=[trade_date_5y])
        if income_ttm_sets_5 is None or len(income_ttm_sets_5) <= 0:
income_ttm_sets_5 = pd.DataFrame({'security_code': [], 'np_parent_company_owners': []})
for column in columns:
if column in list(income_ttm_sets_5.keys()):
income_ttm_sets_5 = income_ttm_sets_5.drop(column, axis=1)
income_ttm_sets_5 = income_ttm_sets_5.rename(columns={
            'np_parent_company_owners': 'np_parent_company_owners_5',  # net profit attributable to owners of the parent company
})
valuation_ttm_sets = pd.merge(cash_flow_ttm_sets, income_ttm_sets, how='outer', on='security_code')
valuation_ttm_sets = pd.merge(valuation_ttm_sets, indicator_ttm_sets, how='outer', on='security_code')
valuation_ttm_sets = pd.merge(valuation_ttm_sets, income_ttm_sets_3, how='outer', on='security_code')
valuation_ttm_sets = pd.merge(valuation_ttm_sets, income_ttm_sets_5, how='outer', on='security_code')
        # circulating market cap, total market cap
column = ['trade_date']
sk_daily_price_sets = get_fundamentals(query(SkDailyPrice.security_code,
SkDailyPrice.trade_date,
SkDailyPrice.tot_market_cap,
SkDailyPrice.circulating_market_cap
).filter(SkDailyPrice.trade_date.in_([trade_date])))
        if sk_daily_price_sets is None or len(sk_daily_price_sets) <= 0:
sk_daily_price_sets = pd.DataFrame({'security_code': [],
'tot_market_cap': [],
'circulating_market_cap': []})
for col in column:
if col in list(sk_daily_price_sets.keys()):
sk_daily_price_sets = sk_daily_price_sets.drop(col, axis=1)
# PS, PE, PB, PCF
column = ['trade_date']
valuation_sets = get_fundamentals(query(Valuation.security_code,
Valuation.trade_date,
Valuation.pe,
Valuation.ps,
Valuation.pb,
Valuation.pcf,
).filter(Valuation.trade_date.in_([trade_date])))
        if valuation_sets is None or len(valuation_sets) <= 0:
valuation_sets = pd.DataFrame({'security_code': [],
'pe': [],
'ps': [],
'pb': [],
'pcf': []})
for col in column:
if col in list(valuation_sets.keys()):
valuation_sets = valuation_sets.drop(col, axis=1)
trade_date_6m = self.get_trade_date(trade_date, 1, 180)
trade_date_3m = self.get_trade_date(trade_date, 1, 90)
# trade_date_2m = self.get_trade_date(trade_date, 1, 60)
trade_date_1m = self.get_trade_date(trade_date, 1, 30)
pe_set = get_fundamentals(query(Valuation.security_code,
Valuation.trade_date,
Valuation.pe,
).filter(Valuation.trade_date.in_([trade_date])))
        if pe_set is None or len(pe_set) <= 0:
pe_set = pd.DataFrame({'security_code': [], 'pe': []})
for col in column:
if col in list(pe_set.keys()):
pe_set = pe_set.drop(col, axis=1)
pe_sets_6m = get_fundamentals(query(Valuation.security_code,
Valuation.trade_date,
Valuation.pe)
.filter(Valuation.trade_date.between(trade_date_6m, trade_date)))
        if pe_sets_6m is None or len(pe_sets_6m) <= 0:
pe_sets_6m = pd.DataFrame({'security_code': [], 'pe': []})
for col in column:
if col in list(pe_sets_6m.keys()):
pe_sets_6m = pe_sets_6m.drop(col, axis=1)
pe_sets_6m = pe_sets_6m.groupby('security_code').mean().rename(columns={'pe': 'pe_mean_6m'})
pe_sets_3m = get_fundamentals(query(Valuation.security_code,
Valuation.trade_date,
Valuation.pe)
.filter(Valuation.trade_date.between(trade_date_3m, trade_date)))
        if pe_sets_3m is None or len(pe_sets_3m) <= 0:
pe_sets_3m = pd.DataFrame({'security_code': [], 'pe': []})
for col in column:
if col in list(pe_sets_3m.keys()):
pe_sets_3m = pe_sets_3m.drop(col, axis=1)
pe_sets_3m = pe_sets_3m.groupby('security_code').mean().rename(columns={'pe': 'pe_mean_3m'})
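        # Note: despite the "_2m" name, the next block averages PE over the 1-month
        # window (trade_date_1m); the 2-month cutoff above is left commented out.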
pe_sets_2m = get_fundamentals(query(Valuation.security_code,
Valuation.trade_date,
Valuation.pe)
.filter(Valuation.trade_date.between(trade_date_1m, trade_date)))
        if pe_sets_2m is None or len(pe_sets_2m) <= 0:
pe_sets_2m = pd.DataFrame({'security_code': [], 'pe': []})
for col in column:
if col in list(pe_sets_2m.keys()):
pe_sets_2m = pe_sets_2m.drop(col, axis=1)
pe_sets_2m = pe_sets_2m.groupby('security_code').mean().rename(columns={'pe': 'pe_mean_1m'})
pe_sets_1y = get_fundamentals(query(Valuation.security_code,
Valuation.trade_date,
Valuation.pe)
.filter(Valuation.trade_date.between(trade_date_1y, trade_date)))
        if pe_sets_1y is None or len(pe_sets_1y) <= 0:
pe_sets_1y = pd.DataFrame({'security_code': [], 'pe': []})
for col in column:
if col in list(pe_sets_1y.keys()):
pe_sets_1y = pe_sets_1y.drop(col, axis=1)
pe_sets_1y = pe_sets_1y.groupby('security_code').mean().rename(columns={'pe': 'pe_mean_1y'})
pe_sets = pd.merge(pe_sets_6m, pe_sets_3m, how='outer', on='security_code')
pe_sets = pd.merge(pe_sets, pe_sets_2m, how='outer', on='security_code')
pe_sets = pd.merge(pe_sets, pe_sets_1y, how='outer', on='security_code')
pe_sets = pd.merge(pe_sets, pe_set, how='outer', on='security_code')
industry_set = ['801010', '801020', '801030', '801040', '801050', '801080', '801110', '801120', '801130',
'801140', '801150', '801160', '801170', '801180', '801200', '801210', '801230', '801710',
'801720', '801730', '801740', '801750', '801760', '801770', '801780', '801790', '801880',
'801890']
column_sw = ['trade_date', 'symbol', 'company_id']
sw_indu = get_fundamentals_extend_internal(query(Industry.trade_date,
Industry.symbol,
Industry.isymbol)
.filter(Industry.trade_date.in_([trade_date])),
internal_type='symbol')
for col in column_sw:
if col in list(sw_indu.keys()):
sw_indu = sw_indu.drop(col, axis=1)
sw_indu = sw_indu[sw_indu['isymbol'].isin(industry_set)]
# valuation_sets = pd.merge(valuation_sets, indicator_sets, how='outer', on='security_code')
valuation_sets = pd.merge(valuation_sets, balance_report, how='outer', on='security_code')
valuation_sets =
|
pd.merge(valuation_sets, valuation_mrq, how='outer', on='security_code')
|
pandas.merge
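# Added illustration (not part of the dataset row above): the same outer-merge
# pattern on toy frames, joining on the 'security_code' key used throughout.
import pandas as pd
left = pd.DataFrame({'security_code': ['A', 'B'], 'pe': [10.0, 20.0]})
right = pd.DataFrame({'security_code': ['B', 'C'], 'pb': [1.5, 2.0]})
merged = pd.merge(left, right, how='outer', on='security_code')  # rows for A, B and C; missing values become NaN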
|
'''
Class for a bipartite network
'''
from pandas.core.indexes.base import InvalidIndexError
from tqdm.auto import tqdm
import numpy as np
# from numpy_groupies.aggregate_numpy import aggregate
import pandas as pd
from pandas import DataFrame, Int64Dtype
# from scipy.sparse.csgraph import connected_components
import warnings
import bipartitepandas as bpd
from bipartitepandas import col_order, update_dict, to_list, logger_init, col_dict_optional_cols, aggregate_transform, ParamsDict
import igraph as ig
def recollapse_loop(force=False):
'''
    Decorator function that accounts for issues with selecting ids under particular restrictions for collapsed data. In particular, looking at a restricted set of observations can require recollapsing data, which can then change which observations meet the given restrictions. This function loops until stability is achieved.
Arguments:
force (bool): if True, force loop for non-collapsed data
'''
def recollapse_loop_inner(func):
def recollapse_loop_inner_inner(*args, **kwargs):
# Do function
self = args[0]
frame = func(*args, **kwargs)
if force or isinstance(self, (bpd.BipartiteLongCollapsed, bpd.BipartiteEventStudyCollapsed)):
kwargs['copy'] = False
if len(frame) != len(self):
# If the frame changes, we have to re-loop until stability
frame_prev = frame
frame = func(frame_prev, *args[1:], **kwargs)
while len(frame) != len(frame_prev):
frame_prev = frame
frame = func(frame_prev, *args[1:], **kwargs)
return frame
return recollapse_loop_inner_inner
return recollapse_loop_inner
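# Illustrative sketch (added, not part of the original module): how recollapse_loop
# wraps a filtering method and re-runs it until the result stops shrinking.
# _DemoFrame and _drop_small are hypothetical stand-ins, not bipartitepandas API.
class _DemoFrame(list):
    @recollapse_loop(force=True)
    def _drop_small(self, threshold=2, copy=True):
        # keep only values at or above the threshold
        return _DemoFrame(x for x in self if x >= threshold)
# _DemoFrame([1, 2, 3])._drop_small(threshold=2) evaluates to [2, 3] once the loop stabilizes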
# Define default parameter dictionaries
_clean_params_default = ParamsDict({
'connectedness': ('connected', 'set', ['connected', 'leave_one_observation_out', 'leave_one_firm_out', None],
'''
(default='connected') When computing largest connected set of firms: if 'connected', keep observations in the largest connected set of firms; if 'leave_one_observation_out', keep observations in the largest leave-one-observation-out connected set; if 'leave_one_firm_out', keep observations in the largest leave-one-firm-out connected set; if None, keep all observations.
'''),
'component_size_variable': ('firms', 'set', ['len', 'length', 'firms', 'workers', 'stayers', 'movers'],
'''
(default='firms') How to determine largest connected component. Options are 'len'/'length' (length of frame), 'firms' (number of unique firms), 'workers' (number of unique workers), 'stayers' (number of unique stayers), and 'movers' (number of unique movers).
'''),
'i_t_how': ('max', 'set', ['max', 'sum', 'mean'],
'''
(default='max') When dropping i-t duplicates: if 'max', keep max paying job; if 'sum', sum over duplicate worker-firm-year observations, then take the highest paying worker-firm sum; if 'mean', average over duplicate worker-firm-year observations, then take the highest paying worker-firm average. Note that if multiple time and/or firm columns are included (as in event study format), then data is converted to long, cleaned, then reconverted to its original format.
'''),
'drop_multiples': (False, 'type', bool,
'''
(default=False) If True, rather than collapsing over spells, drop any spells with multiple observations (this is for computational efficiency when re-collapsing data for biconnected components).
'''),
'is_sorted': (False, 'type', bool,
'''
(default=False) If False, dataframe will be sorted by i (and t, if included). Set to True if already sorted.
'''),
'force': (True, 'type', bool,
'''
(default=True) If True, force all cleaning methods to run; much faster if set to False.
'''),
'copy': (True, 'type', bool,
'''
(default=True) If False, avoid copying data when possible.
''')
})
def clean_params(update_dict={}):
'''
Dictionary of default clean_params.
Arguments:
update_dict (dict): user parameter values
Returns:
(ParamsDict) dictionary of clean_params
'''
new_dict = _clean_params_default.copy()
new_dict.update(update_dict)
return new_dict
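# Usage sketch (added, not part of the original module): override selected cleaning
# defaults while keeping the rest; the keys and values follow the options documented above.
_example_clean_params = clean_params({'connectedness': 'leave_one_firm_out', 'copy': False})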
_cluster_params_default = ParamsDict({
'measures': (bpd.measures.cdfs(), 'list_of_type', (bpd.measures.cdfs, bpd.measures.moments),
'''
(default=bpd.measures.cdfs()) How to compute measures for clustering. Options can be seen in bipartitepandas.measures.
'''),
'grouping': (bpd.grouping.kmeans(), 'type', (bpd.grouping.kmeans, bpd.grouping.quantiles),
'''
(default=bpd.grouping.kmeans()) How to group firms based on measures. Options can be seen in bipartitepandas.grouping.
'''),
'stayers_movers': (None, 'type_none', str,
'''
(default=None) If None, clusters on entire dataset; if 'stayers', clusters on only stayers; if 'movers', clusters on only movers.
'''),
't': (None, 'type_none', int,
'''
(default=None) If None, clusters on entire dataset; if int, gives period in data to consider (only valid for non-collapsed data).
'''),
'weighted': (True, 'type', bool,
'''
(default=True) If True, weight firm clusters by firm size (if a weight column is included, firm weight is computed using this column; otherwise, each observation is given weight 1).
'''),
'dropna': (False, 'type', bool,
'''
(default=False) If True, drop observations where firms aren't clustered; if False, keep all observations.
'''),
'clean_params': (None, 'type_none', bpd.ParamsDict,
'''
(default=None) Dictionary of parameters for cleaning. This is used when observations get dropped because they were not clustered. Default is None, which sets connectedness to be the connectedness measure previously used. Run bpd.clean_params().describe_all() for descriptions of all valid parameters.
'''),
'is_sorted': (False, 'type', bool,
'''
(default=False) For event study format. If False, dataframe will be sorted by i (and t, if included). Set to True if already sorted.
'''),
'copy': (True, 'type', bool,
'''
(default=True) If False, avoid copy.
''')
})
def cluster_params(update_dict={}):
'''
Dictionary of default cluster_params.
Arguments:
update_dict (dict): user parameter values
Returns:
(ParamsDict) dictionary of cluster_params
'''
new_dict = _cluster_params_default.copy()
new_dict.update(update_dict)
return new_dict
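# Usage sketch (added, not part of the original module): cluster only movers and
# skip firm-size weighting; the keys follow the options documented above.
_example_cluster_params = cluster_params({'stayers_movers': 'movers', 'weighted': False})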
class BipartiteBase(DataFrame):
'''
Base class for BipartitePandas, where BipartitePandas gives a bipartite network of firms and workers. Contains generalized methods. Inherits from DataFrame.
Arguments:
*args: arguments for Pandas DataFrame
columns_req (list): required columns (only put general column names for joint columns, e.g. put 'j' instead of 'j1', 'j2'; then put the joint columns in reference_dict)
columns_opt (list): optional columns (only put general column names for joint columns, e.g. put 'g' instead of 'g1', 'g2'; then put the joint columns in reference_dict)
columns_contig (dictionary): columns requiring contiguous ids linked to boolean of whether those ids are contiguous, or None if column(s) not included, e.g. {'i': False, 'j': False, 'g': None} (only put general column names for joint columns)
reference_dict (dict): clarify which columns are associated with a general column name, e.g. {'i': 'i', 'j': ['j1', 'j2']}
col_dtype_dict (dict): link column to datatype
col_dict (dict or None): make data columns readable. Keep None if column names already correct
include_id_reference_dict (bool): if True, create dictionary of Pandas dataframes linking original id values to contiguous id values
log (bool): if True, will create log file(s)
**kwargs: keyword arguments for Pandas DataFrame
'''
# Attributes, required for Pandas inheritance
_metadata = ['col_dict', 'reference_dict', 'id_reference_dict', 'col_dtype_dict', 'columns_req', 'columns_opt', 'columns_contig', 'default_cluster', 'dtype_dict', 'default_clean', 'connectedness', 'no_na', 'no_duplicates', 'i_t_unique', '_log_on_indicator', '_level_fn_dict']
def __init__(self, *args, columns_req=[], columns_opt=[], columns_contig=[], reference_dict={}, col_dtype_dict={}, col_dict=None, include_id_reference_dict=False, log=True, **kwargs):
# Initialize DataFrame
super().__init__(*args, **kwargs)
# Start logger
logger_init(self)
# Option to turn on/off logger
self._log_on_indicator = log
# self.log('initializing BipartiteBase object', level='info')
if len(args) > 0 and isinstance(args[0], BipartiteBase):
# Note that isinstance works for subclasses
self._set_attributes(args[0], include_id_reference_dict)
else:
self.columns_req = ['i', 'j', 'y'] + columns_req
self.columns_opt = ['g', 'm'] + columns_opt
self.columns_contig = update_dict({'i': False, 'j': False, 'g': None}, columns_contig)
self.reference_dict = update_dict({'i': 'i', 'm': 'm'}, reference_dict)
self._reset_id_reference_dict(include_id_reference_dict) # Link original id values to contiguous id values
self.col_dtype_dict = update_dict({'i': 'int', 'j': 'int', 'y': 'float', 't': 'int', 'g': 'int', 'm': 'int'}, col_dtype_dict)
default_col_dict = {}
for col in to_list(self.columns_req):
for subcol in to_list(self.reference_dict[col]):
default_col_dict[subcol] = subcol
for col in to_list(self.columns_opt):
for subcol in to_list(self.reference_dict[col]):
default_col_dict[subcol] = None
# Create self.col_dict
self.col_dict = col_dict_optional_cols(default_col_dict, col_dict, self.columns, optional_cols=[self.reference_dict[col] for col in self.columns_opt])
# Set attributes
self._reset_attributes()
# Dictionary of logger functions based on level
self._level_fn_dict = {
'debug': self.logger.debug,
'info': self.logger.info,
'warning': self.logger.warning,
'error': self.logger.error,
'critical': self.logger.critical
}
self.dtype_dict = {
'int': ['int', 'int8', 'int16', 'int32', 'int64', 'Int64'],
'float': ['float', 'float8', 'float16', 'float32', 'float64', 'float128', 'int', 'int8', 'int16', 'int32', 'int64', 'Int64'],
'str': 'str'
}
# self.log('BipartiteBase object initialized', level='info')
@property
def _constructor(self):
'''
For inheritance from Pandas.
'''
return BipartiteBase
def copy(self):
'''
Return copy of self.
Returns:
bdf_copy (BipartiteBase): copy of instance
'''
df_copy = DataFrame(self, copy=True)
# Set logging on/off depending on current selection
bdf_copy = self._constructor(df_copy, log=self._log_on_indicator)
# This copies attribute dictionaries, default copy does not
bdf_copy._set_attributes(self)
return bdf_copy
def log_on(self, on=True):
'''
Toggle logger on or off.
Arguments:
on (bool): if True, turn logger on; if False, turn logger off
'''
self._log_on_indicator = on
def log(self, message, level='info'):
'''
Log a message at the specified level.
Arguments:
message (str): message to log
level (str): logger level. Options, in increasing severity, are 'debug', 'info', 'warning', 'error', and 'critical'.
'''
if self._log_on_indicator:
# Log message
self._level_fn_dict[level](message)
def summary(self):
'''
Print summary statistics. This uses class attributes. To run a diagnostic to verify these values, run `.diagnostic()`.
'''
ret_str = ''
y = self.loc[:, self.reference_dict['y']].to_numpy()
mean_wage = np.mean(y)
median_wage = np.median(y)
max_wage = np.max(y)
min_wage = np.min(y)
var_wage = np.var(y)
ret_str += 'format: {}\n'.format(type(self).__name__)
ret_str += 'number of workers: {}\n'.format(self.n_workers())
ret_str += 'number of firms: {}\n'.format(self.n_firms())
ret_str += 'number of observations: {}\n'.format(len(self))
ret_str += 'mean wage: {}\n'.format(mean_wage)
ret_str += 'median wage: {}\n'.format(median_wage)
ret_str += 'min wage: {}\n'.format(min_wage)
ret_str += 'max wage: {}\n'.format(max_wage)
ret_str += 'var(wage): {}\n'.format(var_wage)
ret_str += 'no NaN values: {}\n'.format(self.no_na)
ret_str += 'no duplicates: {}\n'.format(self.no_duplicates)
ret_str += 'i-t (worker-year) observations unique (None if t column(s) not included): {}\n'.format(self.i_t_unique)
for contig_col, is_contig in self.columns_contig.items():
ret_str += 'contiguous {} ids (None if not included): {}\n'.format(contig_col, is_contig)
ret_str += 'connectedness (None if ignoring connectedness): {}'.format(self.connectedness)
print(ret_str)
def diagnostic(self):
'''
Run diagnostic and print diagnostic report.
'''
ret_str = '----- General Diagnostic -----\n'
##### Sorted by i (and t, if included) #####
sort_order = ['i']
if self._col_included('t'):
# If t column
sort_order.append(to_list(self.reference_dict['t'])[0])
is_sorted = (self.loc[:, sort_order] == self.loc[:, sort_order].sort_values(sort_order)).to_numpy().all()
ret_str += 'sorted by i (and t, if included): {}\n'.format(is_sorted)
##### No NaN values #####
# Source: https://stackoverflow.com/a/29530601/17333120
no_na = (not self.isnull().to_numpy().any())
ret_str += 'no NaN values: {}\n'.format(no_na)
##### No duplicates #####
# https://stackoverflow.com/a/50243108/17333120
no_duplicates = (not self.duplicated().any())
ret_str += 'no duplicates: {}\n'.format(no_duplicates)
##### i-t unique #####
no_i_t_duplicates = (not self.duplicated(subset=sort_order).any())
ret_str += 'i-t (worker-year) observations unique (if t column(s) not included, then i observations unique): {}\n'.format(no_i_t_duplicates)
##### Contiguous ids #####
for contig_col in self.columns_contig.keys():
if self._col_included(contig_col):
contig_ids = self.unique_ids(contig_col)
is_contig = (len(contig_ids) == (max(contig_ids) + 1))
ret_str += 'contiguous {} ids (None if not included): {}\n'.format(contig_col, is_contig)
else:
ret_str += 'contiguous {} ids (None if not included): {}\n'.format(contig_col, None)
##### Connectedness #####
is_connected_dict = {
None: lambda : None,
'connected': lambda : self._construct_graph(self.connectedness).is_connected(),
'leave_one_observation_out': lambda: (len(self) == len(self._conset(connectedness=self.connectedness))),
'leave_one_firm_out': lambda: (len(self) == len(self._conset(connectedness=self.connectedness)))
}
is_connected = is_connected_dict[self.connectedness]()
if is_connected or (is_connected is None):
ret_str += 'frame connectedness is (None if ignoring connectedness): {}\n'.format(self.connectedness)
else:
ret_str += 'frame failed connectedness: {}\n'.format(self.connectedness)
if self._col_included('m'):
##### m column #####
m_correct = (self.loc[:, 'm'] == self.gen_m(force=True).loc[:, 'm']).to_numpy().all()
ret_str += "'m' column correct (None if not included): {}\n".format(m_correct)
else:
ret_str += "'m' column correct (None if not included): {}".format(None)
print(ret_str)
def unique_ids(self, id_col):
'''
Unique ids in column.
Arguments:
id_col (str): column to check ids ('i', 'j', or 'g'). Use general column names for joint columns, e.g. put 'j' instead of 'j1', 'j2'
Returns:
(NumPy Array): unique ids
'''
id_lst = []
for id_subcol in to_list(self.reference_dict[id_col]):
id_lst += list(self.loc[:, id_subcol].unique())
return np.array(list(set(id_lst)))
def n_unique_ids(self, id_col):
'''
Number of unique ids in column.
Arguments:
id_col (str): column to check ids ('i', 'j', or 'g'). Use general column names for joint columns, e.g. put 'j' instead of 'j1', 'j2'
Returns:
(int): number of unique ids
'''
return len(self.unique_ids(id_col))
def n_workers(self):
'''
Get the number of unique workers.
Returns:
(int): number of unique workers
'''
return self.loc[:, 'i'].nunique()
def n_firms(self):
'''
Get the number of unique firms.
Returns:
(int): number of unique firms
'''
return self.n_unique_ids('j')
def n_clusters(self):
'''
Get the number of unique clusters.
Returns:
(int or None): number of unique clusters, None if not clustered
'''
if not self._col_included('g'): # If cluster column not in dataframe
return None
return self.n_unique_ids('g')
def original_ids(self, copy=True):
'''
Return self merged with original column ids.
Arguments:
copy (bool): if False, avoid copy
Returns:
(BipartiteBase or None): copy of self merged with original column ids, or None if id_reference_dict is empty
'''
frame = pd.DataFrame(self, copy=copy)
if self.id_reference_dict:
for id_col, reference_df in self.id_reference_dict.items():
if len(reference_df) > 0: # Make sure non-empty
for id_subcol in to_list(self.reference_dict[id_col]):
try:
frame = frame.merge(reference_df.loc[:, ['original_ids', 'adjusted_ids_' + str(len(reference_df.columns) - 1)]].rename({'original_ids': 'original_' + id_subcol, 'adjusted_ids_' + str(len(reference_df.columns) - 1): id_subcol}, axis=1), how='left', on=id_subcol)
except TypeError: # Int64 error with NaNs
frame.loc[:, id_col] = frame.loc[:, id_col].astype('Int64', copy=False)
frame = frame.merge(reference_df.loc[:, ['original_ids', 'adjusted_ids_' + str(len(reference_df.columns) - 1)]].rename({'original_ids': 'original_' + id_subcol, 'adjusted_ids_' + str(len(reference_df.columns) - 1): id_subcol}, axis=1), how='left', on=id_subcol)
# else:
# # If no changes, just make original_id be the same as the current id
# for id_subcol in to_list(self.reference_dict[id_col]):
# frame['original_' + id_subcol] = frame[id_subcol]
return frame
else:
warnings.warn('id_reference_dict is empty. Either your id columns are already correct, or you did not specify `include_id_reference_dict=True` when initializing your BipartitePandas object')
return None
def _set_attributes(self, frame, no_dict=False, include_id_reference_dict=False):
'''
Set class attributes to equal those of another BipartitePandas object.
Arguments:
frame (BipartitePandas): BipartitePandas object whose attributes to use
no_dict (bool): if True, only set booleans, no dictionaries
include_id_reference_dict (bool): if True, create dictionary of Pandas dataframes linking original id values to contiguous id values
'''
# Dictionaries
if not no_dict:
self.columns_req = frame.columns_req.copy()
self.columns_opt = frame.columns_opt.copy()
self.reference_dict = frame.reference_dict.copy()
self.col_dtype_dict = frame.col_dtype_dict.copy()
self.col_dict = frame.col_dict.copy()
self.columns_contig = frame.columns_contig.copy() # Required, even if no_dict
if frame.id_reference_dict:
self.id_reference_dict = {}
# Must do a deep copy
for id_col, reference_df in frame.id_reference_dict.items():
self.id_reference_dict[id_col] = reference_df.copy()
else:
# This is if the original dataframe DIDN'T have an id_reference_dict (but the new dataframe may or may not)
self._reset_id_reference_dict(include_id_reference_dict)
# # Logger
# self.logger = frame.logger
# Booleans
self.connectedness = frame.connectedness # If False, not connected; if 'connected', all observations are in the largest connected set of firms; if 'leave_one_observation_out', observations are in the largest leave-one-observation-out connected set; if 'leave_one_firm_out', observations are in the largest leave-one-firm-out connected set; if None, connectedness ignored
self.no_na = frame.no_na # If True, no NaN observations in the data
self.no_duplicates = frame.no_duplicates # If True, no duplicate rows in the data
self.i_t_unique = frame.i_t_unique # If True, each worker has at most one observation per period
def _reset_attributes(self, columns_contig=True, connected=True, no_na=True, no_duplicates=True, i_t_unique=True):
'''
Reset class attributes conditions to be False/None.
Arguments:
columns_contig (bool): if True, reset self.columns_contig
connected (bool): if True, reset self.connectedness
no_na (bool): if True, reset self.no_na
no_duplicates (bool): if True, reset self.no_duplicates
i_t_unique (bool): if True, reset self.i_t_unique
Returns:
self (BipartiteBase): self with reset class attributes
'''
if columns_contig:
for contig_col in self.columns_contig.keys():
if self._col_included(contig_col):
self.columns_contig[contig_col] = False
else:
self.columns_contig[contig_col] = None
if connected:
self.connectedness = None # If False, not connected; if 'connected', all observations are in the largest connected set of firms; if 'leave_one_observation_out', observations are in the largest leave-one-observation-out connected set; if 'leave_one_firm_out', observations are in the largest leave-one-firm-out connected set; if None, connectedness ignored
if no_na:
self.no_na = False # If True, no NaN observations in the data
if no_duplicates:
self.no_duplicates = False # If True, no duplicate rows in the data
if i_t_unique:
self.i_t_unique = None # If True, each worker has at most one observation per period; if None, t column not included (set to False later in method if t column included)
# Verify whether period included
if self._col_included('t'):
self.i_t_unique = False
# logger_init(self)
return self
def _reset_id_reference_dict(self, include=False):
'''
Reset id_reference_dict.
Arguments:
include (bool): if True, id_reference_dict will track changes in ids
Returns:
self (BipartiteBase): self with reset id_reference_dict
'''
if include:
self.id_reference_dict = {id_col: pd.DataFrame() for id_col in self.reference_dict.keys()}
else:
self.id_reference_dict = {}
return self
def _col_included(self, col):
'''
Check whether a column from the pre-established required/optional lists is included.
Arguments:
col (str): column to check. Use general column names for joint columns, e.g. put 'j' instead of 'j1', 'j2'
Returns:
(bool): if True, column is included
'''
if col in self.columns_req + self.columns_opt:
for subcol in to_list(self.reference_dict[col]):
if self.col_dict[subcol] is None:
return False
return True
return False
def _included_cols(self, flat=False):
'''
Get all columns included from the pre-established required/optional lists.
Arguments:
flat (bool): if False, uses general column names for joint columns, e.g. returns 'j' instead of 'j1', 'j2'.
Returns:
all_cols (list): included columns
'''
all_cols = []
for col in self.columns_req + self.columns_opt:
include = True
for subcol in to_list(self.reference_dict[col]):
if self.col_dict[subcol] is None:
include = False
break
if include:
if flat:
all_cols += to_list(self.reference_dict[col])
else:
all_cols.append(col)
return all_cols
def drop(self, indices, axis=0, inplace=False, allow_required=False):
'''
Drop indices along axis.
Arguments:
indices (int or str, optionally as a list): row(s) or column(s) to drop. For columns, use general column names for joint columns, e.g. put 'g' instead of 'g1', 'g2'. Only optional columns may be dropped
axis (int): 0 to drop rows, 1 to drop columns
inplace (bool): if True, modify in-place
allow_required (bool): if True, allow to drop required columns
Returns:
frame (BipartiteBase): BipartiteBase with dropped indices
'''
frame = self
if axis == 1:
for col in to_list(indices):
if col in frame.columns or col in frame.columns_req or col in frame.columns_opt:
if col in frame.columns_opt: # If column optional
for subcol in to_list(frame.reference_dict[col]):
if inplace:
DataFrame.drop(frame, subcol, axis=1, inplace=True)
else:
frame = DataFrame.drop(frame, subcol, axis=1, inplace=False)
frame.col_dict[subcol] = None
if col in frame.columns_contig.keys(): # If column contiguous
frame.columns_contig[col] = None
if frame.id_reference_dict: # If id_reference_dict has been initialized
frame.id_reference_dict[col] = pd.DataFrame()
elif col not in frame._included_cols() and col not in frame._included_cols(flat=True): # If column is not pre-established
if inplace:
DataFrame.drop(frame, col, axis=1, inplace=True)
else:
frame = DataFrame.drop(frame, col, axis=1, inplace=False)
else:
if not allow_required:
warnings.warn("{} is either (a) a required column and cannot be dropped or (b) a subcolumn that can be dropped, but only by specifying the general column name (e.g. use 'g' instead of 'g1' or 'g2')".format(col))
else:
if inplace:
DataFrame.drop(frame, col, axis=1, inplace=True)
else:
frame = DataFrame.drop(frame, col, axis=1, inplace=False)
else:
warnings.warn('{} is not in data columns'.format(col))
elif axis == 0:
if inplace:
DataFrame.drop(frame, indices, axis=0, inplace=True)
else:
frame = DataFrame.drop(frame, indices, axis=0, inplace=False)
frame._reset_attributes()
# frame.clean_data({'connectedness': frame.connectedness})
return frame
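    # Usage note (added): only optional columns can be dropped by their general name,
    # e.g. frame.drop('g', axis=1) removes the stored cluster column(s); dropping a
    # required column such as 'i' only raises a warning unless allow_required=True.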
def rename(self, rename_dict, inplace=True):
'''
Rename a column.
Arguments:
rename_dict (dict): key is current column name, value is new column name. Use general column names for joint columns, e.g. put 'g' instead of 'g1', 'g2'. Only optional columns may be renamed
inplace (bool): if True, modify in-place
Returns:
frame (BipartiteBase): BipartiteBase with renamed columns
'''
if inplace:
frame = self
else:
frame = self.copy()
for col_cur, col_new in rename_dict.items():
if col_cur in frame.columns or col_cur in frame.columns_req or col_cur in frame.columns_opt:
if col_cur in self.columns_opt: # If column optional
if len(to_list(self.reference_dict[col_cur])) > 1:
for i, subcol in enumerate(to_list(self.reference_dict[col_cur])):
DataFrame.rename(frame, {subcol: col_new + str(i + 1)}, axis=1, inplace=True)
frame.col_dict[subcol] = None
else:
DataFrame.rename(frame, {col_cur: col_new}, axis=1, inplace=True)
frame.col_dict[col_cur] = None
if col_cur in frame.columns_contig.keys(): # If column contiguous
frame.columns_contig[col_cur] = None
if frame.id_reference_dict: # If id_reference_dict has been initialized
frame.id_reference_dict[col_cur] = pd.DataFrame()
elif col_cur not in frame._included_cols() and col_cur not in frame._included_cols(flat=True): # If column is not pre-established
|
DataFrame.rename(frame, {col_cur: col_new}, axis=1, inplace=True)
|
pandas.DataFrame.rename
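# Added illustration (not part of the dataset row above): DataFrame.rename with a
# mapping and axis=1, the same call pattern as the completion above.
import pandas as pd
df = pd.DataFrame({'g1': [0, 1], 'g2': [1, 0]})
df = df.rename({'g1': 'cluster1', 'g2': 'cluster2'}, axis=1)  # renames the two columns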
|
from sklearn import metrics
from sklearn.metrics import average_precision_score
from sklearn.metrics import roc_auc_score
from sklearn.metrics import matthews_corrcoef
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import precision_recall_fscore_support
from sklearn.metrics import classification_report
from sklearn.metrics import cohen_kappa_score
from sklearn.metrics import roc_curve, auc
from sklearn.ensemble import BaggingClassifier
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import matplotlib.backends.backend_pdf
import seaborn as sns
# import rpy2
# import rpy2.robjects as robjects # robjects as python objects
# import rpy2.robjects.packages as rpackages # helps download and import r packages
import os
import pandas as pd
import numpy as np
from collections import OrderedDict
import pickle
import MLPipeline
import AppConfig as app_config
DATA_FLD_NAME = app_config.CLF_FLD_NAME
DATA_FILE_NAME_PRFX = app_config.CLF_FLD_PREFIX
# BAGGING_FLD_NAME = "bagging"
RESULTS_FLD_NAME = app_config.CLF_RESULTS_FLD_NAME
# figcount = 0
# Figureset = []
class Evaluation:
def __init__(self, ml_pipeline: MLPipeline):
self.ml_pipeline = ml_pipeline
self.jlogger = self.ml_pipeline.jlogger
self.figcount = 0
self.Figureset = []
def evaluate_and_save_results(self, model, fld_name):
x_train = self.ml_pipeline.x_train
y_train = self.ml_pipeline.y_train
x_test = self.ml_pipeline.x_test
y_test = self.ml_pipeline.y_test
fg_fld_name = os.path.basename(self.ml_pipeline.fg_clf_fld_path)
fld_path = self.ml_pipeline.job_data['job_data_path']
fld_path = os.path.join(*[fld_path, DATA_FLD_NAME, fg_fld_name, fld_name])
os.makedirs(fld_path, exist_ok=True)
clf_pkl_path = os.path.join(fld_path, DATA_FILE_NAME_PRFX + fld_name + ".pkl")
train_preds_path = os.path.join(fld_path, DATA_FILE_NAME_PRFX + "train_preds.csv")
test_preds_path = os.path.join(fld_path, DATA_FILE_NAME_PRFX + "test_preds.csv")
with open(clf_pkl_path, 'wb') as f:
pickle.dump(model, f)
y_train_preds = model.predict(x_train)
y_train_probas = model.predict_proba(x_train)
y_test_preds = model.predict(x_test)
y_test_probas = model.predict_proba(x_test)
df_train = pd.DataFrame([], columns=['Prediction', 'Ground Truth', 'Prob 0', 'Prob 1'])
df_train['Prediction'] = y_train_preds
df_train['Ground Truth'] = y_train
df_train['Prob 0'] = y_train_probas[:, 0]
df_train['Prob 1'] = y_train_probas[:, 1]
df_train.to_csv(train_preds_path, index=False)
df_test = pd.DataFrame([], columns=['Prediction', 'Ground Truth', 'Prob 0', 'Prob 1'])
df_test['Prediction'] = y_test_preds
df_test['Ground Truth'] = y_test
df_test['Prob 0'] = y_test_probas[:, 0]
df_test['Prob 1'] = y_test_probas[:, 1]
df_test.to_csv(test_preds_path, index=False)
self.save_results(fld_name, df_train, df_test)
def save_results(self, fld_name, df_train, df_test):
# global Figureset
y_train = df_train['Ground Truth']
y_train_preds = df_train['Prediction']
train_prob0 = df_train['Prob 0'].to_numpy()
train_prob1 = df_train['Prob 1'].to_numpy()
train_np_yprobas = np.c_[train_prob0, train_prob1]
y_test = df_test['Ground Truth']
y_test_preds = df_test['Prediction']
test_prob0 = df_test['Prob 0'].to_numpy()
test_prob1 = df_test['Prob 1'].to_numpy()
test_np_yprobas = np.c_[test_prob0, test_prob1]
train_fld_name = fld_name + "_train"
test_fld_name = fld_name + "_test"
self.save_all_model_plots(y_train_preds, y_train, train_np_yprobas, train_fld_name)
self.save_all_model_plots(y_test_preds, y_test, test_np_yprobas, test_fld_name)
self.save_plots_pdf(fld_name)
def save_plots_pdf(self, fld_name):
fg_fld_name = os.path.basename(self.ml_pipeline.fg_clf_fld_path)
fld_path = self.ml_pipeline.job_data['job_results_path']
fld_path = os.path.join(*[fld_path, fg_fld_name, RESULTS_FLD_NAME])
os.makedirs(fld_path, exist_ok=True)
pdf_file_path = os.path.join(fld_path, DATA_FILE_NAME_PRFX + fld_name + ".pdf")
self.plot_all_figures(pdf_file_path)
def save_all_model_plots(self, np_ypreds, np_ytrues, np_yprobas, title):
self.print_confusion_matrix(np_ytrues, np_ypreds, "Confusion Matrix - " + title)
#gt = np_ytrues.tolist()
#probs = np_yprobas[:, 1].tolist()
# fpr, tpr = self.get_smoothened_fpr_tpr_from_pROC(gt, probs)
# self.plot_r_smoothened_curve(fpr, tpr, "ROC Curve - " + title)
self.print_roc_curve(np_ytrues, np_ypreds, np_yprobas, "ROC Curve - " + title)
res = self.evaluate_model(np_ypreds, np_ytrues, np_yprobas)
self.jlogger.info("Evaluation of {} {}".format(title, res))
def evaluate_all_bagged_clf(self, bc, n, x_test, y_test):
res_list = []
iters = []
res_dict_keys = {}
for i in range(n):
if i % 100 == 0:
self.jlogger.debug("Completed Bagging Iter: " + str(i))
clf = bc.estimators_[i]
ypred = clf.predict(x_test)
yproba = clf.predict_proba(x_test)
res = self.evaluate_model(ypred, y_test, yproba)
res_list.append(list(res.values()))
res_dict_keys = list(res.keys())
iters.append(i + 1)
# print("-----------------------")
results = pd.DataFrame(res_list, columns=res_dict_keys)
return results
def evaluate_and_save_bagging_results(self, bc, n, x_test, y_test, title, fld_path):
# # resetting all figures
# self.figcount = 0
# self.Figureset = []
# evaluate aggregated bagging clf
bc_ypred = bc.predict(x_test)
bc_yproba = bc.predict_proba(x_test)
df_test = pd.DataFrame([], columns=['Prediction', 'Ground Truth', 'Prob 0', 'Prob 1'])
df_test['Prediction'] = bc_ypred
df_test['Ground Truth'] = y_test
df_test['Prob 0'] = bc_yproba[:, 0]
df_test['Prob 1'] = bc_yproba[:, 1]
test_preds_path = os.path.join(fld_path, DATA_FILE_NAME_PRFX + title + ".csv")
df_test.to_csv(test_preds_path, index=False)
self.save_all_model_plots(bc_ypred, y_test, bc_yproba, title)
# evaluate each of the bagged classifier
results = self.evaluate_all_bagged_clf(bc, n, x_test, y_test)
self.plot_box_plot_results(results, title)
# fg_fld_name = os.path.basename(self.ml_pipeline.fg_clf_fld_path)
# res_fld_path = self.ml_pipeline.job_data['job_results_path']
# res_fld_path = os.path.join(*[res_fld_path, fg_fld_name, RESULTS_FLD_NAME])
#
# pdf_file_path = os.path.join(res_fld_path, DATA_FILE_NAME_PRFX + title + ".pdf")
# self.plot_all_figures(pdf_file_path)
iter_results_csv_path = os.path.join(fld_path,
DATA_FILE_NAME_PRFX + title + " Iteration wise Evaluation Results.csv")
stats_results_csv_path = os.path.join(fld_path, DATA_FILE_NAME_PRFX + title + " Evaluation Stats.csv")
results.to_csv(iter_results_csv_path, float_format='%.4f')
results.describe().to_csv(stats_results_csv_path, float_format='%.4f')
def evaluate_bagging_model(self, clf, n, fld_name):
# resetting all figures
self.figcount = 0
self.Figureset = []
# convert to int in case float
n = int(n)
fg_fld_name = os.path.basename(self.ml_pipeline.fg_clf_fld_path)
fld_path = self.ml_pipeline.job_data['job_data_path']
fld_path = os.path.join(*[fld_path, DATA_FLD_NAME, fg_fld_name, fld_name])
os.makedirs(fld_path, exist_ok=True)
x_train = self.ml_pipeline.x_train
y_train = self.ml_pipeline.y_train
x_test = self.ml_pipeline.x_test
y_test = self.ml_pipeline.y_test
bc = BaggingClassifier(base_estimator=clf, n_estimators=n, bootstrap=True, random_state=42)
bc.fit(x_train, y_train)
clf_pkl_path = os.path.join(fld_path, DATA_FILE_NAME_PRFX + fld_name + ".pkl")
with open(clf_pkl_path, 'wb') as f:
pickle.dump(bc, f)
self.evaluate_and_save_bagging_results(bc, n, x_train, y_train, "Training - " + fld_name, fld_path)
self.evaluate_and_save_bagging_results(bc, n, x_test, y_test, "Testing - " + fld_name, fld_path)
fg_fld_name = os.path.basename(self.ml_pipeline.fg_clf_fld_path)
res_fld_path = self.ml_pipeline.job_data['job_results_path']
res_fld_path = os.path.join(*[res_fld_path, fg_fld_name, RESULTS_FLD_NAME])
pdf_file_path = os.path.join(res_fld_path, DATA_FILE_NAME_PRFX + fld_name + ".pdf")
self.plot_all_figures(pdf_file_path)
def evaluate_model(self, ytest_pred, ytest, ytest_probas):
"""
        This method evaluates a model on various metrics. The evaluation happens on a per-sample basis and not
        on an aggregated per-individual basis.
"""
prf = precision_recall_fscore_support(ytest, ytest_pred, average="macro")
accuracy = accuracy_score(ytest, ytest_pred)
mcc = matthews_corrcoef(ytest, ytest_pred)
tn, fp, fn, tp = confusion_matrix(ytest, ytest_pred).ravel()
specificity = tn / (tn + fp)
sensitivity = tp / (tp + fn)
kappa = cohen_kappa_score(ytest, ytest_pred)
fpr, tpr, thresholds = metrics.roc_curve(ytest, ytest_probas[:, 1])
aucroc = metrics.auc(fpr, tpr)
ap = average_precision_score(ytest, ytest_probas[:, 1])
res = OrderedDict()
res["Accuracy"] = accuracy
res["Precision"] = prf[0]
res["Recall"] = prf[1]
res["F1-Score"] = prf[2]
res["MCC"] = mcc
res["Specificity"] = specificity
res["Sensitivity"] = sensitivity
res["Kappa"] = kappa
res["AUCROC"] = aucroc
res["AP"] = ap
return res
def print_confusion_matrix(self, testlabel, y_pred, title_name):
# global figcount
# global Figureset
cf = confusion_matrix(testlabel, y_pred)
df_cm =
|
pd.DataFrame(cf, index=[0, 1], columns=[0, 1])
|
pandas.DataFrame
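# Added illustration (not part of the dataset row above): building the labelled
# confusion-matrix frame the way print_confusion_matrix does, on toy labels.
import pandas as pd
from sklearn.metrics import confusion_matrix
cf = confusion_matrix([0, 0, 1, 1], [0, 1, 1, 1])
df_cm = pd.DataFrame(cf, index=[0, 1], columns=[0, 1])  # rows are true classes, columns are predicted classes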
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# This file contains dummy data for the model unit tests
import numpy as np
import pandas as pd
AIR_FCST_LINEAR_95 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("1961-01-01 00:00:00"),
1: pd.Timestamp("1961-02-01 00:00:00"),
2: pd.Timestamp("1961-03-01 00:00:00"),
3: pd.Timestamp("1961-04-01 00:00:00"),
4: pd.Timestamp("1961-05-01 00:00:00"),
5: pd.Timestamp("1961-06-01 00:00:00"),
6: pd.Timestamp("1961-07-01 00:00:00"),
7: pd.Timestamp("1961-08-01 00:00:00"),
8: pd.Timestamp("1961-09-01 00:00:00"),
9: pd.Timestamp("1961-10-01 00:00:00"),
10: pd.Timestamp("1961-11-01 00:00:00"),
11: pd.Timestamp("1961-12-01 00:00:00"),
12: pd.Timestamp("1962-01-01 00:00:00"),
13: pd.Timestamp("1962-02-01 00:00:00"),
14: pd.Timestamp("1962-03-01 00:00:00"),
15: pd.Timestamp("1962-04-01 00:00:00"),
16: pd.Timestamp("1962-05-01 00:00:00"),
17: pd.Timestamp("1962-06-01 00:00:00"),
18: pd.Timestamp("1962-07-01 00:00:00"),
19: pd.Timestamp("1962-08-01 00:00:00"),
20: pd.Timestamp("1962-09-01 00:00:00"),
21: pd.Timestamp("1962-10-01 00:00:00"),
22: pd.Timestamp("1962-11-01 00:00:00"),
23: pd.Timestamp("1962-12-01 00:00:00"),
24: pd.Timestamp("1963-01-01 00:00:00"),
25: pd.Timestamp("1963-02-01 00:00:00"),
26: pd.Timestamp("1963-03-01 00:00:00"),
27: pd.Timestamp("1963-04-01 00:00:00"),
28: pd.Timestamp("1963-05-01 00:00:00"),
29: pd.Timestamp("1963-06-01 00:00:00"),
},
"fcst": {
0: 472.9444444444443,
1: 475.60162835249025,
2: 478.2588122605362,
3: 480.9159961685822,
4: 483.57318007662815,
5: 486.23036398467417,
6: 488.88754789272014,
7: 491.5447318007661,
8: 494.20191570881207,
9: 496.85909961685803,
10: 499.516283524904,
11: 502.17346743295,
12: 504.830651340996,
13: 507.48783524904195,
14: 510.1450191570879,
15: 512.8022030651339,
16: 515.4593869731799,
17: 518.1165708812258,
18: 520.7737547892718,
19: 523.4309386973177,
20: 526.0881226053638,
21: 528.7453065134097,
22: 531.4024904214557,
23: 534.0596743295017,
24: 536.7168582375476,
25: 539.3740421455936,
26: 542.0312260536396,
27: 544.6884099616856,
28: 547.3455938697316,
29: 550.0027777777775,
},
"fcst_lower": {
0: 380.6292037661305,
1: 383.26004701147235,
2: 385.8905370924373,
3: 388.52067431512216,
4: 391.1504589893095,
5: 393.7798914284503,
6: 396.4089719496461,
7: 399.0377008736321,
8: 401.66607852475926,
9: 404.2941052309762,
10: 406.9217813238114,
11: 409.54910713835505,
12: 412.1760830132403,
13: 414.80270929062544,
14: 417.42898631617453,
15: 420.0549144390392,
16: 422.68049401183924,
17: 425.3057253906438,
18: 427.93060893495215,
19: 430.555145007674,
20: 433.1793339751107,
21: 435.8031762069345,
22: 438.42667207616984,
23: 441.0498219591729,
24: 443.6726262356114,
25: 446.2950852884452,
26: 448.91719950390507,
27: 451.53896927147304,
28: 454.1603949838614,
29: 456.78147703699216,
},
"fcst_upper": {
0: 565.2596851227581,
1: 567.9432096935082,
2: 570.6270874286351,
3: 573.3113180220422,
4: 575.9959011639468,
5: 578.680836540898,
6: 581.3661238357942,
7: 584.0517627279,
8: 586.7377528928648,
9: 589.4240940027398,
10: 592.1107857259966,
11: 594.797827727545,
12: 597.4852196687516,
13: 600.1729612074585,
14: 602.8610519980012,
15: 605.5494916912286,
16: 608.2382799345206,
17: 610.9274163718079,
18: 613.6169006435915,
19: 616.3067323869615,
20: 618.9969112356168,
21: 621.6874368198849,
22: 624.3783087667415,
23: 627.0695266998305,
24: 629.7610902394838,
25: 632.4529990027421,
26: 635.145252603374,
27: 637.8378506518982,
28: 640.5307927556019,
29: 643.2240785185628,
},
}
)
AIR_FCST_LINEAR_99 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("1961-01-01 00:00:00"),
1:
|
pd.Timestamp("1961-02-01 00:00:00")
|
pandas.Timestamp
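# Added illustration (not part of the dataset row above): pd.Timestamp parses the
# ISO-style datetime strings used in these dummy-data frames.
import pandas as pd
assert pd.Timestamp("1961-02-01 00:00:00") == pd.Timestamp(year=1961, month=2, day=1)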
|
"""
Provide a generic structure to support window functions,
similar to how we have a Groupby object.
"""
from collections import defaultdict
from datetime import timedelta
from textwrap import dedent
from typing import List, Optional, Set
import warnings
import numpy as np
import pandas._libs.window as libwindow
from pandas.compat._optional import import_optional_dependency
from pandas.compat.numpy import function as nv
from pandas.util._decorators import Appender, Substitution, cache_readonly
from pandas.core.dtypes.common import (
ensure_float64,
is_bool,
is_float_dtype,
is_integer,
is_integer_dtype,
is_list_like,
is_scalar,
is_timedelta64_dtype,
needs_i8_conversion,
)
from pandas.core.dtypes.generic import (
ABCDataFrame,
ABCDateOffset,
ABCDatetimeIndex,
ABCPeriodIndex,
ABCSeries,
ABCTimedeltaIndex,
)
from pandas._typing import Axis, FrameOrSeries
from pandas.core.base import DataError, PandasObject, SelectionMixin
import pandas.core.common as com
from pandas.core.generic import _shared_docs
from pandas.core.groupby.base import GroupByMixin
_shared_docs = dict(**_shared_docs)
_doc_template = """
Returns
-------
Series or DataFrame
Return type is determined by the caller.
See Also
--------
Series.%(name)s : Series %(name)s.
DataFrame.%(name)s : DataFrame %(name)s.
"""
class _Window(PandasObject, SelectionMixin):
_attributes = [
"window",
"min_periods",
"center",
"win_type",
"axis",
"on",
"closed",
] # type: List[str]
exclusions = set() # type: Set[str]
def __init__(
self,
obj,
window=None,
min_periods: Optional[int] = None,
center: Optional[bool] = False,
win_type: Optional[str] = None,
axis: Axis = 0,
on: Optional[str] = None,
closed: Optional[str] = None,
**kwargs
):
self.__dict__.update(kwargs)
self.obj = obj
self.on = on
self.closed = closed
self.window = window
self.min_periods = min_periods
self.center = center
self.win_type = win_type
self.win_freq = None
self.axis = obj._get_axis_number(axis) if axis is not None else None
self.validate()
@property
def _constructor(self):
return Window
@property
def is_datetimelike(self) -> Optional[bool]:
return None
@property
def _on(self):
return None
@property
def is_freq_type(self) -> bool:
return self.win_type == "freq"
def validate(self):
if self.center is not None and not is_bool(self.center):
raise ValueError("center must be a boolean")
if self.min_periods is not None and not is_integer(self.min_periods):
raise ValueError("min_periods must be an integer")
if self.closed is not None and self.closed not in [
"right",
"both",
"left",
"neither",
]:
raise ValueError("closed must be 'right', 'left', 'both' or " "'neither'")
def _create_blocks(self):
"""
Split data into blocks & return conformed data.
"""
obj = self._selected_obj
# filter out the on from the object
if self.on is not None:
if obj.ndim == 2:
obj = obj.reindex(columns=obj.columns.difference([self.on]), copy=False)
blocks = obj._to_dict_of_blocks(copy=False).values()
return blocks, obj
def _gotitem(self, key, ndim, subset=None):
"""
Sub-classes to define. Return a sliced object.
Parameters
----------
key : str / list of selections
ndim : 1,2
requested ndim of result
subset : object, default None
subset to act on
"""
# create a new object to prevent aliasing
if subset is None:
subset = self.obj
self = self._shallow_copy(subset)
self._reset_cache()
if subset.ndim == 2:
if is_scalar(key) and key in subset or is_list_like(key):
self._selection = key
return self
def __getattr__(self, attr):
if attr in self._internal_names_set:
return object.__getattribute__(self, attr)
if attr in self.obj:
return self[attr]
raise AttributeError(
"%r object has no attribute %r" % (type(self).__name__, attr)
)
def _dir_additions(self):
return self.obj._dir_additions()
def _get_window(self, other=None):
return self.window
@property
def _window_type(self) -> str:
return self.__class__.__name__
def __repr__(self) -> str:
"""
Provide a nice str repr of our rolling object.
"""
attrs = (
"{k}={v}".format(k=k, v=getattr(self, k))
for k in self._attributes
if getattr(self, k, None) is not None
)
return "{klass} [{attrs}]".format(
klass=self._window_type, attrs=",".join(attrs)
)
def __iter__(self):
url = "https://github.com/pandas-dev/pandas/issues/11704"
raise NotImplementedError("See issue #11704 {url}".format(url=url))
def _get_index(self) -> Optional[np.ndarray]:
"""
Return index as an ndarray.
Returns
-------
None or ndarray
"""
if self.is_freq_type:
return self._on.asi8
return None
def _prep_values(self, values: Optional[np.ndarray] = None) -> np.ndarray:
"""Convert input to numpy arrays for Cython routines"""
if values is None:
values = getattr(self._selected_obj, "values", self._selected_obj)
# GH #12373 : rolling functions error on float32 data
# make sure the data is coerced to float64
if is_float_dtype(values.dtype):
values = ensure_float64(values)
elif is_integer_dtype(values.dtype):
values = ensure_float64(values)
elif needs_i8_conversion(values.dtype):
raise NotImplementedError(
"ops for {action} for this "
"dtype {dtype} are not "
"implemented".format(action=self._window_type, dtype=values.dtype)
)
else:
try:
values = ensure_float64(values)
except (ValueError, TypeError):
raise TypeError(
"cannot handle this type -> {0}" "".format(values.dtype)
)
# Always convert inf to nan
values[np.isinf(values)] = np.NaN
return values
def _wrap_result(self, result, block=None, obj=None) -> FrameOrSeries:
"""
Wrap a single result.
"""
if obj is None:
obj = self._selected_obj
index = obj.index
if isinstance(result, np.ndarray):
# coerce if necessary
if block is not None:
if is_timedelta64_dtype(block.values.dtype):
from pandas import to_timedelta
result = to_timedelta(result.ravel(), unit="ns").values.reshape(
result.shape
)
if result.ndim == 1:
from pandas import Series
return Series(result, index, name=obj.name)
return type(obj)(result, index=index, columns=block.columns)
return result
def _wrap_results(self, results, blocks, obj, exclude=None) -> FrameOrSeries:
"""
Wrap the results.
Parameters
----------
results : list of ndarrays
blocks : list of blocks
obj : conformed data (may be resampled)
exclude: list of columns to exclude, default to None
"""
from pandas import Series, concat
from pandas.core.index import ensure_index
final = []
for result, block in zip(results, blocks):
result = self._wrap_result(result, block=block, obj=obj)
if result.ndim == 1:
return result
final.append(result)
# if we have an 'on' column
# we want to put it back into the results
# in the same location
columns = self._selected_obj.columns
if self.on is not None and not self._on.equals(obj.index):
name = self._on.name
final.append(Series(self._on, index=obj.index, name=name))
if self._selection is not None:
selection = ensure_index(self._selection)
# need to reorder to include original location of
# the on column (if its not already there)
if name not in selection:
columns = self.obj.columns
indexer = columns.get_indexer(selection.tolist() + [name])
columns = columns.take(sorted(indexer))
# exclude nuisance columns so that they are not reindexed
if exclude is not None and exclude:
columns = [c for c in columns if c not in exclude]
if not columns:
raise DataError("No numeric types to aggregate")
if not len(final):
return obj.astype("float64")
return concat(final, axis=1).reindex(columns=columns, copy=False)
def _center_window(self, result, window) -> np.ndarray:
"""
Center the result in the window.
"""
if self.axis > result.ndim - 1:
raise ValueError(
"Requested axis is larger then no. of argument " "dimensions"
)
offset = _offset(window, True)
if offset > 0:
if isinstance(result, (ABCSeries, ABCDataFrame)):
result = result.slice_shift(-offset, axis=self.axis)
else:
lead_indexer = [slice(None)] * result.ndim
lead_indexer[self.axis] = slice(offset, None)
result = np.copy(result[tuple(lead_indexer)])
return result
def aggregate(self, func, *args, **kwargs):
result, how = self._aggregate(func, *args, **kwargs)
if result is None:
return self.apply(func, raw=False, args=args, kwargs=kwargs)
return result
agg = aggregate
_shared_docs["sum"] = dedent(
"""
Calculate %(name)s sum of given DataFrame or Series.
Parameters
----------
*args, **kwargs
For compatibility with other %(name)s methods. Has no effect
on the computed value.
Returns
-------
Series or DataFrame
Same type as the input, with the same index, containing the
%(name)s sum.
See Also
--------
Series.sum : Reducing sum for Series.
DataFrame.sum : Reducing sum for DataFrame.
Examples
--------
>>> s = pd.Series([1, 2, 3, 4, 5])
>>> s
0 1
1 2
2 3
3 4
4 5
dtype: int64
>>> s.rolling(3).sum()
0 NaN
1 NaN
2 6.0
3 9.0
4 12.0
dtype: float64
>>> s.expanding(3).sum()
0 NaN
1 NaN
2 6.0
3 10.0
4 15.0
dtype: float64
>>> s.rolling(3, center=True).sum()
0 NaN
1 6.0
2 9.0
3 12.0
4 NaN
dtype: float64
For DataFrame, each %(name)s sum is computed column-wise.
>>> df = pd.DataFrame({"A": s, "B": s ** 2})
>>> df
A B
0 1 1
1 2 4
2 3 9
3 4 16
4 5 25
>>> df.rolling(3).sum()
A B
0 NaN NaN
1 NaN NaN
2 6.0 14.0
3 9.0 29.0
4 12.0 50.0
"""
)
_shared_docs["mean"] = dedent(
"""
Calculate the %(name)s mean of the values.
Parameters
----------
*args
Under Review.
**kwargs
Under Review.
Returns
-------
Series or DataFrame
Returned object type is determined by the caller of the %(name)s
calculation.
See Also
--------
Series.%(name)s : Calling object with Series data.
DataFrame.%(name)s : Calling object with DataFrames.
Series.mean : Equivalent method for Series.
DataFrame.mean : Equivalent method for DataFrame.
Examples
--------
The below examples will show rolling mean calculations with window sizes of
two and three, respectively.
>>> s = pd.Series([1, 2, 3, 4])
>>> s.rolling(2).mean()
0 NaN
1 1.5
2 2.5
3 3.5
dtype: float64
>>> s.rolling(3).mean()
0 NaN
1 NaN
2 2.0
3 3.0
dtype: float64
"""
)
class Window(_Window):
"""
Provide rolling window calculations.
.. versionadded:: 0.18.0
Parameters
----------
window : int, or offset
Size of the moving window. This is the number of observations used for
calculating the statistic. Each window will be a fixed size.
If it's an offset, then this will be the time period of each window. Each
window will be variable sized based on the observations included in
the time-period. This is only valid for datetimelike indexes. This is
new in 0.19.0.
min_periods : int, default None
Minimum number of observations in window required to have a value
(otherwise result is NA). For a window that is specified by an offset,
`min_periods` will default to 1. Otherwise, `min_periods` will default
to the size of the window.
center : bool, default False
Set the labels at the center of the window.
win_type : str, default None
Provide a window type. If ``None``, all points are evenly weighted.
See the notes below for further information.
on : str, optional
For a DataFrame, a datetime-like column on which to calculate the rolling
window, rather than the DataFrame's index. Provided integer column is
ignored and excluded from result since an integer index is not used to
calculate the rolling window.
axis : int or str, default 0
closed : str, default None
Make the interval closed on the 'right', 'left', 'both' or
'neither' endpoints.
For offset-based windows, it defaults to 'right'.
For fixed windows, defaults to 'both'. Remaining cases not implemented
for fixed windows.
.. versionadded:: 0.20.0
Returns
-------
a Window or Rolling sub-classed for the particular operation
See Also
--------
expanding : Provides expanding transformations.
ewm : Provides exponential weighted functions.
Notes
-----
By default, the result is set to the right edge of the window. This can be
changed to the center of the window by setting ``center=True``.
To learn more about the offsets & frequency strings, please see `this link
<http://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.
The recognized win_types are:
* ``boxcar``
* ``triang``
* ``blackman``
* ``hamming``
* ``bartlett``
* ``parzen``
* ``bohman``
* ``blackmanharris``
* ``nuttall``
* ``barthann``
* ``kaiser`` (needs beta)
* ``gaussian`` (needs std)
* ``general_gaussian`` (needs power, width)
* ``slepian`` (needs width)
* ``exponential`` (needs tau), center is set to None.
If ``win_type=None`` all points are evenly weighted. To learn more about
different window types see `scipy.signal window functions
<https://docs.scipy.org/doc/scipy/reference/signal.html#window-functions>`__.
Examples
--------
>>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]})
>>> df
B
0 0.0
1 1.0
2 2.0
3 NaN
4 4.0
Rolling sum with a window length of 2, using the 'triang'
window type.
>>> df.rolling(2, win_type='triang').sum()
B
0 NaN
1 0.5
2 1.5
3 NaN
4 NaN
Rolling sum with a window length of 2, min_periods defaults
to the window length.
>>> df.rolling(2).sum()
B
0 NaN
1 1.0
2 3.0
3 NaN
4 NaN
Same as above, but explicitly set the min_periods
>>> df.rolling(2, min_periods=1).sum()
B
0 0.0
1 1.0
2 3.0
3 2.0
4 4.0
A ragged (meaning not-a-regular frequency), time-indexed DataFrame
>>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]},
... index = [pd.Timestamp('20130101 09:00:00'),
... pd.Timestamp('20130101 09:00:02'),
... pd.Timestamp('20130101 09:00:03'),
... pd.Timestamp('20130101 09:00:05'),
... pd.Timestamp('20130101 09:00:06')])
>>> df
B
2013-01-01 09:00:00 0.0
2013-01-01 09:00:02 1.0
2013-01-01 09:00:03 2.0
2013-01-01 09:00:05 NaN
2013-01-01 09:00:06 4.0
Contrasting to an integer rolling window, this will roll a variable
length window corresponding to the time period.
The default for min_periods is 1.
>>> df.rolling('2s').sum()
B
2013-01-01 09:00:00 0.0
2013-01-01 09:00:02 1.0
2013-01-01 09:00:03 3.0
2013-01-01 09:00:05 NaN
2013-01-01 09:00:06 4.0
"""
def validate(self):
super().validate()
window = self.window
if isinstance(window, (list, tuple, np.ndarray)):
pass
elif is_integer(window):
if window <= 0:
raise ValueError("window must be > 0 ")
import_optional_dependency(
"scipy", extra="Scipy is required to generate window weight."
)
import scipy.signal as sig
if not isinstance(self.win_type, str):
raise ValueError("Invalid win_type {0}".format(self.win_type))
if getattr(sig, self.win_type, None) is None:
raise ValueError("Invalid win_type {0}".format(self.win_type))
else:
raise ValueError("Invalid window {0}".format(window))
def _prep_window(self, **kwargs):
"""
Provide validation for our window type, return the window
that has already been validated.
"""
window = self._get_window()
if isinstance(window, (list, tuple, np.ndarray)):
return com.asarray_tuplesafe(window).astype(float)
elif is_integer(window):
import scipy.signal as sig
# the below may pop from kwargs
def _validate_win_type(win_type, kwargs):
arg_map = {
"kaiser": ["beta"],
"gaussian": ["std"],
"general_gaussian": ["power", "width"],
"slepian": ["width"],
"exponential": ["tau"],
}
if win_type in arg_map:
win_args = _pop_args(win_type, arg_map[win_type], kwargs)
if win_type == "exponential":
# exponential window requires the first arg (center)
# to be set to None (necessary for symmetric window)
win_args.insert(0, None)
return tuple([win_type] + win_args)
return win_type
def _pop_args(win_type, arg_names, kwargs):
msg = "%s window requires %%s" % win_type
all_args = []
for n in arg_names:
if n not in kwargs:
raise ValueError(msg % n)
all_args.append(kwargs.pop(n))
return all_args
win_type = _validate_win_type(self.win_type, kwargs)
# GH #15662. `False` makes symmetric window, rather than periodic.
return sig.get_window(win_type, window, False).astype(float)
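# Added illustrative note: for symmetric windows, ``sig.get_window('triang', 3, False)``
# yields the weights ``[0.5, 1.0, 0.5]`` and ``sig.get_window('triang', 2, False)`` yields
# ``[0.5, 0.5]``; with those weights ``df.rolling(2, win_type='triang').sum()`` of the
# pair (0, 1) gives 0*0.5 + 1*0.5 = 0.5, matching the class docstring example above.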
def _apply_window(self, mean=True, **kwargs):
"""
Applies a moving window of type ``window_type`` on the data.
Parameters
----------
mean : bool, default True
If True computes weighted mean, else weighted sum
Returns
-------
y : same type as input argument
"""
window = self._prep_window(**kwargs)
center = self.center
blocks, obj = self._create_blocks()
block_list = list(blocks)
results = []
exclude = []
for i, b in enumerate(blocks):
try:
values = self._prep_values(b.values)
except (TypeError, NotImplementedError):
if isinstance(obj, ABCDataFrame):
exclude.extend(b.columns)
del block_list[i]
continue
else:
raise DataError("No numeric types to aggregate")
if values.size == 0:
results.append(values.copy())
continue
offset = _offset(window, center)
additional_nans = np.array([np.NaN] * offset)
def f(arg, *args, **kwargs):
minp = _use_window(self.min_periods, len(window))
return libwindow.roll_window(
np.concatenate((arg, additional_nans)) if center else arg,
window,
minp,
avg=mean,
)
result = np.apply_along_axis(f, self.axis, values)
if center:
result = self._center_window(result, window)
results.append(result)
return self._wrap_results(results, block_list, obj, exclude)
_agg_see_also_doc = dedent(
"""
See Also
--------
pandas.DataFrame.rolling.aggregate
pandas.DataFrame.aggregate
"""
)
_agg_examples_doc = dedent(
"""
Examples
--------
>>> df = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'])
>>> df
A B C
0 -2.385977 -0.102758 0.438822
1 -1.004295 0.905829 -0.954544
2 0.735167 -0.165272 -1.619346
3 -0.702657 -1.340923 -0.706334
4 -0.246845 0.211596 -0.901819
5 2.463718 3.157577 -1.380906
6 -1.142255 2.340594 -0.039875
7 1.396598 -1.647453 1.677227
8 -0.543425 1.761277 -0.220481
9 -0.640505 0.289374 -1.550670
>>> df.rolling(3, win_type='boxcar').agg('mean')
A B C
0 NaN NaN NaN
1 NaN NaN NaN
2 -0.885035 0.212600 -0.711689
3 -0.323928 -0.200122 -1.093408
4 -0.071445 -0.431533 -1.075833
5 0.504739 0.676083 -0.996353
6 0.358206 1.903256 -0.774200
7 0.906020 1.283573 0.085482
8 -0.096361 0.818139 0.472290
9 0.070889 0.134399 -0.031308
"""
)
@Substitution(
see_also=_agg_see_also_doc,
examples=_agg_examples_doc,
versionadded="",
klass="Series/DataFrame",
axis="",
)
@Appender(_shared_docs["aggregate"])
def aggregate(self, arg, *args, **kwargs):
result, how = self._aggregate(arg, *args, **kwargs)
if result is None:
# these must apply directly
result = arg(self)
return result
agg = aggregate
@Substitution(name="window")
@Appender(_shared_docs["sum"])
def sum(self, *args, **kwargs):
nv.validate_window_func("sum", args, kwargs)
return self._apply_window(mean=False, **kwargs)
@Substitution(name="window")
@Appender(_shared_docs["mean"])
def mean(self, *args, **kwargs):
nv.validate_window_func("mean", args, kwargs)
return self._apply_window(mean=True, **kwargs)
class _GroupByMixin(GroupByMixin):
"""
Provide the groupby facilities.
"""
def __init__(self, obj, *args, **kwargs):
parent = kwargs.pop("parent", None) # noqa
groupby = kwargs.pop("groupby", None)
if groupby is None:
groupby, obj = obj, obj.obj
self._groupby = groupby
self._groupby.mutated = True
self._groupby.grouper.mutated = True
super().__init__(obj, *args, **kwargs)
count = GroupByMixin._dispatch("count")
corr = GroupByMixin._dispatch("corr", other=None, pairwise=None)
cov = GroupByMixin._dispatch("cov", other=None, pairwise=None)
def _apply(
self, func, name=None, window=None, center=None, check_minp=None, **kwargs
):
"""
Dispatch to apply; we are stripping all of the _apply kwargs and
performing the original function call on the grouped object.
"""
def f(x, name=name, *args):
x = self._shallow_copy(x)
if isinstance(name, str):
return getattr(x, name)(*args, **kwargs)
return x.apply(name, *args, **kwargs)
return self._groupby.apply(f)
class _Rolling(_Window):
@property
def _constructor(self):
return Rolling
def _apply(
self, func, name=None, window=None, center=None, check_minp=None, **kwargs
):
"""
Rolling statistical measure using supplied function.
Designed to be used with passed-in Cython array-based functions.
Parameters
----------
func : str/callable to apply
name : str, optional
name of this function
window : int/array, default to _get_window()
center : bool, default to self.center
check_minp : function, default to _use_window
Returns
-------
y : type of input
"""
if center is None:
center = self.center
if window is None:
window = self._get_window()
if check_minp is None:
check_minp = _use_window
blocks, obj = self._create_blocks()
block_list = list(blocks)
index_as_array = self._get_index()
results = []
exclude = []
for i, b in enumerate(blocks):
try:
values = self._prep_values(b.values)
except (TypeError, NotImplementedError):
if isinstance(obj, ABCDataFrame):
exclude.extend(b.columns)
del block_list[i]
continue
else:
raise DataError("No numeric types to aggregate")
if values.size == 0:
results.append(values.copy())
continue
# if we have a string function name, wrap it
if isinstance(func, str):
cfunc = getattr(libwindow, func, None)
if cfunc is None:
raise ValueError(
"we do not support this function "
"in libwindow.{func}".format(func=func)
)
def func(arg, window, min_periods=None, closed=None):
minp = check_minp(min_periods, window)
# ensure we are only rolling on floats
arg = ensure_float64(arg)
return cfunc(arg, window, minp, index_as_array, closed, **kwargs)
# calculation function
if center:
offset = _offset(window, center)
additional_nans = np.array([np.NaN] * offset)
def calc(x):
return func(
np.concatenate((x, additional_nans)),
window,
min_periods=self.min_periods,
closed=self.closed,
)
else:
def calc(x):
return func(
x, window, min_periods=self.min_periods, closed=self.closed
)
with np.errstate(all="ignore"):
if values.ndim > 1:
result = np.apply_along_axis(calc, self.axis, values)
else:
result = calc(values)
if center:
result = self._center_window(result, window)
results.append(result)
return self._wrap_results(results, block_list, obj, exclude)
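# Added illustrative note: when ``func`` is a string it is resolved to a Cython
# kernel on ``libwindow``; e.g. ``self._apply("roll_sum", "sum")`` (used by the
# rolling ``sum`` implementation below) ends up calling ``libwindow.roll_sum``
# on each block of numeric values.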
class _Rolling_and_Expanding(_Rolling):
_shared_docs["count"] = dedent(
r"""
The %(name)s count of any non-NaN observations inside the window.
Returns
-------
Series or DataFrame
Returned object type is determined by the caller of the %(name)s
calculation.
See Also
--------
Series.%(name)s : Calling object with Series data.
DataFrame.%(name)s : Calling object with DataFrames.
DataFrame.count : Count of the full DataFrame.
Examples
--------
>>> s = pd.Series([2, 3, np.nan, 10])
>>> s.rolling(2).count()
0 1.0
1 2.0
2 1.0
3 1.0
dtype: float64
>>> s.rolling(3).count()
0 1.0
1 2.0
2 2.0
3 2.0
dtype: float64
>>> s.rolling(4).count()
0 1.0
1 2.0
2 2.0
3 3.0
dtype: float64
"""
)
def count(self):
blocks, obj = self._create_blocks()
# Validate the index
self._get_index()
window = self._get_window()
window = min(window, len(obj)) if not self.center else window
results = []
for b in blocks:
result = b.notna().astype(int)
result = self._constructor(
result,
window=window,
min_periods=0,
center=self.center,
axis=self.axis,
closed=self.closed,
).sum()
results.append(result)
return self._wrap_results(results, blocks, obj)
_shared_docs["apply"] = dedent(
r"""
The %(name)s function's apply function.
Parameters
----------
func : function
Must produce a single value from an ndarray input if ``raw=True``
or a single value from a Series if ``raw=False``.
raw : bool, default None
* ``False`` : passes each row or column as a Series to the
function.
* ``True`` or ``None`` : the passed function will receive ndarray
objects instead.
If you are just applying a NumPy reduction function this will
achieve much better performance.
The `raw` parameter is required and will show a FutureWarning if
not passed. In the future `raw` will default to False.
.. versionadded:: 0.23.0
*args, **kwargs
Arguments and keyword arguments to be passed into func.
Returns
-------
Series or DataFrame
Return type is determined by the caller.
See Also
--------
Series.%(name)s : Series %(name)s.
DataFrame.%(name)s : DataFrame %(name)s.
"""
)
def apply(self, func, raw=None, args=(), kwargs={}):
from pandas import Series
kwargs.pop("_level", None)
window = self._get_window()
offset = _offset(window, self.center)
index_as_array = self._get_index()
# TODO: default is for backward compat
# change to False in the future
if raw is None:
warnings.warn(
"Currently, 'apply' passes the values as ndarrays to the "
"applied function. In the future, this will change to passing "
"it as Series objects. You need to specify 'raw=True' to keep "
"the current behaviour, and you can pass 'raw=False' to "
"silence this warning",
FutureWarning,
stacklevel=3,
)
raw = True
def f(arg, window, min_periods, closed):
minp = _use_window(min_periods, window)
if not raw:
arg = Series(arg, index=self.obj.index)
return libwindow.roll_generic(
arg,
window,
minp,
index_as_array,
closed,
offset,
func,
raw,
args,
kwargs,
)
return self._apply(f, func, args=args, kwargs=kwargs, center=False, raw=raw)
def sum(self, *args, **kwargs):
nv.validate_window_func("sum", args, kwargs)
return self._apply("roll_sum", "sum", **kwargs)
_shared_docs["max"] = dedent(
"""
Calculate the %(name)s maximum.
Parameters
----------
*args, **kwargs
Arguments and keyword arguments to be passed into func.
"""
)
def max(self, *args, **kwargs):
nv.validate_window_func("max", args, kwargs)
return self._apply("roll_max", "max", **kwargs)
_shared_docs["min"] = dedent(
"""
Calculate the %(name)s minimum.
Parameters
----------
**kwargs
    For compatibility with other %(name)s methods. Has no effect
    on the computed minimum.
Returns
-------
Series or DataFrame
Returned object type is determined by the caller of the %(name)s
calculation.
See Also
--------
Series.%(name)s : Calling object with a Series.
DataFrame.%(name)s : Calling object with a DataFrame.
Series.min : Similar method for Series.
DataFrame.min : Similar method for DataFrame.
Examples
--------
Performing a rolling minimum with a window size of 3.
>>> s = pd.Series([4, 3, 5, 2, 6])
>>> s.rolling(3).min()
0 NaN
1 NaN
2 3.0
3 2.0
4 2.0
dtype: float64
"""
)
def min(self, *args, **kwargs):
nv.validate_window_func("min", args, kwargs)
return self._apply("roll_min", "min", **kwargs)
def mean(self, *args, **kwargs):
nv.validate_window_func("mean", args, kwargs)
return self._apply("roll_mean", "mean", **kwargs)
_shared_docs["median"] = dedent(
"""
Calculate the %(name)s median.
Parameters
----------
**kwargs
For compatibility with other %(name)s methods. Has no effect
on the computed median.
Returns
-------
Series or DataFrame
Returned type is the same as the original object.
See Also
--------
Series.%(name)s : Calling object with Series data.
DataFrame.%(name)s : Calling object with DataFrames.
Series.median : Equivalent method for Series.
DataFrame.median : Equivalent method for DataFrame.
Examples
--------
Compute the rolling median of a series with a window size of 3.
>>> s = pd.Series([0, 1, 2, 3, 4])
>>> s.rolling(3).median()
0 NaN
1 NaN
2 1.0
3 2.0
4 3.0
dtype: float64
"""
)
def median(self, **kwargs):
return self._apply("roll_median_c", "median", **kwargs)
_shared_docs["std"] = dedent(
"""
Calculate %(name)s standard deviation.
Normalized by N-1 by default. This can be changed using the `ddof`
argument.
Parameters
----------
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.
*args, **kwargs
For NumPy compatibility. No additional arguments are used.
Returns
-------
Series or DataFrame
Returns the same object type as the caller of the %(name)s calculation.
See Also
--------
Series.%(name)s : Calling object with Series data.
DataFrame.%(name)s : Calling object with DataFrames.
Series.std : Equivalent method for Series.
DataFrame.std : Equivalent method for DataFrame.
numpy.std : Equivalent method for Numpy array.
Notes
-----
The default `ddof` of 1 used in Series.std is different than the default
`ddof` of 0 in numpy.std.
A minimum of one period is required for the rolling calculation.
Examples
--------
>>> s = pd.Series([5, 5, 6, 7, 5, 5, 5])
>>> s.rolling(3).std()
0 NaN
1 NaN
2 0.577350
3 1.000000
4 1.000000
5 1.154701
6 0.000000
dtype: float64
>>> s.expanding(3).std()
0 NaN
1 NaN
2 0.577350
3 0.957427
4 0.894427
5 0.836660
6 0.786796
dtype: float64
"""
)
def std(self, ddof=1, *args, **kwargs):
nv.validate_window_func("std", args, kwargs)
window = self._get_window()
index_as_array = self._get_index()
def f(arg, *args, **kwargs):
minp = _require_min_periods(1)(self.min_periods, window)
return _zsqrt(
libwindow.roll_var(arg, window, minp, index_as_array, self.closed, ddof)
)
return self._apply(
f, "std", check_minp=_require_min_periods(1), ddof=ddof, **kwargs
)
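# Added illustrative note: the rolling standard deviation is the square root of
# the rolling variance, so ``s.rolling(3).std()`` should equal
# ``np.sqrt(s.rolling(3).var())`` for any Series ``s`` (both default to ddof=1).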
_shared_docs["var"] = dedent(
"""
Calculate unbiased %(name)s variance.
Normalized by N-1 by default. This can be changed using the `ddof`
argument.
Parameters
----------
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.
*args, **kwargs
For NumPy compatibility. No additional arguments are used.
Returns
-------
Series or DataFrame
Returns the same object type as the caller of the %(name)s calculation.
See Also
--------
Series.%(name)s : Calling object with Series data.
DataFrame.%(name)s : Calling object with DataFrames.
Series.var : Equivalent method for Series.
DataFrame.var : Equivalent method for DataFrame.
numpy.var : Equivalent method for Numpy array.
Notes
-----
The default `ddof` of 1 used in :meth:`Series.var` is different than the
default `ddof` of 0 in :func:`numpy.var`.
A minimum of 1 period is required for the rolling calculation.
Examples
--------
>>> s = pd.Series([5, 5, 6, 7, 5, 5, 5])
>>> s.rolling(3).var()
0 NaN
1 NaN
2 0.333333
3 1.000000
4 1.000000
5 1.333333
6 0.000000
dtype: float64
>>> s.expanding(3).var()
0 NaN
1 NaN
2 0.333333
3 0.916667
4 0.800000
5 0.700000
6 0.619048
dtype: float64
"""
)
def var(self, ddof=1, *args, **kwargs):
nv.validate_window_func("var", args, kwargs)
return self._apply(
"roll_var", "var", check_minp=_require_min_periods(1), ddof=ddof, **kwargs
)
_shared_docs[
"skew"
] = """
Unbiased %(name)s skewness.
Parameters
----------
**kwargs
Keyword arguments to be passed into func.
"""
def skew(self, **kwargs):
return self._apply(
"roll_skew", "skew", check_minp=_require_min_periods(3), **kwargs
)
_shared_docs["kurt"] = dedent(
"""
Calculate unbiased %(name)s kurtosis.
This function uses Fisher's definition of kurtosis without bias.
Parameters
----------
**kwargs
    For compatibility with other %(name)s methods. Has no effect
    on the computed value.
Returns
-------
Series or DataFrame
Returned object type is determined by the caller of the %(name)s
calculation.
See Also
--------
Series.%(name)s : Calling object with Series data.
DataFrame.%(name)s : Calling object with DataFrames.
Series.kurt : Equivalent method for Series.
DataFrame.kurt : Equivalent method for DataFrame.
scipy.stats.skew : Third moment of a probability density.
scipy.stats.kurtosis : Reference SciPy method.
Notes
-----
A minimum of 4 periods is required for the %(name)s calculation.
"""
)
def kurt(self, **kwargs):
return self._apply(
"roll_kurt", "kurt", check_minp=_require_min_periods(4), **kwargs
)
_shared_docs["quantile"] = dedent(
"""
Calculate the %(name)s quantile.
Parameters
----------
quantile : float
Quantile to compute. 0 <= quantile <= 1.
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
.. versionadded:: 0.23.0
This optional parameter specifies the interpolation method to use,
when the desired quantile lies between two data points `i` and `j`:
* linear: `i + (j - i) * fraction`, where `fraction` is the
fractional part of the index surrounded by `i` and `j`.
* lower: `i`.
* higher: `j`.
* nearest: `i` or `j` whichever is nearest.
* midpoint: (`i` + `j`) / 2.
**kwargs:
For compatibility with other %(name)s methods. Has no effect on
the result.
Returns
-------
Series or DataFrame
Returned object type is determined by the caller of the %(name)s
calculation.
See Also
--------
Series.quantile : Computes value at the given quantile over all data
in Series.
DataFrame.quantile : Computes values at the given quantile over
requested axis in DataFrame.
Examples
--------
>>> s = pd.Series([1, 2, 3, 4])
>>> s.rolling(2).quantile(.4, interpolation='lower')
0 NaN
1 1.0
2 2.0
3 3.0
dtype: float64
>>> s.rolling(2).quantile(.4, interpolation='midpoint')
0 NaN
1 1.5
2 2.5
3 3.5
dtype: float64
"""
)
def quantile(self, quantile, interpolation="linear", **kwargs):
window = self._get_window()
index_as_array = self._get_index()
def f(arg, *args, **kwargs):
minp = _use_window(self.min_periods, window)
if quantile == 1.0:
return libwindow.roll_max(
arg, window, minp, index_as_array, self.closed
)
elif quantile == 0.0:
return libwindow.roll_min(
arg, window, minp, index_as_array, self.closed
)
else:
return libwindow.roll_quantile(
arg,
window,
minp,
index_as_array,
self.closed,
quantile,
interpolation,
)
return self._apply(f, "quantile", quantile=quantile, **kwargs)
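# Added illustrative note: because of the 0.0/1.0 shortcuts above,
# ``s.rolling(2).quantile(0.0)`` matches ``s.rolling(2).min()`` and
# ``s.rolling(2).quantile(1.0)`` matches ``s.rolling(2).max()``.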
_shared_docs[
"cov"
] = """
Calculate the %(name)s sample covariance.
Parameters
----------
other : Series, DataFrame, or ndarray, optional
If not supplied then will default to self and produce pairwise
output.
pairwise : bool, default None
If False then only matching columns between self and other will be
used and the output will be a DataFrame.
If True then all pairwise combinations will be calculated and the
output will be a MultiIndexed DataFrame in the case of DataFrame
inputs. In the case of missing elements, only complete pairwise
observations will be used.
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.
**kwargs
Keyword arguments to be passed into func.
"""
def cov(self, other=None, pairwise=None, ddof=1, **kwargs):
if other is None:
other = self._selected_obj
# only default unset
pairwise = True if pairwise is None else pairwise
other = self._shallow_copy(other)
# GH 16058: offset window
if self.is_freq_type:
window = self.win_freq
else:
window = self._get_window(other)
def _get_cov(X, Y):
# GH #12373 : rolling functions error on float32 data
# to avoid potential overflow, cast the data to float64
X = X.astype("float64")
Y = Y.astype("float64")
mean = lambda x: x.rolling(
window, self.min_periods, center=self.center
).mean(**kwargs)
count = (X + Y).rolling(window=window, center=self.center).count(**kwargs)
bias_adj = count / (count - ddof)
return (mean(X * Y) - mean(X) * mean(Y)) * bias_adj
return _flex_binary_moment(
self._selected_obj, other._selected_obj, _get_cov, pairwise=bool(pairwise)
)
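# Added illustrative note: ``_get_cov`` computes the bias-adjusted sample
# covariance cov(X, Y) = (mean(X*Y) - mean(X)*mean(Y)) * n / (n - ddof),
# where the means are rolling means and ``n`` is the rolling count of
# paired observations.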
_shared_docs["corr"] = dedent(
"""
Calculate %(name)s correlation.
Parameters
----------
other : Series, DataFrame, or ndarray, optional
If not supplied then will default to self.
pairwise : bool, default None
Calculate pairwise combinations of columns within a
DataFrame. If `other` is not specified, defaults to `True`,
otherwise defaults to `False`.
Not relevant for :class:`~pandas.Series`.
**kwargs
Unused.
Returns
-------
Series or DataFrame
Returned object type is determined by the caller of the
%(name)s calculation.
See Also
--------
Series.%(name)s : Calling object with Series data.
DataFrame.%(name)s : Calling object with DataFrames.
Series.corr : Equivalent method for Series.
DataFrame.corr : Equivalent method for DataFrame.
%(name)s.cov : Similar method to calculate covariance.
numpy.corrcoef : NumPy Pearson's correlation calculation.
Notes
-----
This function uses Pearson's definition of correlation
(https://en.wikipedia.org/wiki/Pearson_correlation_coefficient).
When `other` is not specified, the output will be self correlation (e.g.
all 1's), except for :class:`~pandas.DataFrame` inputs with `pairwise`
set to `True`.
Function will return ``NaN`` for correlations of equal valued sequences;
this is the result of a 0/0 division error.
When `pairwise` is set to `False`, only matching columns between `self` and
`other` will be used.
When `pairwise` is set to `True`, the output will be a MultiIndex DataFrame
with the original index on the first level, and the `other` DataFrame
columns on the second level.
In the case of missing elements, only complete pairwise observations
will be used.
Examples
--------
The below example shows a rolling calculation with a window size of
four matching the equivalent function call using :meth:`numpy.corrcoef`.
>>> v1 = [3, 3, 3, 5, 8]
>>> v2 = [3, 4, 4, 4, 8]
>>> fmt = "{0:.6f}" # limit the printed precision to 6 digits
>>> # numpy returns a 2X2 array, the correlation coefficient
>>> # is the number at entry [0][1]
>>> print(fmt.format(np.corrcoef(v1[:-1], v2[:-1])[0][1]))
0.333333
>>> print(fmt.format(np.corrcoef(v1[1:], v2[1:])[0][1]))
0.916949
>>> s1 = pd.Series(v1)
>>> s2 = pd.Series(v2)
>>> s1.rolling(4).corr(s2)
0 NaN
1 NaN
2 NaN
3 0.333333
4 0.916949
dtype: float64
The below example shows a similar rolling calculation on a
DataFrame using the pairwise option.
>>> matrix = np.array([[51., 35.], [49., 30.], [47., 32.],\
[46., 31.], [50., 36.]])
>>> print(np.corrcoef(matrix[:-1,0], matrix[:-1,1]).round(7))
[[1. 0.6263001]
[0.6263001 1. ]]
>>> print(np.corrcoef(matrix[1:,0], matrix[1:,1]).round(7))
[[1. 0.5553681]
[0.5553681 1. ]]
>>> df = pd.DataFrame(matrix, columns=['X','Y'])
>>> df
X Y
0 51.0 35.0
1 49.0 30.0
2 47.0 32.0
3 46.0 31.0
4 50.0 36.0
>>> df.rolling(4).corr(pairwise=True)
X Y
0 X NaN NaN
Y NaN NaN
1 X NaN NaN
Y NaN NaN
2 X NaN NaN
Y NaN NaN
3 X 1.000000 0.626300
Y 0.626300 1.000000
4 X 1.000000 0.555368
Y 0.555368 1.000000
"""
)
def corr(self, other=None, pairwise=None, **kwargs):
if other is None:
other = self._selected_obj
# only default unset
pairwise = True if pairwise is None else pairwise
other = self._shallow_copy(other)
window = self._get_window(other)
def _get_corr(a, b):
a = a.rolling(
window=window, min_periods=self.min_periods, center=self.center
)
b = b.rolling(
window=window, min_periods=self.min_periods, center=self.center
)
return a.cov(b, **kwargs) / (a.std(**kwargs) * b.std(**kwargs))
return _flex_binary_moment(
self._selected_obj, other._selected_obj, _get_corr, pairwise=bool(pairwise)
)
class Rolling(_Rolling_and_Expanding):
@cache_readonly
def is_datetimelike(self):
return isinstance(
self._on, (ABCDatetimeIndex, ABCTimedeltaIndex, ABCPeriodIndex)
)
@cache_readonly
def _on(self):
if self.on is None:
return self.obj.index
elif isinstance(self.obj, ABCDataFrame) and self.on in self.obj.columns:
from pandas import Index
return Index(self.obj[self.on])
else:
raise ValueError(
"invalid on specified as {0}, "
"must be a column (if DataFrame) "
"or None".format(self.on)
)
def validate(self):
super().validate()
# we allow rolling on a datetimelike index
if (self.obj.empty or self.is_datetimelike) and isinstance(
self.window, (str, ABCDateOffset, timedelta)
):
self._validate_monotonic()
freq = self._validate_freq()
# we don't allow center
if self.center:
raise NotImplementedError(
"center is not implemented "
"for datetimelike and offset "
"based windows"
)
# this will raise ValueError on non-fixed freqs
self.win_freq = self.window
self.window = freq.nanos
self.win_type = "freq"
# min_periods must be an integer
if self.min_periods is None:
self.min_periods = 1
elif not is_integer(self.window):
raise ValueError("window must be an integer")
elif self.window < 0:
raise ValueError("window must be non-negative")
if not self.is_datetimelike and self.closed is not None:
raise ValueError(
"closed only implemented for datetimelike " "and offset based windows"
)
def _validate_monotonic(self):
"""
Validate on is_monotonic.
"""
if not self._on.is_monotonic:
formatted = self.on or "index"
raise ValueError("{0} must be " "monotonic".format(formatted))
def _validate_freq(self):
"""
Validate & return window frequency.
"""
from pandas.tseries.frequencies import to_offset
try:
return to_offset(self.window)
except (TypeError, ValueError):
raise ValueError(
"passed window {0} is not "
"compatible with a datetimelike "
"index".format(self.window)
)
_agg_see_also_doc = dedent(
"""
See Also
--------
Series.rolling
DataFrame.rolling
"""
)
_agg_examples_doc = dedent(
"""
Examples
--------
>>> df = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'])
>>> df
A B C
0 -2.385977 -0.102758 0.438822
1 -1.004295 0.905829 -0.954544
2 0.735167 -0.165272 -1.619346
3 -0.702657 -1.340923 -0.706334
4 -0.246845 0.211596 -0.901819
5 2.463718 3.157577 -1.380906
6 -1.142255 2.340594 -0.039875
7 1.396598 -1.647453 1.677227
8 -0.543425 1.761277 -0.220481
9 -0.640505 0.289374 -1.550670
>>> df.rolling(3).sum()
A B C
0 NaN NaN NaN
1 NaN NaN NaN
2 -2.655105 0.637799 -2.135068
3 -0.971785 -0.600366 -3.280224
4 -0.214334 -1.294599 -3.227500
5 1.514216 2.028250 -2.989060
6 1.074618 5.709767 -2.322600
7 2.718061 3.850718 0.256446
8 -0.289082 2.454418 1.416871
9 0.212668 0.403198 -0.093924
>>> df.rolling(3).agg({'A':'sum', 'B':'min'})
A B
0 NaN NaN
1 NaN NaN
2 -2.655105 -0.165272
3 -0.971785 -1.340923
4 -0.214334 -1.340923
5 1.514216 -1.340923
6 1.074618 0.211596
7 2.718061 -1.647453
8 -0.289082 -1.647453
9 0.212668 -1.647453
"""
)
@Substitution(
see_also=_agg_see_also_doc,
examples=_agg_examples_doc,
versionadded="",
klass="Series/Dataframe",
axis="",
)
@Appender(_shared_docs["aggregate"])
def aggregate(self, arg, *args, **kwargs):
return super().aggregate(arg, *args, **kwargs)
agg = aggregate
@Substitution(name="rolling")
@Appender(_shared_docs["count"])
def count(self):
# different impl for freq counting
if self.is_freq_type:
return self._apply("roll_count", "count")
return super().count()
@Substitution(name="rolling")
@Appender(_shared_docs["apply"])
def apply(self, func, raw=None, args=(), kwargs={}):
return super().apply(func, raw=raw, args=args, kwargs=kwargs)
@Substitution(name="rolling")
@Appender(_shared_docs["sum"])
def sum(self, *args, **kwargs):
nv.validate_rolling_func("sum", args, kwargs)
return super().sum(*args, **kwargs)
@Substitution(name="rolling")
@Appender(_doc_template)
@Appender(_shared_docs["max"])
def max(self, *args, **kwargs):
nv.validate_rolling_func("max", args, kwargs)
return super().max(*args, **kwargs)
@Substitution(name="rolling")
@Appender(_shared_docs["min"])
def min(self, *args, **kwargs):
nv.validate_rolling_func("min", args, kwargs)
return super().min(*args, **kwargs)
@Substitution(name="rolling")
@Appender(_shared_docs["mean"])
def mean(self, *args, **kwargs):
nv.validate_rolling_func("mean", args, kwargs)
return super().mean(*args, **kwargs)
@Substitution(name="rolling")
@Appender(_shared_docs["median"])
def median(self, **kwargs):
return super().median(**kwargs)
@Substitution(name="rolling")
@Appender(_shared_docs["std"])
def std(self, ddof=1, *args, **kwargs):
nv.validate_rolling_func("std", args, kwargs)
return super().std(ddof=ddof, **kwargs)
@Substitution(name="rolling")
@Appender(_shared_docs["var"])
def var(self, ddof=1, *args, **kwargs):
nv.validate_rolling_func("var", args, kwargs)
return super().var(ddof=ddof, **kwargs)
@Substitution(name="rolling")
@Appender(_doc_template)
@Appender(_shared_docs["skew"])
def skew(self, **kwargs):
return super().skew(**kwargs)
_agg_doc = dedent(
"""
Examples
--------
The example below will show a rolling calculation with a window size of
four matching the equivalent function call using `scipy.stats`.
>>> arr = [1, 2, 3, 4, 999]
>>> fmt = "{0:.6f}" # limit the printed precision to 6 digits
>>> import scipy.stats
>>> print(fmt.format(scipy.stats.kurtosis(arr[:-1], bias=False)))
-1.200000
>>> print(fmt.format(scipy.stats.kurtosis(arr[1:], bias=False)))
3.999946
>>> s = pd.Series(arr)
>>> s.rolling(4).kurt()
0 NaN
1 NaN
2 NaN
3 -1.200000
4 3.999946
dtype: float64
"""
)
@Appender(_agg_doc)
@Substitution(name="rolling")
@Appender(_shared_docs["kurt"])
def kurt(self, **kwargs):
return super().kurt(**kwargs)
@Substitution(name="rolling")
@Appender(_shared_docs["quantile"])
def quantile(self, quantile, interpolation="linear", **kwargs):
return super().quantile(
quantile=quantile, interpolation=interpolation, **kwargs
)
@Substitution(name="rolling")
@Appender(_doc_template)
@Appender(_shared_docs["cov"])
def cov(self, other=None, pairwise=None, ddof=1, **kwargs):
return super().cov(other=other, pairwise=pairwise, ddof=ddof, **kwargs)
@Substitution(name="rolling")
@Appender(_shared_docs["corr"])
def corr(self, other=None, pairwise=None, **kwargs):
return super().corr(other=other, pairwise=pairwise, **kwargs)
class RollingGroupby(_GroupByMixin, Rolling):
"""
Provide a rolling groupby implementation.
.. versionadded:: 0.18.1
"""
@property
def _constructor(self):
return Rolling
def _gotitem(self, key, ndim, subset=None):
# we are setting the index on the actual object
# here so our index is carried thru to the selected obj
# when we do the splitting for the groupby
if self.on is not None:
self._groupby.obj = self._groupby.obj.set_index(self._on)
self.on = None
return super()._gotitem(key, ndim, subset=subset)
def _validate_monotonic(self):
"""
Validate that on is monotonic;
we don't care for groupby.rolling
because we have already validated at a higher
level.
"""
pass
class Expanding(_Rolling_and_Expanding):
"""
Provide expanding transformations.
.. versionadded:: 0.18.0
Parameters
----------
min_periods : int, default 1
Minimum number of observations in window required to have a value
(otherwise result is NA).
center : bool, default False
Set the labels at the center of the window.
axis : int or str, default 0
Returns
-------
a Window sub-classed for the particular operation
See Also
--------
rolling : Provides rolling window calculations.
ewm : Provides exponential weighted functions.
Notes
-----
By default, the result is set to the right edge of the window. This can be
changed to the center of the window by setting ``center=True``.
Examples
--------
>>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]})
B
0 0.0
1 1.0
2 2.0
3 NaN
4 4.0
>>> df.expanding(2).sum()
B
0 NaN
1 1.0
2 3.0
3 3.0
4 7.0
"""
_attributes = ["min_periods", "center", "axis"]
def __init__(self, obj, min_periods=1, center=False, axis=0, **kwargs):
super().__init__(obj=obj, min_periods=min_periods, center=center, axis=axis)
@property
def _constructor(self):
return Expanding
def _get_window(self, other=None):
"""
Get the window length over which to perform some operation.
Parameters
----------
other : object, default None
The other object that is involved in the operation.
Such an object is involved for operations like covariance.
Returns
-------
window : int
The window length.
"""
axis = self.obj._get_axis(self.axis)
length = len(axis) + (other is not None) * len(axis)
other = self.min_periods or -1
return max(length, other)
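# Added illustrative note: since the returned "window" is at least the full axis
# length, ``s.expanding(min_periods=m)`` behaves like
# ``s.rolling(window=len(s), min_periods=m)`` for a Series ``s``.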
_agg_see_also_doc = dedent(
"""
See Also
--------
DataFrame.expanding.aggregate
DataFrame.rolling.aggregate
DataFrame.aggregate
"""
)
_agg_examples_doc = dedent(
"""
Examples
--------
>>> df = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'])
>>> df
A B C
0 -2.385977 -0.102758 0.438822
1 -1.004295 0.905829 -0.954544
2 0.735167 -0.165272 -1.619346
3 -0.702657 -1.340923 -0.706334
4 -0.246845 0.211596 -0.901819
5 2.463718 3.157577 -1.380906
6 -1.142255 2.340594 -0.039875
7 1.396598 -1.647453 1.677227
8 -0.543425 1.761277 -0.220481
9 -0.640505 0.289374 -1.550670
>>> df.ewm(alpha=0.5).mean()
A B C
0 -2.385977 -0.102758 0.438822
1 -1.464856 0.569633 -0.490089
2 -0.207700 0.149687 -1.135379
3 -0.471677 -0.645305 -0.906555
4 -0.355635 -0.203033 -0.904111
5 1.076417 1.503943 -1.146293
6 -0.041654 1.925562 -0.588728
7 0.680292 0.132049 0.548693
8 0.067236 0.948257 0.163353
9 -0.286980 0.618493 -0.694496
"""
)
@Substitution(
see_also=_agg_see_also_doc,
examples=_agg_examples_doc,
versionadded="",
klass="Series/Dataframe",
axis="",
)
@Appender(_shared_docs["aggregate"])
def aggregate(self, arg, *args, **kwargs):
return super().aggregate(arg, *args, **kwargs)
agg = aggregate
@Substitution(name="expanding")
@Appender(_shared_docs["count"])
def count(self, **kwargs):
return super().count(**kwargs)
@Substitution(name="expanding")
@Appender(_shared_docs["apply"])
def apply(self, func, raw=None, args=(), kwargs={}):
return super().apply(func, raw=raw, args=args, kwargs=kwargs)
@Substitution(name="expanding")
@Appender(_shared_docs["sum"])
def sum(self, *args, **kwargs):
nv.validate_expanding_func("sum", args, kwargs)
return super().sum(*args, **kwargs)
@Substitution(name="expanding")
@Appender(_doc_template)
@Appender(_shared_docs["max"])
def max(self, *args, **kwargs):
nv.validate_expanding_func("max", args, kwargs)
return super().max(*args, **kwargs)
@Substitution(name="expanding")
@Appender(_shared_docs["min"])
def min(self, *args, **kwargs):
nv.validate_expanding_func("min", args, kwargs)
return super().min(*args, **kwargs)
@Substitution(name="expanding")
@Appender(_shared_docs["mean"])
def mean(self, *args, **kwargs):
nv.validate_expanding_func("mean", args, kwargs)
return super().mean(*args, **kwargs)
@Substitution(name="expanding")
@Appender(_shared_docs["median"])
def median(self, **kwargs):
return super().median(**kwargs)
@Substitution(name="expanding")
@Appender(_shared_docs["std"])
def std(self, ddof=1, *args, **kwargs):
nv.validate_expanding_func("std", args, kwargs)
return super().std(ddof=ddof, **kwargs)
@Substitution(name="expanding")
@Appender(_shared_docs["var"])
def var(self, ddof=1, *args, **kwargs):
nv.validate_expanding_func("var", args, kwargs)
return super().var(ddof=ddof, **kwargs)
@
|
Substitution(name="expanding")
|
pandas.util._decorators.Substitution
|
#%%
#importing...
import yfinance as yf
from sklearn.preprocessing import MinMaxScaler
import pandas as pd
from datetime import datetime as dt
from sklearn.model_selection import train_test_split
import numpy as np
import matplotlib.pyplot as plt
Scaler = MinMaxScaler(feature_range=(0,1))
from sklearn.linear_model import LinearRegression
#imports for model
from keras.models import Sequential
from keras.layers import LSTM, Dense, Dropout
from sklearn.model_selection import train_test_split
import math
from sklearn.metrics import mean_squared_error,accuracy_score
import sys
#sys.path.append('../DLpart/')
#from PredictStock import Technicals
import datetime
class LSTMPrediction:
def __init__(self,symbol,look_back):
self.symbol = symbol
self.timeframe = look_back
def fetchFromYahoo(self):
yobj = yf.Ticker(self.symbol)
tickerDict = yobj.info
#print(yobj.info.keys())
df = yobj.history(period=self.timeframe)
df = df.drop(['Stock Splits','Dividends'],axis=1)
df.index = pd.to_datetime(df.index)
#print('\n'+tickerDict['longBusinessSummary'])
print(df.tail())
plt.plot(df['Close'])
return df,tickerDict
def get_train_test_dataset(self,df,training_size=0.70,testing_size=0.30):
try:
print('This will return training and test data')
print('\n'+'Recent Data' + '\n',df.tail())
print('MEAN CLOSE: ',df['Close'].mean())
print('MAX CLOSE: ',df['Close'].max())
print('MIN CLOSE: ',df['Close'].min())
close_price = df.reset_index()['Close']
close_price = Scaler.fit_transform(np.array(close_price).reshape(-1,1))
train_size = int(len(close_price)*training_size)
test_size = int(len(close_price)*testing_size)
train_data = close_price[0:train_size,:]
test_data = close_price[train_size:len(close_price),:1]
return train_data,test_data
except ValueError:
print('Try a different Scrip')
def prepare_data_for_LSTM_krish(self,dataset,timestep=1):
dataX, dataY = [], []
for i in range(len(dataset)- timestep-1):
record = dataset[i:(i+timestep),0]
dataX.append(record)
dataY.append(dataset[i + timestep, 0])
return np.array(dataX), np.array(dataY)
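# Added illustrative note: this builds a sliding-window supervised dataset, e.g.
# with timestep=3 and values [d0, d1, d2, d3, d4, d5] it yields
# X = [[d0, d1, d2], [d1, d2, d3]] and y = [d3, d4]; the extra "-1" in the range
# drops the last possible window.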
def prepare_data_for_LSTM_kaggle(self,dataset):
dataX = []
dataY = []
for i in range(60, len(dataset)):
dataX.append(dataset[i-60:i, 0])
dataY.append(dataset[i, 0])
if i<=61 :
print(dataX)
print(dataY)
print()
dataX, dataY = np.array(dataX), np.array(dataY)
return dataX, dataY
def reshape_for_LSTM(self,train_data, test_data):
train_data = train_data.reshape(train_data.shape[0],train_data.shape[1],1)
test_data = test_data.reshape(test_data.shape[0],test_data.shape[1],1)
return train_data, test_data
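# Added illustrative note: Keras LSTM layers expect 3-D input of shape
# (samples, timesteps, features); the reshape above appends a trailing feature
# dimension of 1 for the univariate close-price series.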
def create_LSTM_model(self,lstm_layers_after_main=0,lstm_units=32,shape=(),loss='mean_squared_error',optimizer='adam'):
dropout = 0.0
model = Sequential()
model.add(LSTM(lstm_units,return_sequences=True,input_shape=shape))
if lstm_layers_after_main > 2 and lstm_layers_after_main < 5:
dropout = 0.4
elif lstm_layers_after_main <= 2:
dropout = 0.1
for i in range(lstm_layers_after_main):
model.add(LSTM(lstm_units,return_sequences=True))
if i % 2 == 0:
continue
model.add(Dropout(dropout))
model.add(LSTM(lstm_units))
model.add(Dense(1))
print('Dropping out ' + str(dropout*100) + '%')
model.summary()
model.compile(loss=loss,optimizer=optimizer)
return model
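# Added usage sketch (not part of the original code): shows how the LSTMPrediction
# helpers above are intended to chain together. The symbol, look-back period and
# hyperparameters below are illustrative assumptions only.
def _demo_lstm_workflow(symbol='RELIANCE.NS', look_back='1y', timestep=60):
    predictor = LSTMPrediction(symbol, look_back)
    df, _info = predictor.fetchFromYahoo()
    train_data, test_data = predictor.get_train_test_dataset(df)
    # build sliding-window samples and add the feature dimension expected by LSTM
    X_train, y_train = predictor.prepare_data_for_LSTM_krish(train_data, timestep=timestep)
    X_test, y_test = predictor.prepare_data_for_LSTM_krish(test_data, timestep=timestep)
    X_train, X_test = predictor.reshape_for_LSTM(X_train, X_test)
    model = predictor.create_LSTM_model(lstm_layers_after_main=2,
                                        lstm_units=32,
                                        shape=(X_train.shape[1], 1))
    model.fit(X_train, y_train, validation_data=(X_test, y_test),
              epochs=10, batch_size=64, verbose=1)
    return model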
class LinearRegPrediction:
def get_preds_lin_reg(self, df, target_col='Close'):
regressor = LinearRegression()
x = df.drop(target_col, axis=1)
y = df[target_col]
xtrain, xtest, ytrain, ytest = train_test_split(x,y,test_size=0.1, random_state=0)
regressor.fit(xtrain, ytrain)
y_pred = regressor.predict(xtest)
ytest = np.array(ytest).reshape(-1,1)
y_pred = np.array(y_pred).reshape(-1,1)
print(regressor.score(xtest, ytest))  # R^2 of the fitted model on the test split
#pred_min = min(y_pred)
#print(pred_min)
valid = pd.DataFrame()
valid['Valid'] = ytest
valid['Prediction'] = y_pred
print('Standard Deviation: ',np.std(y_pred))
print('RMSE: ' , np.sqrt(mean_squared_error(ytest,y_pred)))
class Technicals:
def __init__(self,symbol):
self.symbol = symbol
def EMA(self,timeframe=9,on_field='Close',plot=False, period = "1y", interval = "1d"):
df = yf.Ticker(self.symbol).history(period=period, interval=interval)
df = df.drop(['Stock Splits','Dividends'],axis=1)
df.index = pd.to_datetime(df.index)
EMA = df[on_field].ewm(span=timeframe, adjust=False).mean()
df_new = df[[on_field]]
df_new.reset_index(level=0, inplace=True)
df_new.columns=['ds','y']
if plot == True:
plt.figure(figsize=(16,8))
plt.plot(df_new.ds, df_new.y, label='price')
plt.plot(df_new.ds, EMA, label='EMA line',color='red')
plt.show()
#print('Latest EMA on '+on_field+': ',EMA[len(EMA)-1],'\n')
#return EMA
return EMA[len(EMA)-1]
def MACD(self,on_field='Close',plot=False):
df = yf.Ticker(self.symbol).history(period="1y")
df = df.drop(['Stock Splits','Dividends'],axis=1)
df.index = pd.to_datetime(df.index)
df_new = df[[on_field]]
df_new.reset_index(level=0, inplace=True)
df_new.columns=['ds','y']
#df_new.head()
EMA12 = df_new.y.ewm(span=12, adjust=False).mean()
EMA26 = df_new.y.ewm(span=26, adjust=False).mean()
MACD = EMA12-EMA26
EMA9 = MACD.ewm(span=9, adjust=False).mean()
#plt.plot(df_new.ds, df_new.y, label='price')
if plot == True:
plt.figure(figsize=(16,8))
plt.plot(df_new.ds, MACD, label=self.symbol+' MACD', color='blue')
plt.plot(df_new.ds, EMA9, label=self.symbol+' Signal Line', color='red')
plt.legend(loc='upper left')
plt.show()
#print('\n')
#print(EMA9[len(EMA9)-1], MACD[len(MACD)-1])
if MACD[len(MACD)-1] > MACD[len(MACD)-2]:
return True
else:
return False
# if MACD[len(MACD)-1]-EMA9[len(EMA9)-1] <= 4 and MACD[len(MACD)-1]-EMA9[len(EMA9)-1] >= 0:
# print('ALERT: MACD crossover about to occur, Sell side')
# elif MACD[len(MACD)-1]-EMA9[len(EMA9)-1] >= -4 and MACD[len(MACD)-1]-EMA9[len(EMA9)-1] <= 0:
# print('ALERT: MACD crossover about to occur, Buy side')
# else:
# print('No MACD crossovers')
#return EMA9[len(EMA9)-1], MACD[len(MACD)-1] #latest value of EMA9 line and MACD value
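# Added illustrative note: MACD here is the 12-period EMA minus the 26-period EMA
# of the close, with a 9-period EMA of the MACD as the signal line; the method
# returns True when the latest MACD value is above the previous day's value
# (i.e. the MACD line is rising).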
def RSI_backUpCode(self, period = 14):
# If the RSI value is over 70, the security is considered overbought, if the value is lower than 30,
# it is considered to be oversold
# Using a conservative approach, sell when the RSI value intersects the overbought line
# buy when the value intersects the oversold line (for blue chip stocks)
yobj = yf.Ticker(self.symbol)
df = yobj.history(period="1y")
df = df.drop(['Stock Splits','Dividends'],axis=1)
df_index = pd.to_datetime(df.index)
change = []
gain = []
loss = []
AvgGain = []
AvgLoss = []
RS = []
RSI = []
df_new = pd.DataFrame(df['Close'], index=df.index)
change.insert(0,0)
#change calc
for i in range(1,len(df_new)):
diff = df_new.Close[i] - df_new.Close[i-1]
change.append(diff)
df_new['Change'] = change
#Gain and loss
for i in range(len(df_new)):
if df_new.Change[i] > 0:
gain.append(df_new.Change[i])
loss.append(0)
elif df_new.Change[i] < 0:
loss.append(abs(df_new.Change[i]))
gain.append(0)
else:
gain.append(0)
loss.append(0)
df_new['Gain'] = gain
df_new['Loss'] = loss
#average gain/loss
averageSum_forgain = 0
averageSum_forloss = 0
averageGain = 0
averageLoss = 0
count = 1
for i in range(0,len(df_new)):
averageSum_forgain = averageSum_forgain + df_new.Gain[i]
averageGain = averageSum_forgain/count
AvgGain.insert(i,round(averageGain,4))
averageSum_forloss = averageSum_forloss + df_new.Loss[i]
averageLoss = averageSum_forloss/count
AvgLoss.insert(i,round(averageLoss,4))
count+=1
if averageGain == 0 or averageLoss == 0:
RS.append(0.0)
else:
RS.append(averageGain/averageLoss)
df_new['AvgGain'] = AvgGain
df_new['AvgLoss'] = AvgLoss
df_new['RS'] = RS
rsi = 0
for i in range(0,len(df_new)):
rsi = 100 - 100/(1+df_new.RS[i])
RSI.append(round(rsi,2))
df_new['RSI'] = RSI
plt.figure(figsize=(16,8))
plt.plot(df_index[len(df_new)-period:len(df_new)],df_new.iloc[len(df_new)-period:len(df_new),-1], label='RSI value')
plt.legend(loc='upper left')
plt.show()
print('\nCurrent RSI value: ' , df_new['RSI'][-1])
Latest_RSI_value = float(df_new['RSI'][-1])
return df_new, Latest_RSI_value
def RSI(self,period = 14, plot = False):
df = yf.Ticker(self.symbol).history(period="1y")
df = df.drop(['Stock Splits','Dividends'],axis=1)
df_index = pd.to_datetime(df.index)
change = []
gain = []
loss = []
AvgGain = []
AvgLoss = []
RS = []
RSI = []
df_new = pd.DataFrame(df['Close'], index=df.index)
change.insert(0,0)
#change calc
for i in range(1,len(df_new)):
diff = df_new.Close[i] - df_new.Close[i-1]
change.append(diff)
df_new['Change'] = change
#Gain and loss
for i in range(len(df_new)):
if df_new.Change[i] > 0:
gain.append(df_new.Change[i])
loss.append(0)
elif df_new.Change[i] < 0:
loss.append(abs(df_new.Change[i]))
gain.append(0)
else:
gain.append(0)
loss.append(0)
df_new['Gain'] = gain
df_new['Loss'] = loss
#average gain/loss
averageSum_forgain = 0
averageSum_forloss = 0
averageGain = 0
averageLoss = 0
count = 1
for i in range(0,len(df_new)):
averageSum_forgain = averageSum_forgain + df_new.Gain[i]
averageGain = averageSum_forgain/count
AvgGain.insert(i,averageGain)
averageSum_forloss = averageSum_forloss + df_new.Loss[i]
averageLoss = averageSum_forloss/count
AvgLoss.insert(i,averageLoss)
count+=1
if averageGain == 0 or averageLoss == 0:
RS.append(0.0)
else:
RS.append(averageGain/averageLoss)
df_new['AvgGain'] = AvgGain
df_new['AvgLoss'] = AvgLoss
df_new['RS'] = RS
rsi = 0
for i in range(len(df)-period, len(df)):
rsi = 100 - 100/(1+df_new.RS[i])
RSI.append(round(rsi,2))
#df_new['RSI'] = RSI
if plot == True:
plt.figure(figsize=(16,8))
plt.plot(df_index[len(df_new)-period:len(df_new)],RSI, label='RSI value')
plt.legend(loc='upper left')
plt.show()
print('\nCurrent RSI value: ' , RSI[len(RSI)-1])
Latest_RSI_value = RSI[-1]
Previous_day_rsi_value = RSI[-2]
if (Previous_day_rsi_value < Latest_RSI_value) and (Latest_RSI_value >= 40) and (Latest_RSI_value <= 60):
return True
else:
return False
#return df_new, RSI
#return RSI
#return Latest_RSI_value
def BollingerBands(self, degree_of_freedom = 20, period = 20, on_field = 'Close'):
yobj = yf.Ticker(self.symbol)
df = yobj.history(period="1mo")
df = df.drop(['Stock Splits','Dividends'],axis=1)
df_index = pd.to_datetime(df.index)
#print(df[on_field].rolling(window = period).sum()/period)
#SMA calculated
MA = df[on_field].rolling(window = period).sum()/period
typical_price = []
#printing SMA
#printing BOLU
BOLU = []
BOLD = []
for i in range(len(df)-period,len(df)):
#typical price = (high+low+close)/3
typical_price.append((df.iloc[i,1] + df.iloc[i,2] + df.iloc[i,3]) / 3)
typical_price = pd.Series(typical_price)
for i in range(len(typical_price)):
std = 2*( math.sqrt( math.pow(typical_price[i]-typical_price.mean(),2) / len(typical_price) ) )
BOLU.append(typical_price[i] + std)
BOLD.append(typical_price[i] - std)
# BOLU = pd.Series(BOLU)
# BOLD = pd.Series(BOLD)
print("Middle value: " + str(MA.iloc[-1]))
print("Upper Band: " + str(BOLU[-1]))
print("Lower Band: " + str(BOLD[-1]))
#general analysis
class StockListAnalysis:
def __init__(self):
self.niftyColumns = ['SYMBOL','OPEN','HIGH','LOW','PREVCLOSE','LTP','TODAYS_CHANGE',
'CHANGE_%','VOLUME','VALUE','52WH','52WL','1Y_CHANGE%','1M_CHANGE%']
self.niftySectorColumns = ['INDEX','CURRENT','%CHANGE','OPEN','HIGH','LOW','PREVCLOSE','PREVDAY','1W_CLOSE','1M_CLOSE','1Y_CLOSE',
'52WH','52WL','1Y_CHANGE%','1M_CHANGE%']
try:
self.nifty_100_data = pd.read_csv('../PreFedIndexData/MW-NIFTY-100-'+datetime.datetime.strftime(datetime.datetime.today(),"%d-%b-%Y")+'.csv', names=self.niftyColumns,header=0)
self.nifty_sector_data = pd.read_csv('../PreFedIndexData/MW-All-Indices-'+datetime.datetime.strftime(datetime.datetime.today(),"%d-%b-%Y")+'.csv', names=self.niftySectorColumns, header=0)
except FileNotFoundError:
self.nifty_100_data = pd.read_csv('PreFedIndexData/MW-NIFTY-100-'+'12-Jun-2021'+'.csv', names=self.niftyColumns,header=0)
self.nifty_sector_data = pd.read_csv('PreFedIndexData/MW-All-Indices-'+'12-Jun-2021'+'.csv', names=self.niftySectorColumns, header=0)
def AnalyzeNiftySectors(self):
print('\nBest Sectors to invest in right now...')
#FILTERING NIFTY SECTORS ABOVE
NIFTY_SECTORS = pd.DataFrame(self.nifty_sector_data.iloc[[13,14,15,17,18,19,20,21,22,23,24,44,45],:].values, columns=self.niftySectorColumns)
NIFTY_SECTORS = NIFTY_SECTORS.reset_index(drop=True)
#NIFTY_SECTORS.columns
Highest_1Y_return_sectors = []
Highest_1M_return_sectors = []
for i in range(len(NIFTY_SECTORS)):
if float(NIFTY_SECTORS['1Y_CHANGE%'][i]) > 50:
#print(NIFTY_SECTORS['INDEX'][i])
Highest_1Y_return_sectors.append([NIFTY_SECTORS['INDEX'][i],NIFTY_SECTORS['1Y_CHANGE%'][i]])
if float(NIFTY_SECTORS['1M_CHANGE%'][i]) > 10:
#print(NIFTY_SECTORS['INDEX'][i])
Highest_1M_return_sectors.append([NIFTY_SECTORS['INDEX'][i],NIFTY_SECTORS['1M_CHANGE%'][i]])
return pd.DataFrame(Highest_1Y_return_sectors, columns=['SECTOR','365_DAY_RETURN%']) , pd.DataFrame(Highest_1M_return_sectors, columns=['SECTOR','30_DAY_RETURN%'])
def SwingTrade(self):
#FILTERING NIFTY 100
nifty_filtered = []
for i in range(1,len(self.nifty_100_data)):
try:
if (float(self.nifty_100_data['1Y_CHANGE%'][i])>50) and (float(self.nifty_100_data['1M_CHANGE%'][i])>5):
#self.nifty_100_data['1Y_CHANGE%'][i]
#print(self.nifty_100_data['SYMBOL'][i])
nifty_filtered.append([self.nifty_100_data['SYMBOL'][i],self.nifty_100_data['1M_CHANGE%'][i]])
except:
continue
nifty_filtered = pd.DataFrame(nifty_filtered, columns = ['SYMBOL','1_MONTH_RETURN%'])
#SUGGESTIONS
suggestions = []
print('\n Please wait this might take a few seconds... \n')
for i in range(len(nifty_filtered)):
LTP = round(float(yf.Ticker(nifty_filtered['SYMBOL'][i]+'.NS').history(period='1d')['Close'][-1]),ndigits=2)
symbol_ns = nifty_filtered['SYMBOL'][i] + '.NS'
suggestions.append([nifty_filtered['SYMBOL'][i],
                    nifty_filtered['1_MONTH_RETURN%'][i],
                    [Technicals(symbol_ns).RSI()],
                    [Technicals(symbol_ns).EMA(timeframe=50) < LTP],
                    [Technicals(symbol_ns).MACD()],
                    LTP,
                    round(Technicals(symbol_ns).EMA(timeframe=20, interval="60m"), ndigits=-1)])
suggestions =
|
pd.DataFrame(suggestions, columns = ['SYMBOL','1_MONTH_RETURN%','GOOD_RSI_VALUE','LTP_ABOVE_50_EMA','GOOD_MACD','LAST_TRADED_PRICE_₹','20_EMA'])
|
pandas.DataFrame
|
import os
import shutil
from attrdict import AttrDict
import numpy as np
import pandas as pd
from scipy.stats import gmean
from deepsense import neptune
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import train_test_split, KFold, StratifiedKFold
from . import pipeline_config as cfg
from .pipelines import PIPELINES
from .hyperparameter_tuning import RandomSearchTuner, HyperoptTuner, SkoptTuner, set_params
from .utils import init_logger, read_params, set_seed, create_submission, verify_submission, calculate_rank, \
read_oof_predictions, parameter_eval
set_seed(cfg.RANDOM_SEED)
logger = init_logger()
ctx = neptune.Context()
params = read_params(ctx, fallback_file='./configs/neptune.yaml')
class PipelineManager:
def train(self, pipeline_name, dev_mode):
train(pipeline_name, dev_mode)
def evaluate(self, pipeline_name, dev_mode):
evaluate(pipeline_name, dev_mode)
def predict(self, pipeline_name, dev_mode, submit_predictions):
predict(pipeline_name, dev_mode, submit_predictions)
def train_evaluate_cv(self, pipeline_name, model_level, dev_mode):
train_evaluate_cv(pipeline_name, model_level, dev_mode)
def train_evaluate_predict_cv(self, pipeline_name, model_level, dev_mode, submit_predictions):
train_evaluate_predict_cv(pipeline_name, model_level, dev_mode, submit_predictions)
def train(pipeline_name, dev_mode):
logger.info('TRAINING')
if bool(params.clean_experiment_directory_before_training) and os.path.isdir(params.experiment_directory):
logger.info('Cleaning experiment_directory...')
shutil.rmtree(params.experiment_directory)
tables = _read_data(dev_mode)
logger.info('Shuffling and splitting into train and test...')
train_data_split, valid_data_split = train_test_split(tables.train_set,
test_size=params.validation_size,
random_state=cfg.RANDOM_SEED,
shuffle=params.shuffle)
logger.info('Target mean in train: {}'.format(train_data_split[cfg.TARGET_COLUMNS].mean()))
logger.info('Target mean in valid: {}'.format(valid_data_split[cfg.TARGET_COLUMNS].mean()))
logger.info('Train shape: {}'.format(train_data_split.shape))
logger.info('Valid shape: {}'.format(valid_data_split.shape))
train_data = {'main_table': {'X': train_data_split.drop(cfg.TARGET_COLUMNS, axis=1),
'y': train_data_split[cfg.TARGET_COLUMNS].values.reshape(-1),
'X_valid': valid_data_split.drop(cfg.TARGET_COLUMNS, axis=1),
'y_valid': valid_data_split[cfg.TARGET_COLUMNS].values.reshape(-1),
},
'application': {'X': tables.application},
'bureau_balance': {'X': tables.bureau_balance},
'bureau': {'X': tables.bureau},
'credit_card_balance': {'X': tables.credit_card_balance},
'installments_payments': {'X': tables.installments_payments},
'pos_cash_balance': {'X': tables.pos_cash_balance},
'previous_application': {'X': tables.previous_application},
}
pipeline = PIPELINES[pipeline_name](config=cfg.SOLUTION_CONFIG, train_mode=True)
pipeline.clean_cache()
logger.info('Start pipeline fit and transform')
pipeline.fit_transform(train_data)
pipeline.clean_cache()
def evaluate(pipeline_name, dev_mode):
logger.info('EVALUATION')
logger.info('Reading data...')
tables = _read_data(dev_mode)
logger.info('Shuffling and splitting to get validation split...')
_, valid_data_split = train_test_split(tables.train_set,
test_size=params.validation_size,
random_state=cfg.RANDOM_SEED,
shuffle=params.shuffle)
logger.info('Target mean in valid: {}'.format(valid_data_split[cfg.TARGET_COLUMNS].mean()))
logger.info('Valid shape: {}'.format(valid_data_split.shape))
y_true = valid_data_split[cfg.TARGET_COLUMNS].values
eval_data = {'main_table': {'X': valid_data_split.drop(cfg.TARGET_COLUMNS, axis=1),
'y': None,
},
'application': {'X': tables.application},
'bureau_balance': {'X': tables.bureau_balance},
'bureau': {'X': tables.bureau},
'credit_card_balance': {'X': tables.credit_card_balance},
'installments_payments': {'X': tables.installments_payments},
'pos_cash_balance': {'X': tables.pos_cash_balance},
'previous_application': {'X': tables.previous_application},
}
pipeline = PIPELINES[pipeline_name](config=cfg.SOLUTION_CONFIG, train_mode=False)
pipeline.clean_cache()
logger.info('Start pipeline transform')
output = pipeline.transform(eval_data)
pipeline.clean_cache()
y_pred = output['prediction']
logger.info('Calculating ROC_AUC on validation set')
score = roc_auc_score(y_true, y_pred)
logger.info('ROC_AUC score on validation is {}'.format(score))
ctx.channel_send('ROC_AUC', 0, score)
def predict(pipeline_name, dev_mode, submit_predictions):
logger.info('PREDICTION')
tables = _read_data(dev_mode)
test_data = {'main_table': {'X': tables.test_set,
'y': None,
},
'application': {'X': tables.application},
'bureau_balance': {'X': tables.bureau_balance},
'bureau': {'X': tables.bureau},
'credit_card_balance': {'X': tables.credit_card_balance},
'installments_payments': {'X': tables.installments_payments},
'pos_cash_balance': {'X': tables.pos_cash_balance},
'previous_application': {'X': tables.previous_application},
}
pipeline = PIPELINES[pipeline_name](config=cfg.SOLUTION_CONFIG, train_mode=False)
pipeline.clean_cache()
logger.info('Start pipeline transform')
output = pipeline.transform(test_data)
pipeline.clean_cache()
y_pred = output['prediction']
if not dev_mode:
logger.info('creating submission file...')
submission = create_submission(tables.test_set, y_pred)
logger.info('verifying submission...')
sample_submission = pd.read_csv(params.sample_submission_filepath)
"""
Tests for Series timezone-related methods
"""
from datetime import datetime
from dateutil.tz import tzoffset
import numpy as np
import pytest
from pandas import Series
import pandas._testing as tm
from pandas.core.indexes.datetimes import date_range
class TestSeriesTimezones:
def test_dateutil_tzoffset_support(self):
values = [188.5, 328.25]
tzinfo = tzoffset(None, 7200)
index = [
datetime(2012, 5, 11, 11, tzinfo=tzinfo),
datetime(2012, 5, 11, 12, tzinfo=tzinfo),
]
series = Series(data=values, index=index)
assert series.index.tz == tzinfo
# it works! #2443
repr(series.index[0])
@pytest.mark.parametrize("copy", [True, False])
@pytest.mark.parametrize(
"method, tz", [["tz_localize", None], ["tz_convert", "Europe/Berlin"]]
)
def test_tz_localize_convert_copy_inplace_mutate(self, copy, method, tz):
# GH 6326
result = Series(
np.arange(0, 5), index=date_range("20131027", periods=5, freq="1H", tz=tz)
)
from django.shortcuts import render
from django.http import HttpResponse
from django.views import View
import pytz
import numpy as np
from datetime import datetime, time
import pandas as pd
import os, subprocess, psutil
from django.conf.urls.static import static
from . forms import SubmitTickerSymbolForm
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) #points to static folder
class CommandCenterView(View):
def __init__(self):
self.the_form = SubmitTickerSymbolForm()
self.month_year = datetime.now().strftime('%d | %B | %Y')
def contextRender(self, request,*args,**kwargs):
'''Common context renderer for the CommandCenterView'''
context = {
"title": "Command center",
"form": self.the_form,
"month_year": self.month_year,
"twsRunning": kwargs['msg'],
}
return render(request, "ib/commandCenter.html", context)
def get(self, request, *args, **kwargs):
t_msg = "Keep up the good work :)"
return self.contextRender(request\
,msg=t_msg)
def post(self, request, *args, **kwargs):
form = SubmitTickerSymbolForm(request.POST)
# launch trader work station(TWS)
if request.method == 'POST' and 'launchTws' in request.POST.keys():
if "tws.exe" in (p.name() for p in psutil.process_iter()):
t_msg = "TWS is running..."
return self.contextRender(request\
,msg=t_msg)
else:
subprocess.Popen(['C:\\Jts\\tws.exe'])
t_msg = "Launching TWS..."
return self.contextRender(request\
,msg=t_msg)
#add a ticker to forex list
elif request.method == 'POST' and 'forexQuote0' in request.POST.keys():
fName = "static\\csv\\forexWatchList.csv"
csvPathForex = os.path.join(BASE_DIR, fName )
forex_ticker = form.data['tickerSymbol'].upper()
columns = ['ticker', 'pid', 'clientid']
emptydf = pd.DataFrame(columns=columns)
try:
df = pd.read_csv(csvPathForex)
except:
emptydf.to_csv(csvPathForex, sep=',', index=False)
df = pd.read_csv(csvPathForex)
client_id = [i for i in range(20, 25) if i not in df['clientid'].values ][0]
if forex_ticker in df['ticker'].values:
t_msg = "FAILED! "+forex_ticker+ " is already in the STOCK list"
return self.contextRender(request\
,msg=t_msg)
else:
insertPoint = len(df['ticker'].values)
df.loc[insertPoint, 'ticker'] = forex_ticker # df.loc appends a new row at the end of the frame
df.loc[insertPoint, 'clientid'] = client_id
df.to_csv(csvPathForex, sep=',', index=False)
t_msg = " Added " + forex_ticker+ " to FOREX list"
return self.contextRender(request\
,msg=t_msg)
#add a ticker to stock list
elif request.method == 'POST' and 'stockQuote0' in request.POST.keys():
fName = "static\\csv\\stockWatchList.csv"
csvPathStock = os.path.join(BASE_DIR, fName )
stock_ticker = form.data['tickerSymbol'].upper()
columns = ['ticker', 'pid', 'clientid']
emptydf = pd.DataFrame(columns=columns)
try:
df = pd.read_csv(csvPathStock)
except:
emptydf.to_csv(csvPathStock, sep=',', index=False)
df = pd.read_csv(csvPathStock)
# insertPoint = len([i for i in df['ticker'].values if isinstance(i, str)])
client_id = [i for i in range(5, 20) if i not in df['clientid'].values ][0]
if stock_ticker in df['ticker'].values:
t_msg = "FAILED! "+stock_ticker+ " is already in the STOCK list"
return self.contextRender(request\
,msg=t_msg)
else:
#create empty csv to deal with file-not-found errors
fName = "static\\csv\\realtimeData\\" + stock_ticker + "_raw_realtime_ib.csv"
csvPath = os.path.join(BASE_DIR, fName ) # original data
columns = ['Time', 'Open', 'High', 'Low', 'Close']
try:
if datetime.fromtimestamp(os.path.getmtime(csvPath)).date() < \
datetime.now(tz=pytz.timezone('US/Eastern')).date():
emptyDf = pd.DataFrame(columns=columns)
emptyDf.to_csv(csvPath, sep=',', index=False)
except:
emptyDf = pd.DataFrame(columns=columns)
emptyDf.to_csv(csvPath, sep=',', index=False)
# 1. Import packages
from __future__ import unicode_literals
import numpy as np
import pandas as pd
import sys
import os
import gensim
from tqdm import tqdm
from keras.preprocessing import sequence
from keras.models import Sequential
from keras.layers import Dense, Activation
import logging
import re
from utils.stemming import stemming_row
from utils.tokens import word_tokens
from utils.dictionary import create_dict
from utils.tfidf import tfidf
from utils.longest import longest_question
from utils.distance import distance
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s',level=logging.INFO)
# 2. User setting 1: Import data
# Important: the input csv files are expected in a 'data' directory relative to the current working directory
path = os.getcwd()
os.chdir(path)
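# Optional guard (not in the original script): fail early with a clear message
# if the expected ./data files are missing, since the reads below assume them.
for _fname in ("data/train_data.csv", "data/test_data.csv", "data/train_labels.csv"):
    if not os.path.exists(_fname):
        raise IOError("Expected input file not found: " + _fname)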
train_df = pd.read_csv("data/train_data.csv", delimiter=',')
test_df = pd.read_csv("data/test_data.csv", delimiter=',')
train_duplicate = pd.read_csv("data/train_labels.csv", delimiter=',')
#questions = list(train_df['question1'].values.astype('U')) + list(train_df['question2'].values.astype('U'))
# -------------------------------------------- SECTION 2
# Note: the TF-IDF features below are built directly from `train_df` and `test_df`
print('question1: ', train_df['question1'])
def filter(text):
text = str(text)
text = text.lower()
# Clean the text
text = re.sub(r"[^A-Za-z0-9^,!.\/'+-=]", " ", text)
text = re.sub(r"what's", "what is ", text)
text = re.sub(r"\'s", " ", text)
text = re.sub(r"\'ve", " have ", text)
text = re.sub(r"can't", "cannot ", text)
text = re.sub(r"n't", " not ", text)
text = re.sub(r"i'm", "i am ", text)
text = re.sub(r"\'re", " are ", text)
text = re.sub(r"\'d", " would ", text)
text = re.sub(r"\'ll", " will ", text)
text = re.sub(r",", " ", text)
text = re.sub(r"\.", " ", text)
text = re.sub(r"!", " ! ", text)
text = re.sub(r"\/", " ", text)
text = re.sub(r"\^", " ^ ", text)
text = re.sub(r"\+", " + ", text)
text = re.sub(r"\-", " - ", text)
text = re.sub(r"\=", " = ", text)
text = re.sub(r"'", " ", text)
text = re.sub(r"(\d+)(k)", r"\g<1>000", text)
text = re.sub(r":", " : ", text)
text = re.sub(r" e g ", " eg ", text)
text = re.sub(r" b g ", " bg ", text)
text = re.sub(r" u s ", " american ", text)
text = re.sub(r"\0s", "0", text)
text = re.sub(r" 9 11 ", "911", text)
text = re.sub(r"e - mail", "email", text)
text = re.sub(r"j k", "jk", text)
text = re.sub(r"\s{2,}", " ", text)
return text
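# Illustrative check of filter() on a made-up sentence (not taken from the data):
# contractions get expanded, punctuation is spaced out or dropped, and "10k"
# becomes "10000". Uncomment to inspect:
# print(filter("What's the best way to e-mail 10k users? I'm not sure."))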
train_df['question1'] = [filter(question1) for question1 in train_df['question1']]
train_df['question2'] = [filter(question2) for question2 in train_df['question2']]
test_df['question1'] = [filter(question1) for question1 in test_df['question1']]
test_df['question2'] = [filter(question2) for question2 in test_df['question2']]
# 7. TF-IDF
train_df['q1_feats'] = tfidf(train_df['question1'])
train_df['q2_feats'] = tfidf(train_df['question2'])
test_df['q1_feats'] = tfidf(test_df['question1'])
test_df['q2_feats'] = tfidf(test_df['question2'])
# 8. Train set
train_longest = longest_question(train_df['q1_feats'], train_df['q2_feats'])
test_longest = longest_question(test_df['q1_feats'], test_df['q2_feats'])
train_questions1 = sequence.pad_sequences(train_df['q1_feats'], train_longest)
train_questions2 = sequence.pad_sequences(train_df['q2_feats'], train_longest)
test_questions1 = sequence.pad_sequences(test_df['q1_feats'], test_longest)
test_questions2 = sequence.pad_sequences(test_df['q2_feats'], test_longest)
train_distances = distance(train_questions1, train_questions2)
test_distances = distance(test_questions1, test_questions2)
model = Sequential()
model.add(Dense(64, input_dim=1, activation='relu'))
model.add(Dense(16, activation='relu'))
model.add(Dense(16, activation='relu'))
model.add(Dense(16, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['accuracy'])
#print('distances: ', distances)
model.fit(train_distances, train_duplicate['is_duplicate'], epochs=10, batch_size=32)
predictions = model.predict(test_distances, verbose=1)
rounded = [int(round(x[0])) for x in predictions]
submission_df = pd.DataFrame(index=test_df.test_id, columns=['is_duplicate'], dtype=np.uint)
import dash
import dash_table
import pandas as pd
import dash_core_components as dcc
import dash_html_components as html
import plotly.graph_objs as go
# Read data from a csv
z_data = pd.read_csv(
'https://raw.githubusercontent.com/plotly/datasets/master/api_docs/mt_bruno_elevation.csv')
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
server = app.server
app.config.suppress_callback_exceptions = False
tab_selected_style = {
'backgroundColor': '#119DFF',
'color': 'white'
}
indata = pd.read_csv('input.txt', delim_whitespace=True, escapechar='#')
indata.rename(columns=lambda x: x.strip(), inplace=True)
outdata = pd.read_csv('output.txt', delim_whitespace=True, escapechar='#')
"""
Author: <NAME>
Date: 6th/July/2020
Copyright: <NAME>, 2020
email: <EMAIL>
website: https://johdev.com
"""
import numpy as np
import pandas as pd
from sklearn.model_selection import cross_val_score
import matplotlib.pyplot as plt
import joblib
class ModelGenerator(object):
"""A General Model Generator Class to perform following opreation:
* Add Model as a list into the Generator.
* Perform Cross Validation process to calculate the MSE(Mean Square Error).
* Select the model from the list which perform the best MSE result.
* Perform the prediction for the test data based on the best performance model.
Parameters
----------------
- models: List, a list to accept Model instance from Scikit-Learn.
- data: Class, a data preprocessing class from `preprocessing.py`
- best_model = None
- predictions = None
- mean_mse = {}
Method
----------------
add_model(self, model):
- Append the new model instance from scikit-learn into `models` list
cross_validate(self, k=5, num_procs=-1):
- Perform cross-validation for each model in the `models` list using the `data` class,
with 5-fold cross-validation and `n_jobs=-1` by default
- return the `mean_mse` dict mapping each model to its MSE
select_best_model(self):
- select the model with the lowest MSE value.
- return `best_model`
best_model_fit(self, features, targets):
- Train the best model from `best_model`
best_model_predict(self, features):
- make prediction on test set
- return `predictions`
save_results(self):
- save the best performance model in `.joblib` file in ./models folder
Static Method
----------------
get_feature_importance(models, col):
- determine whether the particular model has a `feature_importances_` attribute
if yes, print out the `feature_importances_`
Examples
----------------
>>> from src.preprocessing import DataSetGenerator
>>> from src.features.build_features import FeatureGenerator
>>> from src.models.predict_model import ModelGenerator
>>> data = DataSetGenerator(train_feature_file, train_target_file, test_file, cat_cols, num_cols, target_col, id_col)
>>> models = ModelGenerator(models=[], data=data)
>>> models.add_model(LinearRegression())
>>> models.add_model(RandomForestRegressor(n_estimators=100, n_jobs=-1, max_depth=15, min_samples_split=80,
max_features=8))
>>> models.cross_validate()
>>> models.select_best_model()
>>> models.best_model_fit(...)
>>> models.best_model_predict(...)
>>> models.get_feature_importance(...)
>>> models.save_results()"""
def __init__(self, models, data):#, default_num_iters=10, verbose_lvl=0):
'''initializes model list and dicts'''
self.models = list(models)
self.data = data
self.best_model = None
self.predictions = None
self.mean_mse = {}
#self.default_num_iters = default_num_iters
#self.verbose_lvl = verbose_lvl
def add_model(self, model):
self.models.append(model)
def cross_validate(self, k=5, num_procs=-1):
'''cross validate models using given data'''
feature_df = self.data.train_df[self.data.feature_cols]
target_df = self.data.train_df[self.data.target_col]
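# Note: with scoring='neg_mean_squared_error', cross_val_score returns negative
# MSE values (sklearn maximizes scores), so the sign is flipped below before storing.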
for model in self.models:
neg_mse = cross_val_score(model, feature_df, target_df, cv=k, n_jobs=num_procs, scoring='neg_mean_squared_error')
self.mean_mse[model] = -1.0 * np.mean(neg_mse)
def select_best_model(self):
'''select model with lowest mse'''
self.best_model = min(self.mean_mse, key=self.mean_mse.get)
def best_model_fit(self, features, targets):
'''fits best model'''
self.best_model.fit(features, targets)
def best_model_predict(self, features):
'''scores features using best model'''
self.predictions = self.best_model.predict(features)
def save_results(self):
joblib.dump(self.best_model, '../../model/best_model.joblib')
@staticmethod
def get_feature_importance(model, cols):
'''retrieves and sorts feature importances'''
if hasattr(model, 'feature_importances_'):
importances = model.feature_importances_
feature_importances = pd.DataFrame({'feature':cols, 'importance':importances})
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import math
import scipy.stats as stats
from matplotlib import gridspec
from matplotlib.lines import Line2D
from .util import *
import seaborn as sns
from matplotlib.ticker import FormatStrFormatter
import matplotlib.pylab as pl
import matplotlib.dates as mdates
from matplotlib.patches import Patch
import h5py
import matplotlib.patheffects as pe
from .sanker import Sanker
import imageio
class Visualizer():
def __init__(self, district_list, private_list, city_list, contract_list, bank_list, leiu_list):
self.district_list = district_list.copy()
self.private_list = private_list.copy()
for x in city_list:
self.private_list.append(x)
self.contract_list = contract_list
self.bank_list = bank_list
self.leiu_list = leiu_list
self.private_districts = {}
for x in self.private_list:
self.private_districts[x.name] = []
for xx in x.district_list:
self.private_districts[x.name].append(xx)
inflow_inputs = pd.read_csv('calfews_src/data/input/calfews_src-data.csv', index_col=0, parse_dates=True)
x2_results = pd.read_csv('calfews_src/data/input/x2DAYFLOW.csv', index_col=0, parse_dates=True)
self.observations = inflow_inputs.join(x2_results)
self.observations['delta_outflow'] = self.observations['delta_inflow'] + self.observations['delta_depletions'] - self.observations['HRO_pump'] - self.observations['TRP_pump']
self.index_o = self.observations.index
self.T_o = len(self.observations)
self.day_month_o = self.index_o.day
self.month_o = self.index_o.month
self.year_o = self.index_o.year
kern_bank_observations = pd.read_csv('calfews_src/data/input/kern_water_bank_historical.csv')
kern_bank_observations = kern_bank_observations.set_index('Year')
semitropic_bank_observations = pd.read_csv('calfews_src/data/input/semitropic_bank_historical.csv')
semitropic_bank_observations = semitropic_bank_observations.set_index('Year')
total_bank_kwb = np.zeros(self.T_o)
total_bank_smi = np.zeros(self.T_o)
for x in range(0, self.T_o):
if self.month_o[x] > 9:
year_str = self.year_o[x]
else:
year_str = self.year_o[x] - 1
if self.month_o[x] == 9 and self.day_month_o[x] == 30:
year_str = self.year_o[x]
total_bank_kwb[x] = kern_bank_observations.loc[year_str, 'Ag'] + kern_bank_observations.loc[year_str, 'Mixed Purpose']
deposit_history = semitropic_bank_observations[semitropic_bank_observations.index <= year_str]
total_bank_smi[x] = deposit_history['Metropolitan'].sum() + deposit_history['South Bay'].sum()
self.observations['kwb_accounts'] = pd.Series(total_bank_kwb, index=self.observations.index)
self.observations['smi_accounts'] = pd.Series(total_bank_smi, index=self.observations.index)
def get_results_sensitivity_number(self, results_file, sensitivity_number, start_month, start_year, start_day):
self.values = {}
numdays_index = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
with h5py.File(results_file, 'r') as f:
data = f['s' + sensitivity_number]
names = data.attrs['columns']
names = list(map(lambda x: str(x).split("'")[1], names))
df_data = pd.DataFrame(data[:], columns=names)
for x in df_data:
self.values[x] = df_data[x]
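# The loop below builds a daily datetime index by hand: day/month/year counters are
# advanced one row at a time, with February extended to 29 days every fourth year.
# Assuming the simulation output really is strictly daily, pd.date_range(..., freq='D')
# would likely produce an equivalent index; the explicit loop is kept as the source of truth.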
datetime_index = []
monthcount = start_month
yearcount = start_year
daycount = start_day
leapcount = np.remainder(start_year, 4)
for t in range(0, len(self.values[x])):
datetime_index.append(str(yearcount) + '-' + str(monthcount) + '-' + str(daycount))
daycount += 1
if leapcount == 0 and monthcount == 2:
numdays_month = numdays_index[monthcount - 1] + 1
else:
numdays_month = numdays_index[monthcount - 1]
if daycount > numdays_month:
daycount = 1
monthcount += 1
if monthcount == 13:
monthcount = 1
yearcount += 1
leapcount += 1
if leapcount == 4:
leapcount = 0
self.values['Datetime'] = pd.to_datetime(datetime_index)
self.values = pd.DataFrame(self.values)
self.values = self.values.set_index('Datetime')
self.index = self.values.index
self.T = len(self.values.index)
self.day_year = self.index.dayofyear
self.day_month = self.index.day
self.month = self.index.month
self.year = self.index.year
self.starting_year = self.index.year[0]
self.ending_year = self.index.year[-1]
self.number_years = self.ending_year - self.starting_year
total_kwb_sim = np.zeros(len(self.values))
total_smi_sim = np.zeros(len(self.values))
for district_partner in ['DLR', 'KCWA', 'ID4', 'SMI', 'TJC', 'WON', 'WRM']:
total_kwb_sim += self.values['kwb_' + district_partner]
self.values['kwb_total'] = pd.Series(total_kwb_sim, index = self.values.index)
for district_partner in ['SOB', 'MET']:
total_smi_sim += self.values['semitropic_' + district_partner]
self.values['smi_total'] = pd.Series(total_smi_sim, index = self.values.index)
def set_figure_params(self):
self.figure_params = {}
self.figure_params['delta_pumping'] = {}
self.figure_params['delta_pumping']['extended_simulation'] = {}
self.figure_params['delta_pumping']['extended_simulation']['outflow_list'] = ['delta_outflow', 'delta_outflow']
self.figure_params['delta_pumping']['extended_simulation']['pump1_list'] = ['delta_HRO_pump', 'HRO_pump']
self.figure_params['delta_pumping']['extended_simulation']['pump2_list'] = ['delta_TRP_pump', 'TRP_pump']
self.figure_params['delta_pumping']['extended_simulation']['scenario_labels'] = ['Model Validation', 'Extended Simulation']
self.figure_params['delta_pumping']['extended_simulation']['simulation_labels'] = ['delta_HRO_pump', 'delta_TRP_pump', 'delta_outflow']
self.figure_params['delta_pumping']['extended_simulation']['observation_labels'] = ['HRO_pump', 'TRP_pump', 'delta_outflow']
self.figure_params['delta_pumping']['extended_simulation']['agg_list'] = ['AS-OCT', 'AS-OCT', 'D']
self.figure_params['delta_pumping']['extended_simulation']['unit_mult'] = [1.0, 1.0, cfs_tafd]
self.figure_params['delta_pumping']['extended_simulation']['max_value_list'] = [5000, 5000, 15]
self.figure_params['delta_pumping']['extended_simulation']['use_log_list'] = [False, False, True]
self.figure_params['delta_pumping']['extended_simulation']['use_cdf_list'] = [False, False, True]
self.figure_params['delta_pumping']['extended_simulation']['scenario_type_list'] = ['observation', 'validation', 'scenario']
self.figure_params['delta_pumping']['extended_simulation']['x_label_list'] = ['Total Pumping, SWP Delta Pumps (tAF/year)', 'Total Pumping, CVP Delta Pumps (tAF/year)', 'Daily Exceedence Probability', '']
self.figure_params['delta_pumping']['extended_simulation']['y_label_list'] = ['Probability Density', 'Probability Density', 'Daily Delta Outflow (tAF)', 'Relative Frequency of Water-year Types within Simulation']
self.figure_params['delta_pumping']['extended_simulation']['legend_label_names1'] = ['Historical (1996-2016) Observations', 'Historical (1996-2016) Model Validation', 'Extended Simulation']
self.figure_params['delta_pumping']['extended_simulation']['legend_label_names2'] = ['Critical', 'Dry', 'Below Normal', 'Above Normal', 'Wet']
self.figure_params['state_estimation'] = {}
for x in ['publication', 'sacramento', 'sanjoaquin', 'tulare']:
self.figure_params['state_estimation'][x] = {}
self.figure_params['state_estimation'][x]['non_log'] = ['Snowpack (SWE)',]
self.figure_params['state_estimation'][x]['predictor values'] = ['Mean Inflow, Prior 30 Days (tAF/day)','Snowpack (SWE)']
self.figure_params['state_estimation'][x]['colorbar_label_index'] = [0, 30, 60, 90, 120, 150, 180]
self.figure_params['state_estimation'][x]['colorbar_label_list'] = ['Oct', 'Nov', 'Dec', 'Jan', 'Feb', 'Mar', 'Apr']
self.figure_params['state_estimation'][x]['subplot_annotations'] = ['A', 'B', 'C', 'D']
self.figure_params['state_estimation'][x]['forecast_periods'] = [30,'SNOWMELT']
self.figure_params['state_estimation'][x]['all_cols'] = ['DOWY', 'Snowpack', '30MA']
self.figure_params['state_estimation'][x]['forecast_values'] = []
for forecast_days in self.figure_params['state_estimation'][x]['forecast_periods']:
if forecast_days == 'SNOWMELT':
self.figure_params['state_estimation'][x]['forecast_values'].append('Flow Estimation, Snowmelt Season (tAF)')
self.figure_params['state_estimation'][x]['all_cols'].append('Snowmelt Flow')
else:
self.figure_params['state_estimation'][x]['forecast_values'].append('Flow Estimation, Next ' + str(forecast_days) + ' Days (tAF)')
self.figure_params['state_estimation'][x]['all_cols'].append(str(forecast_days) + ' Day Flow')
self.figure_params['state_estimation']['publication']['watershed_keys'] = ['SHA', 'ORO', 'MIL', 'ISB']
self.figure_params['state_estimation']['publication']['watershed_labels'] = ['Shasta', 'Oroville', 'Millerton', 'Isabella']
self.figure_params['state_estimation']['sacramento']['watershed_keys'] = ['SHA', 'ORO', 'FOL', 'YRS']
self.figure_params['state_estimation']['sacramento']['watershed_labels'] = ['Shasta', 'Oroville', 'Folsom', 'New Bullards Bar']
self.figure_params['state_estimation']['sanjoaquin']['watershed_keys'] = ['NML', 'DNP', 'EXC', 'MIL']
self.figure_params['state_estimation']['sanjoaquin']['watershed_labels'] = ['New Melones', '<NAME>', 'Exchequer', 'Millerton']
self.figure_params['state_estimation']['tulare']['watershed_keys'] = ['PFT', 'KWH', 'SUC', 'ISB']
self.figure_params['state_estimation']['tulare']['watershed_labels'] = ['Pine Flat', 'Kaweah', 'Success', 'Isabella']
self.figure_params['model_validation'] = {}
for x in ['delta', 'sierra', 'sanluis', 'bank']:
self.figure_params['model_validation'][x] = {}
self.figure_params['model_validation']['delta']['title_labels'] = ['State Water Project Pumping', 'Central Valley Project Pumping', 'Delta X2 Location']
num_subplots = len(self.figure_params['model_validation']['delta']['title_labels'])
self.figure_params['model_validation']['delta']['label_name_1'] = ['delta_HRO_pump', 'delta_TRP_pump', 'delta_x2']
self.figure_params['model_validation']['delta']['label_name_2'] = ['HRO_pump', 'TRP_pump', 'DAY_X2']
self.figure_params['model_validation']['delta']['unit_converstion_1'] = [1.0, 1.0, 1.0]
self.figure_params['model_validation']['delta']['unit_converstion_2'] = [cfs_tafd, cfs_tafd, 1.0]
self.figure_params['model_validation']['delta']['y_label_timeseries'] = ['Pumping (tAF/week)', 'Pumping (tAF/week)', 'X2 inland distance (km)']
self.figure_params['model_validation']['delta']['y_label_scatter'] = ['(tAF/yr)', '(tAF/yr)', '(km)']
self.figure_params['model_validation']['delta']['timeseries_timestep'] = ['W', 'W', 'W']
self.figure_params['model_validation']['delta']['scatter_timestep'] = ['AS-OCT', 'AS-OCT', 'M']
self.figure_params['model_validation']['delta']['aggregation_methods'] = ['sum', 'sum', 'mean']
self.figure_params['model_validation']['delta']['notation_location'] = ['top'] * num_subplots
self.figure_params['model_validation']['delta']['show_legend'] = [True] * num_subplots
self.figure_params['model_validation']['sierra']['title_labels'] = ['Shasta', 'Oroville', 'Folsom', 'New Bullards Bar', 'New Melones', '<NAME>', 'Exchequer', 'Millerton', 'Pine Flat', 'Kaweah', 'Success', 'Isabella']
num_subplots = len(self.figure_params['model_validation']['sierra']['title_labels'])
self.figure_params['model_validation']['sierra']['label_name_1'] = ['shasta_S', 'oroville_S', 'folsom_S', 'yuba_S', 'newmelones_S', 'donpedro_S', 'exchequer_S', 'millerton_S', 'pineflat_S', 'kaweah_S', 'success_S', 'isabella_S']
self.figure_params['model_validation']['sierra']['label_name_2'] = ['SHA_storage', 'ORO_storage', 'FOL_storage', 'YRS_storage', 'NML_storage', 'DNP_storage', 'EXC_storage', 'MIL_storage', 'PFT_storage', 'KWH_storage', 'SUC_storage', 'ISB_storage']
self.figure_params['model_validation']['sierra']['unit_converstion_1'] = [1.0/1000.0] * num_subplots
self.figure_params['model_validation']['sierra']['unit_converstion_2'] = [1.0/1000000.0] * num_subplots
self.figure_params['model_validation']['sierra']['y_label_timeseries'] = ['Storage (mAF)'] * num_subplots
self.figure_params['model_validation']['sierra']['y_label_scatter'] = []
self.figure_params['model_validation']['sierra']['timeseries_timestep'] = ['W'] * num_subplots
self.figure_params['model_validation']['sierra']['scatter_timestep'] = []
self.figure_params['model_validation']['sierra']['aggregation_methods'] = ['mean'] * num_subplots
self.figure_params['model_validation']['sierra']['notation_location'] = ['bottom'] * num_subplots
self.figure_params['model_validation']['sierra']['show_legend'] = [False] * num_subplots
counter_kaweah = self.figure_params['model_validation']['sierra']['title_labels'].index('Kaweah')
counter_success = self.figure_params['model_validation']['sierra']['title_labels'].index('Success')
counter_isabella = self.figure_params['model_validation']['sierra']['title_labels'].index('Isabella')
self.figure_params['model_validation']['sierra']['notation_location'][counter_kaweah] = 'top'
self.figure_params['model_validation']['sierra']['notation_location'][counter_success] = 'topright'
self.figure_params['model_validation']['sierra']['show_legend'][counter_isabella] = True
self.figure_params['model_validation']['sanluis']['title_labels'] = ['State (SWP) Portion, San Luis Reservoir', 'Federal (CVP) Portion, San Luis Reservoir']
num_subplots = len(self.figure_params['model_validation']['sanluis']['title_labels'])
self.figure_params['model_validation']['sanluis']['label_name_1'] = ['sanluisstate_S', 'sanluisfederal_S']
self.figure_params['model_validation']['sanluis']['label_name_2'] = ['SLS_storage', 'SLF_storage']
self.figure_params['model_validation']['sanluis']['unit_converstion_1'] = [1.0/1000.0] * num_subplots
self.figure_params['model_validation']['sanluis']['unit_converstion_2'] = [1.0/1000000.0] * num_subplots
self.figure_params['model_validation']['sanluis']['y_label_timeseries'] = ['Storage (mAF)'] * num_subplots
self.figure_params['model_validation']['sanluis']['y_label_scatter'] = ['(mAF)'] * num_subplots
self.figure_params['model_validation']['sanluis']['timeseries_timestep'] = ['W'] * num_subplots
self.figure_params['model_validation']['sanluis']['scatter_timestep'] = ['M'] * num_subplots
self.figure_params['model_validation']['sanluis']['aggregation_methods'] = ['point'] * num_subplots
self.figure_params['model_validation']['sanluis']['notation_location'] = ['top'] * num_subplots
self.figure_params['model_validation']['sanluis']['show_legend'] = [True] * num_subplots
self.figure_params['model_validation']['bank']['title_labels'] = ['Kern Water Bank Accounts', 'Semitropic Water Bank Accounts']
num_subplots = len(self.figure_params['model_validation']['bank']['title_labels'])
self.figure_params['model_validation']['bank']['label_name_1'] = ['kwb_total', 'smi_total']
self.figure_params['model_validation']['bank']['label_name_2'] = ['kwb_accounts', 'smi_accounts']
self.figure_params['model_validation']['bank']['unit_converstion_1'] = [1.0/1000.0] * num_subplots
self.figure_params['model_validation']['bank']['unit_converstion_2'] = [1.0/1000000.0, 1.0/1000.0]
self.figure_params['model_validation']['bank']['y_label_timeseries'] = ['Storage (mAF)'] * num_subplots
self.figure_params['model_validation']['bank']['y_label_scatter'] = ['(mAF)'] * num_subplots
self.figure_params['model_validation']['bank']['timeseries_timestep'] = ['W'] * num_subplots
self.figure_params['model_validation']['bank']['scatter_timestep'] = ['AS-OCT'] * num_subplots
self.figure_params['model_validation']['bank']['aggregation_methods'] = ['change'] * num_subplots
self.figure_params['model_validation']['bank']['notation_location'] = ['top'] * num_subplots
self.figure_params['model_validation']['bank']['show_legend'] = [False] * num_subplots
self.figure_params['model_validation']['bank']['show_legend'][0] = True
self.figure_params['state_response'] = {}
self.figure_params['state_response']['sanluisstate_losthills'] = {}
self.figure_params['state_response']['sanluisstate_losthills']['contract_list'] = ['swpdelta',]
self.figure_params['state_response']['sanluisstate_losthills']['contributing_reservoirs'] = ['delta_uncontrolled_swp', 'oroville', 'yuba']
self.figure_params['state_response']['sanluisstate_losthills']['groundwater_account_names'] = ['LHL','WON']
self.figure_params['state_response']['sanluisstate_losthills']['reservoir_features'] = ['S', 'days_til_full', 'flood_deliveries']
self.figure_params['state_response']['sanluisstate_losthills']['reservoir_feature_colors'] = ['teal', '#3A506B', '#74B3CE', 'steelblue']
self.figure_params['state_response']['sanluisstate_losthills']['district_contracts'] = ['tableA',]
self.figure_params['state_response']['sanluisstate_losthills']['subplot_titles'] = ['State Water Project Delta Operations', 'Lost Hills Drought Management', 'San Luis Reservoir Operations', 'Lost Hills Flood Management']
self.figure_params['state_response']['sanluisstate_losthills']['legend_list_1'] = ['Y.T.D Delta Pumping', 'Projected Unstored Exports', 'Projected Stored Exports, Oroville', 'Projected Stored Exports, New Bullards']
self.figure_params['state_response']['sanluisstate_losthills']['legend_list_2'] = ['Storage', 'Projected Days to Fill', 'Flood Release Deliveries']
self.figure_params['state_response']['sanluisstate_losthills']['legend_list_3'] = ['Remaining SW Allocation', 'SW Deliveries', 'Private GW Pumping', 'District GW Bank Recovery', 'Remaining GW Bank Recovery Capacity']
self.figure_params['state_response']['sanluisstate_losthills']['legend_list_4'] = ['Carryover Recharge Capacity', 'Recharged from Contract Allocation', 'Recharge of Uncontrolled Flood Spills']
self.figure_params['state_response']['sanluisstate_wheeler'] = {}
self.figure_params['state_response']['sanluisstate_wheeler']['contract_list'] = ['swpdelta',]
self.figure_params['state_response']['sanluisstate_wheeler']['contributing_reservoirs'] = ['delta_uncontrolled_swp', 'oroville', 'yuba']
self.figure_params['state_response']['sanluisstate_wheeler']['groundwater_account_names'] = ['WRM']
self.figure_params['state_response']['sanluisstate_wheeler']['reservoir_features'] = ['S', 'days_til_full', 'flood_deliveries']
self.figure_params['state_response']['sanluisstate_wheeler']['reservoir_feature_colors'] = ['teal', '#3A506B', '#74B3CE', 'lightsteelblue']
self.figure_params['state_response']['sanluisstate_wheeler']['district_contracts'] = ['tableA',]
self.figure_params['state_response']['sanluisstate_wheeler']['subplot_titles'] = ['State Water Project Delta Operations', 'Wheeler Ridge Drought Management', 'San Luis Reservoir Operations', 'Wheeler Ridge Flood Management']
self.figure_params['state_response']['sanluisstate_wheeler']['legend_list_1'] = ['Y.T.D Delta Pumping', 'Projected Unstored Exports', 'Projected Stored Exports, Oroville', 'Projected Stored Exports, New Bullards']
self.figure_params['state_response']['sanluisstate_wheeler']['legend_list_2'] = ['Storage', 'Projected Days to Fill', 'Flood Release Deliveries']
self.figure_params['state_response']['sanluisstate_wheeler']['legend_list_3'] = ['Remaining SW Allocation', 'SW Deliveries', 'Private GW Pumping', 'District GW Bank Recovery', 'Remaining GW Bank Recovery Capacity']
self.figure_params['state_response']['sanluisstate_wheeler']['legend_list_4'] = ['Carryover Recharge Capacity', 'Recharge of Uncontrolled Flood Spills', 'Recharged from Contract Allocation']
self.figure_params['district_water_use'] = {}
self.figure_params['district_water_use']['physical'] = {}
self.figure_params['district_water_use']['physical']['district_groups'] = ['Municipal Districts', 'Kern County Water Agency', 'CVP - Friant Contractors', 'CVP - San Luis Contractors', 'Groundwater Banks']
self.figure_params['district_water_use']['physical']['Municipal Districts'] = ['bakersfield', 'ID4', 'fresno', 'southbay', 'socal', 'centralcoast']
self.figure_params['district_water_use']['physical']['Kern County Water Agency'] = ['berrenda', 'belridge', 'buenavista', 'cawelo', 'henrymiller', 'losthills', 'rosedale', 'semitropic', 'tehachapi', 'tejon', 'westkern', 'wheeler', 'northkern', 'kerntulare']
self.figure_params['district_water_use']['physical']['CVP - Friant Contractors'] = ['arvin', 'delano', 'pixley', 'exeter', 'kerntulare', 'lindmore', 'lindsay', 'lowertule', 'porterville', 'saucelito', 'shaffer', 'sosanjoaquin', 'teapot', 'terra', 'chowchilla', 'maderairr', 'tulare', 'fresnoid']
self.figure_params['district_water_use']['physical']['CVP - San Luis Contractors'] = ['westlands', 'panoche', 'sanluiswater', 'delpuerto']
self.figure_params['district_water_use']['physical']['Groundwater Banks'] = ['stockdale', 'kernriverbed', 'poso', 'pioneer', 'kwb', 'b2800', 'irvineranch', 'northkernwb']
self.figure_params['district_water_use']['physical']['subplot columns'] = 2
self.figure_params['district_water_use']['physical']['color map'] = 'YlGnBu_r'
self.figure_params['district_water_use']['physical']['write file'] = True
self.figure_params['district_water_use']['annual'] = {}
self.figure_params['district_water_use']['annual']['district_groups'] = ['Municipal Districts', 'Kern County Water Agency', 'CVP - Friant Contractors', 'CVP - San Luis Contractors']
self.figure_params['district_water_use']['annual']['Municipal Districts'] = ['bakersfield', 'ID4', 'fresno', 'southbay', 'socal', 'centralcoast']
self.figure_params['district_water_use']['annual']['Kern County Water Agency'] = ['berrenda', 'belridge', 'buenavista', 'cawelo', 'henrymiller', 'losthills', 'rosedale', 'semitropic', 'tehachapi', 'tejon', 'westkern', 'wheeler']
self.figure_params['district_water_use']['annual']['CVP - Friant Contractors'] = ['arvin', 'delano', 'pixley', 'exeter', 'kerntulare', 'lindmore', 'lindsay', 'lowertule', 'porterville', 'saucelito', 'shaffer', 'sosanjoaquin', 'teapot', 'terra', 'chowchilla', 'maderairr', 'tulare', 'fresnoid']
self.figure_params['district_water_use']['annual']['CVP - San Luis Contractors'] = ['westlands', 'panoche', 'sanluiswater', 'delpuerto']
self.figure_params['district_water_use']['annual']['subplot columns'] = 2
self.figure_params['district_water_use']['annual']['color map'] = 'BrBG_r'
self.figure_params['district_water_use']['annual']['write file'] = True
self.figure_params['flow_diagram'] = {}
self.figure_params['flow_diagram']['tulare'] = {}
self.figure_params['flow_diagram']['tulare']['column1'] = ['Shasta', 'Folsom', 'Oroville', 'New Bullards', 'Uncontrolled']
self.figure_params['flow_diagram']['tulare']['row1'] = ['Delta Outflow', 'Carryover',]
self.figure_params['flow_diagram']['tulare']['column2'] = ['San Luis (Fed)', 'San Luis (State)', 'Millerton', 'Isabella', 'Pine Flat', 'Kaweah', 'Success']
self.figure_params['flow_diagram']['tulare']['row2'] = ['Carryover',]
self.figure_params['flow_diagram']['tulare']['column3'] = ['Exchange', 'CVP-Delta', 'Cross Valley', 'State Water Project', 'Friant Class 1','Friant Class 2', 'Kern River', 'Kings River', 'Kaweah River', 'Tule River', 'Flood']
self.figure_params['flow_diagram']['tulare']['row3'] = ['Private Pumping', 'GW Banks']
self.figure_params['flow_diagram']['tulare']['column4'] = ['Exchange', 'CVP-Delta', 'Urban', 'KCWA', 'CVP-Friant','Other']
self.figure_params['flow_diagram']['tulare']['row4'] = ['Carryover',]
self.figure_params['flow_diagram']['tulare']['column5'] = ['Irrigation', 'Urban', 'In-Lieu Recharge', 'Direct Recharge']
self.figure_params['flow_diagram']['tulare']['titles'] = ['Sacramento Basin\nSupplies', 'Tulare Basin\nSupplies', 'Surface Water\nContract Allocations', 'Contractor Groups', 'Water Use Type']
def scenario_compare(self, folder_name, figure_name, plot_name, validation_values, show_plot):
outflow_list = self.figure_params[figure_name][plot_name]['outflow_list']
pump1_list = self.figure_params[figure_name][plot_name]['pump1_list']
pump2_list = self.figure_params[figure_name][plot_name]['pump2_list']
scenario_labels = self.figure_params[figure_name][plot_name]['scenario_labels']
simulation_labels = self.figure_params[figure_name][plot_name]['simulation_labels']
observation_labels = self.figure_params[figure_name][plot_name]['observation_labels']
agg_list = self.figure_params[figure_name][plot_name]['agg_list']
unit_mult = self.figure_params[figure_name][plot_name]['unit_mult']
max_value_list = self.figure_params[figure_name][plot_name]['max_value_list']
use_log_list = self.figure_params[figure_name][plot_name]['use_log_list']
use_cdf_list = self.figure_params[figure_name][plot_name]['use_cdf_list']
scenario_type_list = self.figure_params[figure_name][plot_name]['scenario_type_list']
x_label_list = self.figure_params[figure_name][plot_name]['x_label_list']
y_label_list = self.figure_params[figure_name][plot_name]['y_label_list']
legend_label_names1 = self.figure_params[figure_name][plot_name]['legend_label_names1']
legend_label_names2 = self.figure_params[figure_name][plot_name]['legend_label_names2']
color1 = sns.color_palette('spring', n_colors = 3)
color2 = sns.color_palette('summer', n_colors = 3)
color_list = np.array([color1[0], color1[2], color2[0]])
max_y_val = np.zeros(len(simulation_labels))
fig = plt.figure(figsize = (20, 16))
gs = gridspec.GridSpec(3,2, width_ratios=[3,1], figure = fig)
ax1 = plt.subplot(gs[0, 0])
ax2 = plt.subplot(gs[1, 0])
ax3 = plt.subplot(gs[2, 0])
ax4 = plt.subplot(gs[:, 1])
axes_list = [ax1, ax2, ax3]
counter = 0
for sim_label, obs_label, agg, max_value, use_log, use_cdf, ax_loop in zip(simulation_labels, observation_labels, agg_list, max_value_list, use_log_list, use_cdf_list, axes_list):
data_type_dict = {}
data_type_dict['scenario'] = self.values[sim_label].resample(agg).sum() * unit_mult[0]
data_type_dict['validation'] = validation_values[sim_label].resample(agg).sum() * unit_mult[1]
data_type_dict['observation'] = self.observations[obs_label].resample(agg).sum() * unit_mult[2]
if use_log:
for scen_type in scenario_type_list:
values_int = data_type_dict[scen_type]
data_type_dict[scen_type] = np.log(values_int[values_int > 0])
for scen_type in scenario_type_list:
max_y_val[counter] = max([max(data_type_dict[scen_type]), max_y_val[counter]])
counter += 1
if use_cdf:
for scen_type, color_loop in zip(scenario_type_list, color_list):
cdf_values = np.zeros(100)
values_int = data_type_dict[scen_type]
for x in range(0, 100):
x_val = int(np.ceil(max_value)) * (x/100)
cdf_values[x] = len(values_int[values_int > x_val])/len(values_int)
ax_loop.plot(cdf_values, np.arange(0, int(np.ceil(max_value)), int(np.ceil(max_value))/100), linewidth = 3, color = color_loop)
else:
pos = np.linspace(0, max_value, 101)
for scen_type, color_loop in zip(scenario_type_list, color_list):
kde_est = stats.gaussian_kde(data_type_dict[scen_type])
ax_loop.fill_between(pos, kde_est(pos), edgecolor = 'black', alpha = 0.6, facecolor = color_loop)
sri_dict = {}
sri_dict['validation'] = validation_values['delta_forecastSRI']
sri_dict['scenario'] = self.values['delta_forecastSRI']
sri_cutoffs = {}
sri_cutoffs['W'] = [9.2, 100]
sri_cutoffs['AN'] = [7.8, 9.2]
sri_cutoffs['BN'] = [6.6, 7.8]
sri_cutoffs['D'] = [5.4, 6.6]
sri_cutoffs['C'] = [0.0, 5.4]
wyt_list = ['W', 'AN', 'BN', 'D', 'C']
scenario_type_list = ['validation', 'scenario']
colors = sns.color_palette('RdBu_r', n_colors = 5)
percent_years = {}
for wyt in wyt_list:
percent_years[wyt] = np.zeros(len(scenario_type_list))
for scen_cnt, scen_type in enumerate(scenario_type_list):
ann_sri = []
for x_cnt, x in enumerate(sri_dict[scen_type]):
if sri_dict[scen_type].index.month[x_cnt] == 9 and sri_dict[scen_type].index.day[x_cnt] == 30:
ann_sri.append(x)
ann_sri = np.array(ann_sri)
for x_cnt, wyt in enumerate(wyt_list):
mask_value = (ann_sri >= sri_cutoffs[wyt][0]) & (ann_sri < sri_cutoffs[wyt][1])
percent_years[wyt][scen_cnt] = len(ann_sri[mask_value])/len(ann_sri)
colors = sns.color_palette('RdBu_r', n_colors = 5)
last_type = np.zeros(len(scenario_type_list))
for cnt, x in enumerate(wyt_list):
ax4.bar(['Validated Period\n(1997-2016)', 'Extended Simulation\n(1906-2016)'], percent_years[x], alpha = 1.0, label = wyt, facecolor = colors[cnt], edgecolor = 'black', bottom = last_type)
last_type += percent_years[x]
ax1.set_xlim([0.0, 500.0* np.ceil(max_y_val[0]/500.0)])
ax2.set_xlim([0.0, 500.0* np.ceil(max_y_val[1]/500.0)])
ax3.set_xlim([0.0, 1.0])
ax4.set_ylim([0, 1.15])
ax1.set_yticklabels('')
ax2.set_yticklabels('')
label_list = []
loc_list = []
for value_x in range(0, 120, 20):
label_list.append(str(value_x) + ' %')
loc_list.append(value_x/100.0)
ax4.set_yticklabels(label_list)
ax4.set_yticks(loc_list)
ax3.set_xticklabels(label_list)
ax3.set_xticks(loc_list)
ax3.set_yticklabels(['4', '8', '16', '32', '64', '125', '250', '500', '1000', '2000', '4000'])
ax3.set_yticks([np.log(4), np.log(8), np.log(16), np.log(32), np.log(64), np.log(125), np.log(250), np.log(500), np.log(1000), np.log(2000), np.log(4000)])
ax3.set_ylim([np.log(4), np.log(4000)])
for ax, x_lab, y_lab in zip([ax1, ax2, ax3, ax4], x_label_list, y_label_list):
ax.set_xlabel(x_lab, fontsize = 16, fontname = 'Gill Sans MT', fontweight = 'bold')
ax.set_ylabel(y_lab, fontsize = 16, fontname = 'Gill Sans MT', fontweight = 'bold')
ax.grid(False)
for tick in ax.get_xticklabels():
tick.set_fontname('Gill Sans MT')
tick.set_fontsize(14)
for tick in ax.get_yticklabels():
tick.set_fontname('Gill Sans MT')
tick.set_fontsize(14)
legend_elements = []
for x_cnt, x in enumerate(legend_label_names1):
legend_elements.append(Patch(facecolor = color_list[x_cnt], edgecolor = 'black', label = x))
ax1.legend(handles = legend_elements, loc = 'upper left', framealpha = 0.7, shadow = True, prop={'family':'Gill Sans MT','weight':'bold','size':14})
legend_elements_2 = []
for x_cnt, x in enumerate(legend_label_names2):
legend_elements_2.append(Patch(facecolor = colors[x_cnt], edgecolor = 'black', label = x))
ax4.legend(handles = legend_elements_2, loc = 'upper left', framealpha = 0.7, shadow = True, prop={'family':'Gill Sans MT','weight':'bold','size':14})
plt.savefig(folder_name + figure_name + '_' + plot_name + '.png', dpi = 150, bbox_inches = 'tight', pad_inches = 0.0)
if show_plot:
plt.show()
plt.close()
def make_deliveries_by_district(self, folder_name, figure_name, plot_name, scenario_name, show_plot):
if plot_name == 'annual':
name_bridge = {}
name_bridge['semitropic'] = 'KER01'
name_bridge['westkern'] = 'KER02'
name_bridge['wheeler'] = 'KER03'
name_bridge['kerndelta'] = 'KER04'
name_bridge['arvin'] = 'KER05'
name_bridge['belridge'] = 'KER06'
name_bridge['losthills'] = 'KER07'
name_bridge['northkern'] = 'KER08'
name_bridge['northkernwb'] = 'KER08'
name_bridge['ID4'] = 'KER09'
name_bridge['sosanjoaquin'] = 'KER10'
name_bridge['berrenda'] = 'KER11'
name_bridge['buenavista'] = 'KER12'
name_bridge['cawelo'] = 'KER13'
name_bridge['rosedale'] = 'KER14'
name_bridge['shaffer'] = 'KER15'
name_bridge['henrymiller'] = 'KER16'
name_bridge['kwb'] = 'KER17'
name_bridge['b2800'] = 'KER17'
name_bridge['pioneer'] = 'KER17'
name_bridge['irvineranch'] = 'KER17'
name_bridge['kernriverbed'] = 'KER17'
name_bridge['poso'] = 'KER17'
name_bridge['stockdale'] = 'KER17'
name_bridge['delano'] = 'KeT01'
name_bridge['kerntulare'] = 'KeT02'
name_bridge['lowertule'] = 'TUL01'
name_bridge['tulare'] = 'TUL02'
name_bridge['lindmore'] = 'TUL03'
name_bridge['saucelito'] = 'TUL04'
name_bridge['porterville'] = 'TUL05'
name_bridge['lindsay'] = 'TUL06'
name_bridge['exeter'] = 'TUL07'
name_bridge['terra'] = 'TUL08'
name_bridge['teapot'] = 'TUL09'
name_bridge['bakersfield'] = 'BAK'
name_bridge['fresno'] = 'FRE'
name_bridge['southbay'] = 'SOB'
name_bridge['socal'] = 'SOC'
name_bridge['tehachapi'] = 'TEH'
name_bridge['tejon'] = 'TEJ'
name_bridge['centralcoast'] = 'SLO'
name_bridge['pixley'] = 'PIX'
name_bridge['chowchilla'] = 'CHW'
name_bridge['maderairr'] = 'MAD'
name_bridge['fresnoid'] = 'FSI'
name_bridge['westlands'] = 'WTL'
name_bridge['panoche'] = 'PAN'
name_bridge['sanluiswater'] = 'SLW'
name_bridge['delpuerto'] = 'DEL'
elif plot_name == 'monthly':
name_bridge = {}
name_bridge['semitropic'] = 'Semitropic Water Storage District'
name_bridge['westkern'] = 'West Kern Water District'
name_bridge['wheeler'] = 'Wheeler Ridge-Maricopa Water Storage District'
name_bridge['kerndelta'] = 'Kern Delta Water District'
name_bridge['arvin'] = 'Arvin-Edison Water Storage District'
name_bridge['belridge'] = 'Belridge Water Storage District'
name_bridge['losthills'] = 'Lost Hills Water District'
name_bridge['northkern'] = 'North Kern Water Storage District'
name_bridge['northkernwb'] = 'North Kern Water Storage District'
name_bridge['ID4'] = 'Urban'
name_bridge['sosanjoaquin'] = 'Southern San Joaquin Municipal Utility District'
name_bridge['berrenda'] = 'Berrenda Mesa Water District'
name_bridge['buenavista'] = 'Buena Vista Water Storage District'
name_bridge['cawelo'] = 'Cawelo Water District'
name_bridge['rosedale'] = 'Rosedale-Rio Bravo Water Storage District'
name_bridge['shaffer'] = 'Shafter-Wasco Irrigation District'
name_bridge['henrymiller'] = 'Henry Miller Water District'
name_bridge['kwb'] = 'Kern Water Bank Authority'
name_bridge['b2800'] = 'Kern Water Bank Authority'
name_bridge['pioneer'] = 'Kern Water Bank Authority'
name_bridge['irvineranch'] = 'Kern Water Bank Authority'
name_bridge['kernriverbed'] = 'Kern Water Bank Authority'
name_bridge['poso'] = 'Kern Water Bank Authority'
name_bridge['stockdale'] = 'Kern Water Bank Authority'
name_bridge['delano'] = 'Delano-Earlimart Irrigation District'
name_bridge['kerntulare'] = 'Kern-Tulare Water District'
name_bridge['lowertule'] = 'Lower Tule River Irrigation District'
name_bridge['tulare'] = 'Tulare Irrigation District'
name_bridge['lindmore'] = 'Lindmore Irrigation District'
name_bridge['saucelito'] = 'Saucelito Irrigation District'
name_bridge['porterville'] = 'Porterville Irrigation District'
name_bridge['lindsay'] = 'Lindsay-Strathmore Irrigation District'
name_bridge['exeter'] = 'Exeter Irrigation District'
name_bridge['terra'] = 'Terra Bella Irrigation District'
name_bridge['teapot'] = 'Tea Pot Dome Water District'
name_bridge['bakersfield'] = 'Urban'
name_bridge['fresno'] = 'Urban'
name_bridge['southbay'] = 'Urban'
name_bridge['socal'] = 'Urban'
name_bridge['tehachapi'] = 'Tehachapi - Cummings County Water District'
name_bridge['tejon'] = 'Tejon-Castac Water District'
name_bridge['centralcoast'] = 'SLO'
name_bridge['pixley'] = 'Pixley Irrigation District'
name_bridge['chowchilla'] = 'Chowchilla Water District'
name_bridge['maderairr'] = 'Madera Irrigation District'
name_bridge['fresnoid'] = 'Fresno Irrigation District'
name_bridge['westlands'] = 'Westlands Water District'
name_bridge['panoche'] = 'Panoche Water District'
name_bridge['sanluiswater'] = 'San Luis Water District'
name_bridge['delpuerto'] = 'Del Puerto Water District'
name_bridge['alta'] = 'Alta Irrigation District'
name_bridge['consolidated'] = 'Consolidated Irrigation District'
location_type = plot_name
self.total_irrigation = {}
self.total_recharge = {}
self.total_pumping = {}
self.total_flood_purchases = {}
self.total_recovery_rebate = {}
self.total_recharge_sales = {}
self.total_recharge_purchases = {}
self.total_recovery_sales = {}
self.total_recovery_purchases = {}
for bank in self.bank_list:
self.total_irrigation[bank.name] = np.zeros(self.number_years*12)
self.total_recharge[bank.name] = np.zeros(self.number_years*12)
self.total_pumping[bank.name] = np.zeros(self.number_years*12)
self.total_flood_purchases[bank.name] = np.zeros(self.number_years*12)
self.total_recovery_rebate[bank.name] = np.zeros(self.number_years*12)
self.total_recharge_sales[bank.name] = np.zeros(self.number_years*12)
self.total_recharge_purchases[bank.name] = np.zeros(self.number_years*12)
self.total_recovery_sales[bank.name] = np.zeros(self.number_years*12)
self.total_recovery_purchases[bank.name] = np.zeros(self.number_years*12)
for district in self.district_list:
self.total_irrigation[district.name] = np.zeros(self.number_years*12)
self.total_recharge[district.name] = np.zeros(self.number_years*12)
self.total_pumping[district.name] = np.zeros(self.number_years*12)
self.total_flood_purchases[district.name] = np.zeros(self.number_years*12)
self.total_recovery_rebate[district.name] = np.zeros(self.number_years*12)
self.total_recharge_sales[district.name] = np.zeros(self.number_years*12)
self.total_recharge_purchases[district.name] = np.zeros(self.number_years*12)
self.total_recovery_sales[district.name] = np.zeros(self.number_years*12)
self.total_recovery_purchases[district.name] = np.zeros(self.number_years*12)
date_list_labels = []
for year_num in range(self.starting_year, 2017):
start_month = 1
end_month = 13
if year_num == self.starting_year:
start_month = 10
if year_num == 2016:
end_month = 10
for month_num in range(start_month, end_month):
date_string_start = str(year_num) + '-' + str(month_num) + '-01'
date_list_labels.append(date_string_start)
for district in self.district_list:
inleiu_name = district.name + '_inleiu_irrigation'
inleiu_recharge_name = district.name + '_inleiu_recharge'
direct_recover_name = district.name + '_recover_banked'
indirect_surface_name = district.name + '_exchanged_SW'
indirect_ground_name = district.name + '_exchanged_GW'
inleiu_pumping_name = district.name + '_leiupumping'
pumping_name = district.name + '_pumping'
recharge_name = district.name + '_' + district.key + '_recharged'
numdays_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
for year_num in range(0, self.number_years+1):
year_str = str(year_num + self.starting_year)
start_month = 1
end_month = 13
if year_num == 0:
start_month = 10
if year_num == self.number_years:
end_month = 10
for month_num in range(start_month, end_month):
if month_num == 1:
month_num_prev = '12'
year_str_prior = str(year_num + self.starting_year - 1)
end_day_prior = str(numdays_month[11])
else:
month_num_prev = str(month_num - 1)
year_str_prior = str(year_num + self.starting_year)
end_day_prior = str(numdays_month[month_num-2])
date_string_current = year_str + '-' + str(month_num) + '-' + str(numdays_month[month_num-1])
date_string_prior = year_str_prior + '-' + month_num_prev + '-' + end_day_prior
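# The simulated output series appear to be cumulative within each water year
# (October-September): October takes the month-end value directly, while every
# other month subtracts the prior month-end value to recover the monthly total.
# The same differencing pattern repeats for each delivery type below.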
###GW/SW exchanges,
if indirect_surface_name in self.values:
if month_num == 10:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), indirect_surface_name].values[0]
else:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), indirect_surface_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), indirect_surface_name].values[0]
#count irrigation deliveries for district that gave up SW (for GW in canal)
self.total_irrigation[district.name][year_num*12 + month_num - 10] += total_delivery
###GW/SW exchanges,
if indirect_ground_name in self.values:
if month_num == 10:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), indirect_ground_name].values[0]
else:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), indirect_ground_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), indirect_ground_name].values[0]
self.total_recovery_purchases[district.name][year_num*12 + month_num - 10] += total_delivery
##In leiu deliveries for irrigation
if inleiu_name in self.values:
if month_num == 10:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), inleiu_name].values[0]
else:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), inleiu_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), inleiu_name].values[0]
#attribute in-lieu deliveries for irrigation to the district operating the bank
self.total_irrigation[district.name][year_num*12 + month_num - 10] += total_delivery
self.total_recharge_sales[district.name][year_num*12 + month_num - 10] += total_delivery
if inleiu_recharge_name in self.values:
if month_num == 10:
total_recharge = self.values.loc[pd.DatetimeIndex([date_string_current]), inleiu_recharge_name].values[0]
else:
total_recharge = self.values.loc[pd.DatetimeIndex([date_string_current]), inleiu_recharge_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), inleiu_recharge_name].values[0]
#attribute in-lieu recharge to the district operating the bank
self.total_recharge[district.name][year_num*12 + month_num - 10] += total_recharge
self.total_recharge_sales[district.name][year_num*12 + month_num - 10] += total_recharge
#GW recovery
if direct_recover_name in self.values:
if month_num == 10:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), direct_recover_name].values[0]
else:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), direct_recover_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), direct_recover_name].values[0]
#if classifying by physical location, attribute to the district receiving water (as irrigation)
self.total_irrigation[district.name][year_num*12 + month_num - 10] += total_delivery
self.total_recovery_purchases[district.name][year_num*12 + month_num - 10] += total_delivery
##Pumping for in-lieu recovery
if inleiu_pumping_name in self.values:
if month_num == 10:
total_leiupumping = self.values.loc[pd.DatetimeIndex([date_string_current]), inleiu_pumping_name].values[0]
else:
total_leiupumping = self.values.loc[pd.DatetimeIndex([date_string_current]), inleiu_pumping_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), inleiu_pumping_name].values[0]
#if classifying by physical location, attribute to the district operating the bank
self.total_pumping[district.name][year_num*12 + month_num - 10] += total_leiupumping
self.total_recovery_sales[district.name][year_num*12 + month_num - 10] += total_leiupumping
self.total_recovery_rebate[district.name][year_num*12 + month_num - 10] += total_leiupumping
#Recharge, in- and out- of district
if recharge_name in self.values:
if month_num == 10:
total_recharge = self.values.loc[pd.DatetimeIndex([date_string_current]), recharge_name].values[0]
else:
total_recharge = self.values.loc[pd.DatetimeIndex([date_string_current]), recharge_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), recharge_name].values[0]
self.total_recharge[district.name][year_num*12 + month_num - 10] += total_recharge
for bank_name in self.bank_list:
bank_recharge_name = district.name + '_' + bank_name.key + '_recharged'
if bank_recharge_name in self.values:
if month_num == 10:
total_recharge = self.values.loc[pd.DatetimeIndex([date_string_current]), bank_recharge_name].values[0]
else:
total_recharge = self.values.loc[pd.DatetimeIndex([date_string_current]), bank_recharge_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), bank_recharge_name].values[0]
self.total_recharge[bank_name.name][year_num*12 + month_num - 10] += total_recharge
self.total_recharge_purchases[district.name][year_num*12 + month_num - 10] += total_recharge
for bank_name in self.leiu_list:
bank_recharge_name = district.name + '_' + bank_name.key + '_recharged'
if bank_recharge_name in self.values:
if month_num == 10:
total_recharge = self.values.loc[pd.DatetimeIndex([date_string_current]), bank_recharge_name].values[0]
else:
total_recharge = self.values.loc[pd.DatetimeIndex([date_string_current]), bank_recharge_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), bank_recharge_name].values[0]
self.total_recharge_purchases[district.name][year_num*12 + month_num - 10] += total_recharge
#Contract deliveries
for contract in self.contract_list:
delivery_name = district.name + '_' + contract.name + '_delivery'
recharge_contract_name = district.name + '_' + contract.name + '_recharged'
flood_irr_name = district.name + '_' + contract.name + '_flood_irrigation'
flood_name = district.name + '_' + contract.name + '_flood'
###All deliveries made from a district's contract
if delivery_name in self.values:
if month_num == 10:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), delivery_name].values[0]
else:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), delivery_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), delivery_name].values[0]
self.total_irrigation[district.name][year_num*12 + month_num - 10] += total_delivery
##Deliveries made for recharge are subtracted from the overall contract deliveries
if recharge_contract_name in self.values:
if month_num == 10:
total_recharge = self.values.loc[pd.DatetimeIndex([date_string_current]), recharge_contract_name].values[0]
else:
total_recharge = self.values.loc[pd.DatetimeIndex([date_string_current]), recharge_contract_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), recharge_contract_name].values[0]
self.total_irrigation[district.name][year_num*12 + month_num - 10] -= total_recharge
#flood water used for irrigation - always attribute as irrigation
if flood_irr_name in self.values:
if month_num == 10:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), flood_irr_name].values[0]
else:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), flood_irr_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), flood_irr_name].values[0]
self.total_irrigation[district.name][year_num*12 + month_num - 10] += total_delivery
self.total_flood_purchases[district.name][year_num*12 + month_num - 10] += total_delivery
if flood_name in self.values:
if month_num == 10:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), flood_name].values[0]
else:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), flood_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), flood_name].values[0]
self.total_flood_purchases[district.name][year_num*12 + month_num - 10] += total_delivery
##Pumping (daily values aggregated by month)
if pumping_name in self.values:
annual_pumping = 0.0
for x in range(0, len(self.index)):
monthly_index = (self.year[x] - self.starting_year)*12 + self.month[x] - 10
if self.day_month[x] == 1:
self.total_pumping[district.name][monthly_index] += annual_pumping
annual_pumping = 0.0
else:
annual_pumping += self.values.loc[self.index[x], pumping_name]
self.total_pumping[district.name][-1] += annual_pumping
#Get values for any private entities within the district
for private_name in self.private_list:
private = private_name.name
if district.key in self.private_districts[private]:
inleiu_name = private + '_' + district.key + '_inleiu_irrigation'
inleiu_recharge_name = private + '_' + district.key + '_inleiu_irrigation'
direct_recover_name = private + '_' + district.key + '_recover_banked'
indirect_surface_name = private + '_' + district.key + '_exchanged_SW'
indirect_ground_name = private + '_' + district.key + '_exchanged_GW'
inleiu_pumping_name = private + '_' + district.key + '_leiupumping'
pumping_name = private + '_' + district.key + '_pumping'
recharge_name = private + '_' + district.key + '_' + district.key + '_recharged'
for year_num in range(0, self.number_years - 1):
year_str = str(year_num + self.starting_year + 1)
start_month = 1
end_month = 13
if year_num == 0:
start_month = 10
if year_num == self.number_years - 1:
end_month = 10
for month_num in range(start_month, end_month):
if month_num == 1:
month_num_prev = '12'
year_str_prior = str(year_num + self.starting_year)
end_day_prior = str(numdays_month[11])
else:
month_num_prev = str(month_num - 1)
year_str_prior = str(year_num + self.starting_year + 1)
end_day_prior = str(numdays_month[month_num-2])
date_string_current = year_str + '-' + str(month_num) + '-' + str(numdays_month[month_num-1])
date_string_prior = year_str_prior + '-' + month_num_prev + '-' + end_day_prior
###GW/SW exchanges,
if indirect_surface_name in self.values:
if month_num == 10:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), indirect_surface_name].values[0]
else:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), indirect_surface_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), indirect_surface_name].values[0]
#count irrigation deliveries for district that gave up SW (for GW in canal)
self.total_irrigation[district.name][year_num*12 + month_num - 10] += total_delivery
###GW/SW exchanges,
if indirect_ground_name in self.values:
if month_num == 10:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), indirect_ground_name].values[0]
else:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), indirect_ground_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), indirect_ground_name].values[0]
#count irrigation deliveries for district that gave up SW (for GW in canal)
self.total_recovery_purchases[district.name][year_num*12 + month_num - 10] += total_delivery
##In-lieu deliveries for irrigation
if inleiu_name in self.values:
if month_num == 10:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), inleiu_name].values[0]
else:
total_delivery = self.values.loc[pd.DatetimeIndex([date_string_current]), inleiu_name].values[0] - self.values.loc[pd.DatetimeIndex([date_string_prior]), inleiu_name].values[0]
#attribute in-lieu deliveries for irrigation to the district operating the bank
self.total_irrigation[district.name][year_num*12 + month_num - 10] += total_delivery
self.total_recharge_sales[district.name][year_num*12 + month_num - 10] += total_delivery
if inleiu_recharge_name in self.values:
if month_num == 10:
total_recharge = self.values.loc[pd.DatetimeIndex([date_string_current]), inleiu_recharge_name].values[0]
else:
total_recharge = self.values.loc[
|
pd.DatetimeIndex([date_string_current])
|
pandas.DatetimeIndex
|
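# A minimal sketch (not taken from the model above) of the month-over-month differencing
# pattern the snippet repeats: the stored series are cumulative within a water year, so a
# month's total is the current month-end value minus the prior month-end value, except in
# October (month 10), which starts the water year. Names and values here are illustrative only.
import pandas as pd

def monthly_total(cumulative, current_date, prior_date, month_num):
    # October restarts the cumulative series, so its value is already the monthly total
    if month_num == 10:
        return float(cumulative.loc[pd.to_datetime(current_date)])
    return float(cumulative.loc[pd.to_datetime(current_date)]
                 - cumulative.loc[pd.to_datetime(prior_date)])

_cum = pd.Series([5.0, 12.0, 20.0],
                 index=pd.to_datetime(['2020-10-31', '2020-11-30', '2020-12-31']))
assert monthly_total(_cum, '2020-10-31', None, 10) == 5.0
assert monthly_total(_cum, '2020-11-30', '2020-10-31', 11) == 7.0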
import pandas as pd
import os
def save_csv(df: pd.DataFrame, output_dir: str, filename: str) -> str:
"""
Save CSV
Parameters
----------
df : pd.DataFrame
Original
output_dir : str
Folder for save df
filename : str
Name of file
Returns
---------
filepath : str
Absolute
"""
filepath: str = os.path.join(output_dir, filename)
df.to_csv(filepath)
return filepath
def get_revenue_per_store(df: pd.DataFrame) -> pd.DataFrame:
"""
Get revenues
Parameters
----------
Returns
---------
pd.DataFrame
Computed
"""
# Revenues
revenue = (
df.groupby(by="store")
.sum()
.total.to_frame()
.rename(columns={"total": "revenue"})
)
return revenue
def get_pct_by_pay_method(df: pd.DataFrame, pay_method: str) -> pd.DataFrame:
"""
Get percentage by payment method
Parameters
----------
df : pd.DataFrame
Original
pay_method : str
Choose between cc or cash
Returns
---------
pd.DataFrame
Computed
"""
# Filter and group-by
revenue_per_store = get_revenue_per_store(df)
revenue_per_store_filtered = (
df[df.pay == pay_method]
.groupby(by="store")
.sum()
.total.to_frame()
.rename(columns={"total": f"revenue_{pay_method}"})
)
# Match total and revenue per pay method
temp_df =
|
pd.concat([revenue_per_store, revenue_per_store_filtered], axis=1)
|
pandas.concat
|
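# A minimal usage sketch for the revenue helpers above, assuming a transactions frame with
# the 'store', 'pay' and 'total' columns implied by the groupby calls; the 'pct_cc' column
# name is hypothetical.
import pandas as pd

transactions = pd.DataFrame({
    'store': ['A', 'A', 'B', 'B'],
    'pay': ['cc', 'cash', 'cc', 'cc'],
    'total': [10.0, 5.0, 8.0, 2.0],
})
revenue = transactions.groupby('store')['total'].sum().to_frame('revenue')
revenue_cc = (
    transactions[transactions.pay == 'cc']
    .groupby('store')['total'].sum().to_frame('revenue_cc')
)
# align total and per-method revenue on the store index, then derive the share
merged = pd.concat([revenue, revenue_cc], axis=1)
merged['pct_cc'] = 100 * merged['revenue_cc'] / merged['revenue']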
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 27 17:50:38 2020
@author: Miguel <NAME>
Description: Script for reading the ISO NE dataset for load profiling in the context
of the NMF Correlated paper. It takes time series of real-time demand, dew point,
and temperature for a particular load zone selected by "location":
0: ME
1: NH
2: VT
3: CT
4: RI
5: SEMASS
6: WCMASS
7: NEMASSBOST
Output: Data_test and Data_train, both data structures containing:
Date, Day of the year, 24 values of hourly Real time demand, 24 values of hourly Temperature,
24 values of hourly Dew point, and the Weekday. The split of the whole data set into
train and test is defined by a date specified by the variables "day", "month" and "year".
"""
import pandas as pd
import datetime
import scipy
import scipy.io
import numpy as np
import pickle
from pathlib import Path
LOCATIONS = ['ME','NH','VT','CT','RI','SEMASS','WCMASS','NEMASSBOST']
project_path = Path("/Users/apple/Desktop/PASAR")
#==================================================================
# SELECT DATE THAT SPLITS DATA SET INTO TRAIN AND TEST
#==================================================================
#==================================================================
start_day_train_val = 1
start_month_train_val = 1
start_year_train_val= 2011
end_day_train_val = 31
end_month_train_val = 12
end_year_train_val = 2017
start_day_test = 1
start_month_test = 1
start_year_test = 2018
end_day_test = 31
end_month_test = 12
end_year_test = 2018
#==================================================================
data_folder = Path("/Users/apple/Desktop/PASAR/ISO_NE_Dataset_Final/Nestor")
filename = "iso_ne.pickle"
file_to_open = data_folder / filename
pickle_in=open(file_to_open,'rb')
iso_ne=pickle.load(pickle_in)
for location in range(0,8):
location_name = LOCATIONS[location]
data2011=iso_ne[location][0]
data2012=iso_ne[location][1]
data2013=iso_ne[location][2]
data2014=iso_ne[location][3]
data2015=iso_ne[location][4]
data2016=iso_ne[location][5]
data2017=iso_ne[location][6]
data2018=iso_ne[location][7]
Y2011=data2011[['Date','Hour','DEMAND','DryBulb','DewPnt']]
Y2012=data2012[['Date','Hour','DEMAND','DryBulb','DewPnt']]
Y2013=data2013[['Date','Hour','DEMAND','DryBulb','DewPnt']]
Y2014=data2014[['Date','Hour','DEMAND','DryBulb','DewPnt']]
Y2015=data2015[['Date','Hour','DEMAND','DryBulb','DewPnt']]
Y2016=data2016[['Date','Hr_End','RT_Demand','Dry_Bulb','Dew_Point']]
Y2017=data2017[['Date','Hr_End','RT_Demand','Dry_Bulb','Dew_Point']]
Y2018=data2018[['Date','Hr_End','RT_Demand','Dry_Bulb','Dew_Point']]
Aux2011 = pd.to_datetime(Y2011['Date']).dt.strftime('%d-%b-%Y')
Dates2011 = pd.Series(list(Aux2011[0::24]))
DoWeek2011 = pd.to_datetime(Dates2011).dt.day_name()
Load2011 = pd.Series(list(Y2011['DEMAND'].values.reshape(-1,24)))
Temperature2011 = pd.Series(list(Y2011['DryBulb'].values.reshape(-1,24)))
DewPoint2011 = pd.Series(list(Y2011['DewPnt'].values.reshape(-1,24)))
del Y2011
frame2011 = { 'Date': Dates2011, 'Weekday': DoWeek2011}
frame2011['Load'] = list(Load2011)
frame2011['Temperature'] = list(Temperature2011)
frame2011['DewPoint'] = list(DewPoint2011)
Y2011 = pd.DataFrame(frame2011)
Aux2012 = pd.to_datetime(Y2012['Date']).dt.strftime('%d-%b-%Y')
Dates2012 = pd.Series(list(Aux2012[0::24]))
DoWeek2012 = pd.to_datetime(Dates2012).dt.day_name()
Load2012 = pd.Series(list(Y2012['DEMAND'].values.reshape(-1,24)))
Temperature2012 = pd.Series(list(Y2012['DryBulb'].values.reshape(-1,24)))
DewPoint2012 = pd.Series(list(Y2012['DewPnt'].values.reshape(-1,24)))
del Y2012
frame2012 = { 'Date': Dates2012, 'Weekday': DoWeek2012}
frame2012['Load'] = list(Load2012)
frame2012['Temperature'] = list(Temperature2012)
frame2012['DewPoint'] = list(DewPoint2012)
Y2012 =
|
pd.DataFrame(frame2012)
|
pandas.DataFrame
|
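# A minimal sketch (toy data) of the reshape used for each year above: hourly records, 24
# per day, are collapsed into one row per day holding a 24-element array, with the date
# taken from every 24th record.
import numpy as np
import pandas as pd

hours = pd.date_range('2011-01-01', periods=48, freq='H')  # two full days
hourly = pd.DataFrame({'Date': hours.strftime('%Y-%m-%d'), 'DEMAND': np.arange(48.0)})
dates = pd.Series(list(pd.to_datetime(hourly['Date']).dt.strftime('%d-%b-%Y')[0::24]))
load = pd.Series(list(hourly['DEMAND'].values.reshape(-1, 24)))
daily = pd.DataFrame({'Date': dates, 'Weekday': pd.to_datetime(dates).dt.day_name(),
                      'Load': list(load)})
assert len(daily) == 2 and len(daily.loc[0, 'Load']) == 24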
import sys
sys.path.append('../')
import unittest
import numpy as np
import pandas as pd
import shutil
import os
from azureml.studio.core.io.data_frame_directory import load_data_frame_from_directory, save_data_frame_to_directory
import invoker
class TestErrorInput(unittest.TestCase):
def setUp(self):
self.__input_path = './functional_test_input_data_frame_directory'
self.__detect_mode = 'AnomalyOnly'
self.__timestamp_column = '%7B%22isFilter%22%3Atrue%2C%22rules%22%3A%5B%7B%22exclude%22%3Afalse%2C%22ruleType%22%3A%22ColumnNames%22%2C%22columns%22%3A%5B%22timestamp%22%5D%7D%5D%7D'
self.__value_column = '%7B%22isFilter%22%3Atrue%2C%22rules%22%3A%5B%7B%22exclude%22%3Afalse%2C%22ruleType%22%3A%22ColumnNames%22%2C%22columns%22%3A%5B%22value%22%5D%7D%5D%7D'
self.__batch_size = 2000
self.__threshold = 0.3
self.__sensitivity = 99
self.__append_mode = True
self.compute_stats_in_visualization = True,
self.__output_path = './functional_test_output_data_frame_directory'
def tearDown(self):
self.deleteDataFrameDirectory()
def deleteDataFrameDirectory(self):
if os.path.exists(self.__input_path):
shutil.rmtree(self.__input_path)
if os.path.exists(self.__output_path):
shutil.rmtree(self.__output_path)
def testAnomalyOnlyMode(self):
df = pd.DataFrame()
df['timestamp'] = pd.date_range(start='2020-01-01', periods=200, freq='1D')
df['value'] = np.sin(np.linspace(1, 20, 200))
save_data_frame_to_directory(self.__input_path, df)
invoker.invoke(self.__input_path, self.__detect_mode, self.__timestamp_column, self.__value_column,
self.__batch_size, self.__threshold, self.__sensitivity, self.__append_mode,
self.compute_stats_in_visualization, self.__output_path)
result = load_data_frame_from_directory(self.__output_path).data
self.assertEqual(result.shape[0], 200)
self.assertTrue('value' in result.columns)
self.assertTrue('isAnomaly' in result.columns)
self.assertTrue('score' in result.columns)
self.assertTrue('expectedValue' not in result.columns)
self.assertTrue('upperBoundary' not in result.columns)
self.assertTrue('lowerBoundary' not in result.columns)
def testAnomalyAndMargin(self):
df = pd.DataFrame()
df['timestamp'] = pd.date_range(start='2020-01-01', periods=200, freq='1D')
df['value'] = np.sin(np.linspace(1, 20, 200))
save_data_frame_to_directory(self.__input_path, df)
invoker.invoke(self.__input_path, "AnomalyAndMargin", self.__timestamp_column, self.__value_column,
self.__batch_size, self.__threshold, self.__sensitivity, self.__append_mode,
self.compute_stats_in_visualization, self.__output_path)
result = load_data_frame_from_directory(self.__output_path).data
self.assertEqual(result.shape[0], 200)
self.assertTrue('value' in result.columns)
self.assertTrue('isAnomaly' in result.columns)
self.assertTrue('score' in result.columns)
self.assertTrue('expectedValue' in result.columns)
self.assertTrue('upperBoundary' in result.columns)
self.assertTrue('lowerBoundary' in result.columns)
def testBatchMode(self):
df = pd.DataFrame()
df['timestamp'] =
|
pd.date_range(start='2020-01-01', periods=200, freq='1D')
|
pandas.date_range
|
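# A minimal sketch of the synthetic input the tests above build: pd.date_range produces a
# daily timestamp column ('1D' = one calendar day) that is paired with a smooth sine wave,
# so the detector sees 200 evenly spaced, anomaly-free points.
import numpy as np
import pandas as pd

df = pd.DataFrame()
df['timestamp'] = pd.date_range(start='2020-01-01', periods=200, freq='1D')
df['value'] = np.sin(np.linspace(1, 20, 200))
assert df.shape == (200, 2) and df['timestamp'].is_monotonic_increasing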
import numpy as np
import os
import pandas as pd
'''
Takes in a pair of .ades and .dat files, extracts the channel names and the corresponding SEEG time series,
and places them into four different files:
- raw numpy
- headers csv
- annotations csv
- channels csv
This follows the format we use for data from .edf files. Most fields are empty since .ades files do not provide a lot of these
data points.
'''
def rawtonumpy(raweeg, outputdatafile):
# open output Numpy file to write
npfile = open(outputdatafile, 'wb')
np.save(npfile, raweeg)
def chantocsv(chanlabels, samplerate, numsamps, outputchanfile):
##################### 2. Import channel headers ########################
# create list with dataframe column channel header names
channelheaders = [[
'Channel Number',
'Labels',
'Physical Maximum',
'Physical Minimum',
'Digital Maximum',
'Digital Minimum',
'Sample Frequency',
'Num Samples',
'Physical Dimensions',
]]
# get the channel labels of the file and convert to a list of strings
# -> also gets rid of extraneous characters
chanlabels = [str(x).replace('POL', '').replace(' ', '')
for x in chanlabels]
# read chan header data from each chan for each column and append to list
for i in range(len(chanlabels)):
channelheaders.append([
i + 1,
chanlabels[i],
'',
'',
'',
'',
samplerate,
numsamps,
'',
])
# create CSV file of channel header names and data
channelheaders_df = pd.DataFrame(data=channelheaders)
# create CSV file of file header names and data
channelheaders_df.to_csv(outputchanfile, index=False, header=False)
def annotationtocsv(outputannotationsfile):
##################### 3. Import File Annotations ########################
# create list
annotationheaders = [[
'Time (sec)',
'Duration',
'Description'
]]
for n in np.arange(0):
annotationheaders.append([
'',
'',
''
])
annotationheaders_df = pd.DataFrame(data=annotationheaders)
# create CSV file of channel header names and data
annotationheaders_df.to_csv(
outputannotationsfile,
index=False,
header=False)
def headerstocsv(samplerate, numsamps, outputheadersfile):
# create list with dataframe column file header names
fileheaders = [[
'pyedfib Version',
'Birth Date',
'Gender',
'Start Date (D-M-Y)',
'Start Time (H-M-S)',
'Patient Code',
'Equipment',
'Data Record Duration (s)',
'Number of Data Records in File',
'Number of Annotations in File',
'Sample Frequency',
'Samples in File',
'Physical Dimension'
]]
# append file header data for each dataframe column to list
# startdate = str(edffile.getStartdatetime().day) + '-' + str(edffile.getStartdatetime().month) + '-' + str(edffile.getStartdatetime().year)
# starttime = str(edffile.getStartdatetime().hour) + '-' + str(edffile.getStartdatetime().minute) + '-' + str(edffile.getStartdatetime().second)
fileheaders.append([
'',
'',
'',
'',
'',
'',
'',
numsamps / float(samplerate),
'',
'',
samplerate,
numsamps,
'',
])
# create dataframes from array of meta data
fileheaders_df =
|
pd.DataFrame(data=fileheaders)
|
pandas.DataFrame
|
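# A minimal sketch (hypothetical file name and values) of the pattern the helpers above
# share: a list of lists whose first entry is the header row is wrapped in a DataFrame and
# written with header=False / index=False, so exactly those rows end up in the CSV.
import pandas as pd

rows = [
    ['Channel Number', 'Labels', 'Sample Frequency'],
    [1, 'FP1', 1000],
    [2, 'FP2', 1000],
]
pd.DataFrame(data=rows).to_csv('channels_example.csv', index=False, header=False)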
import sys
sys.path.append('..')
import pandas as pd
import numpy as np
import os
from scripts.run_links import run
from config import config
results_dir = config['results_dir']
if __name__ == "__main__":
np.seterr(all='ignore')
dataset = '5ht6_KRFP_pca5'
keys = ['cov_type', 'link_weight', 'k']
metrics = ['links_accuracy']
results = pd.DataFrame({'dataset': []})
for k in [5, 10, 15]:
for lw in [1,2,4,8]:
# n_folds - default 5
params = {'seed': 42,
'n_init': 10,
'n_jobs': 10,
'k': k,
'gauss_per_clust': 1,
'cov_type': 'fixed_spherical',
'verbose': False,
'link_weight': lw}
key_params = [p for p in params if p in keys]
res = {key: [params[key]] for key in key_params}
res['dataset'] = [dataset]
            res_filename = "_".join(["{}:{}".format(key, v[0]) for key, v in res.items()]) + ".csv"
res_path = os.path.join(results_dir, res_filename)
if not os.path.exists(res_path):
ret = run(dataset, metrics, params)
res.update({m: [ret[m]] for m in metrics})
part_results =
|
pd.DataFrame.from_dict(res)
|
pandas.DataFrame.from_dict
|
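# A minimal sketch of how each run's result row above is materialised: a dict of
# single-element lists becomes a one-row DataFrame via pd.DataFrame.from_dict, which can
# then be appended to the accumulated results. Values here are placeholders.
import pandas as pd

res = {'dataset': ['5ht6_KRFP_pca5'], 'k': [5], 'link_weight': [1], 'links_accuracy': [0.0]}
part_results = pd.DataFrame.from_dict(res)
assert part_results.shape == (1, 4)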
# Copyright 2020 The Private Cardinality Estimation Framework Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for wfa_cardinality_estimation_evaluation_framework.evaluations.tests.report_generator."""
import os
import re
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
import pandas as pd
from wfa_cardinality_estimation_evaluation_framework.estimators import exact_set
from wfa_cardinality_estimation_evaluation_framework.evaluations import analyzer
from wfa_cardinality_estimation_evaluation_framework.evaluations import configs
from wfa_cardinality_estimation_evaluation_framework.evaluations import evaluator
from wfa_cardinality_estimation_evaluation_framework.evaluations import report_generator
from wfa_cardinality_estimation_evaluation_framework.evaluations.data import evaluation_configs
from wfa_cardinality_estimation_evaluation_framework.simulations import set_generator
class ReportGeneratorTest(parameterized.TestCase):
def setUp(self):
super(ReportGeneratorTest, self).setUp()
exact_set_lossless = configs.SketchEstimatorConfig(
name=evaluation_configs.construct_sketch_estimator_config_name(
sketch_name='exact_set',
sketch_config='set',
estimator_name='lossless'),
sketch_factory=exact_set.ExactMultiSet.get_sketch_factory(),
estimator=exact_set.LosslessEstimator())
exact_set_less_one = configs.SketchEstimatorConfig(
name=evaluation_configs.construct_sketch_estimator_config_name(
sketch_name='exact_set',
sketch_config='set',
estimator_name='less_one'),
sketch_factory=exact_set.ExactMultiSet.get_sketch_factory(),
estimator=exact_set.LessOneEstimator(),
sketch_noiser=exact_set.AddRandomElementsNoiser(
num_random_elements=0, random_state=np.random.RandomState()))
self.sketch_estimator_config_list = (exact_set_lossless, exact_set_less_one)
self.evaluation_config = configs.EvaluationConfig(
name='test_evaluation',
num_runs=2,
scenario_config_list=[
configs.ScenarioConfig(
name='ind1',
set_generator_factory=(
set_generator.IndependentSetGenerator
.get_generator_factory_with_num_and_size(
universe_size=10, num_sets=5, set_size=2))),
configs.ScenarioConfig(
name='ind2',
set_generator_factory=(
set_generator.IndependentSetGenerator
.get_generator_factory_with_num_and_size(
universe_size=10, num_sets=5, set_size=2))),
])
self.evaluation_run_name = 'test_run'
def _run_evaluation_and_simulation(out_dir):
self.evaluator = evaluator.Evaluator(
evaluation_config=self.evaluation_config,
sketch_estimator_config_list=self.sketch_estimator_config_list,
run_name=self.evaluation_run_name,
out_dir=out_dir)
self.evaluator()
self.analyzer = analyzer.CardinalityEstimatorEvaluationAnalyzer(
out_dir=out_dir,
evaluation_directory=out_dir,
evaluation_run_name=self.evaluation_run_name,
evaluation_name=self.evaluation_config.name,
estimable_criteria_list=[(0.05, 0.95), (1.01, 0.9)])
self.analyzer()
self.run_evaluation_and_simulation = _run_evaluation_and_simulation
exact_set_lossless_freq3 = configs.SketchEstimatorConfig(
name=evaluation_configs.construct_sketch_estimator_config_name(
sketch_name='exact_set',
sketch_config='set',
estimator_name='lossless',
max_frequency=3),
sketch_factory=exact_set.ExactMultiSet.get_sketch_factory(),
estimator=exact_set.LosslessEstimator(),
max_frequency=3)
exact_set_less_one_freq3 = configs.SketchEstimatorConfig(
name=evaluation_configs.construct_sketch_estimator_config_name(
sketch_name='exact_set',
sketch_config='set',
estimator_name='less_one',
max_frequency=3),
sketch_factory=exact_set.ExactMultiSet.get_sketch_factory(),
estimator=exact_set.LessOneEstimator(),
sketch_noiser=exact_set.AddRandomElementsNoiser(
num_random_elements=0, random_state=np.random.RandomState()),
max_frequency=3)
self.frequency_sketch_estimator_config_list = (exact_set_lossless_freq3,
exact_set_less_one_freq3)
self.frequency_evaluation_run_name = 'freq_test_run'
def _run_frequency_evaluation_and_simulation(out_dir):
self.evaluator = evaluator.Evaluator(
evaluation_config=self.evaluation_config,
sketch_estimator_config_list=(
self.frequency_sketch_estimator_config_list),
run_name=self.frequency_evaluation_run_name,
out_dir=out_dir)
self.evaluator()
self.analyzer = analyzer.FrequencyEstimatorEvaluationAnalyzer(
out_dir=out_dir,
evaluation_directory=out_dir,
evaluation_run_name=self.frequency_evaluation_run_name,
evaluation_name=self.evaluation_config.name,
estimable_criteria_list=[(0.05, 0.95), (1.01, 0.9)])
self.analyzer()
self.run_frequency_evaluation_and_simulation = (
_run_frequency_evaluation_and_simulation)
def test_parse_sketch_estimator_name(self):
sketch_estimator_name = 'vector_of_counts-4096-sequential-ln3-infty'
parsed_name = report_generator.ReportGenerator.parse_sketch_estimator_name(
sketch_estimator_name)
expected = {
evaluation_configs.SKETCH: 'vector_of_counts',
evaluation_configs.SKETCH_CONFIG: '4096',
evaluation_configs.SKETCH_EPSILON: 'ln3',
evaluation_configs.ESTIMATOR: 'sequential',
evaluation_configs.ESTIMATE_EPSILON: 'infty',
}
self.assertEqual(parsed_name, expected)
def test_add_parsed_sketch_estimator_name_cols(self):
df = pd.DataFrame({
'sketch_estimator': ['vector_of_counts-4096-sequential-ln3-infty',
'bloom_filter-1e6-union_estimator-infty-ln3']})
result = (
report_generator.ReportGenerator
.add_parsed_sketch_estimator_name_cols(df, 'sketch_estimator'))
expected = pd.DataFrame({
'sketch_estimator': ['vector_of_counts-4096-sequential-ln3-infty',
'bloom_filter-1e6-union_estimator-infty-ln3'],
evaluation_configs.SKETCH: ['vector_of_counts', 'bloom_filter'],
evaluation_configs.SKETCH_CONFIG: ['4096', '1e6'],
evaluation_configs.ESTIMATOR: ['sequential', 'union_estimator'],
evaluation_configs.SKETCH_EPSILON: ['ln3', 'infty'],
evaluation_configs.ESTIMATE_EPSILON: ['infty', 'ln3'],
})
try:
|
pd.testing.assert_frame_equal(result, expected)
|
pandas.testing.assert_frame_equal
|
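# A minimal sketch of the comparison used in the test above: pd.testing.assert_frame_equal
# returns None when two DataFrames match and raises AssertionError (with a diff) when any
# column name, dtype, or value differs.
import pandas as pd

a = pd.DataFrame({'sketch': ['vector_of_counts'], 'sketch_config': ['4096']})
b = a.copy()
pd.testing.assert_frame_equal(a, b)  # passes silently
try:
    pd.testing.assert_frame_equal(a, b.rename(columns={'sketch_config': 'cfg'}))
except AssertionError:
    pass  # differing column names are reported as a failure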
import sys
import treelib
import pandas as pd
from treelib import Tree
from tqdm import tqdm
from collections import OrderedDict, deque
from copy import deepcopy
from functools import partial
from tr.core.tree_utils import build_fleet_state, order_fleet_state
from tr.core.tree_utils import NodeScheduleDays, generate_code
from tr.core.tree_utils import fleet_operate_A, fleet_operate_C
from tr.core.tree_utils import generate_D_check_code
from tr.core.utils import advance_date, save_pickle, load_pickle
# the order of this list reflects a heuristic (do maintenance first)
maintenance_actions = [1, 0]
type_checks = ['A', 'C'] # type of checks
sys.setrecursionlimit(1500)  # the default recursion limit is reached otherwise
class TreeDaysPlanner:
def __init__(self, calendar, fleet, config_params):
self.calendar = calendar
self.fleet = fleet
self.cp = config_params
self.calendar_tree = {'A': Tree(), 'C': Tree(), 'A-RL': Tree()}
iso_str = '1/1/2022'
self.daterinos = pd.to_datetime(iso_str, format='%m/%d/%Y')
self.removed_aircrafts = OrderedDict()
try:
self.phased_out = load_pickle("build/check_files/phased_out.pkl")
self.final_calendar = load_pickle("build/check_files/C_checks.pkl")
except:
self.phased_out = OrderedDict()
self.final_calendar = {'A': {}, 'C': {}}
try:
metrics_dict = load_pickle("metrics_dict")
self.metrics(metrics_dict)
except:
pass
self.utilization_ratio, self.code_generator, self.tats, self.finale_schedule = \
self.__build_calendar_helpers()
for type_check in type_checks:
fleet_state = build_fleet_state(self.fleet, type_check=type_check)
fleet_state = order_fleet_state(fleet_state)
root = NodeScheduleDays(calendar=OrderedDict(),
day=self.calendar.start_date,
fleet_state=fleet_state,
action_maintenance=0,
assignment=[],
tag="Root",
identifier="root")
self.calendar_tree[type_check].add_node(root)
fleet_state = build_fleet_state(self.fleet, type_check='A')
fleet_state = order_fleet_state(fleet_state)
root = NodeScheduleDays(calendar=OrderedDict(),
day=self.calendar.start_date,
fleet_state=fleet_state,
action_maintenance=0,
assignment=[],
tag="Root",
identifier="root")
self.calendar_tree['A-RL'].add_node(root)
self.schedule_counter = 0
self.all_schedules = deque(maxlen=100) # maintain only the top 10
def __build_calendar_helpers(self):
fleet_state = build_fleet_state(self.fleet, type_check='C')
code_generator = {'A': partial(generate_code, 4), 'C': partial(generate_code, 12)}
utilization_ratio = OrderedDict()
tats = OrderedDict()
finale_schedule = OrderedDict()
for _ in self.fleet.aircraft_info.keys():
utilization_ratio[_] = {}
finale_schedule[_] = {}
utilization_ratio[_]['DFH'] = self.fleet.aircraft_info[_]['DFH']
utilization_ratio[_]['DFC'] = self.fleet.aircraft_info[_]['DFC']
c_elapsed_time = self.fleet.aircraft_info[_]['C_ELAPSED_TIME']
c_elapsed_tats = list(c_elapsed_time.keys())
c_elapsed_tats.remove('Fleet')
new_code = fleet_state[_]['C-SN']
tats[_] = {} # code to tat
for tat in c_elapsed_tats:
new_code = code_generator['C'](new_code)
tats[_][new_code] = c_elapsed_time[tat]
return utilization_ratio, code_generator, tats, finale_schedule
# exceptions is a list of aircraft that are in maintenance, thus not operating
def fleet_operate_one_day(self,
fleet_state,
date,
on_maintenance=[],
type_check='A',
on_c_maintenance=[],
type_D_check=False):
kwargs = {
'fleet_state': fleet_state,
'date': date,
'on_maintenance': on_maintenance,
'type_check': type_check,
'on_c_maintenance': on_c_maintenance,
'utilization_ratio': self.utilization_ratio,
'code_generator': self.code_generator
}
if type_check == 'A':
fleet_state = fleet_operate_A(**kwargs)
elif type_check == 'C':
kwargs['type_D_check'] = type_D_check
fleet_state = fleet_operate_C(**kwargs)
return fleet_state
def check_safety_fleet(self, fleet_state):
for key in fleet_state.keys():
if fleet_state[key]['TOTAL-RATIO'] >= 1:
return False
return True
def check_solved(self, current_calendar):
if len(current_calendar) > 0:
if list(current_calendar.keys())[-1] == self.daterinos:
return True
else:
return False
return False
def get_slots(self, date, check_type):
if check_type == 'A':
slots = self.calendar.calendar[date]['resources']['slots']['a-type']
elif check_type == 'C':
slots = self.calendar.calendar[date]['resources']['slots']['c-type']
return slots
# there are no variables, just one boolean variable: do maintenance or not
def expand_with_heuristic(self, node_schedule, type_check='A'):
if type_check == 'A':
childs = self.expand_a(node_schedule, type_check)
elif type_check == 'C':
childs = self.expand_c(node_schedule, type_check)
elif type_check == 'A-RL':
childs = self.expand_a(node_schedule, 'A')
return childs
def expand_a(self, node_schedule, type_check):
# receives a copy of the C calendar for consultation
# we need the same as the other one to flag merged checks
calendar_0 = deepcopy(node_schedule.calendar)
calendar_1 = deepcopy(node_schedule.calendar)
fleet_state_0 = deepcopy(node_schedule.fleet_state)
fleet_state_1 = deepcopy(node_schedule.fleet_state)
on_c_maintenance_0 = deepcopy(node_schedule.on_c_maintenance)
on_c_maintenance_1 = deepcopy(node_schedule.on_c_maintenance)
on_c_maintenance_tats_0 = deepcopy(node_schedule.on_c_maintenance_tats)
on_c_maintenance_tats_1 = deepcopy(node_schedule.on_c_maintenance_tats)
on_maintenance_merged_0 = deepcopy(node_schedule.on_maintenance_merged)
on_maintenance_merged_1 = deepcopy(node_schedule.on_maintenance_merged)
merged_flag = False
day = node_schedule.day
day_old = day
childs = []
day = advance_date(day, days=int(1))
slots = self.get_slots(day, type_check)
iso_str = '5/2/2019'
daterinos = pd.to_datetime(iso_str, format='%m/%d/%Y')
if day == daterinos:
slots += 1
iso_str = '7/22/2019'
daterinos = pd.to_datetime(iso_str, format='%m/%d/%Y')
if day == daterinos:
slots += 1
on_maintenance = list(fleet_state_1.keys())[0]
ratio = fleet_state_0[on_maintenance]['TOTAL-RATIO']
if self.calendar_tree['A'].depth() <= self.cp['a-checks']['beta_1']:
maintenance_actions = [1, 0] if ratio > self.cp['a-checks']['alpha_1'] else [0, 1]
elif self.calendar_tree['A'].depth() <= self.cp['a-checks']['beta_2']:
maintenance_actions = [1, 0] if ratio > self.cp['a-checks']['alpha_2'] else [0, 1]
elif self.calendar_tree['A'].depth() <= self.cp['a-checks']['beta_3']:
maintenance_actions = [1, 0] if ratio > self.cp['a-checks']['alpha_3'] else [0, 1]
elif self.calendar_tree['A'].depth() <= self.cp['a-checks']['beta_4']:
maintenance_actions = [1, 0] if ratio > self.cp['a-checks']['alpha_4'] else [0, 1]
elif self.calendar_tree['A'].depth() <= self.cp['a-checks']['beta_5']:
maintenance_actions = [1, 0] if ratio > self.cp['a-checks']['alpha_5'] else [0, 1]
else:
maintenance_actions = [1, 0] if ratio > self.cp['a-checks']['alpha_6'] else [0, 1]
# if self.calendar_tree['A'].depth() <= 239:
# maintenance_actions = [1, 0] if ratio > 0.78 else [0, 1]
# elif self.calendar_tree['A'].depth() <= 342:
# maintenance_actions = [1, 0] if ratio > 0.76 else [0, 1]
# elif self.calendar_tree['A'].depth() <= 726:
# maintenance_actions = [1, 0] if ratio > 0.76 else [0, 1]
# elif self.calendar_tree['A'].depth() <= 784:
# maintenance_actions = [1, 0] if ratio > 0.8 else [0, 1]
# elif self.calendar_tree['A'].depth() <= 926:
# maintenance_actions = [1, 0] if ratio > 0.8 else [0, 1]
# else:
# maintenance_actions = [1, 0] if ratio > 0.9 else [0, 1]
for _ in self.phased_out.keys():
if self.phased_out[_] == day:
print("{} phased out and is no longer in the fleet".format(_))
fleet_state_0.pop(_, None)
fleet_state_1.pop(_, None)
on_c_maintenance_all = deepcopy(on_c_maintenance_0)
for _ in on_c_maintenance_all:
print("{}-{} days remaining on maintenance".format(_, on_c_maintenance_tats_0[_]))
if on_c_maintenance_tats_0[_] == 0:
on_c_maintenance_0.remove(_)
on_c_maintenance_tats_0.pop(_, None)
on_c_maintenance_1.remove(_)
on_c_maintenance_tats_1.pop(_, None)
if _ in on_maintenance_merged_0:
on_maintenance_merged_0.remove(_)
on_maintenance_merged_1.remove(_)
else:
on_c_maintenance_tats_0[_] -= 1
on_c_maintenance_tats_1[_] -= 1
on_maintenance_merged = []
if self.final_calendar['C'][day]['MAINTENANCE']:
on_c_calendar = self.final_calendar['C'][day]['ASSIGNMENT']
on_c_calendar_tat = self.final_calendar['C'][day]['ASSIGNED STATE']['TAT']
on_c_maintenance_0.append(on_c_calendar)
on_c_maintenance_1.append(on_c_calendar)
on_c_maintenance_tats_0[on_c_calendar] = on_c_calendar_tat
on_c_maintenance_tats_1[on_c_calendar] = on_c_calendar_tat
if self.calendar_tree['A'].depth() <= 60:
if fleet_state_0[on_c_calendar]['TOTAL-RATIO'] > 0.40:
if on_c_calendar not in on_maintenance_merged_0:
on_maintenance_merged.append(on_c_calendar)
merged_flag = True
elif self.calendar_tree['A'].depth() <= 311:
if fleet_state_0[on_c_calendar]['TOTAL-RATIO'] > 0.50:
if on_c_calendar not in on_maintenance_merged_0:
on_maintenance_merged.append(on_c_calendar)
merged_flag = True
else:
if fleet_state_0[on_c_calendar]['TOTAL-RATIO'] > 0.70:
if on_c_calendar not in on_maintenance_merged_0:
on_maintenance_merged.append(on_c_calendar)
merged_flag = True
for action_value in maintenance_actions:
if action_value and self.calendar.calendar[day]['allowed'][
'public holidays'] and self.calendar.calendar[day]['allowed']['a-type']:
on_maintenance = list(fleet_state_1.keys())[0:slots]
# if flight hours are below 550, and there are 2 slots, use only one
if slots == 2 and fleet_state_1[on_maintenance[-1]]['FH-A'] <= 550:
on_maintenance = [list(fleet_state_1.keys())[0]]
for _ in on_maintenance_merged_0:
if _ in on_maintenance:
slots += 1
on_maintenance = list(fleet_state_1.keys())[0:slots]
on_maintenance.extend(on_maintenance_merged)
fleet_state_1 = self.fleet_operate_one_day(fleet_state_1, day_old, on_maintenance,
type_check, on_c_maintenance_1)
fleet_state_1 = order_fleet_state(fleet_state_1)
valid = self.check_safety_fleet(fleet_state_1)
if valid:
calendar_1[day] = {}
calendar_1[day]['SLOTS'] = slots
calendar_1[day]['MAINTENANCE'] = True
calendar_1[day]['ASSIGNMENT'] = on_maintenance
calendar_1[day]['MERGED FLAG'] = merged_flag
calendar_1[day]['ASSIGNED STATE'] = {}
for _ in on_maintenance:
calendar_1[day]['ASSIGNED STATE'][_] = fleet_state_1[_]
childs.append(
NodeScheduleDays(calendar_1,
day,
fleet_state_1,
action_value,
assignment=on_maintenance,
on_c_maintenance=on_c_maintenance_1,
on_c_maintenance_tats=on_c_maintenance_tats_1,
on_maintenance_merged=on_maintenance_merged))
if not action_value:
on_maintenance = []
fleet_state_0 = self.fleet_operate_one_day(fleet_state_0, day_old, on_maintenance,
type_check, on_c_maintenance_0)
fleet_state_0 = order_fleet_state(fleet_state_0)
valid = self.check_safety_fleet(fleet_state_0)
if valid:
calendar_0[day] = {}
calendar_0[day]['SLOTS'] = slots
calendar_0[day]['MAINTENANCE'] = False
calendar_0[day]['ASSIGNMENT'] = None
calendar_0[day]['MERGED FLAG'] = merged_flag
childs.append(
NodeScheduleDays(calendar_0,
day,
fleet_state_0,
action_value,
assignment=on_maintenance,
on_c_maintenance=on_c_maintenance_0,
on_c_maintenance_tats=on_c_maintenance_tats_0,
on_maintenance_merged=on_maintenance_merged))
return childs
def expand_c(self, node_schedule, type_check):
calendar_0 = deepcopy(node_schedule.calendar)
calendar_1 = deepcopy(node_schedule.calendar)
fleet_state_0 = deepcopy(node_schedule.fleet_state)
fleet_state_1 = deepcopy(node_schedule.fleet_state)
on_c_maintenance_0 = deepcopy(node_schedule.on_c_maintenance)
on_c_maintenance_1 = deepcopy(node_schedule.on_c_maintenance)
c_maintenance_counter = deepcopy(node_schedule.c_maintenance_counter)
on_c_maintenance_tats_0 = deepcopy(node_schedule.on_c_maintenance_tats)
on_c_maintenance_tats_1 = deepcopy(node_schedule.on_c_maintenance_tats)
fleet_phasing_out_0 = deepcopy(node_schedule.fleet_phasing_out)
fleet_phasing_out_1 = deepcopy(node_schedule.fleet_phasing_out)
phased_out_0 = deepcopy(node_schedule.phased_out)
phased_out_1 = deepcopy(node_schedule.phased_out)
day = node_schedule.day
day_old = day
childs = []
day = advance_date(day, days=int(1))
slots = self.get_slots(day, type_check)
on_maintenance = list(fleet_state_1.keys())[0]
ratio = fleet_state_0[on_maintenance]['TOTAL-RATIO']
if self.calendar_tree['C'].depth() <= self.cp['c-checks']['beta_1']:
maintenance_actions = [1, 0] if ratio > self.cp['c-checks']['alpha_1'] else [0, 1]
elif self.calendar_tree['C'].depth() <= self.cp['c-checks']['beta_2']:
maintenance_actions = [1, 0] if ratio > self.cp['c-checks']['alpha_2'] else [0, 1]
elif self.calendar_tree['C'].depth() <= self.cp['c-checks']['beta_3']:
maintenance_actions = [1, 0] if ratio > self.cp['c-checks']['alpha_2'] else [0, 1]
elif self.calendar_tree['C'].depth() <= self.cp['c-checks']['beta_4']:
maintenance_actions = [1, 0] if ratio > self.cp['c-checks']['alpha_4'] else [0, 1]
elif self.calendar_tree['C'].depth() <= self.cp['c-checks']['beta_5']:
maintenance_actions = [1, 0] if ratio > self.cp['c-checks']['alpha_5'] else [0, 1]
elif self.calendar_tree['C'].depth() <= self.cp['c-checks']['beta_6']:
maintenance_actions = [1, 0] if ratio > self.cp['c-checks']['alpha_6'] else [0, 1]
else:
maintenance_actions = [1, 0] if ratio > self.cp['c-checks']['alpha_7'] else [0, 1]
# if self.calendar_tree['C'].depth() <= 240:
# maintenance_actions = [1, 0] if ratio > 0.65 else [0, 1]
# elif self.calendar_tree['C'].depth() <= 343:
# maintenance_actions = [1, 0] if ratio > 0.65 else [0, 1]
# elif self.calendar_tree['C'].depth() <= 727:
# maintenance_actions = [1, 0] if ratio > 0.65 else [0, 1]
# elif self.calendar_tree['C'].depth() <= 785:
# maintenance_actions = [1, 0] if ratio > 0.75 else [0, 1]
# elif self.calendar_tree['C'].depth() <= 927:
# maintenance_actions = [1, 0] if ratio > 0.8 else [0, 1]
# elif self.calendar_tree['C'].depth() <= 960:
# maintenance_actions = [1, 0] if ratio > 0.8 else [0, 1]
# else:
# maintenance_actions = [1, 0] if ratio > 0.84 else [0, 1]
fleet_keys = list(fleet_state_0.keys())
for _ in fleet_keys:
last_code = self.code_generator['C'](fleet_state_0[_]['C-SN'])
if self.tats[_][last_code] == -1:
fleet_phasing_out_0[_] = deepcopy(fleet_state_0[_])
fleet_phasing_out_1[_] = deepcopy(fleet_state_1[_])
fleet_state_0.pop(_, None)
fleet_state_1.pop(_, None)
on_c_maintenance_all = deepcopy(on_c_maintenance_0)
for _ in on_c_maintenance_all:
print("{}-{} days remaining on maintenance".format(_, on_c_maintenance_tats_0[_]))
if on_c_maintenance_tats_0[_] == 0:
on_c_maintenance_0.remove(_)
on_c_maintenance_tats_0.pop(_, None)
on_c_maintenance_1.remove(_)
on_c_maintenance_tats_1.pop(_, None)
else:
on_c_maintenance_tats_0[_] -= 1
on_c_maintenance_tats_1[_] -= 1
if c_maintenance_counter > 0:
c_maintenance_counter -= 1
for action_value in maintenance_actions:
if action_value and self.calendar.calendar[day]['allowed'][
'public holidays'] and self.calendar.calendar[day]['allowed'][
'c-type'] and self.calendar.calendar[day]['allowed']['c_peak']:
on_maintenance = list(fleet_state_1.keys())[0]
le_d_check = False
for key in fleet_state_1.keys():
d_ratio = fleet_state_1[key]['DY-D-RATIO']
if d_ratio >= 1:
on_maintenance = key
le_d_check = True
new_code = self.code_generator['C'](fleet_state_1[on_maintenance]['C-SN'])
valid_c, on_c_maintenance_1, real_tats = self.c_allowed(
day, on_maintenance, on_c_maintenance_1, slots, c_maintenance_counter, new_code,
on_c_maintenance_tats_1)
if valid_c:
is_D_check = (self.is_d_check(on_maintenance, fleet_state_1) or le_d_check)
fleet_state_1 = self.fleet_operate_one_day(fleet_state_1,
day_old,
on_c_maintenance_1,
type_check=type_check,
type_D_check=is_D_check)
fleet_state_1 = order_fleet_state(fleet_state_1)
fleet_phasing_out_1 = self.fleet_operate_one_day(fleet_phasing_out_1,
day_old, [],
type_check=type_check)
fleet_phasing_out_1, phased_out_1 = self.phasing_out(
fleet_phasing_out_1, phased_out_1, day_old)
valid = self.check_safety_fleet(fleet_state_1)
if valid:
calendar_1[day] = {}
calendar_1[day]['SLOTS'] = slots
calendar_1[day]['MAINTENANCE'] = True
calendar_1[day]['ASSIGNMENT'] = on_maintenance
calendar_1[day]['ASSIGNED STATE'] = {}
calendar_1[day]['ASSIGNED STATE']['STATE'] = fleet_state_1[on_maintenance]
calendar_1[day]['ASSIGNED STATE']['TAT'] = real_tats[on_maintenance]
c_maintenance_counter = 3
childs.append(
NodeScheduleDays(calendar_1,
day,
fleet_state_1,
action_value,
assignment=on_maintenance,
on_c_maintenance=on_c_maintenance_1,
c_maintenance_counter=c_maintenance_counter,
on_c_maintenance_tats=real_tats,
fleet_phasing_out=fleet_phasing_out_1,
phased_out=phased_out_1))
if not action_value:
fleet_state_0 = self.fleet_operate_one_day(fleet_state_0, day_old,
on_c_maintenance_0, type_check)
fleet_state_0 = order_fleet_state(fleet_state_0)
fleet_phasing_out_0 = self.fleet_operate_one_day(fleet_phasing_out_0, day_old, [],
type_check)
fleet_phasing_out_0, phased_out_0 = self.phasing_out(fleet_phasing_out_0,
phased_out_0, day_old)
valid = self.check_safety_fleet(fleet_state_0)
if valid:
calendar_0[day] = {}
calendar_0[day]['SLOTS'] = slots
calendar_0[day]['MAINTENANCE'] = False
calendar_0[day]['ASSIGNMENT'] = None
childs.append(
NodeScheduleDays(calendar_0,
day,
fleet_state_0,
action_value,
assignment=[],
on_c_maintenance=on_c_maintenance_0,
c_maintenance_counter=c_maintenance_counter,
on_c_maintenance_tats=on_c_maintenance_tats_0,
fleet_phasing_out=fleet_phasing_out_0,
phased_out=phased_out_0))
return childs
def expand_a_RL(self, node_schedule, type_check):
# receives a copy of the C calendar for consultation
# we need the same as the other one to flag merged checks
calendar_0 = deepcopy(node_schedule.calendar)
calendar_1 = deepcopy(node_schedule.calendar)
fleet_state_0 = deepcopy(node_schedule.fleet_state)
fleet_state_1 = deepcopy(node_schedule.fleet_state)
on_c_maintenance_0 = deepcopy(node_schedule.on_c_maintenance)
on_c_maintenance_1 = deepcopy(node_schedule.on_c_maintenance)
on_c_maintenance_tats_0 = deepcopy(node_schedule.on_c_maintenance_tats)
on_c_maintenance_tats_1 = deepcopy(node_schedule.on_c_maintenance_tats)
on_maintenance_merged_0 = deepcopy(node_schedule.on_maintenance_merged)
on_maintenance_merged_1 = deepcopy(node_schedule.on_maintenance_merged)
merged_flag = False
day = node_schedule.day
day_old = day
childs = []
day = advance_date(day, days=int(1))
slots = self.get_slots(day, type_check)
iso_str = '5/2/2019'
daterinos = pd.to_datetime(iso_str, format='%m/%d/%Y')
if day == daterinos:
slots += 1
iso_str = '7/22/2019'
daterinos =
|
pd.to_datetime(iso_str, format='%m/%d/%Y')
|
pandas.to_datetime
|
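# A minimal sketch of the date parsing the planner above relies on: pd.to_datetime with an
# explicit '%m/%d/%Y' format turns the 'M/D/YYYY' strings into Timestamps that compare
# directly against the planner's current day.
import pandas as pd

daterinos = pd.to_datetime('7/22/2019', format='%m/%d/%Y')
assert daterinos == pd.Timestamp(2019, 7, 22)
assert daterinos > pd.to_datetime('5/2/2019', format='%m/%d/%Y')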
from datetime import timedelta
from functools import partial
import itertools
from parameterized import parameterized
import numpy as np
from numpy.testing import assert_array_equal, assert_almost_equal
import pandas as pd
from toolz import merge
from zipline.pipeline import SimplePipelineEngine, Pipeline, CustomFactor
from zipline.pipeline.common import (
EVENT_DATE_FIELD_NAME,
FISCAL_QUARTER_FIELD_NAME,
FISCAL_YEAR_FIELD_NAME,
SID_FIELD_NAME,
TS_FIELD_NAME,
)
from zipline.pipeline.data import DataSet
from zipline.pipeline.data import Column
from zipline.pipeline.domain import EquitySessionDomain
from zipline.pipeline.loaders.earnings_estimates import (
NextEarningsEstimatesLoader,
NextSplitAdjustedEarningsEstimatesLoader,
normalize_quarters,
PreviousEarningsEstimatesLoader,
PreviousSplitAdjustedEarningsEstimatesLoader,
split_normalized_quarters,
)
from zipline.testing.fixtures import (
WithAdjustmentReader,
WithTradingSessions,
ZiplineTestCase,
)
from zipline.testing.predicates import assert_equal
from zipline.testing.predicates import assert_frame_equal
from zipline.utils.numpy_utils import datetime64ns_dtype
from zipline.utils.numpy_utils import float64_dtype
import pytest
class Estimates(DataSet):
event_date = Column(dtype=datetime64ns_dtype)
fiscal_quarter = Column(dtype=float64_dtype)
fiscal_year = Column(dtype=float64_dtype)
estimate = Column(dtype=float64_dtype)
class MultipleColumnsEstimates(DataSet):
event_date = Column(dtype=datetime64ns_dtype)
fiscal_quarter = Column(dtype=float64_dtype)
fiscal_year = Column(dtype=float64_dtype)
estimate1 = Column(dtype=float64_dtype)
estimate2 = Column(dtype=float64_dtype)
def QuartersEstimates(announcements_out):
class QtrEstimates(Estimates):
num_announcements = announcements_out
name = Estimates
return QtrEstimates
def MultipleColumnsQuartersEstimates(announcements_out):
class QtrEstimates(MultipleColumnsEstimates):
num_announcements = announcements_out
name = Estimates
return QtrEstimates
def QuartersEstimatesNoNumQuartersAttr(num_qtr):
class QtrEstimates(Estimates):
name = Estimates
return QtrEstimates
def create_expected_df_for_factor_compute(start_date, sids, tuples, end_date):
"""
Given a list of tuples of new data we get for each sid on each critical
date (when information changes), create a DataFrame that fills that
data through a date range ending at `end_date`.
"""
df = pd.DataFrame(tuples, columns=[SID_FIELD_NAME, "estimate", "knowledge_date"])
df = df.pivot_table(
columns=SID_FIELD_NAME, values="estimate", index="knowledge_date", dropna=False
)
df = df.reindex(pd.date_range(start_date, end_date))
# Index name is lost during reindex.
df.index = df.index.rename("knowledge_date")
df["at_date"] = end_date.tz_localize("utc")
df = df.set_index(["at_date", df.index.tz_localize("utc")]).ffill()
new_sids = set(sids) - set(df.columns)
df = df.reindex(columns=df.columns.union(new_sids))
return df
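# A hypothetical illustration (not used by the tests below) of what
# create_expected_df_for_factor_compute returns: the (sid, estimate, knowledge_date)
# tuples are pivoted per sid, forward-filled through the date range, and indexed by
# (at_date, knowledge_date), both UTC-localized.
def _example_expected_df_for_factor_compute():
    start = pd.Timestamp("2015-01-05")
    end = pd.Timestamp("2015-01-07")
    tuples = [
        (0, 10.0, pd.Timestamp("2015-01-05")),
        (0, 11.0, pd.Timestamp("2015-01-06")),
    ]
    # sid 0 shows 10.0 on the 5th, then 11.0 from the 6th onward via the forward fill
    return create_expected_df_for_factor_compute(start, [0], tuples, end)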
class WithEstimates(WithTradingSessions, WithAdjustmentReader):
"""
ZiplineTestCase mixin providing cls.loader and cls.events as class
level fixtures.
Methods
-------
make_loader(events, columns) -> PipelineLoader
Method which returns the loader to be used throughout tests.
events : pd.DataFrame
The raw events to be used as input to the pipeline loader.
columns : dict[str -> str]
The dictionary mapping the names of BoundColumns to the
associated column name in the events DataFrame.
make_columns() -> dict[BoundColumn -> str]
Method which returns a dictionary of BoundColumns mapped to the
associated column names in the raw data.
"""
# Short window defined in order for test to run faster.
START_DATE = pd.Timestamp("2014-12-28", tz="utc")
END_DATE = pd.Timestamp("2015-02-04", tz="utc")
@classmethod
def make_loader(cls, events, columns):
raise NotImplementedError("make_loader")
@classmethod
def make_events(cls):
raise NotImplementedError("make_events")
@classmethod
def get_sids(cls):
return cls.events[SID_FIELD_NAME].unique()
@classmethod
def make_columns(cls):
return {
Estimates.event_date: "event_date",
Estimates.fiscal_quarter: "fiscal_quarter",
Estimates.fiscal_year: "fiscal_year",
Estimates.estimate: "estimate",
}
def make_engine(self, loader=None):
if loader is None:
loader = self.loader
return SimplePipelineEngine(
lambda x: loader,
self.asset_finder,
default_domain=EquitySessionDomain(
self.trading_days,
self.ASSET_FINDER_COUNTRY_CODE,
),
)
@classmethod
def init_class_fixtures(cls):
cls.events = cls.make_events()
cls.ASSET_FINDER_EQUITY_SIDS = cls.get_sids()
cls.ASSET_FINDER_EQUITY_SYMBOLS = [
"s" + str(n) for n in cls.ASSET_FINDER_EQUITY_SIDS
]
# We need to instantiate certain constants needed by supers of
# `WithEstimates` before we call their `init_class_fixtures`.
super(WithEstimates, cls).init_class_fixtures()
cls.columns = cls.make_columns()
# Some tests require `WithAdjustmentReader` to be set up by the time we
# make the loader.
cls.loader = cls.make_loader(
cls.events, {column.name: val for column, val in cls.columns.items()}
)
class WithOneDayPipeline(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
events : pd.DataFrame
A simple DataFrame with columns needed for estimates and a single sid
and no other data.
Tests
------
test_wrong_num_announcements_passed()
Tests that loading with an incorrect quarter number raises an error.
test_no_num_announcements_attr()
Tests that the loader throws an AssertionError if the dataset being
loaded has no `num_announcements` attribute.
"""
@classmethod
def make_columns(cls):
return {
MultipleColumnsEstimates.event_date: "event_date",
MultipleColumnsEstimates.fiscal_quarter: "fiscal_quarter",
MultipleColumnsEstimates.fiscal_year: "fiscal_year",
MultipleColumnsEstimates.estimate1: "estimate1",
MultipleColumnsEstimates.estimate2: "estimate2",
}
@classmethod
def make_events(cls):
return pd.DataFrame(
{
SID_FIELD_NAME: [0] * 2,
TS_FIELD_NAME: [pd.Timestamp("2015-01-01"), pd.Timestamp("2015-01-06")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-10"),
pd.Timestamp("2015-01-20"),
],
"estimate1": [1.0, 2.0],
"estimate2": [3.0, 4.0],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: [2015, 2015],
}
)
@classmethod
def make_expected_out(cls):
raise NotImplementedError("make_expected_out")
@classmethod
def init_class_fixtures(cls):
super(WithOneDayPipeline, cls).init_class_fixtures()
cls.sid0 = cls.asset_finder.retrieve_asset(0)
cls.expected_out = cls.make_expected_out()
def test_load_one_day(self):
# We want to test multiple columns
dataset = MultipleColumnsQuartersEstimates(1)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline({c.name: c.latest for c in dataset.columns}),
start_date=pd.Timestamp("2015-01-15", tz="utc"),
end_date=pd.Timestamp("2015-01-15", tz="utc"),
)
assert_frame_equal(results.sort_index(1), self.expected_out.sort_index(1))
class PreviousWithOneDayPipeline(WithOneDayPipeline, ZiplineTestCase):
"""
Tests that previous quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_out(cls):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: pd.Timestamp("2015-01-10"),
"estimate1": 1.0,
"estimate2": 3.0,
FISCAL_QUARTER_FIELD_NAME: 1.0,
FISCAL_YEAR_FIELD_NAME: 2015.0,
},
index=pd.MultiIndex.from_tuples(
((pd.Timestamp("2015-01-15", tz="utc"), cls.sid0),)
),
)
class NextWithOneDayPipeline(WithOneDayPipeline, ZiplineTestCase):
"""
Tests that next quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_out(cls):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: pd.Timestamp("2015-01-20"),
"estimate1": 2.0,
"estimate2": 4.0,
FISCAL_QUARTER_FIELD_NAME: 2.0,
FISCAL_YEAR_FIELD_NAME: 2015.0,
},
index=pd.MultiIndex.from_tuples(
((pd.Timestamp("2015-01-15", tz="utc"), cls.sid0),)
),
)
dummy_df = pd.DataFrame(
{SID_FIELD_NAME: 0},
columns=[
SID_FIELD_NAME,
TS_FIELD_NAME,
EVENT_DATE_FIELD_NAME,
FISCAL_QUARTER_FIELD_NAME,
FISCAL_YEAR_FIELD_NAME,
"estimate",
],
index=[0],
)
class WithWrongLoaderDefinition(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
events : pd.DataFrame
A simple DataFrame with columns needed for estimates and a single sid
and no other data.
Tests
------
test_wrong_num_announcements_passed()
Tests that loading with an incorrect quarter number raises an error.
test_no_num_announcements_attr()
Tests that the loader throws an AssertionError if the dataset being
loaded has no `num_announcements` attribute.
"""
@classmethod
def make_events(cls):
return dummy_df
def test_wrong_num_announcements_passed(self):
bad_dataset1 = QuartersEstimates(-1)
bad_dataset2 = QuartersEstimates(-2)
good_dataset = QuartersEstimates(1)
engine = self.make_engine()
columns = {
c.name + str(dataset.num_announcements): c.latest
for dataset in (bad_dataset1, bad_dataset2, good_dataset)
for c in dataset.columns
}
p = Pipeline(columns)
err_msg = (
r"Passed invalid number of quarters -[0-9],-[0-9]; "
r"must pass a number of quarters >= 0"
)
with pytest.raises(ValueError, match=err_msg):
engine.run_pipeline(
p,
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
def test_no_num_announcements_attr(self):
dataset = QuartersEstimatesNoNumQuartersAttr(1)
engine = self.make_engine()
p = Pipeline({c.name: c.latest for c in dataset.columns})
with pytest.raises(AttributeError):
engine.run_pipeline(
p,
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
class PreviousWithWrongNumQuarters(WithWrongLoaderDefinition, ZiplineTestCase):
"""
Tests that previous quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
class NextWithWrongNumQuarters(WithWrongLoaderDefinition, ZiplineTestCase):
"""
Tests that next quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
options = [
"split_adjustments_loader",
"split_adjusted_column_names",
"split_adjusted_asof",
]
class WrongSplitsLoaderDefinition(WithEstimates, ZiplineTestCase):
"""
Test class that tests that loaders break correctly when incorrectly
instantiated.
Tests
-----
test_extra_splits_columns_passed(SplitAdjustedEstimatesLoader)
A test that checks that the loader correctly breaks when an
unexpected column is passed in the list of split-adjusted columns.
"""
@classmethod
def init_class_fixtures(cls):
super(WithEstimates, cls).init_class_fixtures()
@parameterized.expand(
itertools.product(
(
NextSplitAdjustedEarningsEstimatesLoader,
PreviousSplitAdjustedEarningsEstimatesLoader,
),
)
)
def test_extra_splits_columns_passed(self, loader):
columns = {
Estimates.event_date: "event_date",
Estimates.fiscal_quarter: "fiscal_quarter",
Estimates.fiscal_year: "fiscal_year",
Estimates.estimate: "estimate",
}
with pytest.raises(ValueError):
loader(
dummy_df,
{column.name: val for column, val in columns.items()},
split_adjustments_loader=self.adjustment_reader,
split_adjusted_column_names=["estimate", "extra_col"],
split_adjusted_asof=pd.Timestamp("2015-01-01"),
)
class WithEstimatesTimeZero(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
cls.events : pd.DataFrame
Generated dynamically in order to test inter-leavings of estimates and
event dates for multiple quarters to make sure that we select the
right immediate 'next' or 'previous' quarter relative to each date -
i.e., the right 'time zero' on the timeline. We care about selecting
the right 'time zero' because we use that to calculate which quarter's
data needs to be returned for each day.
Methods
-------
get_expected_estimate(q1_knowledge,
q2_knowledge,
comparable_date) -> pd.DataFrame
Retrieves the expected estimate given the latest knowledge about each
quarter and the date on which the estimate is being requested. If
there is no expected estimate, returns an empty DataFrame.
Tests
------
test_estimates()
Tests that we get the right 'time zero' value on each day for each
sid and for each column.
"""
# Shorter date range for performance
END_DATE = pd.Timestamp("2015-01-28", tz="utc")
q1_knowledge_dates = [
pd.Timestamp("2015-01-01"),
pd.Timestamp("2015-01-04"),
pd.Timestamp("2015-01-07"),
pd.Timestamp("2015-01-11"),
]
q2_knowledge_dates = [
pd.Timestamp("2015-01-14"),
pd.Timestamp("2015-01-17"),
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-23"),
]
# We want to model the possibility of an estimate predicting a release date
# that doesn't match the actual release. This could be done by dynamically
# generating more combinations with different release dates, but that
# significantly increases the amount of time it takes to run the tests.
# These hard-coded cases are sufficient to know that we can update our
# beliefs when we get new information.
q1_release_dates = [
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-14"),
] # One day late
q2_release_dates = [
pd.Timestamp("2015-01-25"), # One day early
pd.Timestamp("2015-01-26"),
]
@classmethod
def make_events(cls):
"""
In order to determine which estimate we care about for a particular
sid, we need to look at all estimates that we have for that sid and
their associated event dates.
We define q1 < q2, and thus event1 < event2 since event1 occurs
during q1 and event2 occurs during q2 and we assume that there can
only be 1 event per quarter. We assume that there can be multiple
estimates per quarter leading up to the event. We assume that estimates
will not surpass the relevant event date. We will look at 2 estimates
for an event before the event occurs, since that is the simplest
scenario that covers the interesting edge cases:
- estimate values changing
- a release date changing
- estimates for different quarters interleaving
Thus, we generate all possible inter-leavings of 2 estimates per
quarter-event where estimate1 < estimate2 and all estimates are < the
relevant event and assign each of these inter-leavings to a
different sid.
"""
sid_estimates = []
sid_releases = []
# We want all permutations of 2 knowledge dates per quarter.
it = enumerate(
itertools.permutations(cls.q1_knowledge_dates + cls.q2_knowledge_dates, 4)
)
for sid, (q1e1, q1e2, q2e1, q2e2) in it:
# We're assuming that estimates must come before the relevant
# release.
if (
q1e1 < q1e2
and q2e1 < q2e2
# All estimates are < Q2's event, so just constrain Q1
# estimates.
and q1e1 < cls.q1_release_dates[0]
and q1e2 < cls.q1_release_dates[0]
):
sid_estimates.append(
cls.create_estimates_df(q1e1, q1e2, q2e1, q2e2, sid)
)
sid_releases.append(cls.create_releases_df(sid))
return pd.concat(sid_estimates + sid_releases).reset_index(drop=True)
@classmethod
def get_sids(cls):
sids = cls.events[SID_FIELD_NAME].unique()
# Tack on an extra sid to make sure that sids with no data are
# included but have all-null columns.
return list(sids) + [max(sids) + 1]
@classmethod
def create_releases_df(cls, sid):
# Final release dates never change. The quarters have very tight date
# ranges in order to reduce the number of dates we need to iterate
# through when testing.
return pd.DataFrame(
{
TS_FIELD_NAME: [pd.Timestamp("2015-01-13"), pd.Timestamp("2015-01-26")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-26"),
],
"estimate": [0.5, 0.8],
FISCAL_QUARTER_FIELD_NAME: [1.0, 2.0],
FISCAL_YEAR_FIELD_NAME: [2015.0, 2015.0],
SID_FIELD_NAME: sid,
}
)
@classmethod
def create_estimates_df(cls, q1e1, q1e2, q2e1, q2e2, sid):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: cls.q1_release_dates + cls.q2_release_dates,
"estimate": [0.1, 0.2, 0.3, 0.4],
FISCAL_QUARTER_FIELD_NAME: [1.0, 1.0, 2.0, 2.0],
FISCAL_YEAR_FIELD_NAME: [2015.0, 2015.0, 2015.0, 2015.0],
TS_FIELD_NAME: [q1e1, q1e2, q2e1, q2e2],
SID_FIELD_NAME: sid,
}
)
def get_expected_estimate(self, q1_knowledge, q2_knowledge, comparable_date):
return pd.DataFrame()
def test_estimates(self):
dataset = QuartersEstimates(1)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline({c.name: c.latest for c in dataset.columns}),
start_date=self.trading_days[1],
end_date=self.trading_days[-2],
)
for sid in self.ASSET_FINDER_EQUITY_SIDS:
sid_estimates = results.xs(sid, level=1)
# Separate assertion for all-null DataFrame to avoid setting
# column dtypes on `all_expected`.
if sid == max(self.ASSET_FINDER_EQUITY_SIDS):
assert sid_estimates.isnull().all().all()
else:
ts_sorted_estimates = self.events[
self.events[SID_FIELD_NAME] == sid
].sort_values(TS_FIELD_NAME)
q1_knowledge = ts_sorted_estimates[
ts_sorted_estimates[FISCAL_QUARTER_FIELD_NAME] == 1
]
q2_knowledge = ts_sorted_estimates[
ts_sorted_estimates[FISCAL_QUARTER_FIELD_NAME] == 2
]
all_expected = pd.concat(
[
self.get_expected_estimate(
q1_knowledge[
q1_knowledge[TS_FIELD_NAME] <= date.tz_localize(None)
],
q2_knowledge[
q2_knowledge[TS_FIELD_NAME] <= date.tz_localize(None)
],
date.tz_localize(None),
).set_index([[date]])
for date in sid_estimates.index
],
axis=0,
)
sid_estimates.index = all_expected.index.copy()
assert_equal(all_expected[sid_estimates.columns], sid_estimates)
class NextEstimate(WithEstimatesTimeZero, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
def get_expected_estimate(self, q1_knowledge, q2_knowledge, comparable_date):
# If our latest knowledge of q1 is that the release is
# happening on this simulation date or later, then that's
# the estimate we want to use.
if (
not q1_knowledge.empty
and q1_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] >= comparable_date
):
return q1_knowledge.iloc[-1:]
# If q1 has already happened or we don't know about it
# yet and our latest knowledge indicates that q2 hasn't
# happened yet, then that's the estimate we want to use.
elif (
not q2_knowledge.empty
and q2_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] >= comparable_date
):
return q2_knowledge.iloc[-1:]
return pd.DataFrame(columns=q1_knowledge.columns, index=[comparable_date])
class PreviousEstimate(WithEstimatesTimeZero, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
def get_expected_estimate(self, q1_knowledge, q2_knowledge, comparable_date):
# The expected estimate will be for q2 if the last thing
# we've seen is that the release date already happened.
# Otherwise, it'll be for q1, as long as the release date
# for q1 has already happened.
if (
not q2_knowledge.empty
and q2_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] <= comparable_date
):
return q2_knowledge.iloc[-1:]
elif (
not q1_knowledge.empty
and q1_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] <= comparable_date
):
return q1_knowledge.iloc[-1:]
return pd.DataFrame(columns=q1_knowledge.columns, index=[comparable_date])
class WithEstimateMultipleQuarters(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events, cls.make_expected_out as
class-level fixtures and self.test_multiple_qtrs_requested as a test.
Attributes
----------
events : pd.DataFrame
Simple DataFrame with estimates for 2 quarters for a single sid.
Methods
-------
make_expected_out() --> pd.DataFrame
Returns the DataFrame that is expected as a result of running a
Pipeline where estimates are requested for multiple quarters out.
fill_expected_out(expected)
Fills the expected DataFrame with data.
Tests
------
test_multiple_qtrs_requested()
        Runs a Pipeline that requests estimates for multiple quarters
out and checks that the returned columns contain data for the correct
number of quarters out.
"""
@classmethod
def make_events(cls):
return pd.DataFrame(
{
SID_FIELD_NAME: [0] * 2,
TS_FIELD_NAME: [pd.Timestamp("2015-01-01"), pd.Timestamp("2015-01-06")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-10"),
pd.Timestamp("2015-01-20"),
],
"estimate": [1.0, 2.0],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: [2015, 2015],
}
)
@classmethod
def init_class_fixtures(cls):
super(WithEstimateMultipleQuarters, cls).init_class_fixtures()
cls.expected_out = cls.make_expected_out()
@classmethod
def make_expected_out(cls):
expected = pd.DataFrame(
columns=[cls.columns[col] + "1" for col in cls.columns]
+ [cls.columns[col] + "2" for col in cls.columns],
index=cls.trading_days,
)
for (col, raw_name), suffix in itertools.product(
cls.columns.items(), ("1", "2")
):
expected_name = raw_name + suffix
if col.dtype == datetime64ns_dtype:
expected[expected_name] = pd.to_datetime(expected[expected_name])
else:
expected[expected_name] = expected[expected_name].astype(col.dtype)
cls.fill_expected_out(expected)
return expected.reindex(cls.trading_days)
def test_multiple_qtrs_requested(self):
dataset1 = QuartersEstimates(1)
dataset2 = QuartersEstimates(2)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline(
merge(
[
{c.name + "1": c.latest for c in dataset1.columns},
{c.name + "2": c.latest for c in dataset2.columns},
]
)
),
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
q1_columns = [col.name + "1" for col in self.columns]
q2_columns = [col.name + "2" for col in self.columns]
# We now expect a column for 1 quarter out and a column for 2
# quarters out for each of the dataset columns.
assert_equal(
sorted(np.array(q1_columns + q2_columns)), sorted(results.columns.values)
)
assert_equal(
self.expected_out.sort_index(axis=1),
results.xs(0, level=1).sort_index(axis=1),
)
class NextEstimateMultipleQuarters(WithEstimateMultipleQuarters, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def fill_expected_out(cls, expected):
# Fill columns for 1 Q out
for raw_name in cls.columns.values():
expected.loc[
pd.Timestamp("2015-01-01", tz="UTC") : pd.Timestamp(
"2015-01-11", tz="UTC"
),
raw_name + "1",
] = cls.events[raw_name].iloc[0]
expected.loc[
pd.Timestamp("2015-01-11", tz="UTC") : pd.Timestamp(
"2015-01-20", tz="UTC"
),
raw_name + "1",
] = cls.events[raw_name].iloc[1]
# Fill columns for 2 Q out
# We only have an estimate and event date for 2 quarters out before
# Q1's event happens; after Q1's event, we know 1 Q out but not 2 Qs
# out.
for col_name in ["estimate", "event_date"]:
expected.loc[
pd.Timestamp("2015-01-06", tz="UTC") : pd.Timestamp(
"2015-01-10", tz="UTC"
),
col_name + "2",
] = cls.events[col_name].iloc[1]
# But we know what FQ and FY we'd need in both Q1 and Q2
# because we know which FQ is next and can calculate from there
expected.loc[
pd.Timestamp("2015-01-01", tz="UTC") : pd.Timestamp("2015-01-09", tz="UTC"),
FISCAL_QUARTER_FIELD_NAME + "2",
] = 2
expected.loc[
pd.Timestamp("2015-01-12", tz="UTC") : pd.Timestamp("2015-01-20", tz="UTC"),
FISCAL_QUARTER_FIELD_NAME + "2",
] = 3
expected.loc[
pd.Timestamp("2015-01-01", tz="UTC") : pd.Timestamp("2015-01-20", tz="UTC"),
FISCAL_YEAR_FIELD_NAME + "2",
] = 2015
return expected
class PreviousEstimateMultipleQuarters(WithEstimateMultipleQuarters, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def fill_expected_out(cls, expected):
# Fill columns for 1 Q out
for raw_name in cls.columns.values():
expected[raw_name + "1"].loc[
pd.Timestamp("2015-01-12", tz="UTC") : pd.Timestamp(
"2015-01-19", tz="UTC"
)
] = cls.events[raw_name].iloc[0]
expected[raw_name + "1"].loc[
pd.Timestamp("2015-01-20", tz="UTC") :
] = cls.events[raw_name].iloc[1]
# Fill columns for 2 Q out
for col_name in ["estimate", "event_date"]:
expected[col_name + "2"].loc[
pd.Timestamp("2015-01-20", tz="UTC") :
] = cls.events[col_name].iloc[0]
expected[FISCAL_QUARTER_FIELD_NAME + "2"].loc[
pd.Timestamp("2015-01-12", tz="UTC") : pd.Timestamp("2015-01-20", tz="UTC")
] = 4
expected[FISCAL_YEAR_FIELD_NAME + "2"].loc[
pd.Timestamp("2015-01-12", tz="UTC") : pd.Timestamp("2015-01-20", tz="UTC")
] = 2014
expected[FISCAL_QUARTER_FIELD_NAME + "2"].loc[
pd.Timestamp("2015-01-20", tz="UTC") :
] = 1
expected[FISCAL_YEAR_FIELD_NAME + "2"].loc[
pd.Timestamp("2015-01-20", tz="UTC") :
] = 2015
return expected
class WithVaryingNumEstimates(WithEstimates):
"""
ZiplineTestCase mixin providing fixtures and a test to ensure that we
have the correct overwrites when the event date changes. We want to make
sure that if we have a quarter with an event date that gets pushed back,
we don't start overwriting for the next quarter early. Likewise,
if we have a quarter with an event date that gets pushed forward, we want
to make sure that we start applying adjustments at the appropriate, earlier
date, rather than the later date.
Methods
-------
assert_compute()
Defines how to determine that results computed for the `SomeFactor`
factor are correct.
Tests
-----
test_windows_with_varying_num_estimates()
Tests that we create the correct overwrites from 2015-01-13 to
2015-01-14 regardless of how event dates were updated for each
quarter for each sid.
"""
@classmethod
def make_events(cls):
return pd.DataFrame(
{
SID_FIELD_NAME: [0] * 3 + [1] * 3,
TS_FIELD_NAME: [
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-01-13"),
]
* 2,
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-01-20"),
],
"estimate": [11.0, 12.0, 21.0] * 2,
FISCAL_QUARTER_FIELD_NAME: [1, 1, 2] * 2,
FISCAL_YEAR_FIELD_NAME: [2015] * 6,
}
)
@classmethod
def assert_compute(cls, estimate, today):
raise NotImplementedError("assert_compute")
def test_windows_with_varying_num_estimates(self):
dataset = QuartersEstimates(1)
assert_compute = self.assert_compute
class SomeFactor(CustomFactor):
inputs = [dataset.estimate]
window_length = 3
def compute(self, today, assets, out, estimate):
assert_compute(estimate, today)
engine = self.make_engine()
engine.run_pipeline(
Pipeline({"est": SomeFactor()}),
start_date=pd.Timestamp("2015-01-13", tz="utc"),
# last event date we have
end_date=pd.Timestamp("2015-01-14", tz="utc"),
)
class PreviousVaryingNumEstimates(WithVaryingNumEstimates, ZiplineTestCase):
def assert_compute(self, estimate, today):
if today == pd.Timestamp("2015-01-13", tz="utc"):
assert_array_equal(estimate[:, 0], np.array([np.NaN, np.NaN, 12]))
assert_array_equal(estimate[:, 1], np.array([np.NaN, 12, 12]))
else:
assert_array_equal(estimate[:, 0], np.array([np.NaN, 12, 12]))
assert_array_equal(estimate[:, 1], np.array([12, 12, 12]))
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
class NextVaryingNumEstimates(WithVaryingNumEstimates, ZiplineTestCase):
def assert_compute(self, estimate, today):
if today == pd.Timestamp("2015-01-13", tz="utc"):
assert_array_equal(estimate[:, 0], np.array([11, 12, 12]))
assert_array_equal(estimate[:, 1], np.array([np.NaN, np.NaN, 21]))
else:
assert_array_equal(estimate[:, 0], np.array([np.NaN, 21, 21]))
assert_array_equal(estimate[:, 1], np.array([np.NaN, 21, 21]))
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
class WithEstimateWindows(WithEstimates):
"""
    ZiplineTestCase mixin providing fixtures and a test for running a
Pipeline with an estimates loader over differently-sized windows.
Attributes
----------
events : pd.DataFrame
DataFrame with estimates for 2 quarters for 2 sids.
window_test_start_date : pd.Timestamp
The date from which the window should start.
timelines : dict[int -> pd.DataFrame]
        A dictionary mapping the number of quarters out to
snapshots of how the data should look on each date in the date range.
Methods
-------
make_expected_timelines() -> dict[int -> pd.DataFrame]
Creates a dictionary of expected data. See `timelines`, above.
Tests
-----
test_estimate_windows_at_quarter_boundaries()
Tests that we overwrite values with the correct quarter's estimate at
the correct dates when we have a factor that asks for a window of data.
"""
END_DATE = pd.Timestamp("2015-02-10", tz="utc")
window_test_start_date = pd.Timestamp("2015-01-05")
critical_dates = [
pd.Timestamp("2015-01-09", tz="utc"),
pd.Timestamp("2015-01-15", tz="utc"),
pd.Timestamp("2015-01-20", tz="utc"),
pd.Timestamp("2015-01-26", tz="utc"),
pd.Timestamp("2015-02-05", tz="utc"),
pd.Timestamp("2015-02-10", tz="utc"),
]
# Starting date, number of announcements out.
window_test_cases = list(itertools.product(critical_dates, (1, 2)))
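    # Each test case pairs a pipeline start date with the number of announcements
    # out to request; the factor window in the test then spans from
    # window_test_start_date up to that start date.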
@classmethod
def make_events(cls):
# Typical case: 2 consecutive quarters.
sid_0_timeline = pd.DataFrame(
{
TS_FIELD_NAME: [
cls.window_test_start_date,
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-02-10"),
# We want a case where we get info for a later
# quarter before the current quarter is over but
# after the split_asof_date to make sure that
# we choose the correct date to overwrite until.
pd.Timestamp("2015-01-18"),
],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-02-10"),
pd.Timestamp("2015-02-10"),
pd.Timestamp("2015-04-01"),
],
"estimate": [100.0, 101.0] + [200.0, 201.0] + [400],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2 + [4],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 0,
}
)
# We want a case where we skip a quarter. We never find out about Q2.
sid_10_timeline = pd.DataFrame(
{
TS_FIELD_NAME: [
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-15"),
],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-22"),
pd.Timestamp("2015-01-22"),
pd.Timestamp("2015-02-05"),
pd.Timestamp("2015-02-05"),
],
"estimate": [110.0, 111.0] + [310.0, 311.0],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [3] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 10,
}
)
# We want to make sure we have correct overwrites when sid quarter
# boundaries collide. This sid's quarter boundaries collide with sid 0.
sid_20_timeline = pd.DataFrame(
{
TS_FIELD_NAME: [
cls.window_test_start_date,
pd.Timestamp("2015-01-07"),
cls.window_test_start_date,
pd.Timestamp("2015-01-17"),
],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-02-10"),
pd.Timestamp("2015-02-10"),
],
"estimate": [120.0, 121.0] + [220.0, 221.0],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 20,
}
)
concatted = pd.concat(
[sid_0_timeline, sid_10_timeline, sid_20_timeline]
).reset_index()
np.random.seed(0)
return concatted.reindex(np.random.permutation(concatted.index))
@classmethod
def get_sids(cls):
sids = sorted(cls.events[SID_FIELD_NAME].unique())
# Add extra sids between sids in our data. We want to test that we
# apply adjustments to the correct sids.
return [
sid for i in range(len(sids) - 1) for sid in range(sids[i], sids[i + 1])
] + [sids[-1]]
@classmethod
def make_expected_timelines(cls):
return {}
@classmethod
def init_class_fixtures(cls):
super(WithEstimateWindows, cls).init_class_fixtures()
cls.create_expected_df_for_factor_compute = partial(
create_expected_df_for_factor_compute,
cls.window_test_start_date,
cls.get_sids(),
)
cls.timelines = cls.make_expected_timelines()
@parameterized.expand(window_test_cases)
def test_estimate_windows_at_quarter_boundaries(
self, start_date, num_announcements_out
):
dataset = QuartersEstimates(num_announcements_out)
trading_days = self.trading_days
timelines = self.timelines
# The window length should be from the starting index back to the first
# date on which we got data. The goal is to ensure that as we
# progress through the timeline, all data we got, starting from that
# first date, is correctly overwritten.
window_len = (
self.trading_days.get_loc(start_date)
- self.trading_days.get_loc(self.window_test_start_date)
+ 1
)
class SomeFactor(CustomFactor):
inputs = [dataset.estimate]
window_length = window_len
def compute(self, today, assets, out, estimate):
today_idx = trading_days.get_loc(today)
today_timeline = (
timelines[num_announcements_out]
.loc[today]
.reindex(trading_days[: today_idx + 1])
.values
)
timeline_start_idx = len(today_timeline) - window_len
assert_almost_equal(estimate, today_timeline[timeline_start_idx:])
engine = self.make_engine()
engine.run_pipeline(
Pipeline({"est": SomeFactor()}),
start_date=start_date,
# last event date we have
end_date=pd.Timestamp("2015-02-10", tz="utc"),
)
class PreviousEstimateWindows(WithEstimateWindows, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_timelines(cls):
oneq_previous = pd.concat(
[
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-09", "2015-01-19")
]
),
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-01-20")),
(10, np.NaN, cls.window_test_start_date),
(20, 121, pd.Timestamp("2015-01-20")),
],
pd.Timestamp("2015-01-20"),
),
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-01-20")),
(10, np.NaN, cls.window_test_start_date),
(20, 121, pd.Timestamp("2015-01-20")),
],
pd.Timestamp("2015-01-21"),
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-01-20")),
(10, 111, pd.Timestamp("2015-01-22")),
(20, 121, pd.Timestamp("2015-01-20")),
],
end_date,
)
for end_date in pd.date_range("2015-01-22", "2015-02-04")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-01-20")),
(10, 311, pd.Timestamp("2015-02-05")),
(20, 121, pd.Timestamp("2015-01-20")),
],
end_date,
)
for end_date in pd.date_range("2015-02-05", "2015-02-09")
]
),
cls.create_expected_df_for_factor_compute(
[
(0, 201, pd.Timestamp("2015-02-10")),
(10, 311, pd.Timestamp("2015-02-05")),
(20, 221, pd.Timestamp("2015-02-10")),
],
pd.Timestamp("2015-02-10"),
),
]
)
twoq_previous = pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-09", "2015-02-09")
]
# We never get estimates for S1 for 2Q ago because once Q3
# becomes our previous quarter, 2Q ago would be Q2, and we have
# no data on it.
+ [
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-02-10")),
(10, np.NaN, pd.Timestamp("2015-02-05")),
(20, 121, pd.Timestamp("2015-02-10")),
],
pd.Timestamp("2015-02-10"),
)
]
)
return {1: oneq_previous, 2: twoq_previous}
class NextEstimateWindows(WithEstimateWindows, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_timelines(cls):
oneq_next = pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 100, cls.window_test_start_date),
(10, 110, pd.Timestamp("2015-01-09")),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp("2015-01-07")),
],
pd.Timestamp("2015-01-09"),
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 100, cls.window_test_start_date),
(10, 110, pd.Timestamp("2015-01-09")),
(10, 111, pd.Timestamp("2015-01-12")),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp("2015-01-07")),
],
end_date,
)
for end_date in pd.date_range("2015-01-12", "2015-01-19")
]
),
cls.create_expected_df_for_factor_compute(
[
(0, 100, cls.window_test_start_date),
(0, 101, pd.Timestamp("2015-01-20")),
(10, 110, pd.Timestamp("2015-01-09")),
(10, 111, pd.Timestamp("2015-01-12")),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp("2015-01-07")),
],
pd.Timestamp("2015-01-20"),
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(10, 110, pd.Timestamp("2015-01-09")),
(10, 111, pd.Timestamp("2015-01-12")),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp("2015-01-17")),
],
end_date,
)
for end_date in pd.date_range("2015-01-21", "2015-01-22")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(10, 310, pd.Timestamp("2015-01-09")),
(10, 311, pd.Timestamp("2015-01-15")),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp("2015-01-17")),
],
end_date,
)
for end_date in pd.date_range("2015-01-23", "2015-02-05")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp("2015-01-17")),
],
end_date,
)
for end_date in pd.date_range("2015-02-06", "2015-02-09")
]
),
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(0, 201, pd.Timestamp("2015-02-10")),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp("2015-01-17")),
],
pd.Timestamp("2015-02-10"),
),
]
)
twoq_next = pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-09", "2015-01-11")
]
+ [
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-12", "2015-01-16")
]
+ [
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp("2015-01-17")),
],
pd.Timestamp("2015-01-20"),
)
]
+ [
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-21", "2015-02-10")
]
)
return {1: oneq_next, 2: twoq_next}
class WithSplitAdjustedWindows(WithEstimateWindows):
"""
    ZiplineTestCase mixin providing fixtures and a test for running a
Pipeline with an estimates loader over differently-sized windows and with
split adjustments.
"""
split_adjusted_asof_date = pd.Timestamp("2015-01-14")
@classmethod
def make_events(cls):
# Add an extra sid that has a release before the split-asof-date in
# order to test that we're reversing splits correctly in the previous
# case (without an overwrite) and in the next case (with an overwrite).
sid_30 = pd.DataFrame(
{
TS_FIELD_NAME: [
cls.window_test_start_date,
pd.Timestamp("2015-01-09"),
# For Q2, we want it to start early enough
# that we can have several adjustments before
# the end of the first quarter so that we
# can test un-adjusting & readjusting with an
# overwrite.
cls.window_test_start_date,
# We want the Q2 event date to be enough past
# the split-asof-date that we can have
# several splits and can make sure that they
# are applied correctly.
pd.Timestamp("2015-01-20"),
],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-20"),
],
"estimate": [130.0, 131.0, 230.0, 231.0],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 30,
}
)
# An extra sid to test no splits before the split-adjusted-asof-date.
# We want an event before and after the split-adjusted-asof-date &
# timestamps for data points also before and after
        # split-adjusted-asof-date (but also before the split dates, so that
# we can test that splits actually get applied at the correct times).
sid_40 = pd.DataFrame(
{
TS_FIELD_NAME: [pd.Timestamp("2015-01-09"), pd.Timestamp("2015-01-15")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-02-10"),
],
"estimate": [140.0, 240.0],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 40,
}
)
# An extra sid to test all splits before the
# split-adjusted-asof-date. All timestamps should be before that date
# so that we have cases where we un-apply and re-apply splits.
sid_50 = pd.DataFrame(
{
TS_FIELD_NAME: [pd.Timestamp("2015-01-09"), pd.Timestamp("2015-01-12")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-02-10"),
],
"estimate": [150.0, 250.0],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 50,
}
)
return pd.concat(
[
# Slightly hacky, but want to make sure we're using the same
# events as WithEstimateWindows.
cls.__base__.make_events(),
sid_30,
sid_40,
sid_50,
]
)
@classmethod
def make_splits_data(cls):
# For sid 0, we want to apply a series of splits before and after the
        # split-adjusted-asof-date as well as between quarters (for the
# previous case, where we won't see any values until after the event
# happens).
sid_0_splits = pd.DataFrame(
{
SID_FIELD_NAME: 0,
"ratio": (-1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 100),
"effective_date": (
pd.Timestamp("2014-01-01"), # Filter out
# Split before Q1 event & after first estimate
pd.Timestamp("2015-01-07"),
# Split before Q1 event
pd.Timestamp("2015-01-09"),
# Split before Q1 event
pd.Timestamp("2015-01-13"),
# Split before Q1 event
pd.Timestamp("2015-01-15"),
# Split before Q1 event
pd.Timestamp("2015-01-18"),
# Split after Q1 event and before Q2 event
pd.Timestamp("2015-01-30"),
# Filter out - this is after our date index
pd.Timestamp("2016-01-01"),
),
}
)
sid_10_splits = pd.DataFrame(
{
SID_FIELD_NAME: 10,
"ratio": (0.2, 0.3),
"effective_date": (
# We want a split before the first estimate and before the
# split-adjusted-asof-date but within our calendar index so
# that we can test that the split is NEVER applied.
pd.Timestamp("2015-01-07"),
# Apply a single split before Q1 event.
pd.Timestamp("2015-01-20"),
),
}
)
# We want a sid with split dates that collide with another sid (0) to
# make sure splits are correctly applied for both sids.
sid_20_splits = pd.DataFrame(
{
SID_FIELD_NAME: 20,
"ratio": (
0.4,
0.5,
0.6,
0.7,
0.8,
0.9,
),
"effective_date": (
pd.Timestamp("2015-01-07"),
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-15"),
pd.Timestamp("2015-01-18"),
pd.Timestamp("2015-01-30"),
),
}
)
# This sid has event dates that are shifted back so that we can test
# cases where an event occurs before the split-asof-date.
sid_30_splits = pd.DataFrame(
{
SID_FIELD_NAME: 30,
"ratio": (8, 9, 10, 11, 12),
"effective_date": (
# Split before the event and before the
# split-asof-date.
pd.Timestamp("2015-01-07"),
# Split on date of event but before the
# split-asof-date.
pd.Timestamp("2015-01-09"),
# Split after the event, but before the
# split-asof-date.
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-15"),
pd.Timestamp("2015-01-18"),
),
}
)
# No splits for a sid before the split-adjusted-asof-date.
sid_40_splits = pd.DataFrame(
{
SID_FIELD_NAME: 40,
"ratio": (13, 14),
"effective_date": (
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-22"),
),
}
)
# No splits for a sid after the split-adjusted-asof-date.
sid_50_splits = pd.DataFrame(
{
SID_FIELD_NAME: 50,
"ratio": (15, 16),
"effective_date": (
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-14"),
),
}
)
return pd.concat(
[
sid_0_splits,
sid_10_splits,
sid_20_splits,
sid_30_splits,
sid_40_splits,
sid_50_splits,
]
)
class PreviousWithSplitAdjustedWindows(WithSplitAdjustedWindows, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousSplitAdjustedEarningsEstimatesLoader(
events,
columns,
split_adjustments_loader=cls.adjustment_reader,
split_adjusted_column_names=["estimate"],
split_adjusted_asof=cls.split_adjusted_asof_date,
)
@classmethod
def make_expected_timelines(cls):
oneq_previous = pd.concat(
[
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
# Undo all adjustments that haven't happened yet.
(30, 131 * 1 / 10, pd.Timestamp("2015-01-09")),
(40, 140.0, pd.Timestamp("2015-01-09")),
(50, 150 * 1 / 15 * 1 / 16, pd.Timestamp("2015-01-09")),
],
end_date,
)
for end_date in pd.date_range("2015-01-09", "2015-01-12")
]
),
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, 131, pd.Timestamp("2015-01-09")),
(40, 140.0, pd.Timestamp("2015-01-09")),
(50, 150.0 * 1 / 16, pd.Timestamp("2015-01-09")),
],
pd.Timestamp("2015-01-13"),
),
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, 131, pd.Timestamp("2015-01-09")),
(40, 140.0, pd.Timestamp("2015-01-09")),
(50, 150.0, pd.Timestamp("2015-01-09")),
],
pd.Timestamp("2015-01-14"),
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, 131 * 11, pd.Timestamp("2015-01-09")),
(40, 140.0, pd.Timestamp("2015-01-09")),
(50, 150.0, pd.Timestamp("2015-01-09")),
],
end_date,
)
for end_date in pd.date_range("2015-01-15", "2015-01-16")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-01-20")),
(10, np.NaN, cls.window_test_start_date),
(20, 121 * 0.7 * 0.8, pd.Timestamp("2015-01-20")),
(30, 231, pd.Timestamp("2015-01-20")),
(40, 140.0 * 13, pd.Timestamp("2015-01-09")),
(50, 150.0, pd.Timestamp("2015-01-09")),
],
end_date,
)
for end_date in pd.date_range("2015-01-20", "2015-01-21")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-01-20")),
(10, 111 * 0.3, pd.Timestamp("2015-01-22")),
(20, 121 * 0.7 * 0.8, pd.Timestamp("2015-01-20")),
(30, 231, pd.Timestamp("2015-01-20")),
(40, 140.0 * 13 * 14, pd.Timestamp("2015-01-09")),
(50, 150.0, pd.Timestamp("2015-01-09")),
],
end_date,
)
for end_date in pd.date_range("2015-01-22", "2015-01-29")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 101 * 7, pd.Timestamp("2015-01-20")),
(10, 111 * 0.3, pd.Timestamp("2015-01-22")),
(20, 121 * 0.7 * 0.8 * 0.9, pd.Timestamp("2015-01-20")),
(30, 231, pd.Timestamp("2015-01-20")),
(40, 140.0 * 13 * 14, pd.Timestamp("2015-01-09")),
(50, 150.0, pd.Timestamp("2015-01-09")),
],
end_date,
)
for end_date in pd.date_range("2015-01-30", "2015-02-04")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 101 * 7, pd.Timestamp("2015-01-20")),
(10, 311 * 0.3, pd.Timestamp("2015-02-05")),
(20, 121 * 0.7 * 0.8 * 0.9, pd.Timestamp("2015-01-20")),
(30, 231, pd.Timestamp("2015-01-20")),
(40, 140.0 * 13 * 14, pd.Timestamp("2015-01-09")),
(50, 150.0, pd.Timestamp("2015-01-09")),
],
end_date,
)
for end_date in pd.date_range("2015-02-05", "2015-02-09")
]
),
cls.create_expected_df_for_factor_compute(
[
(0, 201, pd.Timestamp("2015-02-10")),
(10, 311 * 0.3, pd.Timestamp("2015-02-05")),
                    (20, 221 * 0.8 * 0.9, pd.Timestamp("2015-02-10")),
# <NAME>'s Kaggle Utilities
# Copyright 2019 by <NAME>, Open Source, Released under the Apache License
# For more information: https://github.com/jeffheaton/jh-kaggle-util
#
# Tune for XGBoost, based on: https://towardsdatascience.com/fine-tuning-xgboost-in-python-like-a-boss-b4543ed8b1e
#
from util import *
from xgboost.sklearn import XGBClassifier
from sklearn.model_selection import GridSearchCV
import multiprocessing
import itertools
import demjson
import time
import pandas as pd
import numpy as np
import xgboost as xgb
import time
import os
import zipfile
import operator
from sklearn.metrics import log_loss
import scipy
from train_xgboost import TrainXGBoost
# http://stackoverflow.com/questions/2853212/all-possible-permutations-of-a-set-of-lists-in-python
FOLDS = 5
EARLY_STOP = 50
MAX_ROUNDS = 5
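# EARLY_STOP and MAX_ROUNDS are applied to every grid-search job below (see
# grid_search); MAX_ROUNDS is kept very low here, presumably to keep each job fast.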
PARAMS1 = {
'objective': 'reg:linear',
'eval_metric': 'rmse',
'silent' : 1,
'learning_rate':0.0045,'seed':4242
}
train = TrainXGBoost("1",params=PARAMS1,run_single_fold=True)
def modelfit(params,x,y):
#Fit the algorithm on the data
print("fit")
alg = XGBClassifier(**params)
alg.fit(x,y,verbose=True)
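    # Note: alg.booster() below is the older xgboost sklearn API; newer releases
    # expose the underlying Booster via alg.get_booster() instead.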
feat_imp = pd.Series(alg.booster().get_fscore()).sort_values(ascending=False)
print(feat_imp)
def update(base_dict, update_copy):
for key in update_copy.keys():
base_dict[key] = update_copy[key]
def step1_find_depth_and_child(params):
test1 = {
'max_depth': list(range(3,12,2)),
'min_child_weight': list(range(1,10,2))
}
return grid_search(params, test1, 1)
def step2_narrow_depth(params):
max_depth = params['max_depth']
test2 = {
'max_depth': [max_depth-1,max_depth,max_depth+1]
}
return grid_search(params, test2, 2)
def step3_gamma(params):
test3 = {
'gamma': list([i/10.0 for i in range(0,5)])
}
return grid_search(params, test3, 3)
def step4_sample(params):
test4 = {
'subsample':list([i/10.0 for i in range(6,10)]),
'colsample_bytree':list([i/10.0 for i in range(6,10)])
}
return grid_search(params, test4, 4)
def step5_reg1(params):
test5 = {
'reg_alpha':[1e-5, 1e-2, 0.1, 1, 100]
}
return grid_search(params, test5, 5)
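# The five step functions above follow the usual staged XGBoost tuning recipe:
# tree structure first (max_depth / min_child_weight), then gamma, then the
# sampling ratios, then L1 regularisation. A hypothetical driver, assuming
# grid_search returns the best-scoring parameter dict, could chain them as:
#
#   params = dict(PARAMS1)
#   for step in (step1_find_depth_and_child, step2_narrow_depth,
#                step3_gamma, step4_sample, step5_reg1):
#       params = step(params)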
def grid_search(params,grid,num):
keys = set(grid.keys())
l = [grid[x] for x in keys]
perm = list(itertools.product(*l))
jobs = []
for i in perm:
jobs.append({k:v for k,v in zip(keys,i)})
print("Total number of jobs: {}".format(len(jobs)))
column_step = []
column_score = []
column_jobs = []
for i,job in enumerate(jobs):
print("** Starting job: {}:{}/{}".format(num,i+1,len(jobs)))
params2 = dict(params)
update(params2,job)
train.params = params2
train.rounds = MAX_ROUNDS
train.early_stop = EARLY_STOP
result = train.run_cv()
print("Result: {}".format(result))
column_jobs.append(str(job))
column_score.append(result[0])
column_step.append(result[1])
    df = pd.DataFrame({'job':column_jobs,'step':column_step,'score':column_score},columns=['job','score','step'])
from glob import glob
import pandas as pd
def concat_csv_files_irt(folder_with_csv="/content/csvs_IRT"):
# print(folder_with_csv)
df_aux=[]
for csv in glob(folder_with_csv+"/*.csv"):
df = pd.read_csv(csv, index_col=None, header=0)
df.rename(columns={'Unnamed: 0': 'filename'},inplace=True)
df["pert"]=csv.split(".")[-3]
df_aux.append(df)
df = pd.concat(df_aux, axis=0, ignore_index=True)
return df
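# Example usage (hypothetical file naming: one CSV per perturbation, e.g.
# "scores.gaussian.0.csv", so that split(".")[-3] yields the perturbation tag):
#
#   df_irt = concat_csv_files_irt("/content/csvs_IRT")
#   print(df_irt["pert"].value_counts())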
def concat_csv_files_porcentajes(folder_with_csv="/content/csvs_IRT"):
# print(folder_with_csv)
df_aux=[]
for csv in glob(folder_with_csv+"/*.csv"):
df = pd.read_csv(csv, index_col=None,
header=0,converters={'filename': lambda x: str(x).split(".")[0]})
df.rename(columns={'Unnamed: 0': 'filename'},inplace=True)
df["pert"]=csv.split("_")[-1].split(".")[0]
df_aux.append(df)
    df = pd.concat(df_aux, axis=0, ignore_index=True)
    return df
# -*- coding: utf-8 -*-
"""
Created on Sun May 30 14:37:43 2021
@author: Three_Gold
"""
"""
1. Do an initial exploratory analysis of the provided library data and describe the structure of each table.
2. Preprocess (clean) the given data: drop duplicates and missing values.
3. Keep only the data needed for the assigned department (both teachers and students); this becomes the processed data.
4. Save the processed data into the folder named "预处理后数据" (preprocessed data).
"""
import pandas as pd
import time
import os
import jieba
import wordcloud
path = os.getcwd()  # project root directory
path = path.replace('\\', '/')  # normalise path separators
Path = path + '/代码/12暖暖小组01号彭鑫项目/预处理后数据/'  # root directory for the preprocessed data
# Data cleaning: drop duplicates and missing values
A1_UserID = pd.read_excel(path + '/代码/12暖暖小组01号彭鑫项目/原始数据/读者信息.xlsx')
A1_UserID = A1_UserID.dropna()#去除空值
A1_UserID = A1_UserID.drop_duplicates()#去重
A1_UserID = A1_UserID.reset_index(drop=True)#重设索引
A1_UserID.to_excel(path + '/代码/12暖暖小组01号彭鑫项目/清洗后数据/读者信息.xlsx')#保存数据
book_list = pd.read_excel(path + '/代码/12暖暖小组01号彭鑫项目/原始数据/图书目录.xlsx')#读取图书目录预处理后数据
book_list = book_list.dropna()#去除空值
book_list = book_list.drop_duplicates()#去重
book_list = book_list.reset_index(drop=True)#重设索引
book_list.to_excel(path + '/代码/12暖暖小组01号彭鑫项目/清洗后数据/图书目录.xlsx')#保存数据
def bookyearsdata():  # deduplicate and drop missing values from each year's borrow/return file, then save it
Year_All = ['2014','2015','2016','2017']
for year in Year_All:
address = path + '/代码/12暖暖小组01号彭鑫项目/原始数据/图书借还' + year +'.xlsx'#获得预处理后数据路径
address_last = path + '/代码/12暖暖小组01号彭鑫项目/清洗后数据/图书借还' + year +'.xlsx'#获得清洗后数据保存路径
book = pd.read_excel(address)#读取预处理后数据
book = book.dropna()#去除空值
book = book.drop_duplicates()#去重
book = book.reset_index(drop=True)#重设索引
book.to_excel(address_last)#保存清洗后数据至新路径
pass
pass
bookyearsdata()#调用上述方法
print(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))#获取打印本地当时时间
print('数据清洗,去重,去空完毕')
# Rebuild the book classification tables from the original data
# Build the top-level category table
n=0  # line counter used to slice out the basic top-level category rows
with open(path + '/代码/12暖暖小组01号彭鑫项目/原始数据/《中国图书馆图书分类法》简表.txt',"r", encoding='UTF-8') as f:#用文件管理器打开预处理后数据文档
with open(path + '/代码/12暖暖小组01号彭鑫项目/预处理后数据/图书大类表.txt', "w", encoding='UTF-8') as f1:#用文件管理器打开(创建)新的分类表
for line in f.readlines():#读取预处理后数据文档的每一行
n=n+1#行数累加
if n <22:#截取行数小于22的行,对应所有图书大类的分类详情
f1.writelines(line)#将行数小于22的行信息写入新的分类表
All_Book_List = pd.read_csv(path + '/代码/12暖暖小组01号彭鑫项目/预处理后数据/图书大类表.txt', names= ['图书分类号及类别'], sep='\n', encoding='utf8')#读取新的分类表
All_Book_List = All_Book_List.drop_duplicates()#去重
All_Book_List = All_Book_List.applymap(lambda x:x.strip() if type(x)==str else x)#去除左右空格
All_Book_List = All_Book_List.replace('K 历史、地理 N 自然科学总论','K 历史、地理')#清洗数据
All_Book_List = All_Book_List.replace('V 航空、航天','V 航空、航天')#针对性修改数据
All_Book_List = All_Book_List.replace('X 环境科学、劳动保护科学(安全科学)','X 环境科学、劳动保护科学(安全科学)')#针对性修改数据
All_Book_List.loc[21] = ("N 自然科学总论")#针对性添加数据
All_Book_List['图书分类号'] = All_Book_List['图书分类号及类别'].map(lambda x:x.split()[0])#截取中间空格前字符
All_Book_List['类别'] = All_Book_List['图书分类号及类别'].map(lambda x:x.split()[-1])#截取中间空格后字符
All_Book_List = All_Book_List[['图书分类号','类别']]#保留所需列
All_Book_List.to_excel(path + '/代码/12暖暖小组01号彭鑫项目/预处理后数据/图书大类表.xlsx')#写入保存
os.remove(path + '/代码/12暖暖小组01号彭鑫项目/预处理后数据/图书大类表.txt')#删除过渡数据
print(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))#获取打印本地当时时间
print("制作大类表完毕")
# Build the novel-category table (used for the novel statistics below)
Novel_List = pd.read_csv(path + '/代码/12暖暖小组01号彭鑫项目/原始数据/《中国图书馆图书分类法》简表.txt', names=['图书分类号及类别'], sep='\n', encoding='UTF-8')  # read the full classification table (one record per line)
Novel_List =Novel_List.loc[Novel_List['图书分类号及类别'].str.contains('I24')]#模糊搜索小说类别截取保存
Novel_List = Novel_List.drop_duplicates()#去重
Novel_List = Novel_List.applymap(lambda x:x.strip() if type(x)==str else x)#去除左右空格
Novel_List['图书分类号'] = Novel_List['图书分类号及类别'].map(lambda x:x.split(' ',1)[0])#截取中间空格前字符
Novel_List['类别'] = Novel_List['图书分类号及类别'].map(lambda x:x.split(' ',1)[-1])#截取中间空格后字符
Novel_List = Novel_List[['图书分类号','类别']]#保留所需列
for index,row in Novel_List['图书分类号'].iteritems():#添加未在分类法里有而数据里有的细化分类号
if len(row) == 6:#判定为细化分类号的行
I_row = pd.DataFrame()#创建临时空值表
I_row_ = Novel_List.loc[Novel_List['图书分类号'] == row]#截取判定为细化分类号的行
for i in range(10):#为截取判定为细化分类号的行细化数据添加尾号0-9
i = str(i)#转型为str
I_row_i = I_row_.replace(row,row+i)#再截取数据上进行修改加上尾号
Novel_List = Novel_List.append(I_row_i)#写入添加架上尾号的数据
pass
pass
Novel_List.to_excel(path + '/代码/12暖暖小组01号彭鑫项目/预处理后数据/小说类别表.xlsx')#写入保存
print(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))#获取打印本地当时时间
print("制作小说类别表完毕")
# Build book-catalogue lookup dictionaries
Book_xlsx = pd.DataFrame(pd.read_excel(path + '/代码/12暖暖小组01号彭鑫项目/清洗后数据/图书目录.xlsx',index_col=0))  # load the cleaned catalogue
Book_Dic_Numble = Book_xlsx.set_index("图书ID").to_dict()['图书分类号']  # book ID -> classification number
Book_Dic_Name = Book_xlsx.set_index("图书ID").to_dict()['书名']  # book ID -> title
# Select the reader records for the assigned department
A3_UserID = pd.read_excel(path + '/代码/12暖暖小组01号彭鑫项目/清洗后数据/读者信息.xlsx')#导入数据
A3_TeacherUserID = A3_UserID[A3_UserID['单位'] == '计算机与科学技术学院']#得到教师信息
A3_TeacherUserID = A3_TeacherUserID.append(A3_UserID[A3_UserID['单位'] == '计算机与信息技术学院'])#得到教师信息
A3_UserID = pd.DataFrame(A3_UserID[A3_UserID['读者类型'] == '本科生'])#得到所有本科生信息
A3_UserID_a = A3_UserID[A3_UserID['单位'] == '计2011-2']#筛选特殊专业
A3_UserID_b = A3_UserID[A3_UserID['单位'] == '计2012-08']#筛选特殊专业
A3_UserID_c = A3_UserID[A3_UserID['单位'] == '计2012-2']#筛选特殊专业
A3_UserID_All = pd.DataFrame()#学生数据总表
A3_UserID_All = A3_UserID_All.append(A3_UserID_a)#添加
A3_UserID_All = A3_UserID_All.append(A3_UserID_b)#添加
A3_UserID_All = A3_UserID_All.append(A3_UserID_c)#添加
glass = ['计2013', '计2014', '计2015', '计2016', '计2017',
'软工2011','软工2013',
'软件2014','软件2015','软件2016','软件2017',
'物联2013','物联2014','物联2015','物联2016','物联2017',
'信息2013','信息2014','信息2015','信息2016','信息2017']#制作院系专业表
for j in glass:#循环遍历得到所有专业表学生数据
for i in range(1,7):
glass_test = str(j)+ '-' + str(i)
A3_UserID_test = A3_UserID[A3_UserID['单位']==glass_test]
A3_UserID_All = A3_UserID_All.append(A3_UserID_test)
A3_StudentUserID_All = A3_UserID_All
A3_AllUserID = A3_StudentUserID_All.append(A3_TeacherUserID)#制成学院总表
A3_TeacherUserID = A3_TeacherUserID[['读者ID','读者类型','单位']]
A3_TeacherUserID.to_excel(path + '/代码/12暖暖小组01号彭鑫项目/预处理后数据/计算机院教师读者信息表.xlsx')#写入保存
A3_StudentUserID_All = A3_StudentUserID_All[['读者ID','读者类型','单位']]
A3_StudentUserID_All.to_excel(path + '/代码/12暖暖小组01号彭鑫项目/预处理后数据/计算机院学生读者信息表.xlsx')#写入保存
A3_AllUserID = A3_AllUserID[['读者ID','读者类型','单位']]
A3_AllUserID.to_excel(path + '/代码/12暖暖小组01号彭鑫项目/预处理后数据/计算机院读者信息表.xlsx')#写入保存
print(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
print("选择对应院系信息完毕")
# Extract the department's borrow/return records
def ChooseInf():  # first-pass filtering of each year's records down to the CS department, saving the results
Year_All = ['2014','2015','2016','2017']
for Year in Year_All:
AfterPath = path + '/代码/12暖暖小组01号彭鑫项目/清洗后数据/'
A3_AllUser_bb = pd.DataFrame(pd.read_excel(AfterPath + '图书借还' + Year + '.xlsx',index_col=0))
A3_TeacherUser = pd.DataFrame(pd.read_excel(path + '/代码/12暖暖小组01号彭鑫项目/预处理后数据/计算机院教师读者信息表.xlsx',index_col=0))
A3_StudentUser = pd.DataFrame(pd.read_excel(path + '/代码/12暖暖小组01号彭鑫项目/预处理后数据/计算机院学生读者信息表.xlsx',index_col=0))
A3_AllUser_bb['读者ID'] = A3_AllUser_bb['读者ID'].apply(str)#转换为统一数据类型
A3_TeacherUser['读者ID'] = A3_TeacherUser['读者ID'].apply(str)#转换为统一数据类型
A3_StudentUser['读者ID'] = A3_StudentUser['读者ID'].apply(str)#转换为统一数据类型
A3_Result_Teacher = pd.merge(A3_AllUser_bb,A3_TeacherUser,how='outer',left_on = A3_AllUser_bb['读者ID'],right_on = A3_TeacherUser['读者ID'])#拼接两个数据表,得到有效数据和无效空值数据
A3_Result_Teacher = A3_Result_Teacher.dropna()#去除缺失值行
A3_Result_Teacher = A3_Result_Teacher.drop_duplicates()#去除重复值行
A3_Result_Teacher = A3_Result_Teacher[['操作时间','操作类型','图书ID']]#保留有效信息
A3_Result_Teacher.to_excel(path + '/代码/12暖暖小组01号彭鑫项目/预处理后数据/计算机院教师图书借还' + Year + '.xlsx')#保存该年份清洗后数据
print('保存' + Year + '教师借阅初始信息成功')
A3_Result_Student = pd.merge(A3_AllUser_bb,A3_StudentUser,how='outer',left_on = A3_AllUser_bb['读者ID'],right_on = A3_StudentUser['读者ID'])#拼接两个数据表,得到有效数据和无效空值数据
A3_Result_Student = A3_Result_Student.dropna()#去除缺失值行
A3_Result_Student = A3_Result_Student.drop_duplicates()#去除重复值行
A3_Result_Student = A3_Result_Student[['操作时间','操作类型','图书ID']]#保留有效信息
A3_Result_Student.to_excel(path + '/代码/12暖暖小组01号彭鑫项目/预处理后数据/计算机院学生图书借还' + Year + '.xlsx')#拼接两个数据表,得到有效数据和无效空值数据
print('保存' + Year + '学生借阅初始信息成功')
pass
ChooseInf()
print(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
print("得到图书借还的院系信息完毕")
def AddInf():  # append each year's records into one table and drop the "return" records, leaving complete borrow data
Year_All = ['2014','2015','2016','2017']
Da_Teacher_All = pd.DataFrame()
Da_Student_All = pd.DataFrame()
for Year in Year_All:
Da_Teacher = pd.DataFrame(pd.read_excel(Path + '计算机院教师图书借还' + Year +'.xlsx',index_col=0))
Da_Teacher_All = Da_Teacher_All.append(Da_Teacher)
print('已添加%s教师信息至总表'%(Year))
Da_Student = pd.DataFrame(pd.read_excel(Path + '计算机院学生图书借还' + Year +'.xlsx',index_col=0))
Da_Student_All = Da_Student_All.append(Da_Student)
print('已添加%s学生信息至总表'%(Year))
pass
Da_Teacher_All = Da_Teacher_All.reset_index(drop=True)
Da_Student_All = Da_Student_All.reset_index(drop=True)
    # Drop every "return" (还) record so only borrow records remain
Da_Teacher_All_Back = Da_Teacher_All['操作类型'].isin(['还'])
Da_Student_All_Back = Da_Student_All['操作类型'].isin(['还'])
Da_Teacher_All_Borrow = Da_Teacher_All[~Da_Teacher_All_Back]
Da_Student_All_Borrow = Da_Student_All[~Da_Student_All_Back]
    # Drop cases where the same book appears borrowed by two reader IDs at the same time (one reader with two IDs); keep the first record
Da_Teacher_All_Borrow = Da_Teacher_All_Borrow.drop_duplicates(subset=['操作时间','图书ID'],keep ='first')
Da_Student_All_Borrow = Da_Student_All_Borrow.drop_duplicates(subset=['操作时间','图书ID'],keep ='first')
Da_Student_All_Borrow = Da_Student_All_Borrow.reset_index()
Da_Student_All_Borrow.to_excel(Path + '计算机院学生图书借总表.xlsx')
print('生成计算机院学生图书借总表成功')
Da_Teacher_All_Borrow = Da_Teacher_All_Borrow.reset_index()
Da_Teacher_All_Borrow.to_excel(Path + '计算机院教师图书借总表.xlsx')
print('生成计算机院教师图书借总表成功')
pass
AddInf()
'''The code above is the shared preprocessing for all tasks'''
'''==========================================================================================================================================='''
'''The code below implements the individual analysis tasks'''
# Load the required libraries
import pandas as pd
import os
import time
import jieba
import wordcloud
# Set up paths
path = os.getcwd()  # project root directory
path = path.replace('\\', '/')  # normalise path separators
Path = path + '/代码/12暖暖小组01号彭鑫项目/预处理后数据/'  # root directory for the preprocessed data
# Load shared data once here so the task functions below do not have to reload it
Da_Teacher_All_Borrow = pd.DataFrame(pd.read_excel(Path + '计算机院教师图书借总表.xlsx',index_col=0))  # teachers' borrow table
Da_Teacher_All_Borrow['操作时间']=Da_Teacher_All_Borrow['操作时间'].apply(str)  # cast operation timestamps to str
Da_Student_All_Borrow = pd.DataFrame(pd.read_excel(Path + '计算机院学生图书借总表.xlsx',index_col=0))  # students' borrow table
Da_Student_All_Borrow['操作时间']=Da_Student_All_Borrow['操作时间'].apply(str)  # cast operation timestamps to str
All_Book_List = pd.DataFrame(pd.read_excel(Path + '图书大类表.xlsx',index_col=0))  # top-level category table
Book_xlsx = pd.DataFrame(pd.read_excel(path + '/代码/12暖暖小组01号彭鑫项目/清洗后数据/图书目录.xlsx',index_col=0))  # cleaned book catalogue
Novel_List = pd.DataFrame(pd.read_excel(Path + '小说类别表.xlsx',index_col=0))  # novel-category table
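# Tasks 5-12 below share the same basic pattern: take the relevant borrow records,
# join them to the book catalogue on 图书ID, then aggregate by category or title.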
# 5. For the assigned department's teachers, list the top-10 borrowed book categories for each of 2014, 2015, 2016 and 2017.
def Task5():
Year_All = ['2014','2015','2016','2017']
for Year in Year_All:
Book_xlsx['图书ID'] = Book_xlsx['图书ID'].apply(str)#转型以便拼接
Da_Teacher_All_Borrow_Year = Da_Teacher_All_Borrow[Da_Teacher_All_Borrow['操作时间'].str.contains(Year)]#截取该年份信息
Da_Teacher_All_Borrow_Year = pd.merge(Da_Teacher_All_Borrow_Year,Book_xlsx,how='left',left_on=Da_Teacher_All_Borrow_Year['图书ID'],right_on=Book_xlsx['图书ID'])#将借信息与图书目录信息按照图书id进行拼接
Da_Teacher_All_Borrow_Year_BookNumble = Da_Teacher_All_Borrow_Year[['图书分类号']]#保留所需信息
Da_Teacher_All_Borrow_Year_BookNumble = Da_Teacher_All_Borrow_Year_BookNumble['图书分类号'].str.extract('([A-Z])', expand=False)#利用正则表达式获取图书分类号首字母
Da_Teacher_All_Borrow_Year_mes = pd.merge(Da_Teacher_All_Borrow_Year_BookNumble,All_Book_List)#将获取的首字母与图书大类表进行拼接
Da_Teacher_All_Borrow_Year_10 = Da_Teacher_All_Borrow_Year_mes['类别'].value_counts()[:10]#保留排名前10的信息
Da_Teacher_All_Borrow_Year_10 = Da_Teacher_All_Borrow_Year_10.reset_index()#重设索引
Da_Teacher_All_Borrow_Year_10.columns = ['类别','数量']#设置列名
Da_Teacher_All_Borrow_Year_10.index = Da_Teacher_All_Borrow_Year_10.index + 1#将索引加1得到排名
Da_Teacher_All_Borrow_Year_10.index.name = '排名'#将索引名称设置为排名
print('---------%s---------'%(Year) + '\n' + '%s教师所借书籍前十'%(Year))
print(Da_Teacher_All_Borrow_Year_10)#打印信息
pass
pass
pass
# 6. For the assigned department's teachers, find the favourite (most-borrowed) novel in each of 2014, 2015, 2016 and 2017.
def Task6():
Year_All = ['2014','2015','2016','2017']
for Year in Year_All:
Book_xlsx['图书ID'] = Book_xlsx['图书ID'].apply(str)#转型以便拼接
Da_Teacher_All_Borrow_Year = Da_Teacher_All_Borrow[Da_Teacher_All_Borrow['操作时间'].str.contains(Year)]#截取该年份信息
Da_Teacher_All_Borrow_Year = pd.merge(Da_Teacher_All_Borrow_Year,Book_xlsx,how='left',left_on=Da_Teacher_All_Borrow_Year['图书ID'],right_on=Book_xlsx['图书ID'])#将借信息与图书目录信息按照图书id进行拼接
Da_Teacher_All_Borrow_Year_BookNumble = Da_Teacher_All_Borrow_Year[['图书分类号','书名']]#保留所需信息
Da_Teacher_All_Borrow_Year_BookNumble['图书分类号'] = Da_Teacher_All_Borrow_Year_BookNumble['图书分类号'].apply(str)#转型以便拼接
Da_Teacher_All_Borrow_Year_novel = Da_Teacher_All_Borrow_Year_BookNumble.loc[Da_Teacher_All_Borrow_Year_BookNumble['图书分类号'].str.contains('I24')]#模糊搜索小说信息截取保存
Da_Teacher_All_Borrow_Year_novel_1 =Da_Teacher_All_Borrow_Year_novel['书名'].value_counts()[:1]#对书名进行数量排序,并得到数量最多的书名
Da_Teacher_All_Borrow_Year_novel_1.index.name = '书名'#将索引名设置为'书名'
print('---------%s---------'%(Year) + '\n' + '%s教师最喜欢看的小说'%(Year))
print(Da_Teacher_All_Borrow_Year_novel_1)#打印信息
pass
pass
# 7. Count how many books in total, and how many subject-specific (TP3) books, the assigned department's teachers borrowed in 2014-2017.
def Task7():
Year_All = ['2014','2015','2016','2017']
All_Book = 0#初始化数据
All_MBook = 0#初始化数据
for Year in Year_All:
Book_xlsx['图书ID'] = Book_xlsx['图书ID'].apply(str)#转型以便拼接
Da_Teacher_All_Borrow_Year = Da_Teacher_All_Borrow[Da_Teacher_All_Borrow['操作时间'].str.contains(Year)]#截取该年份信息
Da_Teacher_All_Borrow_Year = pd.merge(Da_Teacher_All_Borrow_Year,Book_xlsx,how='left',left_on=Da_Teacher_All_Borrow_Year['图书ID'],right_on=Book_xlsx['图书ID'])#将借信息与图书目录信息按照图书id进行拼接
Da_Teacher_All_Borrow_Year_BookNumble = Da_Teacher_All_Borrow_Year[['图书分类号','书名']]#保留所需信息
Da_Teacher_All_Borrow_Year_major = Da_Teacher_All_Borrow_Year_BookNumble#数据另存为
Da_Teacher_All_Borrow_Year_major['图书分类号'] = Da_Teacher_All_Borrow_Year_major['图书分类号'].apply(str)#转型以便拼接
Da_Teacher_All_Borrow_Year_major = Da_Teacher_All_Borrow_Year_major.loc[Da_Teacher_All_Borrow_Year_major['图书分类号'].str.contains('TP3')]#模糊搜索专业书籍信息截取保存
All_Book = All_Book + Da_Teacher_All_Borrow_Year_BookNumble.shape[0]#加上每一年借书本数
All_MBook = All_MBook + Da_Teacher_All_Borrow_Year_major.shape[0]#加上每一年借专业书本书
print('---------%s---------'%(Year))
print('%s教师总共借书'%(Year) + str(Da_Teacher_All_Borrow_Year_BookNumble.shape[0]) + '本')#打印信息
print('%s教师借了专业书籍'%(Year) + str(Da_Teacher_All_Borrow_Year_major.shape[0]) + '本')#打印信息
pass
print('-------------------------------')
print('计算机与信息技术学院教师(2014年-2017年)一共借'+str(All_Book)+'书')#打印信息
print('计算机与信息技术学院教师(2014年-2017年)一共借了专业书'+str(All_MBook)+'书')#打印信息
pass
# 8. Count how many books the assigned department's teachers have not returned over 2014-2017, and which category is most common among the unreturned books.
def Task8():
Da_Teacher_All = pd.DataFrame(pd.read_excel(Path + '计算机院教师图书借总表.xlsx',index_col=0))#读取院系借还信息
Da_Teacher_All = Da_Teacher_All.sort_values(by = '操作时间')#根据时间降序排列
Da_Teacher_All_Unback = Da_Teacher_All.drop_duplicates(subset=['图书ID'],keep ='last')#根据图书ID去重,保留最后一条信息
Da_Teacher_All_Unback_Num = pd.merge(Da_Teacher_All_Unback,Book_xlsx)#将信息与图书目录按照图书id拼接
Da_Teacher_All_Unback_Num = Da_Teacher_All_Unback_Num[['图书分类号']]#保留所需信息
Da_Teacher_All_Unback_Num['图书分类号'] = Da_Teacher_All_Unback_Num['图书分类号'].apply(str)#转型以便拼接
Da_Teacher_All_Unback_Num = Da_Teacher_All_Unback_Num['图书分类号'].str.extract('([A-Z])', expand=False)#利用正则表达式获取图书分类号首字母
Da_Teacher_All_Unback_Num = pd.merge(Da_Teacher_All_Unback_Num,All_Book_List)#将获取的首字母与图书大类表进行拼接
Da_Teacher_All_Unback_Num = Da_Teacher_All_Unback_Num['类别'].value_counts()[:1]#对类别进行数量排序,并得到数量最多的类别
print('------------------' + '\n' + '2014~2017教师总共未归还书籍' + str(Da_Teacher_All_Unback.shape[0]) + '本')#打印信息
print('教师未归还书籍中最多的类别是')
print(Da_Teacher_All_Unback_Num)
print('\n')
pass
'''Tasks 9-12 below mirror the structure of Tasks 5-8, so the inline comments are omitted'''
# 9. For the assigned department's students, list the top-10 borrowed book categories for each of 2014, 2015, 2016 and 2017.
def Task9():
Year_All = ['2014','2015','2016','2017']
for Year in Year_All:
Book_xlsx['图书ID'] = Book_xlsx['图书ID'].apply(str)
Da_Student_All_Borrow_Year = Da_Student_All_Borrow[Da_Student_All_Borrow['操作时间'].str.contains(Year)]
Da_Student_All_Borrow_Year = pd.merge(Da_Student_All_Borrow_Year,Book_xlsx,how='left',left_on=Da_Student_All_Borrow_Year['图书ID'],right_on=Book_xlsx['图书ID'])
Da_Student_All_Borrow_Year_BookNumble = Da_Student_All_Borrow_Year[['图书分类号']]
Da_Student_All_Borrow_Year_BookNumble['图书分类号'] = Da_Student_All_Borrow_Year_BookNumble['图书分类号'].apply(str)
Da_Student_All_Borrow_Year_BookNumble = Da_Student_All_Borrow_Year_BookNumble['图书分类号'].str.extract('([A-Z])', expand=False)
Da_Student_All_Borrow_Year_mes = pd.merge(Da_Student_All_Borrow_Year_BookNumble,All_Book_List)
Da_Student_All_Borrow_Year_10 = Da_Student_All_Borrow_Year_mes['类别'].value_counts()[:10]
Da_Student_All_Borrow_Year_10 = Da_Student_All_Borrow_Year_10.reset_index()
Da_Student_All_Borrow_Year_10.columns = ['类别','数量']
Da_Student_All_Borrow_Year_10.index = Da_Student_All_Borrow_Year_10.index + 1
Da_Student_All_Borrow_Year_10.index.name = '排名'
print('---------%s---------'%(Year) + '\n' + '%s学生所借书籍前十'%(Year))
print(Da_Student_All_Borrow_Year_10)
pass
pass
# 10. For the assigned department's students, find the favourite (most-borrowed) novel in each of 2014, 2015, 2016 and 2017.
def Task10():
Year_All = ['2014','2015','2016','2017']
for Year in Year_All:
Book_xlsx['图书ID'] = Book_xlsx['图书ID'].apply(str)
Da_Student_All_Borrow_Year = Da_Student_All_Borrow[Da_Student_All_Borrow['操作时间'].str.contains(Year)]
Da_Student_All_Borrow_Year = pd.merge(Da_Student_All_Borrow_Year,Book_xlsx,how='left',left_on=Da_Student_All_Borrow_Year['图书ID'],right_on=Book_xlsx['图书ID'])
Da_Student_All_Borrow_Year_BookNumble = Da_Student_All_Borrow_Year[['图书分类号','书名']]
Da_Student_All_Borrow_Year_BookNumble['图书分类号'] = Da_Student_All_Borrow_Year_BookNumble['图书分类号'].apply(str)
Da_Student_All_Borrow_Year_novel = Da_Student_All_Borrow_Year_BookNumble.loc[Da_Student_All_Borrow_Year_BookNumble['图书分类号'].str.contains('I24')]
Da_Student_All_Borrow_Year_novel_1 =Da_Student_All_Borrow_Year_novel['书名'].value_counts()[:1]
Da_Student_All_Borrow_Year_novel_1.index.name = '书名'
print('---------%s---------'%(Year) + '\n' + '%s学生最喜欢看的小说'%(Year))
print(Da_Student_All_Borrow_Year_novel_1)
pass
pass
# 11. Count how many books in total, and how many subject-specific (TP3) books, the assigned department's students borrowed in 2014-2017.
def Task11():
Year_All = ['2014','2015','2016','2017']
All_Book = 0
All_MBook = 0
for Year in Year_All:
Book_xlsx['图书ID'] = Book_xlsx['图书ID'].apply(str)
Da_Student_All_Borrow_Year = Da_Student_All_Borrow[Da_Student_All_Borrow['操作时间'].str.contains(Year)]
Da_Student_All_Borrow_Year = pd.merge(Da_Student_All_Borrow_Year,Book_xlsx,how='left',left_on=Da_Student_All_Borrow_Year['图书ID'],right_on=Book_xlsx['图书ID'])
Da_Student_All_Borrow_Year_BookNumble = Da_Student_All_Borrow_Year[['图书分类号','书名']]
Da_Student_All_Borrow_Year_major = Da_Student_All_Borrow_Year_BookNumble
Da_Student_All_Borrow_Year_major['图书分类号'] = Da_Student_All_Borrow_Year_major['图书分类号'].apply(str)
Da_Student_All_Borrow_Year_major = Da_Student_All_Borrow_Year_major.loc[Da_Student_All_Borrow_Year_major['图书分类号'].str.contains('TP3')]
All_Book = All_Book + Da_Student_All_Borrow_Year_BookNumble.shape[0]
All_MBook = All_MBook + Da_Student_All_Borrow_Year_major.shape[0]
print('---------%s---------'%(Year))
print('In %s students borrowed a total of '%(Year) + str(Da_Student_All_Borrow_Year_BookNumble.shape[0]) + ' books')
print('In %s students borrowed '%(Year) + str(Da_Student_All_Borrow_Year_major.shape[0]) + ' major-related (TP3) books')
pass
print('-------------------------------')
print('Students of the School of Computer and Information Technology borrowed a total of '+str(All_Book)+' books (2014-2017)')
print('Students of the School of Computer and Information Technology borrowed a total of '+str(All_MBook)+' major-related books (2014-2017)')
pass
# 12. For the assigned college, count how many books its students did not return over 2014-2017, and which category has the most unreturned books.
def Task12():
Da_Student_All = pd.DataFrame(pd.read_excel(Path + '计算机院学生图书借总表.xlsx',index_col=0))
Da_Student_All = Da_Student_All.sort_values(by = '操作时间')
Da_Student_All_Unback = Da_Student_All.drop_duplicates(subset=['图书ID'],keep ='last')
Da_Student_All_Unback_Num =
|
pd.merge(Da_Student_All_Unback,Book_xlsx)
|
pandas.merge
|
import torch
import json
import pandas as pd
import numpy as np
from tqdm import tqdm
import src.config as config
import src.model_utils as mutils
from src.dataset import CustomDataset
def predict(df, model, device, label_list, description_col=config.TEXT_COLUMN):
test_dataset = CustomDataset(
description=df[description_col].values
)
test_sampler = torch.utils.data.SequentialSampler(test_dataset)
test_data_loader = torch.utils.data.DataLoader(
test_dataset,
batch_size=config.VALID_BATCH_SIZE,
sampler=test_sampler,
num_workers=2
)
all_logits = None
model.eval()
tk0 = tqdm(test_data_loader, total=len(test_data_loader))
for step, batch in enumerate(tk0):
input_ids = batch['input_id']
input_mask= batch['input_mask']
segment_ids = batch['segment_id']
input_ids = input_ids.to(device)
input_mask = input_mask.to(device)
segment_ids = segment_ids.to(device)
with torch.no_grad():
logits, _ = model(input_ids, segment_ids, input_mask)
logits = logits.sigmoid()
if all_logits is None:
all_logits = logits.detach().cpu().numpy()
else:
all_logits = np.concatenate((all_logits, logits.detach().cpu().numpy()), axis=0)
return pd.merge(df,
|
pd.DataFrame(all_logits, columns=label_list)
|
pandas.DataFrame
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2021/8/7 14:43
# @Author : <NAME>
# @File : AE_run.py
import pandas as pd
import numpy as np
import argparse
from tqdm import tqdm
import autoencoder_model
import torch
import torch.utils.data as Data
def setup_seed(seed):
torch.manual_seed(seed)
np.random.seed(seed)
def work(data, in_feas, lr=0.001, bs=32, epochs=100, device=torch.device('cpu'), a=0.4, b=0.3, c=0.3, mode=0, topn=100):
#name of sample
sample_name = data['Sample'].tolist()
#change data to a Tensor
X,Y = data.iloc[:,1:].values, np.zeros(data.shape[0])
TX, TY = torch.tensor(X, dtype=torch.float, device=device), torch.tensor(Y, dtype=torch.float, device=device)
#train an AE model
if mode == 0 or mode == 1:
print('Training model...')
Tensor_data = Data.TensorDataset(TX, TY)
train_loader = Data.DataLoader(Tensor_data, batch_size=bs, shuffle=True)
#initialize a model
mmae = autoencoder_model.MMAE(in_feas, latent_dim=100, a=a, b=b, c=c)
mmae.to(device)
mmae.train()
mmae.train_MMAE(train_loader, learning_rate=lr, device=device, epochs=epochs)
mmae.eval() #before save and test, fix the variables
torch.save(mmae, 'model/AE/MMAE_model.pkl')
#load saved model, used for reducing dimensions
if mode == 0 or mode == 2:
print('Get the latent layer output...')
mmae = torch.load('model/AE/MMAE_model.pkl')
omics_1 = TX[:, :in_feas[0]]
omics_2 = TX[:, in_feas[0]:in_feas[0]+in_feas[1]]
omics_3 = TX[:, in_feas[0]+in_feas[1]:in_feas[0]+in_feas[1]+in_feas[2]]
latent_data, decoded_omics_1, decoded_omics_2, decoded_omics_3 = mmae.forward(omics_1, omics_2, omics_3)
latent_df = pd.DataFrame(latent_data.detach().cpu().numpy())
latent_df.insert(0, 'Sample', sample_name)
#save the integrated data(dim=100)
latent_df.to_csv('result/latent_data.csv', header=True, index=False)
print('Extract features...')
extract_features(data, in_feas, epochs, topn)
return
def extract_features(data, in_feas, epochs, topn=100):
# extract features
#get each omics data
data_omics_1 = data.iloc[:, 1: 1+in_feas[0]]
data_omics_2 = data.iloc[:, 1+in_feas[0]: 1+in_feas[0]+in_feas[1]]
data_omics_3 = data.iloc[:, 1+in_feas[0]+in_feas[1]: 1+in_feas[0]+in_feas[1]+in_feas[2]]
#get all features of each omics data
feas_omics_1 = data_omics_1.columns.tolist()
feas_omics_2 = data_omics_2.columns.tolist()
feas_omics_3 = data_omics_3.columns.tolist()
#calculate the standard deviation of each feature
std_omics_1 = data_omics_1.std(axis=0)
std_omics_2 = data_omics_2.std(axis=0)
std_omics_3 = data_omics_3.std(axis=0)
#record top N features every 10 epochs
topn_omics_1 = pd.DataFrame()
topn_omics_2 = pd.DataFrame()
topn_omics_3 = pd.DataFrame()
#used for feature extraction, epoch_ls = [10,20,...], if epochs % 10 != 0, add the last epoch
epoch_ls = list(range(10, epochs+10,10))
if epochs %10 != 0:
epoch_ls.append(epochs)
for epoch in tqdm(epoch_ls):
#load model
mmae = torch.load('model/AE/model_{}.pkl'.format(epoch))
#get model variables
model_dict = mmae.state_dict()
#get the absolute value of weights, the shape of matrix is (n_features, latent_layer_dim)
weight_omics1 = np.abs(model_dict['encoder_omics_1.0.weight'].detach().cpu().numpy().T)
weight_omics2 = np.abs(model_dict['encoder_omics_2.0.weight'].detach().cpu().numpy().T)
weight_omics3 = np.abs(model_dict['encoder_omics_3.0.weight'].detach().cpu().numpy().T)
weight_omics1_df = pd.DataFrame(weight_omics1, index=feas_omics_1)
weight_omics2_df = pd.DataFrame(weight_omics2, index=feas_omics_2)
weight_omics3_df = pd.DataFrame(weight_omics3, index=feas_omics_3)
#calculate the weight sum of each feature --> sum of each row
weight_omics1_df['Weight_sum'] = weight_omics1_df.apply(lambda x:x.sum(), axis=1)
weight_omics2_df['Weight_sum'] = weight_omics2_df.apply(lambda x:x.sum(), axis=1)
weight_omics3_df['Weight_sum'] = weight_omics3_df.apply(lambda x:x.sum(), axis=1)
weight_omics1_df['Std'] = std_omics_1
weight_omics2_df['Std'] = std_omics_2
weight_omics3_df['Std'] = std_omics_3
#importance = Weight * Std
weight_omics1_df['Importance'] = weight_omics1_df['Weight_sum']*weight_omics1_df['Std']
weight_omics2_df['Importance'] = weight_omics2_df['Weight_sum']*weight_omics2_df['Std']
weight_omics3_df['Importance'] = weight_omics3_df['Weight_sum']*weight_omics3_df['Std']
#select top N features
fea_omics_1_top = weight_omics1_df.nlargest(topn, 'Importance').index.tolist()
fea_omics_2_top = weight_omics2_df.nlargest(topn, 'Importance').index.tolist()
fea_omics_3_top = weight_omics3_df.nlargest(topn, 'Importance').index.tolist()
#save top N features in a dataframe
col_name = 'epoch_'+str(epoch)
topn_omics_1[col_name] = fea_omics_1_top
topn_omics_2[col_name] = fea_omics_2_top
topn_omics_3[col_name] = fea_omics_3_top
#all of top N features
topn_omics_1.to_csv('result/topn_omics_1.csv', header=True, index=False)
topn_omics_2.to_csv('result/topn_omics_2.csv', header=True, index=False)
topn_omics_3.to_csv('result/topn_omics_3.csv', header=True, index=False)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--mode', '-m', type=int, choices=[0,1,2], default=0,
                        help='Mode 0: train & integrate, Mode 1: just train, Mode 2: just integrate, default: 0.')
parser.add_argument('--seed', '-s', type=int, default=0, help='Random seed, default=0.')
parser.add_argument('--path1', '-p1', type=str, required=True, help='The first omics file name.')
parser.add_argument('--path2', '-p2', type=str, required=True, help='The second omics file name.')
parser.add_argument('--path3', '-p3', type=str, required=True, help='The third omics file name.')
    parser.add_argument('--batchsize', '-bs', type=int, default=32, help='Training batch size, default: 32.')
parser.add_argument('--learningrate', '-lr', type=float, default=0.001, help='Learning rate, default: 0.001.')
parser.add_argument('--epoch', '-e', type=int, default=100, help='Training epochs, default: 100.')
parser.add_argument('--latent', '-l', type=int, default=100, help='The latent layer dim, default: 100.')
parser.add_argument('--device', '-d', type=str, choices=['cpu', 'gpu'], default='cpu', help='Training on cpu or gpu, default: cpu.')
parser.add_argument('--a', '-a', type=float, default=0.6, help='[0,1], float, weight for the first omics data')
parser.add_argument('--b', '-b', type=float, default=0.1, help='[0,1], float, weight for the second omics data.')
parser.add_argument('--c', '-c', type=float, default=0.3, help='[0,1], float, weight for the third omics data.')
parser.add_argument('--topn', '-n', type=int, default=100, help='Extract top N features every 10 epochs, default: 100.')
args = parser.parse_args()
#read data
omics_data1 = pd.read_csv(args.path1, header=0, index_col=None)
omics_data2 = pd.read_csv(args.path2, header=0, index_col=None)
omics_data3 =
|
pd.read_csv(args.path3, header=0, index_col=None)
|
pandas.read_csv
|
USAGE = """
python Metrics.py
Needs access to these box folders and M Drive
Box/Modeling and Surveys/Urban Modeling/Bay Area UrbanSim 1.5/PBA50/Draft Blueprint runs/
Box/Horizon and Plan Bay Area 2050/Equity and Performance/7_Analysis/Metrics/
Processes model outputs and creates a single csv with scenario metrics in this folder:
Box/Horizon and Plan Bay Area 2050/Equity and Performance/7_Analysis/Metrics/
This csv file will have 6 columns:
1) modelrun ID
2) metric ID
3) metric name
4) year (note: for metrics that depict change from 2015 to 2050, this value will be 2050)
5) blueprint type
6) metric value
"""
import datetime, os, sys
import numpy, pandas as pd
from collections import OrderedDict, defaultdict
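# A minimal sketch (not part of the original script) of how a metrics_dict keyed by
# (modelrun ID, metric ID, metric name, year, blueprint type) could be flattened into the
# 6-column csv described in the USAGE string above. The helper name write_metrics_csv and
# the output filename are illustrative assumptions, not the script's actual output logic.
def write_metrics_csv(metrics_dict, out_path="metrics_sketch.csv"):
    # each key is a 5-tuple; append the metric value to form one 6-column row
    rows = [list(key) + [value] for key, value in metrics_dict.items()]
    out_df = pd.DataFrame(rows, columns=["modelrunID", "metric_id", "metric_name",
                                         "year", "blueprint", "value"])
    out_df.to_csv(out_path, index=False)
    return out_df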
def calculate_urbansim_highlevelmetrics(runid, dbp, parcel_sum_df, county_sum_df, metrics_dict):
metric_id = "Overall"
#################### Housing
# all households
metrics_dict[runid,metric_id,'TotHH_region',y2,dbp] = parcel_sum_df['tothh_2050'].sum()
metrics_dict[runid,metric_id,'TotHH_region',y1,dbp] = parcel_sum_df['tothh_2015'].sum()
metrics_dict[runid,metric_id,'TotHH_growth_region',y_diff,dbp] = metrics_dict[runid,metric_id,'TotHH_region',y2,dbp] / metrics_dict[runid,metric_id,'TotHH_region',y1,dbp] - 1
metrics_dict[runid,metric_id,'TotHH_growth_region_number',y_diff,dbp] = parcel_sum_df['tothh_2050'].sum() - parcel_sum_df['tothh_2015'].sum()
# HH growth by county
for index,row in county_sum_df.iterrows():
metrics_dict[runid,metric_id,'TotHH_county_growth_%s' % row['county'],y_diff,dbp] = row['tothh_growth']
metrics_dict[runid,metric_id,'TotHH_county_shareofgrowth_%s' % row['county'],y_diff,dbp] = row['tothh_growth'] / metrics_dict[runid,metric_id,'TotHH_growth_region_number',y_diff,dbp]
# HH Growth in all GGs
metrics_dict[runid,metric_id,'TotHH_GG',y2,dbp] = parcel_sum_df.loc[parcel_sum_df['pba50chcat'].str.contains('GG', na=False), 'tothh_2050'].sum()
metrics_dict[runid,metric_id,'TotHH_GG',y1,dbp] = parcel_sum_df.loc[parcel_sum_df['pba50chcat'].str.contains('GG', na=False), 'tothh_2015'].sum()
metrics_dict[runid,metric_id,'TotHH_GG_growth',y_diff,dbp] = metrics_dict[runid,metric_id,'TotHH_GG',y2,dbp] / metrics_dict[runid,metric_id,'TotHH_GG',y1,dbp] - 1
metrics_dict[runid,metric_id,'TotHH_GG_shareofgrowth',y_diff,dbp] = (metrics_dict[runid,metric_id,'TotHH_GG',y2,dbp] - metrics_dict[runid,metric_id,'TotHH_GG',y1,dbp]) / metrics_dict[runid,metric_id,'TotHH_growth_region_number',y_diff,dbp]
# HH Growth in PDAs
metrics_dict[runid,metric_id,'TotHH_PDA',y2,dbp] = parcel_sum_df.loc[parcel_sum_df['pda_id'].str.contains('', na=False), 'tothh_2050'].sum()
metrics_dict[runid,metric_id,'TotHH_PDA',y1,dbp] = parcel_sum_df.loc[parcel_sum_df['pda_id'].str.contains('', na=False), 'tothh_2015'].sum()
metrics_dict[runid,metric_id,'TotHH_PDA_growth',y_diff,dbp] = metrics_dict[runid,metric_id,'TotHH_PDA',y2,dbp] / metrics_dict[runid,metric_id,'TotHH_PDA',y1,dbp] - 1
metrics_dict[runid,metric_id,'TotHH_PDA_shareofgrowth',y_diff,dbp] = (metrics_dict[runid,metric_id,'TotHH_PDA',y2,dbp] - metrics_dict[runid,metric_id,'TotHH_PDA',y1,dbp]) / metrics_dict[runid,metric_id,'TotHH_growth_region_number',y_diff,dbp]
# HH Growth in GGs that are not PDAs
metrics_dict[runid,metric_id,'TotHH_GG_notPDA',y2,dbp] = parcel_sum_df.loc[(parcel_sum_df['pba50chcat'].str.contains('GG', na=False)) & \
(parcel_sum_df['pda_id'].str.contains('', na=False)==0), 'tothh_2050'].sum()
metrics_dict[runid,metric_id,'TotHH_GG_notPDA',y1,dbp] = parcel_sum_df.loc[(parcel_sum_df['pba50chcat'].str.contains('GG', na=False)) & \
(parcel_sum_df['pda_id'].str.contains('', na=False)==0), 'tothh_2015'].sum()
metrics_dict[runid,metric_id,'TotHH_GG_notPDA_growth',y_diff,dbp] = metrics_dict[runid,metric_id,'TotHH_GG_notPDA',y2,dbp] / metrics_dict[runid,metric_id,'TotHH_GG_notPDA',y1,dbp] - 1
metrics_dict[runid,metric_id,'TotHH_GG_notPDA_shareofgrowth',y_diff,dbp] = (metrics_dict[runid,metric_id,'TotHH_GG_notPDA',y2,dbp] - metrics_dict[runid,metric_id,'TotHH_GG_notPDA',y1,dbp]) / metrics_dict[runid,metric_id,'TotHH_growth_region_number',y_diff,dbp]
# HH Growth in HRAs
metrics_dict[runid,metric_id,'TotHH_HRA',y2,dbp] = parcel_sum_df.loc[parcel_sum_df['pba50chcat'].str.contains('HRA', na=False), 'tothh_2050'].sum()
metrics_dict[runid,metric_id,'TotHH_HRA',y1,dbp] = parcel_sum_df.loc[parcel_sum_df['pba50chcat'].str.contains('HRA', na=False), 'tothh_2015'].sum()
metrics_dict[runid,metric_id,'TotHH_HRA_growth',y_diff,dbp] = metrics_dict[runid,metric_id,'TotHH_HRA',y2,dbp] / metrics_dict[runid,metric_id,'TotHH_HRA',y1,dbp] - 1
metrics_dict[runid,metric_id,'TotHH_HRA_shareofgrowth',y_diff,dbp] = (metrics_dict[runid,metric_id,'TotHH_HRA',y2,dbp] - metrics_dict[runid,metric_id,'TotHH_HRA',y1,dbp]) / metrics_dict[runid,metric_id,'TotHH_growth_region_number',y_diff,dbp]
# HH Growth in TRAs
metrics_dict[runid,metric_id,'TotHH_TRA',y2,dbp] = parcel_sum_df.loc[parcel_sum_df['pba50chcat'].str.contains('tra', na=False), 'tothh_2050'].sum()
metrics_dict[runid,metric_id,'TotHH_TRA',y1,dbp] = parcel_sum_df.loc[parcel_sum_df['pba50chcat'].str.contains('tra', na=False), 'tothh_2015'].sum()
metrics_dict[runid,metric_id,'TotHH_TRA_growth',y_diff,dbp] = metrics_dict[runid,metric_id,'TotHH_TRA',y2,dbp] / metrics_dict[runid,metric_id,'TotHH_TRA',y1,dbp] - 1
metrics_dict[runid,metric_id,'TotHH_TRA_shareofgrowth',y_diff,dbp] = (metrics_dict[runid,metric_id,'TotHH_TRA',y2,dbp] - metrics_dict[runid,metric_id,'TotHH_TRA',y1,dbp]) / metrics_dict[runid,metric_id,'TotHH_growth_region_number',y_diff,dbp]
# HH Growth in areas that are both HRAs and TRAs
metrics_dict[runid,metric_id,'TotHH_HRAandTRA',y2,dbp] = parcel_sum_df.loc[(parcel_sum_df['pba50chcat'].str.contains('HRA', na=False)) &\
(parcel_sum_df['pba50chcat'].str.contains('tra', na=False)) , 'tothh_2050'].sum()
metrics_dict[runid,metric_id,'TotHH_HRAandTRA',y1,dbp] = parcel_sum_df.loc[(parcel_sum_df['pba50chcat'].str.contains('HRA', na=False)) &\
(parcel_sum_df['pba50chcat'].str.contains('tra', na=False)) , 'tothh_2015'].sum()
metrics_dict[runid,metric_id,'TotHH_HRAandTRA_growth',y_diff,dbp] = metrics_dict[runid,metric_id,'TotHH_HRAandTRA',y2,dbp] / metrics_dict[runid,metric_id,'TotHH_HRAandTRA',y1,dbp] - 1
metrics_dict[runid,metric_id,'TotHH_HRAandTRA_shareofgrowth',y_diff,dbp] = (metrics_dict[runid,metric_id,'TotHH_HRAandTRA',y2,dbp] - metrics_dict[runid,metric_id,'TotHH_HRAandTRA',y1,dbp]) / metrics_dict[runid,metric_id,'TotHH_growth_region_number',y_diff,dbp]
#################### Jobs
# all jobs
metrics_dict[runid,metric_id,'TotJobs_region',y2,dbp] = parcel_sum_df['totemp_2050'].sum()
metrics_dict[runid,metric_id,'TotJobs_region',y1,dbp] = parcel_sum_df['totemp_2015'].sum()
metrics_dict[runid,metric_id,'TotJobs_growth_region',y_diff,dbp] = metrics_dict[runid,metric_id,'TotJobs_region',y2,dbp] / metrics_dict[runid,metric_id,'TotJobs_region',y1,dbp] - 1
metrics_dict[runid,metric_id,'TotJobs_growth_region_number',y_diff,dbp] = parcel_sum_df['totemp_2050'].sum() - parcel_sum_df['totemp_2015'].sum()
#Job growth by county
for index,row in county_sum_df.iterrows():
metrics_dict[runid,metric_id,'TotJobs_growth_%s' % row['county'],y_diff,dbp] = row['totemp_growth']
metrics_dict[runid,metric_id,'TotJobs_county_shareofgrowth_%s' % row['county'],y_diff,dbp] = row['totemp_growth'] / metrics_dict[runid,metric_id,'TotJobs_growth_region_number',y_diff,dbp]
# Job Growth in all GGs
metrics_dict[runid,metric_id,'TotJobs_GG',y2,dbp] = parcel_sum_df.loc[parcel_sum_df['pba50chcat'].str.contains('GG', na=False), 'totemp_2050'].sum()
metrics_dict[runid,metric_id,'TotJobs_GG',y1,dbp] = parcel_sum_df.loc[parcel_sum_df['pba50chcat'].str.contains('GG', na=False), 'totemp_2015'].sum()
metrics_dict[runid,metric_id,'TotJobs_GG_growth',y_diff,dbp] = metrics_dict[runid,metric_id,'TotJobs_GG',y2,dbp] / metrics_dict[runid,metric_id,'TotJobs_GG',y1,dbp] - 1
metrics_dict[runid,metric_id,'TotJobs_GG_shareofgrowth',y_diff,dbp] = (metrics_dict[runid,metric_id,'TotJobs_GG',y2,dbp] - metrics_dict[runid,metric_id,'TotJobs_GG',y1,dbp]) / metrics_dict[runid,metric_id,'TotJobs_growth_region_number',y_diff,dbp]
# Job Growth in PDAs
metrics_dict[runid,metric_id,'TotJobs_PDA',y2,dbp] = parcel_sum_df.loc[parcel_sum_df['pda_id'].str.contains('', na=False), 'totemp_2050'].sum()
metrics_dict[runid,metric_id,'TotJobs_PDA',y1,dbp] = parcel_sum_df.loc[parcel_sum_df['pda_id'].str.contains('', na=False), 'totemp_2015'].sum()
metrics_dict[runid,metric_id,'TotJobs_PDA_growth',y_diff,dbp] = metrics_dict[runid,metric_id,'TotJobs_PDA',y2,dbp] / metrics_dict[runid,metric_id,'TotJobs_PDA',y1,dbp] - 1
metrics_dict[runid,metric_id,'TotJobs_PDA_shareofgrowth',y_diff,dbp] = (metrics_dict[runid,metric_id,'TotJobs_PDA',y2,dbp] - metrics_dict[runid,metric_id,'TotJobs_PDA',y1,dbp]) / metrics_dict[runid,metric_id,'TotJobs_growth_region_number',y_diff,dbp]
# Job Growth in GGs that are not PDAs
metrics_dict[runid,metric_id,'TotJobs_GG_notPDA',y2,dbp] = parcel_sum_df.loc[(parcel_sum_df['pba50chcat'].str.contains('GG', na=False)) & \
(parcel_sum_df['pda_id'].str.contains('', na=False)==0), 'totemp_2050'].sum()
metrics_dict[runid,metric_id,'TotJobs_GG_notPDA',y1,dbp] = parcel_sum_df.loc[(parcel_sum_df['pba50chcat'].str.contains('GG', na=False)) & \
(parcel_sum_df['pda_id'].str.contains('', na=False)==0), 'totemp_2015'].sum()
metrics_dict[runid,metric_id,'TotJobs_GG_notPDA_growth',y_diff,dbp] = metrics_dict[runid,metric_id,'TotJobs_GG_notPDA',y2,dbp] / metrics_dict[runid,metric_id,'TotJobs_GG_notPDA',y1,dbp] - 1
metrics_dict[runid,metric_id,'TotJobs_GG_notPDA_shareofgrowth',y_diff,dbp] = (metrics_dict[runid,metric_id,'TotJobs_GG_notPDA',y2,dbp] - metrics_dict[runid,metric_id,'TotJobs_GG_notPDA',y1,dbp]) / metrics_dict[runid,metric_id,'TotJobs_growth_region_number',y_diff,dbp]
# Job Growth in HRAs
metrics_dict[runid,metric_id,'TotJobs_HRA',y2,dbp] = parcel_sum_df.loc[parcel_sum_df['pba50chcat'].str.contains('HRA', na=False), 'totemp_2050'].sum()
metrics_dict[runid,metric_id,'TotJobs_HRA',y1,dbp] = parcel_sum_df.loc[parcel_sum_df['pba50chcat'].str.contains('HRA', na=False), 'totemp_2015'].sum()
metrics_dict[runid,metric_id,'TotJobs_HRA_growth',y_diff,dbp] = metrics_dict[runid,metric_id,'TotJobs_HRA',y2,dbp] / metrics_dict[runid,metric_id,'TotJobs_HRA',y1,dbp] - 1
metrics_dict[runid,metric_id,'TotJobs_HRA_shareofgrowth',y_diff,dbp] = (metrics_dict[runid,metric_id,'TotJobs_HRA',y2,dbp] - metrics_dict[runid,metric_id,'TotJobs_HRA',y1,dbp]) / metrics_dict[runid,metric_id,'TotJobs_growth_region_number',y_diff,dbp]
# Job Growth in TRAs
metrics_dict[runid,metric_id,'TotJobs_TRA',y2,dbp] = parcel_sum_df.loc[parcel_sum_df['pba50chcat'].str.contains('tra', na=False), 'totemp_2050'].sum()
metrics_dict[runid,metric_id,'TotJobs_TRA',y1,dbp] = parcel_sum_df.loc[parcel_sum_df['pba50chcat'].str.contains('tra', na=False), 'totemp_2015'].sum()
metrics_dict[runid,metric_id,'TotJobs_TRA_growth',y_diff,dbp] = metrics_dict[runid,metric_id,'TotJobs_TRA',y2,dbp] / metrics_dict[runid,metric_id,'TotJobs_TRA',y1,dbp] - 1
metrics_dict[runid,metric_id,'TotJobs_TRA_shareofgrowth',y_diff,dbp] = (metrics_dict[runid,metric_id,'TotJobs_TRA',y2,dbp] - metrics_dict[runid,metric_id,'TotJobs_TRA',y1,dbp]) / metrics_dict[runid,metric_id,'TotJobs_growth_region_number',y_diff,dbp]
# Job Growth in areas that are both HRAs and TRAs
metrics_dict[runid,metric_id,'TotJobs_HRAandTRA',y2,dbp] = parcel_sum_df.loc[(parcel_sum_df['pba50chcat'].str.contains('HRA', na=False)) &\
(parcel_sum_df['pba50chcat'].str.contains('tra', na=False)) , 'totemp_2050'].sum()
metrics_dict[runid,metric_id,'TotJobs_HRAandTRA',y1,dbp] = parcel_sum_df.loc[(parcel_sum_df['pba50chcat'].str.contains('HRA', na=False)) &\
(parcel_sum_df['pba50chcat'].str.contains('tra', na=False)) , 'totemp_2015'].sum()
metrics_dict[runid,metric_id,'TotJobs_HRAandTRA_growth',y_diff,dbp] = metrics_dict[runid,metric_id,'TotJobs_HRAandTRA',y2,dbp] / metrics_dict[runid,metric_id,'TotJobs_HRAandTRA',y1,dbp] - 1
metrics_dict[runid,metric_id,'TotJobs_HRAandTRA_shareofgrowth',y_diff,dbp] = (metrics_dict[runid,metric_id,'TotJobs_HRAandTRA',y2,dbp] - metrics_dict[runid,metric_id,'TotJobs_HRAandTRA',y1,dbp]) / metrics_dict[runid,metric_id,'TotJobs_growth_region_number',y_diff,dbp]
############################
# LIHH
metrics_dict[runid,metric_id,'LIHH_share_2050',y2,dbp] = (parcel_sum_df['hhq1_2050'].sum() + parcel_sum_df['hhq2_2050'].sum()) / parcel_sum_df['totemp_2050'].sum()
    metrics_dict[runid,metric_id,'LIHH_share_2015',y1,dbp] = (parcel_sum_df['hhq1_2015'].sum() + parcel_sum_df['hhq2_2015'].sum()) / parcel_sum_df['totemp_2015'].sum()
    metrics_dict[runid,metric_id,'LIHH_growth_region',y_diff,dbp] = (parcel_sum_df['hhq1_2050'].sum() + parcel_sum_df['hhq2_2050'].sum()) / (parcel_sum_df['hhq1_2015'].sum() + parcel_sum_df['hhq2_2015'].sum())
for index,row in county_sum_df.iterrows():
metrics_dict[runid,metric_id,'LIHH_growth_%s' % row["county"],y_diff,dbp] = row['LIHH_growth']
# all jobs
metrics_dict[runid,metric_id,'tot_jobs_2050',y2,dbp] = parcel_sum_df['totemp_2050'].sum()
metrics_dict[runid,metric_id,'tot_jobs_2015',y1,dbp] = parcel_sum_df['totemp_2015'].sum()
metrics_dict[runid,metric_id,'jobs_growth_region',y_diff,dbp] = (parcel_sum_df['totemp_2050'].sum() / parcel_sum_df['totemp_2015'].sum())
for index,row in county_sum_df.iterrows():
metrics_dict[runid,metric_id,'jobs_growth_%s' % row["county"],y_diff,dbp] = row['totemp_growth']
def calculate_tm_highlevelmetrics(runid, dbp, parcel_sum_df, county_sum_df, metrics_dict):
metric_id = "Overall_TM"
# TBD
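# The two factors below appear to normalize for region-wide growth in low-income households:
# each is the ratio of the 2050 share of Q1(+Q2) households among all households to the 2015
# share. 2015 shares computed later (e.g. LIHH_shareinHRA_normalized) are multiplied by these
# factors so 2015 and 2050 shares can be compared on a consistent basis.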
def calculate_normalize_factor_Q1Q2(parcel_sum_df):
return ((parcel_sum_df['hhq1_2050'].sum() + parcel_sum_df['hhq2_2050'].sum()) / parcel_sum_df['tothh_2050'].sum()) \
/ ((parcel_sum_df['hhq1_2015'].sum() + parcel_sum_df['hhq2_2015'].sum()) / parcel_sum_df['tothh_2015'].sum())
def calculate_normalize_factor_Q1(parcel_sum_df):
return (parcel_sum_df['hhq1_2050'].sum() / parcel_sum_df['tothh_2050'].sum()) \
/ (parcel_sum_df['hhq1_2015'].sum() / parcel_sum_df['tothh_2015'].sum())
def calculate_Affordable1_transportation_costs(runid, year, dbp, tm_scen_metrics_df, tm_auto_owned_df, tm_auto_times_df, tm_travel_cost_df, metrics_dict):
metric_id = "A1"
days_per_year = 300
# Total number of households
tm_tot_hh = tm_scen_metrics_df.loc[(tm_scen_metrics_df['metric_name'].str.contains("total_households_inc") == True), 'value'].sum()
tm_tot_hh_inc1 = tm_scen_metrics_df.loc[(tm_scen_metrics_df['metric_name'] == "total_households_inc1"),'value'].item()
tm_tot_hh_inc2 = tm_scen_metrics_df.loc[(tm_scen_metrics_df['metric_name'] == "total_households_inc2"),'value'].item()
# Total household income (model outputs are in 2000$, annual)
tm_total_hh_inc = tm_scen_metrics_df.loc[(tm_scen_metrics_df['metric_name'].str.contains("total_hh_inc") == True), 'value'].sum()
tm_total_hh_inc_inc1 = tm_scen_metrics_df.loc[(tm_scen_metrics_df['metric_name'] == "total_hh_inc_inc1"),'value'].item()
tm_total_hh_inc_inc2 = tm_scen_metrics_df.loc[(tm_scen_metrics_df['metric_name'] == "total_hh_inc_inc2"),'value'].item()
# Total transit fares (model outputs are in 2000$, per day)
tm_tot_transit_fares = tm_scen_metrics_df.loc[(tm_scen_metrics_df['metric_name'].str.contains("total_transit_fares") == True), 'value'].sum() * days_per_year
tm_tot_transit_fares_inc1 = tm_scen_metrics_df.loc[(tm_scen_metrics_df['metric_name'] == "total_transit_fares_inc1"),'value'].item() * days_per_year
tm_tot_transit_fares_inc2 = tm_scen_metrics_df.loc[(tm_scen_metrics_df['metric_name'] == "total_transit_fares_inc2"),'value'].item() * days_per_year
# Total auto op cost (model outputs are in 2000$, per day)
tm_tot_auto_op_cost = tm_scen_metrics_df.loc[(tm_scen_metrics_df['metric_name'].str.contains("total_auto_cost_inc") == True), 'value'].sum() * days_per_year
tm_tot_auto_op_cost_inc1 = tm_scen_metrics_df.loc[(tm_scen_metrics_df['metric_name'] == "total_auto_cost_inc1"),'value'].item() * days_per_year
tm_tot_auto_op_cost_inc2 = tm_scen_metrics_df.loc[(tm_scen_metrics_df['metric_name'] == "total_auto_cost_inc2"),'value'].item() * days_per_year
# Total auto parking cost (model outputs are in 2000$, per day, in cents)
#tm_travel_cost_df['park_cost'] = (tm_travel_cost_df['pcost_indiv']+tm_travel_cost_df['pcost_joint']) * tm_travel_cost_df['freq']
tm_tot_auto_park_cost = (tm_travel_cost_df.pcost_indiv.sum() + tm_travel_cost_df.pcost_joint.sum()) * days_per_year / 100
tm_tot_auto_park_cost_inc1 = (tm_travel_cost_df.loc[(tm_travel_cost_df['incQ'] == 1),'pcost_indiv'].sum() + tm_travel_cost_df.loc[(tm_travel_cost_df['incQ'] == 1),'pcost_joint'].sum()) * days_per_year / 100
tm_tot_auto_park_cost_inc2 = (tm_travel_cost_df.loc[(tm_travel_cost_df['incQ'] == 2),'pcost_indiv'].sum() + tm_travel_cost_df.loc[(tm_travel_cost_df['incQ'] == 2),'pcost_joint'].sum()) * days_per_year / 100
# Calculating number of autos owned from autos_owned.csv
tm_auto_owned_df['tot_autos'] = tm_auto_owned_df['autos'] * tm_auto_owned_df['households']
tm_tot_autos_owned = tm_auto_owned_df['tot_autos'].sum()
tm_tot_autos_owned_inc1 = tm_auto_owned_df.loc[(tm_auto_owned_df['incQ'] == 1), 'tot_autos'].sum()
tm_tot_autos_owned_inc2 = tm_auto_owned_df.loc[(tm_auto_owned_df['incQ'] == 2), 'tot_autos'].sum()
# Total auto ownership cost in 2000$
tm_tot_auto_owner_cost = tm_tot_autos_owned * auto_ownership_cost * inflation_18_20 / inflation_00_20
tm_tot_auto_owner_cost_inc1 = tm_tot_autos_owned_inc1 * auto_ownership_cost_inc1 * inflation_18_20 / inflation_00_20
tm_tot_auto_owner_cost_inc2 = tm_tot_autos_owned_inc2 * auto_ownership_cost_inc2 * inflation_18_20 / inflation_00_20
# Total Transportation Cost (in 2000$)
tp_cost = tm_tot_auto_op_cost + tm_tot_transit_fares + tm_tot_auto_owner_cost + tm_tot_auto_park_cost
tp_cost_inc1 = tm_tot_auto_op_cost_inc1 + tm_tot_transit_fares_inc1 + tm_tot_auto_owner_cost_inc1 + tm_tot_auto_park_cost_inc1
tp_cost_inc2 = tm_tot_auto_op_cost_inc2 + tm_tot_transit_fares_inc2 + tm_tot_auto_owner_cost_inc2 + tm_tot_auto_park_cost_inc2
# Mean transportation cost per household in 2020$
tp_cost_mean = tp_cost / tm_tot_hh * inflation_00_20
tp_cost_mean_inc1 = tp_cost_inc1 / tm_tot_hh_inc1 * inflation_00_20
tp_cost_mean_inc2 = tp_cost_inc2 / tm_tot_hh_inc2 * inflation_00_20
metrics_dict[runid,metric_id,'mean_transportation_cost_2020$',year,dbp] = tp_cost_mean
metrics_dict[runid,metric_id,'mean_transportation_cost_2020$_inc1',year,dbp] = tp_cost_mean_inc1
metrics_dict[runid,metric_id,'mean_transportation_cost_2020$_inc2',year,dbp] = tp_cost_mean_inc2
# Transportation cost % of income
tp_cost_pct_inc = tp_cost / tm_total_hh_inc
tp_cost_pct_inc_inc1 = tp_cost_inc1 / tm_total_hh_inc_inc1
tp_cost_pct_inc_inc2 = tp_cost_inc2 / tm_total_hh_inc_inc2
tp_cost_pct_inc_inc1and2 = (tp_cost_inc1+tp_cost_inc2) / (tm_total_hh_inc_inc1+tm_total_hh_inc_inc2)
# Transportation cost % of income metrics
metrics_dict[runid,metric_id,'transportation_cost_pct_income',year,dbp] = tp_cost_pct_inc
metrics_dict[runid,metric_id,'transportation_cost_pct_income_inc1',year,dbp] = tp_cost_pct_inc_inc1
metrics_dict[runid,metric_id,'transportation_cost_pct_income_inc2',year,dbp] = tp_cost_pct_inc_inc2
metrics_dict[runid,metric_id,'transportation_cost_pct_income_inc1and2',year,dbp] = tp_cost_pct_inc_inc1and2
# Transportation cost % of income metrics; split by cost bucket
metrics_dict[runid,metric_id,'transportation_cost_pct_income_autoop',year,dbp] = tm_tot_auto_op_cost / tm_total_hh_inc
metrics_dict[runid,metric_id,'transportation_cost_pct_income_autopark',year,dbp] = tm_tot_auto_park_cost / tm_total_hh_inc
metrics_dict[runid,metric_id,'transportation_cost_pct_income_transitfare',year,dbp] = tm_tot_transit_fares / tm_total_hh_inc
metrics_dict[runid,metric_id,'transportation_cost_pct_income_autoown',year,dbp] = tm_tot_auto_owner_cost / tm_total_hh_inc
# Add housing costs from Shimon's outputs
housing_costs_2050_df = pd.read_csv('C:/Users/ATapase/Box/Horizon and Plan Bay Area 2050/Equity and Performance/7_Analysis/Metrics/metrics_files/2050 Share of Income Spent on Housing.csv')
housing_costs_2015_df = pd.read_csv('C:/Users/ATapase/Box/Horizon and Plan Bay Area 2050/Equity and Performance/7_Analysis/Metrics/metrics_files/2015 Share of Income Spent on Housing.csv')
housing_costs_2015_df['totcosts'] = housing_costs_2015_df['share_income'] * housing_costs_2015_df['households']
if year == "2050":
metrics_dict[runid,metric_id,'housing_cost_pct_income',year,dbp] = housing_costs_2050_df['w_all'].sum()
metrics_dict[runid,metric_id,'housing_cost_pct_income_inc1',year,dbp] = housing_costs_2050_df['w_q1'].sum()
metrics_dict[runid,metric_id,'housing_cost_pct_income_inc2',year,dbp] = housing_costs_2050_df['w_q2'].sum()
metrics_dict[runid,metric_id,'housing_cost_pct_income_inc1and2',year,dbp] = housing_costs_2050_df['w_q1_q2'].sum()
elif year == "2015":
metrics_dict[runid,metric_id,'housing_cost_pct_income',year,dbp] = housing_costs_2015_df.loc[(housing_costs_2015_df['tenure'].str.contains("Total")), 'totcosts'].sum() / \
housing_costs_2015_df.loc[(housing_costs_2015_df['tenure'].str.contains("Total")), 'households'].sum()
metrics_dict[runid,metric_id,'housing_cost_pct_income_inc1',year,dbp] = housing_costs_2015_df.loc[(housing_costs_2015_df['short_name'].str.contains("q1t")), 'share_income'].sum()
metrics_dict[runid,metric_id,'housing_cost_pct_income_inc2',year,dbp] = housing_costs_2015_df.loc[(housing_costs_2015_df['short_name'].str.contains("q2t")), 'share_income'].sum()
metrics_dict[runid,metric_id,'housing_cost_pct_income_inc1and2',year,dbp] = (housing_costs_2015_df.loc[(housing_costs_2015_df['short_name'].str.contains("q1t")), 'totcosts'].sum() + housing_costs_2015_df.loc[(housing_costs_2015_df['short_name'].str.contains("q2t")), 'totcosts'].sum()) / \
(housing_costs_2015_df.loc[(housing_costs_2015_df['short_name'].str.contains("q1t")), 'households'].sum() + housing_costs_2015_df.loc[(housing_costs_2015_df['short_name'].str.contains("q2t")), 'households'].sum())
# Total H+T Costs pct of income
metrics_dict[runid,metric_id,'HplusT_cost_pct_income',year,dbp] = metrics_dict[runid,metric_id,'transportation_cost_pct_income',year,dbp] + \
metrics_dict[runid,metric_id,'housing_cost_pct_income',year,dbp]
metrics_dict[runid,metric_id,'HplusT_cost_pct_income_inc1',year,dbp] = metrics_dict[runid,metric_id,'transportation_cost_pct_income_inc1',year,dbp] + \
metrics_dict[runid,metric_id,'housing_cost_pct_income_inc1',year,dbp]
metrics_dict[runid,metric_id,'HplusT_cost_pct_income_inc2',year,dbp] = metrics_dict[runid,metric_id,'transportation_cost_pct_income_inc2',year,dbp] + \
metrics_dict[runid,metric_id,'housing_cost_pct_income_inc2',year,dbp]
metrics_dict[runid,metric_id,'HplusT_cost_pct_income_inc1and2',year,dbp] = metrics_dict[runid,metric_id,'transportation_cost_pct_income_inc1and2',year,dbp] + \
metrics_dict[runid,metric_id,'housing_cost_pct_income_inc1and2',year,dbp]
# Tolls & Fares
# Reading auto times file
tm_auto_times_df = tm_auto_times_df.sum(level='Income')
# Calculating Total Tolls per day = bridge tolls + value tolls (2000$)
total_tolls = OrderedDict()
for inc_level in range(1,5):
total_tolls['inc%d' % inc_level] = tm_auto_times_df.loc['inc%d' % inc_level, ['Bridge Tolls', 'Value Tolls']].sum()/100 # cents -> dollars
total_tolls_allHH = sum(total_tolls.values())
total_tolls_LIHH = total_tolls['inc1'] + total_tolls['inc2']
# Average Daily Tolls per household
metrics_dict[runid,metric_id,'tolls_per_HH',year,dbp] = total_tolls_allHH / tm_tot_hh * inflation_00_20
metrics_dict[runid,metric_id,'tolls_per_LIHH',year,dbp] = total_tolls_LIHH / (tm_tot_hh_inc1+tm_tot_hh_inc2) * inflation_00_20
metrics_dict[runid,metric_id,'tolls_per_inc1HH',year,dbp] = total_tolls['inc1'] / tm_tot_hh_inc1 * inflation_00_20
# Average Daily Fares per Household (note: transit fares totals calculated above are annual and need to be divided by days_per_year)
metrics_dict[runid,metric_id,'fares_per_HH',year,dbp] = tm_tot_transit_fares / tm_tot_hh * inflation_00_20 / days_per_year
metrics_dict[runid,metric_id,'fares_per_LIHH',year,dbp] = (tm_tot_transit_fares_inc1 + tm_tot_transit_fares_inc2) / (tm_tot_hh_inc1+tm_tot_hh_inc2) * inflation_00_20 / days_per_year
metrics_dict[runid,metric_id,'fares_per_inc1HH',year,dbp] = tm_tot_transit_fares_inc1 / tm_tot_hh_inc1 * inflation_00_20 / days_per_year
# per trip
# Total auto trips per day (model outputs are in trips, per day)
tm_tot_auto_trips = tm_scen_metrics_df.loc[(tm_scen_metrics_df['metric_name'].str.contains("total_auto_trips") == True), 'value'].sum()
tm_tot_auto_trips_inc1 = tm_scen_metrics_df.loc[(tm_scen_metrics_df['metric_name'] == "total_auto_trips_inc1"),'value'].item()
tm_tot_auto_trips_inc2 = tm_scen_metrics_df.loc[(tm_scen_metrics_df['metric_name'] == "total_auto_trips_inc2"),'value'].item()
# Total transit trips per day (model outputs are in trips, per day)
tm_tot_transit_trips = tm_scen_metrics_df.loc[(tm_scen_metrics_df['metric_name'].str.contains("total_transit_trips") == True), 'value'].sum()
tm_tot_transit_trips_inc1 = tm_scen_metrics_df.loc[(tm_scen_metrics_df['metric_name'] == "total_transit_trips_inc1"),'value'].item()
tm_tot_transit_trips_inc2 = tm_scen_metrics_df.loc[(tm_scen_metrics_df['metric_name'] == "total_transit_trips_inc2"),'value'].item()
# Average Tolls per trip (total_tolls_xx is calculated above as per day tolls in 2000 dollars)
metrics_dict[runid,metric_id,'tolls_per_trip',year,dbp] = total_tolls_allHH / tm_tot_auto_trips * inflation_00_20
metrics_dict[runid,metric_id,'tolls_per_trip_inc1and2',year,dbp] = total_tolls_LIHH / (tm_tot_auto_trips_inc1+tm_tot_auto_trips_inc2) * inflation_00_20
metrics_dict[runid,metric_id,'tolls_per_trip_inc1',year,dbp] = total_tolls['inc1'] / tm_tot_auto_trips_inc1 * inflation_00_20
# Total auto operating cost per trip (tm_tot_auto_op_cost and tm_tot_auto_park_cost are calculated above as annual costs in 2000 dollars)
metrics_dict[runid,metric_id,'autocost_per_trip',year,dbp] = (tm_tot_auto_op_cost + tm_tot_auto_park_cost) / tm_tot_auto_trips * inflation_00_20 / days_per_year
metrics_dict[runid,metric_id,'autocost_per_trip_inc1and2',year,dbp] = (tm_tot_auto_op_cost_inc1 + tm_tot_auto_op_cost_inc2 + tm_tot_auto_park_cost_inc1 + tm_tot_auto_park_cost_inc2) / (tm_tot_auto_trips_inc1+tm_tot_auto_trips_inc2) * inflation_00_20 / days_per_year
metrics_dict[runid,metric_id,'autocost_per_trip_inc1',year,dbp] = (tm_tot_auto_op_cost_inc1 + tm_tot_auto_park_cost_inc1) / tm_tot_auto_trips_inc1 * inflation_00_20 / days_per_year
# Average Fares per trip (note: transit fares totals calculated above are annual and need to be divided by days_per_year)
metrics_dict[runid,metric_id,'fares_per_trip',year,dbp] = tm_tot_transit_fares / tm_tot_transit_trips * inflation_00_20 / days_per_year
metrics_dict[runid,metric_id,'fares_per_trip_inc1and2',year,dbp] = (tm_tot_transit_fares_inc1 + tm_tot_transit_fares_inc2) / (tm_tot_transit_trips_inc1+tm_tot_transit_trips_inc2) * inflation_00_20 / days_per_year
metrics_dict[runid,metric_id,'fares_per_trip_inc1',year,dbp] = tm_tot_transit_fares_inc1 / tm_tot_transit_trips_inc1 * inflation_00_20 / days_per_year
def calculate_Affordable2_deed_restricted_housing(runid, dbp, parcel_sum_df, metrics_dict):
metric_id = "A2"
# totals for 2050 and 2015
metrics_dict[runid,metric_id,'deed_restricted_total',y2,dbp] = parcel_sum_df['deed_restricted_units_2050'].sum()
metrics_dict[runid,metric_id,'deed_restricted_total',y1,dbp] = parcel_sum_df['deed_restricted_units_2015'].sum()
metrics_dict[runid,metric_id,'residential_units_total',y2,dbp] = parcel_sum_df['residential_units_2050'].sum()
metrics_dict[runid,metric_id,'residential_units_total',y1,dbp] = parcel_sum_df['residential_units_2015'].sum()
metrics_dict[runid,metric_id,'deed_restricted_HRA',y2,dbp] = parcel_sum_df.loc[parcel_sum_df['pba50chcat'].str.contains('HRA', na=False), 'deed_restricted_units_2050'].sum()
metrics_dict[runid,metric_id,'deed_restricted_HRA',y1,dbp] = parcel_sum_df.loc[parcel_sum_df['pba50chcat'].str.contains('HRA', na=False), 'deed_restricted_units_2015'].sum()
metrics_dict[runid,metric_id,'residential_units_HRA',y2,dbp] = parcel_sum_df.loc[parcel_sum_df['pba50chcat'].str.contains('HRA', na=False), 'residential_units_2050'].sum()
metrics_dict[runid,metric_id,'residential_units_HRA',y1,dbp] = parcel_sum_df.loc[parcel_sum_df['pba50chcat'].str.contains('HRA', na=False), 'residential_units_2015'].sum()
metrics_dict[runid,metric_id,'deed_restricted_TRA',y2,dbp] = parcel_sum_df.loc[parcel_sum_df['pba50chcat'].str.contains('tra', na=False), 'deed_restricted_units_2050'].sum()
metrics_dict[runid,metric_id,'deed_restricted_TRA',y1,dbp] = parcel_sum_df.loc[parcel_sum_df['pba50chcat'].str.contains('tra', na=False), 'deed_restricted_units_2015'].sum()
metrics_dict[runid,metric_id,'residential_units_TRA',y2,dbp] = parcel_sum_df.loc[parcel_sum_df['pba50chcat'].str.contains('tra', na=False), 'residential_units_2050'].sum()
metrics_dict[runid,metric_id,'residential_units_TRA',y1,dbp] = parcel_sum_df.loc[parcel_sum_df['pba50chcat'].str.contains('tra', na=False), 'residential_units_2015'].sum()
metrics_dict[runid,metric_id,'deed_restricted_CoC',y2,dbp] = parcel_sum_df.loc[parcel_sum_df['coc_flag_pba2050']==1, 'deed_restricted_units_2050'].sum()
metrics_dict[runid,metric_id,'deed_restricted_CoC',y1,dbp] = parcel_sum_df.loc[parcel_sum_df['coc_flag_pba2050']==1, 'deed_restricted_units_2015'].sum()
metrics_dict[runid,metric_id,'residential_units_CoC',y2,dbp] = parcel_sum_df.loc[parcel_sum_df['coc_flag_pba2050']==1, 'residential_units_2050'].sum()
metrics_dict[runid,metric_id,'residential_units_CoC',y1,dbp] = parcel_sum_df.loc[parcel_sum_df['coc_flag_pba2050']==1, 'residential_units_2015'].sum()
# diff between 2050 and 2015
metrics_dict[runid,metric_id,'deed_restricted_diff',y_diff,dbp] = metrics_dict[runid,metric_id,'deed_restricted_total',y2,dbp] - metrics_dict[runid,metric_id,'deed_restricted_total',y1,dbp]
metrics_dict[runid,metric_id,'residential_units_diff',y_diff,dbp] = metrics_dict[runid,metric_id,'residential_units_total',y2,dbp] - metrics_dict[runid,metric_id,'residential_units_total',y1,dbp]
metrics_dict[runid,metric_id,'deed_restricted_HRA_diff',y_diff,dbp] = metrics_dict[runid,metric_id,'deed_restricted_HRA',y2,dbp] - metrics_dict[runid,metric_id,'deed_restricted_HRA',y1,dbp]
metrics_dict[runid,metric_id,'residential_units_HRA_diff',y_diff,dbp] = metrics_dict[runid,metric_id,'residential_units_HRA',y2,dbp] - metrics_dict[runid,metric_id,'residential_units_HRA',y1,dbp]
metrics_dict[runid,metric_id,'deed_restricted_TRA_diff',y_diff,dbp] = metrics_dict[runid,metric_id,'deed_restricted_TRA',y2,dbp] - metrics_dict[runid,metric_id,'deed_restricted_TRA',y1,dbp]
metrics_dict[runid,metric_id,'residential_units_TRA_diff',y_diff,dbp] = metrics_dict[runid,metric_id,'residential_units_TRA',y2,dbp] - metrics_dict[runid,metric_id,'residential_units_TRA',y1,dbp]
metrics_dict[runid,metric_id,'deed_restricted_nonHRA_diff',y_diff,dbp] = metrics_dict[runid,metric_id,'deed_restricted_diff',y_diff,dbp] - metrics_dict[runid,metric_id,'deed_restricted_HRA_diff',y_diff,dbp]
metrics_dict[runid,metric_id,'residential_units_nonHRA_diff',y_diff,dbp] = metrics_dict[runid,metric_id,'residential_units_diff',y_diff,dbp] - metrics_dict[runid,metric_id,'residential_units_HRA_diff',y_diff,dbp]
metrics_dict[runid,metric_id,'deed_restricted_CoC_diff',y_diff,dbp] = metrics_dict[runid,metric_id,'deed_restricted_CoC',y2,dbp] - metrics_dict[runid,metric_id,'deed_restricted_CoC',y1,dbp]
metrics_dict[runid,metric_id,'residential_units_CoC_diff',y_diff,dbp] = metrics_dict[runid,metric_id,'residential_units_CoC',y2,dbp] - metrics_dict[runid,metric_id,'residential_units_CoC',y1,dbp]
# metric: deed restricted % of total units: overall, HRA and non-HRA
metrics_dict[runid,metric_id,'deed_restricted_pct_new_units',y_diff,dbp] = metrics_dict[runid,metric_id,'deed_restricted_diff',y_diff,dbp] / metrics_dict[runid,metric_id,'residential_units_diff',y_diff,dbp]
metrics_dict[runid,metric_id,'deed_restricted_pct_new_units_HRA',y_diff,dbp] = metrics_dict[runid,metric_id,'deed_restricted_HRA_diff',y_diff,dbp]/metrics_dict[runid,metric_id,'residential_units_HRA_diff',y_diff,dbp]
metrics_dict[runid,metric_id,'deed_restricted_pct_new_units_TRA',y_diff,dbp] = metrics_dict[runid,metric_id,'deed_restricted_TRA_diff',y_diff,dbp]/metrics_dict[runid,metric_id,'residential_units_TRA_diff',y_diff,dbp]
metrics_dict[runid,metric_id,'deed_restricted_pct_new_units_nonHRA',y_diff,dbp] = metrics_dict[runid,metric_id,'deed_restricted_nonHRA_diff',y_diff,dbp]/metrics_dict[runid,metric_id,'residential_units_nonHRA_diff',y_diff,dbp]
metrics_dict[runid,metric_id,'deed_restricted_pct_new_units_CoC',y_diff,dbp] = metrics_dict[runid,metric_id,'deed_restricted_CoC_diff',y_diff,dbp]/metrics_dict[runid,metric_id,'residential_units_CoC_diff',y_diff,dbp]
print('********************A2 Affordable********************')
print('DR pct of new units %s' % dbp,metrics_dict[runid,metric_id,'deed_restricted_pct_new_units',y_diff,dbp] )
print('DR pct of new units in HRAs %s' % dbp,metrics_dict[runid,metric_id,'deed_restricted_pct_new_units_HRA',y_diff,dbp] )
print('DR pct of new units in TRAs %s' % dbp,metrics_dict[runid,metric_id,'deed_restricted_pct_new_units_TRA',y_diff,dbp] )
print('DR pct of new units outside of HRAs %s' % dbp,metrics_dict[runid,metric_id,'deed_restricted_pct_new_units_nonHRA',y_diff,dbp])
# Forcing preservation metrics
metrics_dict[runid,metric_id,'preservation_affordable_housing',y_diff,dbp] = 1
def calculate_Connected1_accessibility(runid, year, dbp, tm_scen_metrics_df, metrics_dict):
metric_id = "C1"
# % of Jobs accessible by 30 min car OR 45 min transit
metrics_dict[runid,metric_id,'pct_jobs_acc_by_allmodes',year,dbp] = \
tm_scen_metrics_df.loc[(tm_scen_metrics_df['metric_name'] == "jobacc_accessible_job_share"), 'value'].item()
metrics_dict[runid,metric_id,'pct_jobs_acc_by_allmodes_coc',year,dbp] = \
tm_scen_metrics_df.loc[(tm_scen_metrics_df['metric_name'] == "jobacc_accessible_job_share_coc"), 'value'].item()
metrics_dict[runid,metric_id,'pct_jobs_acc_by_allmodes_noncoc',year,dbp] = \
tm_scen_metrics_df.loc[(tm_scen_metrics_df['metric_name'] == "jobacc_accessible_job_share_noncoc"), 'value'].item()
# % of Jobs accessible by 30 min car only
metrics_dict[runid,metric_id,'pct_jobs_acc_by_drv_only',year,dbp] = \
tm_scen_metrics_df.loc[(tm_scen_metrics_df['metric_name'] == "jobacc_drv_only_acc_accessible_job_share"), 'value'].item() \
+ tm_scen_metrics_df.loc[(tm_scen_metrics_df['metric_name'] == "jobacc_trn_drv_acc_accessible_job_share"), 'value'].item()
metrics_dict[runid,metric_id,'pct_jobs_acc_by_drv_only_coc',year,dbp] = \
tm_scen_metrics_df.loc[(tm_scen_metrics_df['metric_name'] == "jobacc_drv_only_acc_accessible_job_share_coc"), 'value'].item() \
+ tm_scen_metrics_df.loc[(tm_scen_metrics_df['metric_name'] == "jobacc_trn_drv_acc_accessible_job_share_coc"), 'value'].item()
metrics_dict[runid,metric_id,'pct_jobs_acc_by_drv_only_noncoc',year,dbp] = \
tm_scen_metrics_df.loc[(tm_scen_metrics_df['metric_name'] == "jobacc_drv_only_acc_accessible_job_share_noncoc"), 'value'].item() \
+ tm_scen_metrics_df.loc[(tm_scen_metrics_df['metric_name'] == "jobacc_trn_drv_acc_accessible_job_share_noncoc"), 'value'].item()
# % of Jobs accessible by 45 min transit only
metrics_dict[runid,metric_id,'pct_jobs_acc_by_trn_only',year,dbp] = \
tm_scen_metrics_df.loc[(tm_scen_metrics_df['metric_name'] == "jobacc_trn_only_acc_accessible_job_share"), 'value'].item() \
+ tm_scen_metrics_df.loc[(tm_scen_metrics_df['metric_name'] == "jobacc_trn_drv_acc_accessible_job_share"), 'value'].item()
metrics_dict[runid,metric_id,'pct_jobs_acc_by_trn_only_coc',year,dbp] = \
tm_scen_metrics_df.loc[(tm_scen_metrics_df['metric_name'] == "jobacc_trn_only_acc_accessible_job_share_coc"), 'value'].item() \
+ tm_scen_metrics_df.loc[(tm_scen_metrics_df['metric_name'] == "jobacc_trn_drv_acc_accessible_job_share_coc"), 'value'].item()
metrics_dict[runid,metric_id,'pct_jobs_acc_by_trn_only_noncoc',year,dbp] = \
tm_scen_metrics_df.loc[(tm_scen_metrics_df['metric_name'] == "jobacc_trn_only_acc_accessible_job_share_noncoc"), 'value'].item() \
+ tm_scen_metrics_df.loc[(tm_scen_metrics_df['metric_name'] == "jobacc_trn_drv_acc_accessible_job_share_noncoc"), 'value'].item()
def calculate_Connected1_proximity(runid, year, dbp, tm_scen_metrics_df, metrics_dict):
metric_id = "C1"
def calculate_Connected2_crowding(runid, year, dbp, transit_operator_df, metrics_dict):
metric_id = "C2"
if "2015" in runid: tm_run_location = tm_run_location_ipa
else: tm_run_location = tm_run_location_bp
tm_crowding_df = pd.read_csv(tm_run_location+runid+'/OUTPUT/metrics/transit_crowding_complete.csv')
tm_crowding_df = tm_crowding_df[['TIME','SYSTEM','ABNAMESEQ','period','load_standcap','AB_VOL']]
tm_crowding_df = tm_crowding_df.loc[tm_crowding_df['period'] == "AM"]
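    # load_standcap is presumably passenger load relative to standing capacity; links above 1.0
    # are treated as over capacity and links above 0.85 as crowded in the two columns below.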
tm_crowding_df['time_overcapacity'] = tm_crowding_df.apply (lambda row: row['TIME'] if (row['load_standcap']>1) else 0, axis=1)
tm_crowding_df['time_crowded'] = tm_crowding_df.apply (lambda row: row['TIME'] if (row['load_standcap']>0.85) else 0, axis=1)
tm_crowding_df['person_hrs_total'] = tm_crowding_df['TIME'] * tm_crowding_df['AB_VOL']
tm_crowding_df['person_hrs_overcap'] = tm_crowding_df['time_overcapacity'] * tm_crowding_df['AB_VOL']
tm_crowding_df['person_hrs_crowded'] = tm_crowding_df['time_crowded'] * tm_crowding_df['AB_VOL']
tm_crowding_df = pd.merge(left=tm_crowding_df, right=transit_operator_df, left_on="SYSTEM", right_on="SYSTEM", how="left")
system_crowding_df = tm_crowding_df[['person_hrs_total','person_hrs_overcap','person_hrs_crowded']].groupby(tm_crowding_df['operator']).sum().reset_index()
system_crowding_df['pct_overcapacity'] = system_crowding_df['person_hrs_overcap'] / system_crowding_df['person_hrs_total']
system_crowding_df['pct_crowded'] = system_crowding_df['person_hrs_crowded'] / system_crowding_df['person_hrs_total']
for index,row in system_crowding_df.iterrows():
if row['operator'] in ['AC Transit Local','AC Transit Transbay','SFMTA LRT','SFMTA Bus','VTA Bus Local','VTA LRT','BART','Caltrain','SamTrans Local','GGT Express','WETA']:
metrics_dict[runid,metric_id,'crowded_pct_personhrs_AM_%s' % row['operator'],year,dbp] = row['pct_crowded']
def calculate_Connected2_hwy_traveltimes(runid, year, dbp, hwy_corridor_links_df, metrics_dict):
metric_id = "C2"
if "2015" in runid: tm_run_location = tm_run_location_ipa
else: tm_run_location = tm_run_location_bp
tm_loaded_network_df = pd.read_csv(tm_run_location+runid+'/OUTPUT/avgload5period.csv')
# Keeping essential columns of loaded highway network: node A and B, distance, free flow time, congested time
tm_loaded_network_df = tm_loaded_network_df.rename(columns=lambda x: x.strip())
tm_loaded_network_df = tm_loaded_network_df[['a','b','distance','fft','ctimAM']]
tm_loaded_network_df['link'] = tm_loaded_network_df['a'].astype(str) + "_" + tm_loaded_network_df['b'].astype(str)
    # merge the df that has the list of all corridor links with the loaded network to attach distances and times
hwy_corridor_links_df = pd.merge(left=hwy_corridor_links_df, right=tm_loaded_network_df, left_on="link", right_on="link", how="left")
corridor_travel_times_df = hwy_corridor_links_df[['distance','fft','ctimAM']].groupby(hwy_corridor_links_df['route']).sum().reset_index()
for index,row in corridor_travel_times_df.iterrows():
metrics_dict[runid,metric_id,'travel_time_AM_%s' % row['route'],year,dbp] = row['ctimAM']
def calculate_Connected2_trn_traveltimes(runid, year, dbp, transit_operator_df, metrics_dict):
metric_id = "C2"
if "2015" in runid: tm_run_location = tm_run_location_ipa
else: tm_run_location = tm_run_location_bp
tm_trn_line_df = pd.read_csv(tm_run_location+runid+'/OUTPUT/trn/trnline.csv')
# It doesn't really matter which path ID we pick, as long as it is AM
tm_trn_line_df = tm_trn_line_df.loc[tm_trn_line_df['path id'] == "am_wlk_loc_wlk"]
tm_trn_line_df = pd.merge(left=tm_trn_line_df, right=transit_operator_df, left_on="mode", right_on="mode", how="left")
# grouping by transit operator, and summing all line times and distances, to get metric of "time per unit distance", in minutes/mile
trn_operator_travel_times_df = tm_trn_line_df[['line time','line dist']].groupby(tm_trn_line_df['operator']).sum().reset_index()
trn_operator_travel_times_df['time_per_dist_AM'] = trn_operator_travel_times_df['line time'] / trn_operator_travel_times_df['line dist']
# grouping by mode, and summing all line times and distances, to get metric of "time per unit distance", in minutes/mile
trn_mode_travel_times_df = tm_trn_line_df[['line time','line dist']].groupby(tm_trn_line_df['mode_name']).sum().reset_index()
trn_mode_travel_times_df['time_per_dist_AM'] = trn_mode_travel_times_df['line time'] / trn_mode_travel_times_df['line dist']
for index,row in trn_operator_travel_times_df.iterrows():
if row['operator'] in ['AC Transit Local','AC Transit Transbay','SFMTA LRT','SFMTA Bus','VTA Bus Local','VTA LRT','BART','Caltrain','SamTrans Local']:
metrics_dict[runid,metric_id,'time_per_dist_AM_%s' % row['operator'],year,dbp] = row['time_per_dist_AM']
for index,row in trn_mode_travel_times_df.iterrows():
metrics_dict[runid,metric_id,'time_per_dist_AM_%s' % row['mode_name'],year,dbp] = row['time_per_dist_AM']
def calculate_Diverse1_LIHHinHRAs(runid, dbp, parcel_sum_df, tract_sum_df, GG_sum_df, normalize_factor_Q1Q2, normalize_factor_Q1, metrics_dict):
metric_id = "D1"
# Share of region's LIHH households that are in HRAs
metrics_dict[runid,metric_id,'LIHH_total',y2,dbp] = parcel_sum_df['hhq1_2050'].sum() + parcel_sum_df['hhq2_2050'].sum()
metrics_dict[runid,metric_id,'LIHH_total',y1,dbp] = parcel_sum_df['hhq1_2015'].sum() + parcel_sum_df['hhq2_2015'].sum()
metrics_dict[runid,metric_id,'LIHH_inHRA',y2,dbp] = parcel_sum_df.loc[parcel_sum_df['pba50chcat'].str.contains('HRA', na=False), 'hhq1_2050'].sum() + parcel_sum_df.loc[parcel_sum_df['pba50chcat'].str.contains('HRA', na=False), 'hhq2_2050'].sum()
metrics_dict[runid,metric_id,'LIHH_inHRA',y1,dbp] = parcel_sum_df.loc[parcel_sum_df['pba50chcat'].str.contains('HRA', na=False), 'hhq1_2015'].sum() + parcel_sum_df.loc[parcel_sum_df['pba50chcat'].str.contains('HRA', na=False), 'hhq2_2015'].sum()
metrics_dict[runid,metric_id,'LIHH_shareinHRA',y2,dbp] = metrics_dict[runid,metric_id,'LIHH_inHRA',y2,dbp] / metrics_dict[runid,metric_id,'LIHH_total',y2,dbp]
metrics_dict[runid,metric_id,'LIHH_shareinHRA',y1,dbp] = metrics_dict[runid,metric_id,'LIHH_inHRA',y1,dbp] / metrics_dict[runid,metric_id,'LIHH_total',y1,dbp]
# normalizing for overall growth in LIHH
metrics_dict[runid,metric_id,'LIHH_shareinHRA_normalized',y1,dbp] = metrics_dict[runid,metric_id,'LIHH_shareinHRA',y1,dbp] * normalize_factor_Q1Q2
# Total number of Households
# Total HHs in HRAs, in 2015 and 2050
metrics_dict[runid,metric_id,'TotHH_inHRA',y1,dbp] = parcel_sum_df.loc[parcel_sum_df['pba50chcat'].str.contains('HRA', na=False), 'tothh_2015'].sum()
metrics_dict[runid,metric_id,'TotHH_inHRA',y2,dbp] = parcel_sum_df.loc[parcel_sum_df['pba50chcat'].str.contains('HRA', na=False), 'tothh_2050'].sum()
# Total HHs in TRAs, in 2015 and 2050
metrics_dict[runid,metric_id,'TotHH_inTRA',y1,dbp] = parcel_sum_df.loc[parcel_sum_df['pba50chcat'].str.contains('tra', na=False), 'tothh_2015'].sum()
metrics_dict[runid,metric_id,'TotHH_inTRA',y2,dbp] = parcel_sum_df.loc[parcel_sum_df['pba50chcat'].str.contains('tra', na=False), 'tothh_2050'].sum()
# Total HHs in HRAs only, in 2015 and 2050
metrics_dict[runid,metric_id,'TotHH_inHRAonly',y1,dbp] = parcel_sum_df.loc[(parcel_sum_df['pba50chcat'].str.contains('HRA', na=False)) & \
(parcel_sum_df['pba50chcat'].str.contains('tra', na=False) == False), 'tothh_2015'].sum()
metrics_dict[runid,metric_id,'TotHH_inHRAonly',y2,dbp] = parcel_sum_df.loc[(parcel_sum_df['pba50chcat'].str.contains('HRA', na=False)) & \
(parcel_sum_df['pba50chcat'].str.contains('tra', na=False) == False), 'tothh_2050'].sum()
# Total HHs in TRAs only, in 2015 and 2050
metrics_dict[runid,metric_id,'TotHH_inTRAonly',y1,dbp] = parcel_sum_df.loc[(parcel_sum_df['pba50chcat'].str.contains('tra', na=False)) & \
(parcel_sum_df['pba50chcat'].str.contains('HRA', na=False) == False), 'tothh_2015'].sum()
metrics_dict[runid,metric_id,'TotHH_inTRAonly',y2,dbp] = parcel_sum_df.loc[(parcel_sum_df['pba50chcat'].str.contains('tra', na=False)) & \
(parcel_sum_df['pba50chcat'].str.contains('HRA', na=False) == False), 'tothh_2050'].sum()
# Total HHs in HRA/TRAs, in 2015 and 2050
metrics_dict[runid,metric_id,'TotHH_inHRATRA',y1,dbp] = parcel_sum_df.loc[(parcel_sum_df['pba50chcat'].str.contains('tra', na=False)) & \
(parcel_sum_df['pba50chcat'].str.contains('HRA', na=False)), 'tothh_2015'].sum()
metrics_dict[runid,metric_id,'TotHH_inHRATRA',y2,dbp] = parcel_sum_df.loc[(parcel_sum_df['pba50chcat'].str.contains('tra', na=False)) & \
(parcel_sum_df['pba50chcat'].str.contains('HRA', na=False)), 'tothh_2050'].sum()
# Total HHs in DR Tracts, in 2015 and 2050
metrics_dict[runid,metric_id,'TotHH_inDRTracts',y1,dbp] = tract_sum_df.loc[(tract_sum_df['DispRisk'] == 1), 'tothh_2015'].sum()
metrics_dict[runid,metric_id,'TotHH_inDRTracts',y2,dbp] = tract_sum_df.loc[(tract_sum_df['DispRisk'] == 1), 'tothh_2050'].sum()
# Total HHs in CoC Tracts, in 2015 and 2050
metrics_dict[runid,metric_id,'TotHH_inCoCTracts',y1,dbp] = tract_sum_df.loc[(tract_sum_df['coc_flag_pba2050'] == 1), 'tothh_2015'].sum()
metrics_dict[runid,metric_id,'TotHH_inCoCTracts',y2,dbp] = tract_sum_df.loc[(tract_sum_df['coc_flag_pba2050'] == 1), 'tothh_2050'].sum()
# Total HHs in remainder of region (RoR); i.e. not HRA or TRA or CoC or DR
metrics_dict[runid,metric_id,'TotHH_inRoR',y1,dbp] = parcel_sum_df.loc[(parcel_sum_df['pba50chcat'].str.contains('HRA', na=False) == False) & \
(parcel_sum_df['pba50chcat'].str.contains('tra', na=False) == False) & \
(parcel_sum_df['pba50chcat'].str.contains('DR', na=False) == False) & \
(parcel_sum_df['coc_flag_pba2050'] == 0), 'tothh_2015'].sum()
metrics_dict[runid,metric_id,'TotHH_inRoR',y2,dbp] = parcel_sum_df.loc[(parcel_sum_df['pba50chcat'].str.contains('HRA', na=False) == False) & \
(parcel_sum_df['pba50chcat'].str.contains('tra', na=False) == False) & \
(parcel_sum_df['pba50chcat'].str.contains('DR', na=False) == False) & \
(parcel_sum_df['coc_flag_pba2050'] == 0), 'tothh_2050'].sum()
# Total HHs in GGs, in 2015 and 2050
metrics_dict[runid,metric_id,'TotHH_inGGs',y1,dbp] = GG_sum_df['tothh_2015'].sum()
metrics_dict[runid,metric_id,'TotHH_inGGs',y2,dbp] = GG_sum_df['tothh_2050'].sum()
# Total HHs in Transit Rich GGs, in 2015 and 2050
GG_TRich_sum_df = GG_sum_df[GG_sum_df['Designation']=="Transit-Rich"]
metrics_dict[runid,metric_id,'TotHH_inTRichGGs',y1,dbp] = GG_TRich_sum_df['tothh_2015'].sum()
metrics_dict[runid,metric_id,'TotHH_inTRichGGs',y2,dbp] = GG_TRich_sum_df['tothh_2050'].sum()
########### Tracking movement of Q1 households: Q1 share of Households
# Share of Households that are Q1, within each geography type in this order:
# Overall Region; HRAs; TRAs, DR Tracts; CoCs; Rest of Region; and also GGs and TRichGGs
metrics_dict[runid,metric_id,'Q1HH_shareofRegion',y1,dbp] = parcel_sum_df['hhq1_2015'].sum() / parcel_sum_df['tothh_2015'].sum()
metrics_dict[runid,metric_id,'Q1HH_shareofRegion_normalized',y1,dbp] = parcel_sum_df['hhq1_2015'].sum() / parcel_sum_df['tothh_2015'].sum() * normalize_factor_Q1
metrics_dict[runid,metric_id,'Q1HH_shareofRegion',y2,dbp] = parcel_sum_df['hhq1_2050'].sum() / parcel_sum_df['tothh_2050'].sum()
metrics_dict[runid,metric_id,'Q1HH_shareofHRA',y1,dbp] = parcel_sum_df.loc[parcel_sum_df['pba50chcat'].str.contains('HRA', na=False), 'hhq1_2015'].sum() / metrics_dict[runid,metric_id,'TotHH_inHRA',y1,dbp]
metrics_dict[runid,metric_id,'Q1HH_shareofHRA_normalized',y1,dbp] = metrics_dict[runid,metric_id,'Q1HH_shareofHRA',y1,dbp] * normalize_factor_Q1
metrics_dict[runid,metric_id,'Q1HH_shareofHRA',y2,dbp] = parcel_sum_df.loc[parcel_sum_df['pba50chcat'].str.contains('HRA', na=False), 'hhq1_2050'].sum() / metrics_dict[runid,metric_id,'TotHH_inHRA',y2,dbp]
metrics_dict[runid,metric_id,'Q1HH_shareofTRA',y1,dbp] = parcel_sum_df.loc[parcel_sum_df['pba50chcat'].str.contains('tra', na=False), 'hhq1_2015'].sum() / metrics_dict[runid,metric_id,'TotHH_inTRA',y1,dbp]
metrics_dict[runid,metric_id,'Q1HH_shareofTRA_normalized',y1,dbp] = metrics_dict[runid,metric_id,'Q1HH_shareofTRA',y1,dbp] * normalize_factor_Q1
metrics_dict[runid,metric_id,'Q1HH_shareofTRA',y2,dbp] = parcel_sum_df.loc[parcel_sum_df['pba50chcat'].str.contains('tra', na=False), 'hhq1_2050'].sum() / metrics_dict[runid,metric_id,'TotHH_inTRA',y2,dbp]
metrics_dict[runid,metric_id,'Q1HH_shareofHRAonly',y1,dbp] = parcel_sum_df.loc[(parcel_sum_df['pba50chcat'].str.contains('HRA', na=False)) & (parcel_sum_df['pba50chcat'].str.contains('tra', na=False) == False), 'hhq1_2015'].sum() / metrics_dict[runid,metric_id,'TotHH_inHRAonly',y1,dbp]
metrics_dict[runid,metric_id,'Q1HH_shareofHRAonly_normalized',y1,dbp] = metrics_dict[runid,metric_id,'Q1HH_shareofHRAonly',y1,dbp] * normalize_factor_Q1
metrics_dict[runid,metric_id,'Q1HH_shareofHRAonly',y2,dbp] = parcel_sum_df.loc[(parcel_sum_df['pba50chcat'].str.contains('HRA', na=False)) & (parcel_sum_df['pba50chcat'].str.contains('tra', na=False) == False), 'hhq1_2050'].sum() / metrics_dict[runid,metric_id,'TotHH_inHRAonly',y2,dbp]
metrics_dict[runid,metric_id,'Q1HH_shareofTRAonly',y1,dbp] = parcel_sum_df.loc[(parcel_sum_df['pba50chcat'].str.contains('tra', na=False)) & (parcel_sum_df['pba50chcat'].str.contains('HRA', na=False) == False), 'hhq1_2015'].sum() / metrics_dict[runid,metric_id,'TotHH_inTRAonly',y1,dbp]
metrics_dict[runid,metric_id,'Q1HH_shareofTRAonly_normalized',y1,dbp] = metrics_dict[runid,metric_id,'Q1HH_shareofTRAonly',y1,dbp] * normalize_factor_Q1
metrics_dict[runid,metric_id,'Q1HH_shareofTRAonly',y2,dbp] = parcel_sum_df.loc[(parcel_sum_df['pba50chcat'].str.contains('tra', na=False)) & (parcel_sum_df['pba50chcat'].str.contains('HRA', na=False) == False), 'hhq1_2050'].sum() / metrics_dict[runid,metric_id,'TotHH_inTRAonly',y2,dbp]
metrics_dict[runid,metric_id,'Q1HH_shareofHRATRA',y1,dbp] = parcel_sum_df.loc[(parcel_sum_df['pba50chcat'].str.contains('HRA', na=False)) & (parcel_sum_df['pba50chcat'].str.contains('tra', na=False)), 'hhq1_2015'].sum() / metrics_dict[runid,metric_id,'TotHH_inHRATRA',y1,dbp]
metrics_dict[runid,metric_id,'Q1HH_shareofHRATRA_normalized',y1,dbp] = metrics_dict[runid,metric_id,'Q1HH_shareofHRATRA',y1,dbp] * normalize_factor_Q1
metrics_dict[runid,metric_id,'Q1HH_shareofHRATRA',y2,dbp] = parcel_sum_df.loc[(parcel_sum_df['pba50chcat'].str.contains('HRA', na=False)) & (parcel_sum_df['pba50chcat'].str.contains('tra', na=False)), 'hhq1_2050'].sum() / metrics_dict[runid,metric_id,'TotHH_inHRATRA',y2,dbp]
metrics_dict[runid,metric_id,'Q1HH_shareofDRTracts',y1,dbp] = tract_sum_df.loc[(tract_sum_df['DispRisk'] == 1), 'hhq1_2015'].sum() / metrics_dict[runid,metric_id,'TotHH_inDRTracts',y1,dbp]
metrics_dict[runid,metric_id,'Q1HH_shareofDRTracts_normalized',y1,dbp] = metrics_dict[runid,metric_id,'Q1HH_shareofDRTracts',y1,dbp] * normalize_factor_Q1
metrics_dict[runid,metric_id,'Q1HH_shareofDRTracts',y2,dbp] = tract_sum_df.loc[(tract_sum_df['DispRisk'] == 1), 'hhq1_2050'].sum() / metrics_dict[runid,metric_id,'TotHH_inDRTracts',y2,dbp]
metrics_dict[runid,metric_id,'Q1HH_shareofCoCTracts',y1,dbp] = tract_sum_df.loc[(tract_sum_df['coc_flag_pba2050'] == 1), 'hhq1_2015'].sum() / metrics_dict[runid,metric_id,'TotHH_inCoCTracts',y1,dbp]
metrics_dict[runid,metric_id,'Q1HH_shareofCoCTracts_normalized',y1,dbp] = metrics_dict[runid,metric_id,'Q1HH_shareofCoCTracts',y1,dbp] * normalize_factor_Q1
metrics_dict[runid,metric_id,'Q1HH_shareofCoCTracts',y2,dbp] = tract_sum_df.loc[(tract_sum_df['coc_flag_pba2050'] == 1), 'hhq1_2050'].sum() / metrics_dict[runid,metric_id,'TotHH_inCoCTracts',y2,dbp]
metrics_dict[runid,metric_id,'Q1HH_shareofRoR',y1,dbp] = parcel_sum_df.loc[(parcel_sum_df['pba50chcat'].str.contains('HRA', na=False) == False) & \
(parcel_sum_df['pba50chcat'].str.contains('tra', na=False) == False) & \
(parcel_sum_df['pba50chcat'].str.contains('DR', na=False) == False) & \
(parcel_sum_df['coc_flag_pba2050'] == 0), 'hhq1_2015'].sum() / metrics_dict[runid,metric_id,'TotHH_inRoR',y1,dbp]
metrics_dict[runid,metric_id,'Q1HH_shareofRoR_normalized',y1,dbp] = metrics_dict[runid,metric_id,'Q1HH_shareofRoR',y1,dbp] * normalize_factor_Q1
metrics_dict[runid,metric_id,'Q1HH_shareofRoR',y2,dbp] = parcel_sum_df.loc[(parcel_sum_df['pba50chcat'].str.contains('HRA', na=False) == False) & \
(parcel_sum_df['pba50chcat'].str.contains('tra', na=False) == False) & \
(parcel_sum_df['pba50chcat'].str.contains('DR', na=False) == False) & \
(parcel_sum_df['coc_flag_pba2050'] == 0), 'hhq1_2050'].sum() / metrics_dict[runid,metric_id,'TotHH_inRoR',y2,dbp]
metrics_dict[runid,metric_id,'Q1HH_shareofGGs',y1,dbp] = GG_sum_df['hhq1_2015'].sum() / metrics_dict[runid,metric_id,'TotHH_inGGs',y1,dbp]
metrics_dict[runid,metric_id,'Q1HH_shareofGGs_normalized',y1,dbp] = metrics_dict[runid,metric_id,'Q1HH_shareofGGs',y1,dbp] * normalize_factor_Q1
metrics_dict[runid,metric_id,'Q1HH_shareofGGs',y2,dbp] = GG_sum_df['hhq1_2050'].sum() / metrics_dict[runid,metric_id,'TotHH_inGGs',y2,dbp]
metrics_dict[runid,metric_id,'Q1HH_shareofTRichGGs',y1,dbp] = GG_TRich_sum_df['hhq1_2015'].sum() / metrics_dict[runid,metric_id,'TotHH_inTRichGGs',y1,dbp]
metrics_dict[runid,metric_id,'Q1HH_shareofTRichGGs_normalized',y1,dbp] = metrics_dict[runid,metric_id,'Q1HH_shareofTRichGGs',y1,dbp] * normalize_factor_Q1
metrics_dict[runid,metric_id,'Q1HH_shareofTRichGGs',y2,dbp] = GG_TRich_sum_df['hhq1_2050'].sum() / metrics_dict[runid,metric_id,'TotHH_inTRichGGs',y2,dbp]
'''
print('********************D1 Diverse********************')
print('Growth of LIHH share of population (normalization factor)',normalize_factor_Q1Q2 )
print('LIHH Share in HRA 2050 %s' % dbp,metrics_dict[runid,metric_id,'LIHH_shareinHRA',y2,dbp] )
print('LIHH Share in HRA 2015 %s' % dbp,metrics_dict[runid,metric_id,'LIHH_shareinHRA_normalized',y1,dbp] )
print('LIHH Share of HRA 2050 %s' % dbp,metrics_dict[runid,metric_id,'LIHH_shareofHRA',y2,dbp])
print('LIHH Share of HRA 2015 %s' % dbp,metrics_dict[runid,metric_id,'LIHH_shareofHRA_normalized',y1,dbp] )
'''
def calculate_Diverse2_LIHH_Displacement(runid, dbp, parcel_sum_df, tract_sum_df, TRA_sum_df, GG_sum_df, normalize_factor_Q1Q2, normalize_factor_Q1, metrics_dict):
metric_id = "D2"
# For reference: total number of LIHH in tracts
metrics_dict[runid,metric_id,'LIHH_inDR',y2,dbp] = parcel_sum_df.loc[parcel_sum_df['pba50chcat'].str.contains('DR', na=False), 'hhq1_2050'].sum()
metrics_dict[runid,metric_id,'LIHH_inDR',y1,dbp] = parcel_sum_df.loc[parcel_sum_df['pba50chcat'].str.contains('DR', na=False), 'hhq1_2015'].sum()
metrics_dict[runid,metric_id,'LIHH_inDR_normalized',y1,dbp] = parcel_sum_df.loc[parcel_sum_df['pba50chcat'].str.contains('DR', na=False), 'hhq1_2015'].sum() * normalize_factor_Q1
print('********************D2 Diverse********************')
print('Total Number of LIHH in DR tracts in 2050',metrics_dict[runid,metric_id,'LIHH_inDR',y2,dbp] )
print('Number of LIHH in DR tracts in 2015',metrics_dict[runid,metric_id,'LIHH_inDR',y1,dbp] )
print('Number of LIHH in DR tracts in 2015 (normalized)',metrics_dict[runid,metric_id,'LIHH_inDR_normalized',y1,dbp] )
###### Displacement at Tract Level (for Displacement Risk Tracts and CoC Tracts and HRA Tracts)
# Total number of DR, CoC, HRA Tracts
metrics_dict[runid,metric_id,'Num_DRtracts_total',y1,dbp] = tract_sum_df.loc[(tract_sum_df['DispRisk'] == 1), 'tract_id'].nunique()
metrics_dict[runid,metric_id,'Num_CoCtracts_total',y1,dbp] = tract_sum_df.loc[(tract_sum_df['coc_flag_pba2050'] == 1), 'tract_id'].nunique()
metrics_dict[runid,metric_id,'Num_HRAtracts_total',y1,dbp] = tract_sum_df.loc[(tract_sum_df['hra'] == 1), 'tract_id'].nunique()
# Calculating share of Q1 households at tract level; not normalizing this since we want to check impacts at the neighborhood level
#tract_sum_df['hhq1_pct_2015_normalized'] = tract_sum_df['hhq1_2015'] / tract_sum_df['tothh_2015'] * normalize_factor_Q1
tract_sum_df['hhq1_pct_2050'] = tract_sum_df['hhq1_2050'] / tract_sum_df['tothh_2050']
tract_sum_df['hhq1_pct_2015'] = tract_sum_df['hhq1_2015'] / tract_sum_df['tothh_2015']
# Creating functions to check if rows of a dataframe lost hhq1 share or absolute; applied to tract_summary_df and TRA_summary_df
def check_losthhq1_share(row,j):
if (row['hhq1_pct_2015'] == 0): return 0
elif ((row['hhq1_pct_2050']/row['hhq1_pct_2015'])<j): return 1
else: return 0
def check_losthhq1_abs(row,j):
if (row['hhq1_2015'] == 0): return 0
elif ((row['hhq1_2050']/row['hhq1_2015'])<j): return 1
else: return 0
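# Both helpers return 1 ("lost") when the 2050/2015 ratio falls below the threshold j, and 0 otherwise
# (including when the 2015 base is zero, which avoids division by zero).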
# Calculating number of Tracts that Lost LIHH, with "lost" defined as any loss, or 10% loss
for i in [0, 10]:
if i == 0:
j = 1
else:
j = 0.9
# Calculating change in share of LIHH at tract level to check gentrification
tract_sum_df['lost_hhq1_%dpct' % i] = tract_sum_df.apply (lambda row: check_losthhq1_share(row,j), axis=1)
#(lambda row: 1 if ((row['hhq1_pct_2050']/row['hhq1_pct_2015_normalized'])<j) else 0, axis=1)
#(lambda row: 1 if (row['hhq1_pct_2050'] < (row['hhq1_pct_2015']*j)) else 0, axis=1)
# Calculating absolute change in LIHH at tract level to check true displacement
tract_sum_df['lost_hhq1_abs_%dpct' % i] = tract_sum_df.apply (lambda row: check_losthhq1_abs(row,j), axis=1)
#(lambda row: 1 if (row['hhq1_2050'] < (row['hhq1_2015']*j)) else 0, axis=1)
############################### Gentrification
######## Gentrification in Displacement Risk Tracts
# Number or percent of DR tracts that lost Q1 households as a share of total HH
metrics_dict[runid,metric_id,'Num_DRtracts_lostLIHH_%dpct' % i,y_diff,dbp] = tract_sum_df.loc[((tract_sum_df['DispRisk'] == 1) & (tract_sum_df['lost_hhq1_%dpct' % i] == 1)), 'tract_id'].nunique()
metrics_dict[runid,metric_id,'Pct_DRtracts_lostLIHH_%dpct' % i,y_diff,dbp] = float(metrics_dict[runid,metric_id,'Num_DRtracts_lostLIHH_%dpct' % i,y_diff,dbp]) / float(metrics_dict[runid,metric_id,'Num_DRtracts_total',y1,dbp] )
print('Number of DR Tracts that lost LIHH (as a share) from 2015 to 2050: ',metrics_dict[runid,metric_id,'Num_DRtracts_lostLIHH_%dpct' % i,y_diff,dbp] )
print('Pct of DR Tracts that lost LIHH (as a share) from 2015 to 2050: ',metrics_dict[runid,metric_id,'Pct_DRtracts_lostLIHH_%dpct' % i,y_diff,dbp] )
######## Gentrification in Communities of Concern
# Number or percent of CoC tracts that lost Q1 households as a share of total HH
metrics_dict[runid,metric_id,'Num_CoCtracts_lostLIHH_%dpct' % i,y_diff,dbp] = tract_sum_df.loc[((tract_sum_df['coc_flag_pba2050'] == 1) & (tract_sum_df['lost_hhq1_%dpct' % i] == 1)), 'tract_id'].nunique()
metrics_dict[runid,metric_id,'Pct_CoCtracts_lostLIHH_%dpct' % i,y_diff,dbp] = float(metrics_dict[runid,metric_id,'Num_CoCtracts_lostLIHH_%dpct' % i,y_diff,dbp]) / float(metrics_dict[runid,metric_id,'Num_CoCtracts_total',y1,dbp] )
print('Number of CoC Tracts that lost LIHH (as a share) from 2015 to 2050: ',metrics_dict[runid,metric_id,'Num_CoCtracts_lostLIHH_%dpct' % i,y_diff,dbp] )
print('Pct of CoC Tracts that lost LIHH (as a share) from 2015 to 2050: ',metrics_dict[runid,metric_id,'Pct_CoCtracts_lostLIHH_%dpct' % i,y_diff,dbp] )
######## Gentrification in HRAs
# Number or percent of HRA tracts that lost Q1 households as a share of total HH
metrics_dict[runid,metric_id,'Num_HRAtracts_lostLIHH_%dpct' % i,y_diff,dbp] = tract_sum_df.loc[((tract_sum_df['hra'] == 1) & (tract_sum_df['lost_hhq1_%dpct' % i] == 1)), 'tract_id'].nunique()
metrics_dict[runid,metric_id,'Pct_HRAtracts_lostLIHH_%dpct' % i,y_diff,dbp] = float(metrics_dict[runid,metric_id,'Num_HRAtracts_lostLIHH_%dpct' % i,y_diff,dbp]) / float(metrics_dict[runid,metric_id,'Num_HRAtracts_total',y1,dbp] )
print('Number of HRA Tracts that lost LIHH (as a share) from 2015 to 2050: ',metrics_dict[runid,metric_id,'Num_HRAtracts_lostLIHH_%dpct' % i,y_diff,dbp] )
print('Pct of HRA Tracts that lost LIHH (as a share) from 2015 to 2050: ',metrics_dict[runid,metric_id,'Pct_HRAtracts_lostLIHH_%dpct' % i,y_diff,dbp] )
############################### Displacement
######## Displacement in Displacement Risk Tracts
# Number or percent of DR tracts that lost Q1 households in absolute numbers
metrics_dict[runid,metric_id,'Num_DRtracts_lostLIHH_abs_%dpct' % i,y_diff,dbp] = tract_sum_df.loc[((tract_sum_df['DispRisk'] == 1) & (tract_sum_df['lost_hhq1_abs_%dpct' % i] == 1)), 'tract_id'].nunique()
metrics_dict[runid,metric_id,'Pct_DRtracts_lostLIHH_abs_%dpct' % i,y_diff,dbp] = float(metrics_dict[runid,metric_id,'Num_DRtracts_lostLIHH_abs_%dpct' % i,y_diff,dbp]) / float(metrics_dict[runid,metric_id,'Num_DRtracts_total',y1,dbp] )
print('Number of DR Tracts that lost LIHH (in absolute numbers) from 2015 to 2050: ',metrics_dict[runid,metric_id,'Num_DRtracts_lostLIHH_abs_%dpct' % i,y_diff,dbp] )
print('Pct of DR Tracts that lost LIHH (in absolute numbers) from 2015 to 2050: ',metrics_dict[runid,metric_id,'Pct_DRtracts_lostLIHH_abs_%dpct' % i,y_diff,dbp] )
######## Displacement in Communities of Concern
# Number or percent of CoC tracts that lost Q1 households in absolute numbers
metrics_dict[runid,metric_id,'Num_CoCtracts_lostLIHH_abs_%dpct' % i,y_diff,dbp] = tract_sum_df.loc[((tract_sum_df['coc_flag_pba2050'] == 1) & (tract_sum_df['lost_hhq1_abs_%dpct' % i] == 1)), 'tract_id'].nunique()
metrics_dict[runid,metric_id,'Pct_CoCtracts_lostLIHH_abs_%dpct' % i,y_diff,dbp] = float(metrics_dict[runid,metric_id,'Num_CoCtracts_lostLIHH_abs_%dpct' % i,y_diff,dbp]) / float(metrics_dict[runid,metric_id,'Num_CoCtracts_total',y1,dbp] )
print('Number of CoC Tracts that lost LIHH (in absolute numbers) from 2015 to 2050: ',metrics_dict[runid,metric_id,'Num_CoCtracts_lostLIHH_abs_%dpct' % i,y_diff,dbp] )
print('Pct of CoC Tracts that lost LIHH (in absolute numbers) from 2015 to 2050: ',metrics_dict[runid,metric_id,'Pct_CoCtracts_lostLIHH_abs_%dpct' % i,y_diff,dbp] )
######## Displacement in HRAs
# Number or percent of HRA tracts that lost Q1 households in absolute numbers
metrics_dict[runid,metric_id,'Num_HRAtracts_lostLIHH_abs_%dpct' % i,y_diff,dbp] = tract_sum_df.loc[((tract_sum_df['hra'] == 1) & (tract_sum_df['lost_hhq1_abs_%dpct' % i] == 1)), 'tract_id'].nunique()
metrics_dict[runid,metric_id,'Pct_HRAtracts_lostLIHH_abs_%dpct' % i,y_diff,dbp] = float(metrics_dict[runid,metric_id,'Num_HRAtracts_lostLIHH_abs_%dpct' % i,y_diff,dbp]) / float(metrics_dict[runid,metric_id,'Num_HRAtracts_total',y1,dbp] )
print('Number of HRA Tracts that lost LIHH (in absolute numbers) from 2015 to 2050: ',metrics_dict[runid,metric_id,'Num_HRAtracts_lostLIHH_abs_%dpct' % i,y_diff,dbp] )
print('Pct of HRA Tracts that lost LIHH (in absolute numbers) from 2015 to 2050: ',metrics_dict[runid,metric_id,'Pct_HRAtracts_lostLIHH_abs_%dpct' % i,y_diff,dbp] )
##### Calculating displacement risk using the PBA2040 methodology
# The analysis estimated which zones (i.e., TAZs) gained or lost lower-income households; those zones
# that lost lower-income households over the time period would be flagged as being at risk of displacement.
# The share of lower-income households at risk of displacement would be calculated by
# dividing the number of lower-income households living in TAZs flagged as PDAs, TPAs, or
# high-opportunity areas with an increased risk of displacement by the total number of lower-income
# households living in TAZs flagged as PDAs, TPAs, or high-opportunity areas in 2040.
# Calculating this first for all DR Risk/CoC/HRA tracts; and next for TRA areas
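# As implemented below, the PBA40-style displacement risk is:
#   DispRisk_PBA40 = (2050 Q1 HH in DR/CoC/HRA tracts that lost Q1 HH in absolute terms)
#                    / (2050 Q1 HH in all DR/CoC/HRA tracts)
# with a Q1+Q2 variant computed the same way.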
######## PBA40 Displacement risk in DR Risk/CoC/HRA tracts
# Q1 only
#metrics_dict[runid,metric_id,'Num_LIHH_inDRCoCHRAtracts',y1,dbp] = tract_sum_df.loc[((tract_sum_df['DispRisk'] == 1)|(tract_sum_df['coc_flag_pba2050'] == 1)|\
# (tract_sum_df['hra'] == 1)), 'hhq1_2015'].nunique()
metrics_dict[runid,metric_id,'Num_LIHH_inDRCoCHRAtracts',y2,dbp] = tract_sum_df.loc[((tract_sum_df['DispRisk'] == 1)|(tract_sum_df['coc_flag_pba2050'] == 1)|\
(tract_sum_df['hra'] == 1)), 'hhq1_2050'].sum()
# Total number of LIHH in HRA/CoC/DR tracts that lost hhq1
metrics_dict[runid,metric_id,'Num_LIHH_inDRCoCHRAtracts_disp',y_diff,dbp] = tract_sum_df.loc[(((tract_sum_df['DispRisk'] == 1)|(tract_sum_df['coc_flag_pba2050'] == 1)|\
(tract_sum_df['hra'] == 1)) & (tract_sum_df['lost_hhq1_abs_0pct'] == 1)), 'hhq1_2050'].sum()
metrics_dict[runid,metric_id,'DispRisk_PBA40_DRCoCHRAtracts',y_diff,dbp] = metrics_dict[runid,metric_id,'Num_LIHH_inDRCoCHRAtracts_disp',y_diff,dbp] / \
metrics_dict[runid,metric_id,'Num_LIHH_inDRCoCHRAtracts',y2,dbp]
#For both Q1, Q2 - because this is how it was done in PBA40
metrics_dict[runid,metric_id,'Num_Q1Q2HH_inDRCoCHRAtracts',y2,dbp] = tract_sum_df.loc[((tract_sum_df['DispRisk'] == 1)|(tract_sum_df['coc_flag_pba2050'] == 1)|\
(tract_sum_df['hra'] == 1)), 'hhq1_2050'].sum() + \
tract_sum_df.loc[((tract_sum_df['DispRisk'] == 1)|(tract_sum_df['coc_flag_pba2050'] == 1)|\
(tract_sum_df['hra'] == 1)), 'hhq2_2050'].sum()
metrics_dict[runid,metric_id,'Num_Q1Q2HH_inDRCoCHRAtracts_disp',y_diff,dbp] = tract_sum_df.loc[(((tract_sum_df['DispRisk'] == 1)|(tract_sum_df['coc_flag_pba2050'] == 1)|\
(tract_sum_df['hra'] == 1)) & (tract_sum_df['lost_hhq1_abs_0pct'] == 1)), 'hhq1_2050'].sum() + \
tract_sum_df.loc[(((tract_sum_df['DispRisk'] == 1)|(tract_sum_df['coc_flag_pba2050'] == 1)|\
(tract_sum_df['hra'] == 1)) & (tract_sum_df['lost_hhq1_abs_0pct'] == 1)), 'hhq2_2050'].sum()
metrics_dict[runid,metric_id,'DispRisk_PBA40_Q1Q2_DRCoCHRAtracts',y_diff,dbp] = metrics_dict[runid,metric_id,'Num_Q1Q2HH_inDRCoCHRAtracts_disp',y_diff,dbp] / \
metrics_dict[runid,metric_id,'Num_Q1Q2HH_inDRCoCHRAtracts',y2,dbp]
########### Repeating all above analysis for TRAs
# Calculating share of Q1 households at TRA level using TRA summary dataframe
TRA_sum_df['hhq1_pct_2015'] = TRA_sum_df['hhq1_2015'] / TRA_sum_df['tothh_2015']
#TRA_sum_df['hhq1_pct_2015_normalized'] = TRA_sum_df['hhq1_pct_2015'] * normalize_factor_Q1
TRA_sum_df['hhq1_pct_2050'] = TRA_sum_df['hhq1_2050'] / TRA_sum_df['tothh_2050']
# Total number of TRAs
metrics_dict[runid,metric_id,'Num_TRAs_total',y1,dbp] = TRA_sum_df['juris_tra'].nunique()
# Calculating number of TRAs that Lost LIHH as a share of total HH, with "lost" defined as any loss, or 10% loss
for i in [0, 10]:
if i == 0:
j = 1
else:
j = 0.9
# Calculating change in share of LIHH at TRA level to check gentrification
TRA_sum_df['lost_hhq1_%dpct' % i] = TRA_sum_df.apply (lambda row: check_losthhq1_share(row,j), axis=1)
# Calculating absolute change in LIHH at TRA level to check true displacement
TRA_sum_df['lost_hhq1_abs_%dpct' % i] = TRA_sum_df.apply (lambda row: check_losthhq1_abs(row,j), axis=1)
######## Gentrification in TRAs
# Number or percent of TRAs that lost Q1 households as a share of total HH
metrics_dict[runid,metric_id,'Num_TRAs_lostLIHH_%dpct' % i,y_diff,dbp] = TRA_sum_df.loc[(TRA_sum_df['lost_hhq1_%dpct' % i] == 1), 'juris_tra'].nunique()
metrics_dict[runid,metric_id,'Pct_TRAs_lostLIHH_%dpct' % i,y_diff,dbp] = float(metrics_dict[runid,metric_id,'Num_TRAs_lostLIHH_%dpct' % i,y_diff,dbp]) / float(metrics_dict[runid,metric_id,'Num_TRAs_total',y1,dbp])
print('Number of TRAs that lost LIHH (as a share) from 2015 to 2050: ',metrics_dict[runid,metric_id,'Num_TRAs_lostLIHH_%dpct' % i,y_diff,dbp] )
print('Pct of TRAs that lost LIHH (as a share) from 2015 to 2050: ',metrics_dict[runid,metric_id,'Pct_TRAs_lostLIHH_%dpct' % i,y_diff,dbp] )
######## Displacement in TRAs
# Number or percent of TRAs that lost Q1 households in absolute numbers
metrics_dict[runid,metric_id,'Num_TRAs_lostLIHH_abs_%dpct' % i,y_diff,dbp] = TRA_sum_df.loc[(TRA_sum_df['lost_hhq1_abs_%dpct' % i] == 1), 'juris_tra'].nunique()
metrics_dict[runid,metric_id,'Pct_TRAs_lostLIHH_abs_%dpct' % i,y_diff,dbp] = float(metrics_dict[runid,metric_id,'Num_TRAs_lostLIHH_abs_%dpct' % i,y_diff,dbp]) / float(metrics_dict[runid,metric_id,'Num_TRAs_total',y1,dbp])
print('Number of TRAs that lost LIHH (in absolute numbers) from 2015 to 2050: ',metrics_dict[runid,metric_id,'Num_TRAs_lostLIHH_abs_%dpct' % i,y_diff,dbp] )
print('Pct of TRAs that lost LIHH (in absolute numbers) from 2015 to 2050: ',metrics_dict[runid,metric_id,'Pct_TRAs_lostLIHH_abs_%dpct' % i,y_diff,dbp] )
######## PBA40 Displacement Risk metric in TRAs
metrics_dict[runid,metric_id,'Num_LIHH_inTRAs',y2,dbp] = TRA_sum_df['hhq1_2050'].sum()
metrics_dict[runid,metric_id,'Num_LIHH_inTRAs_disp',y_diff,dbp] = TRA_sum_df.loc[(TRA_sum_df['lost_hhq1_abs_0pct'] == 1), 'hhq1_2050'].sum()
metrics_dict[runid,metric_id,'DispRisk_PBA40_TRAs',y_diff,dbp] = metrics_dict[runid,metric_id,'Num_LIHH_inTRAs_disp',y_diff,dbp] / \
metrics_dict[runid,metric_id,'Num_LIHH_inTRAs',y2,dbp]
metrics_dict[runid,metric_id,'Num_Q1Q2HH_inTRAs',y2,dbp] = TRA_sum_df['hhq1_2050'].sum() + TRA_sum_df['hhq2_2050'].sum()
metrics_dict[runid,metric_id,'Num_Q1Q2_inTRAs_disp',y_diff,dbp] = TRA_sum_df.loc[(TRA_sum_df['lost_hhq1_abs_0pct'] == 1), 'hhq1_2050'].sum() + TRA_sum_df.loc[(TRA_sum_df['lost_hhq1_abs_0pct'] == 1), 'hhq2_2050'].sum()
metrics_dict[runid,metric_id,'DispRisk_PBA40_Q1Q2_TRAs',y_diff,dbp] = metrics_dict[runid,metric_id,'Num_Q1Q2_inTRAs_disp',y_diff,dbp] / \
metrics_dict[runid,metric_id,'Num_Q1Q2HH_inTRAs',y2,dbp]
######## Displacement from Growth Geographies
# Calculating GG rows that lost inc1 Households
GG_sum_df['hhq1_pct_2015'] = GG_sum_df['hhq1_2015'] / GG_sum_df['tothh_2015']
#GG_sum_df['hhq1_pct_2015_normalized'] = GG_sum_df['hhq1_pct_2015'] * normalize_factor_Q1
GG_sum_df['hhq1_pct_2050'] = GG_sum_df['hhq1_2050'] / GG_sum_df['tothh_2050']
# Total number of GGs
metrics_dict[runid,metric_id,'Num_GGs_total',y1,dbp] = GG_sum_df['PDA_ID'].nunique()
# Total number of Transit Rich GGs
GG_TRich_sum_df = GG_sum_df[GG_sum_df['Designation']=="Transit-Rich"].copy()  # copy so the lost_hhq1 columns added below are not set on a slice
metrics_dict[runid,metric_id,'Num_GGs_TRich_total',y1,dbp] = GG_TRich_sum_df['PDA_ID'].nunique()
# Calculating number of GGs that Lost LIHH as a share of total HH, with "lost" defined as any loss, or 10% loss
for i in [0, 10]:
if i == 0:
j = 1
else:
j = 0.9
GG_sum_df['lost_hhq1_%dpct' % i] = GG_sum_df.apply (lambda row: check_losthhq1_share(row,j), axis=1)
GG_TRich_sum_df['lost_hhq1_%dpct' % i] = GG_TRich_sum_df.apply (lambda row: check_losthhq1_share(row,j), axis=1)
# Number or percent of GGs that lost Q1 households as a proportion of total HH
metrics_dict[runid,metric_id,'Num_GG_lostLIHH_%dpct' % i,y_diff,dbp] = GG_sum_df.loc[(GG_sum_df['lost_hhq1_%dpct' % i] == 1), 'PDA_ID'].nunique()
metrics_dict[runid,metric_id,'Pct_GG_lostLIHH_%dpct' % i,y_diff,dbp] = float(metrics_dict[runid,metric_id,'Num_GG_lostLIHH_%dpct' % i,y_diff,dbp]) / float(metrics_dict[runid,metric_id,'Num_GGs_total',y1,dbp])
print('Number of GGs that lost LIHH from 2015 to 2050: ',metrics_dict[runid,metric_id,'Num_GG_lostLIHH_%dpct' % i,y_diff,dbp] )
print('Pct of GGs that lost LIHH from 2015 to 2050: ',metrics_dict[runid,metric_id,'Pct_GG_lostLIHH_%dpct' % i,y_diff,dbp] )
# Number or percent of Transit Rich GGs that lost Q1 households as a proportion of total HH
metrics_dict[runid,metric_id,'Num_GG_TRich_lostLIHH_%dpct' % i,y_diff,dbp] = GG_TRich_sum_df.loc[(GG_TRich_sum_df['lost_hhq1_%dpct' % i] == 1), 'PDA_ID'].nunique()
metrics_dict[runid,metric_id,'Pct_GG_TRich_lostLIHH_%dpct' % i,y_diff,dbp] = float(metrics_dict[runid,metric_id,'Num_GG_TRich_lostLIHH_%dpct' % i,y_diff,dbp]) / float(metrics_dict[runid,metric_id,'Num_GGs_TRich_total',y1,dbp])
print('Number of Transit Rich GGs that lost LIHH from 2015 to 2050: ',metrics_dict[runid,metric_id,'Num_GG_TRich_lostLIHH_%dpct' % i,y_diff,dbp] )
print('Pct of Transit Rich GGs that lost LIHH from 2015 to 2050: ',metrics_dict[runid,metric_id,'Pct_GG_TRich_lostLIHH_%dpct' % i,y_diff,dbp] )
tract_sum_filename = 'C:/Users/{}/Box/Horizon and Plan Bay Area 2050/Equity and Performance/7_Analysis/Metrics/tract_summary_output.csv'.format(os.getenv('USERNAME'))
tract_sum_df.to_csv(tract_sum_filename, header=True, sep=',')
def calculate_Healthy1_HHs_SLRprotected(runid, dbp, parcel_sum_df, metrics_dict):
metric_id = "H1"
# Labeling parcels as "Protected", "Unprotected", or "Unaffected"
'''
#Basic
def label_SLR(row):
if (row['SLR'] == 12): return 'Unprotected'
elif (row['SLR'] == 24): return 'Unprotected'
elif (row['SLR'] == 36): return 'Unprotected'
elif (row['SLR'] == 100): return 'Protected'
else: return 'Unaffected'
parcel_sum_df['SLR_protection'] = parcel_sum_df.apply (lambda row: label_SLR(row), axis=1)
'''
def label_SLR(row):
if ((row['SLR'] == 12) or (row['SLR'] == 24) or (row['SLR'] == 36)): return 'Unprotected'
elif row['SLR'] == 100: return 'Protected'
else: return 'Unaffected'
parcel_sum_df['SLR_protection'] = parcel_sum_df.apply (lambda row: label_SLR(row), axis=1)
# Calculating protected households
# All households
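# Note: str.contains("rotected") matches both "Protected" and "Unprotected" (i.e. all SLR-affected parcels),
# while str.contains("Protected") matches only the protected ones, since contains() is case-sensitive by default.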
tothh_2050_affected = parcel_sum_df.loc[(parcel_sum_df['SLR_protection'].str.contains("rotected") == True), 'tothh_2050'].sum()
tothh_2050_protected = parcel_sum_df.loc[(parcel_sum_df['SLR_protection'].str.contains("Protected") == True), 'tothh_2050'].sum()
tothh_2015_affected = parcel_sum_df.loc[(parcel_sum_df['SLR_protection'].str.contains("rotected") == True), 'tothh_2015'].sum()
tothh_2015_protected = parcel_sum_df.loc[(parcel_sum_df['SLR_protection'].str.contains("Protected") == True), 'tothh_2015'].sum()
# Q1 Households
hhq1_2050_affected = parcel_sum_df.loc[(parcel_sum_df['SLR_protection'].str.contains("rotected") == True), 'hhq1_2050'].sum()
hhq1_2050_protected = parcel_sum_df.loc[(parcel_sum_df['SLR_protection'].str.contains("Protected") == True), 'hhq1_2050'].sum()
hhq1_2015_affected = parcel_sum_df.loc[(parcel_sum_df['SLR_protection'].str.contains("rotected") == True), 'hhq1_2015'].sum()
hhq1_2015_protected = parcel_sum_df.loc[(parcel_sum_df['SLR_protection'].str.contains("Protected") == True), 'hhq1_2015'].sum()
# CoC Households
CoChh_2050_affected = parcel_sum_df.loc[((parcel_sum_df['SLR_protection'].str.contains("rotected") == True) & \
                                             (parcel_sum_df['coc_flag_pba2050'] == 1)), 'tothh_2050'].sum()
CoChh_2050_protected = parcel_sum_df.loc[((parcel_sum_df['SLR_protection'].str.contains("Protected") == True) & \
                                             (parcel_sum_df['coc_flag_pba2050'] == 1)), 'tothh_2050'].sum()
CoChh_2015_affected = parcel_sum_df.loc[((parcel_sum_df['SLR_protection'].str.contains("rotected") == True) & \
                                             (parcel_sum_df['coc_flag_pba2050'] == 1)), 'tothh_2015'].sum()
CoChh_2015_protected = parcel_sum_df.loc[((parcel_sum_df['SLR_protection'].str.contains("Protected") == True) & \
                                             (parcel_sum_df['coc_flag_pba2050'] == 1)), 'tothh_2015'].sum()
metrics_dict[runid,metric_id,'SLR_protected_pct_affected_tothh',y2,dbp] = tothh_2050_protected / tothh_2050_affected
metrics_dict[runid,metric_id,'SLR_protected_pct_affected_hhq1',y2,dbp] = hhq1_2050_protected / hhq1_2050_affected
metrics_dict[runid,metric_id,'SLR_protected_pct_affected_CoChh',y2,dbp] = CoChh_2050_protected / CoChh_2050_affected
print('********************H1 Healthy********************')
print('Pct of HHs affected by 3ft SLR that are protected in 2050 in %s' % dbp,metrics_dict[runid,metric_id,'SLR_protected_pct_affected_tothh',y2,dbp])
print('Pct of Q1 HHs affected by 3ft SLR that are protected in 2050 in %s' % dbp,metrics_dict[runid,metric_id,'SLR_protected_pct_affected_hhq1',y2,dbp])
print('Pct of CoC HHs affected by 3ft SLR that are protected in 2050 in %s' % dbp,metrics_dict[runid,metric_id,'SLR_protected_pct_affected_CoChh',y2,dbp])
def calculate_Healthy1_HHs_EQprotected(runid, dbp, parcel_sum_df, metrics_dict):
metric_id = "H1"
'''
# Reading building codes file, which has info at building level, on which parcels are inundated and protected
buildings_code = pd.read_csv('C:/Users/ATapase/Box/Horizon and Plan Bay Area 2050/Equity and Performance/7_Analysis/Metrics/Healthy/buildings_with_eq_code.csv')
buildings_eq = pd.merge(left=buildings_code[['building_id', 'parcel_id', 'residential_units', 'year_built', 'earthquake_code']], right=parcel_sum_df[['parcel_id','zone_id','tract_id','coc_flag_pba2050','pba50chcat','hhq1_2015','hhq1_2050','tothh_2015','tothh_2050']], left_on="parcel_id", right_on="parcel_id", how="left")
buildings_eq = pd.merge(left=buildings_eq, right=coc_flag[['tract_id_coc','county_fips']], left_on="tract_id", right_on="tract_id_coc", how="left")
buildings_cat = pd.read_csv('C:/Users/ATapase/Box/Horizon and Plan Bay Area 2050/Equity and Performance/7_Analysis/Metrics/Healthy/building_eq_categories.csv')
buildings_eq = pd.merge(left=buildings_eq, right=buildings_cat, left_on="earthquake_code", right_on="building_eq_code", how="inner")
buildings_eq.drop(['building_eq_code', 'tract_id_coc'], axis=1, inplace=True)
buildings_eq['cost_retrofit_total'] = buildings_eq['residential_units'] * buildings_eq['cost_retrofit']
# Calculate protected households in Plus
# Number of Units retrofitted
metrics_dict['H2_eq_num_units_retrofit'] = buildings_eq['residential_units'].sum()
metrics_dict['H2_eq_num_CoC_units_retrofit'] = buildings_eq.loc[(buildings_eq['coc_flag_pba2050']== 1), 'residential_units'].sum()
metrics_dict['H2_eq_total_cost_retrofit'] = buildings_eq['cost_retrofit_total'].sum()
metrics_dict['H2_eq_CoC_cost_retrofit'] = buildings_eq.loc[(buildings_eq['coc_flag_pba2050']== 1), 'cost_retrofit_total'].sum()
print('Total number of units retrofitted',metrics_dict['H2_eq_num_units_retrofit'])
print('CoC number of units retrofitted',metrics_dict['H2_eq_num_CoC_units_retrofit'])
print('Total cost of retrofit',metrics_dict['H2_eq_total_cost_retrofit'])
print('CoC cost of retrofit',metrics_dict['H2_eq_CoC_cost_retrofit'])
'''
def calculate_Healthy1_HHs_WFprotected(runid, dbp, parcel_sum_df, metrics_dict):
metric_id = "H1"
'''
#
'''
def calculate_Healthy1_safety(runid, year, dbp, tm_taz_input_df, safety_df, metrics_dict):
metric_id = "H1"
population = tm_taz_input_df.TOTPOP.sum()
per_x_people = 1000000
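# Rates below are reported per 1 million people (the "per_MNppl" suffix) and per 100 million VMT ("per_100MVMT").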
print('population %d' % population)
fatalities = safety_df.loc[(safety_df['index']=="N_total_fatalities") & (safety_df['modelrunID'].str.contains(dbp)), 'value'].sum()
fatalities_m = safety_df.loc[(safety_df['index']=="N_motorist_fatalities") & (safety_df['modelrunID'].str.contains(dbp)), 'value'].sum()
fatalities_b = safety_df.loc[(safety_df['index']=="N_bike_fatalities") & (safety_df['modelrunID'].str.contains(dbp)), 'value'].sum()
fatalities_p = safety_df.loc[(safety_df['index']=="N_ped_fatalities") & (safety_df['modelrunID'].str.contains(dbp)), 'value'].sum()
injuries = safety_df.loc[(safety_df['index']=="N_injuries") & (safety_df['modelrunID'].str.contains(dbp)), 'value'].sum()
metrics_dict[runid,metric_id,'fatalities_annual_per_MNppl',year,dbp] = fatalities / population * per_x_people
metrics_dict[runid,metric_id,'fatalities_auto_annual_per_MNppl',year,dbp] = fatalities_m / population * per_x_people
metrics_dict[runid,metric_id,'fatalities_bike_annual_per_MNppl',year,dbp] = fatalities_b / population * per_x_people
metrics_dict[runid,metric_id,'fatalities_ped_annual_per_MNppl',year,dbp] = fatalities_p / population * per_x_people
metrics_dict[runid,metric_id,'injuries_annual_per_MNppl',year,dbp] = injuries / population * per_x_people
metrics_dict[runid,metric_id,'fatalities_annual_per_100MVMT',year,dbp] = safety_df.loc[(safety_df['index']=="N_total_fatalities_per_100M_VMT") & (safety_df['modelrunID'].str.contains(dbp)), 'value'].sum()
metrics_dict[runid,metric_id,'fatalities_auto_annual_per_100MVMT',year,dbp] = safety_df.loc[(safety_df['index']=="N_motorist_fatalities_per_100M_VMT") & (safety_df['modelrunID'].str.contains(dbp)), 'value'].sum()
metrics_dict[runid,metric_id,'fatalities_bike_annual_per_100MVMT',year,dbp] = safety_df.loc[(safety_df['index']=="N_bike_fatalities_per_100M_VMT") & (safety_df['modelrunID'].str.contains(dbp)), 'value'].sum()
metrics_dict[runid,metric_id,'fatalities_ped_annual_per_100MVMT',year,dbp] = safety_df.loc[(safety_df['index']=="N_ped_fatalities_per_100M_VMT") & (safety_df['modelrunID'].str.contains(dbp)), 'value'].sum()
metrics_dict[runid,metric_id,'injuries_annual_per_100MVMT',year,dbp] = safety_df.loc[(safety_df['index']=="N_injuries_per_100M_VMT") & (safety_df['modelrunID'].str.contains(dbp)), 'value'].sum()
def calculate_Healthy2_emissions(runid, year, dbp, tm_taz_input_df, tm_auto_times_df, emfac_df, metrics_dict):
metric_id = "H2"
population = tm_taz_input_df.TOTPOP.sum()
tm_auto_times_df = tm_auto_times_df.sum(level='Mode')
dailyVMT = tm_auto_times_df['Vehicle Miles'].sum() - tm_auto_times_df.loc['truck', ['Vehicle Miles']].sum()
metrics_dict[runid,metric_id,'daily_vmt_per_capita',year,dbp] = dailyVMT / population
metrics_dict[runid,metric_id,'daily_vmt_per_capita',"2005","2005"] = emfac_df.loc[(emfac_df['dbp']==2005), 'VMT per capita'].sum()
metrics_dict[runid,metric_id,'daily_vmt_per_capita',"2035","2035"] = emfac_df.loc[(emfac_df['dbp']==2035), 'VMT per capita'].sum()
metrics_dict["emfac_hardcode",metric_id,'ghg_emissions_lbs_per_capita',"2005","2005"] = emfac_df.loc[(emfac_df['dbp']==2005), 'Total CO2 Emissions Per Capita (lbs)'].sum()
metrics_dict["emfac_hardcode",metric_id,'ghg_emissions_lbs_per_capita',"2015","2015"] = emfac_df.loc[(emfac_df['dbp']==2015), 'Total CO2 Emissions Per Capita (lbs)'].sum()
metrics_dict["emfac_hardcode",metric_id,'ghg_emissions_lbs_per_capita',"2035","2035"] = emfac_df.loc[(emfac_df['dbp']==2035), 'Total CO2 Emissions Per Capita (lbs)'].sum()
metrics_dict["emfac_hardcode",metric_id,'ghg_emissions_lbs_per_capita',"2050","Plus"] = 0
metrics_dict["emfac_hardcode",metric_id,'ghg_emissions_nonSB375_lbs_per_capita',"2005","2005"] = emfac_df.loc[(emfac_df['dbp']==2005), 'Total CO2 Emissions Per Capita (lbs)'].sum()
metrics_dict["emfac_hardcode",metric_id,'ghg_emissions_nonSB375_lbs_per_capita',"2015","2015"] = emfac_df.loc[(emfac_df['dbp']==2015), 'Total CO2 Emissions Per Capita (lbs)'].sum()
metrics_dict["emfac_hardcode",metric_id,'ghg_emissions_nonSB375_lbs_per_capita',"2035","2035"] = emfac_df.loc[(emfac_df['dbp']==2035), 'Total CO2 Emissions Per Capita (lbs)'].sum()
metrics_dict["emfac_hardcode",metric_id,'ghg_emissions_nonSB375_lbs_per_capita',"2050","Plus"] = 0
def calculate_Vibrant1_JobsHousing(runid, dbp, county_sum_df, metrics_dict):
metric_id = "V1"
metrics_dict[runid,metric_id,'jobs_housing_ratio_region',y1,dbp] = county_sum_df['totemp_2015'].sum() / county_sum_df['tothh_2015'].sum()
metrics_dict[runid,metric_id,'jobs_housing_ratio_region',y2,dbp] = county_sum_df['totemp_2050'].sum() / county_sum_df['tothh_2050'].sum()
for index,row in county_sum_df.iterrows():
metrics_dict[runid,metric_id,'jobs_housing_ratio_%s' % row['county'],y1,dbp] = row['totemp_2015'] / row['tothh_2015']
metrics_dict[runid,metric_id,'jobs_housing_ratio_%s' % row['county'],y2,dbp] = row['totemp_2050'] / row['tothh_2050']
def calculate_Vibrant1_median_commute(runid, year, dbp, tm_commute_df, metrics_dict):
metric_id = "V1"
tm_commute_df['total_commute_miles'] = tm_commute_df['freq'] * tm_commute_df['distance']
commute_dist_df = tm_commute_df[['incQ','freq','total_commute_miles']].groupby(['incQ']).sum()
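# Mean commute distance is frequency-weighted: total commute miles divided by total trip frequency,
# computed overall and separately for each income quartile (incQ 1-4).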
metrics_dict[runid,metric_id,'mean_commute_distance',year,dbp] = commute_dist_df['total_commute_miles'].sum() / commute_dist_df['freq'].sum()
metrics_dict[runid,metric_id,'mean_commute_distance_inc1',year,dbp] = commute_dist_df['total_commute_miles'][1] / commute_dist_df['freq'][1]
metrics_dict[runid,metric_id,'mean_commute_distance_inc2',year,dbp] = commute_dist_df['total_commute_miles'][2] / commute_dist_df['freq'][2]
metrics_dict[runid,metric_id,'mean_commute_distance_inc3',year,dbp] = commute_dist_df['total_commute_miles'][3] / commute_dist_df['freq'][3]
metrics_dict[runid,metric_id,'mean_commute_distance_inc4',year,dbp] = commute_dist_df['total_commute_miles'][4] / commute_dist_df['freq'][4]
def calculate_Vibrant2_Jobs(runid, dbp, parcel_sum_df, metrics_dict):
metric_id = 'V2'
print('********************V2 Vibrant********************')
# Total Jobs Growth
metrics_dict[runid,metric_id,'Total_jobs',y2,dbp] = parcel_sum_df['totemp_2050'].sum()
metrics_dict[runid,metric_id,'Total_jobs',y1,dbp] = parcel_sum_df['totemp_2015'].sum()
metrics_dict[runid,metric_id,'Total_jobs_growth',y_diff,dbp] = metrics_dict[runid,metric_id,'Total_jobs',y2,dbp]/metrics_dict[runid,metric_id,'Total_jobs',y1,dbp] - 1
print('Number of Jobs in 2050 %s' % dbp,metrics_dict[runid,metric_id,'Total_jobs',y2,dbp])
print('Number of Jobs in 2015 %s' % dbp,metrics_dict[runid,metric_id,'Total_jobs',y1,dbp])
print('Job Growth from 2015 to 2050 %s' % dbp,metrics_dict[runid,metric_id,'Total_jobs_growth',y_diff,dbp])
# MWTEMPN jobs
metrics_dict[runid,metric_id,'Total_MWTEMPN_jobs',y2,dbp] = parcel_sum_df['MWTEMPN_2050'].sum()
metrics_dict[runid,metric_id,'Total_MWTEMPN_jobs',y1,dbp] = parcel_sum_df['MWTEMPN_2015'].sum()
metrics_dict[runid,metric_id,'Total_jobs_growth_MWTEMPN',y_diff,dbp] = metrics_dict[runid,metric_id,'Total_MWTEMPN_jobs',y2,dbp]/metrics_dict[runid,metric_id,'Total_MWTEMPN_jobs',y1,dbp] - 1
print('Number of Total MWTEMPN Jobs 2050 %s' % dbp,metrics_dict[runid,metric_id,'Total_MWTEMPN_jobs',y2,dbp])
print('Number of Total MWTEMPN Jobs 2015 %s' % dbp,metrics_dict[runid,metric_id,'Total_MWTEMPN_jobs',y1,dbp])
print('Job Growth Total MWTEMPN from 2015 to 2050 %s' % dbp,metrics_dict[runid,metric_id,'Total_jobs_growth_MWTEMPN',y_diff,dbp])
# Jobs Growth in PPAs
metrics_dict[runid,metric_id,'PPA_jobs',y2,dbp] = parcel_sum_df.loc[parcel_sum_df['pba50chcat'].str.contains('ppa', na=False), 'totemp_2050'].sum()
metrics_dict[runid,metric_id,'PPA_jobs',y1,dbp] = parcel_sum_df.loc[parcel_sum_df['pba50chcat'].str.contains('ppa', na=False), 'totemp_2015'].sum()
metrics_dict[runid,metric_id,'jobs_growth_PPA',y_diff,dbp] = metrics_dict[runid,metric_id,'PPA_jobs',y2,dbp]/metrics_dict[runid,metric_id,'PPA_jobs',y1,dbp] - 1
print('Number of Jobs in PPAs 2050 %s' % dbp,metrics_dict[runid,metric_id,'PPA_jobs',y2,dbp])
print('Number of Jobs in PPAs 2015 %s' % dbp,metrics_dict[runid,metric_id,'PPA_jobs',y1,dbp])
print('Job Growth in PPAs from 2015 to 2050 %s' % dbp,metrics_dict[runid,metric_id,'jobs_growth_PPA',y_diff,dbp])
'''
AGREMPN = Agriculture & Natural Resources
MWTEMPN = Manufacturing & Wholesale, Transportation & Utilities
RETEMPN = Retail
FPSEMPN = Financial & Leasing, Professional & Managerial Services
HEREMPN = Health & Educational Services
OTHEMPN = Construction, Government, Information
totemp = total employment
'''
# Jobs Growth MWTEMPN in PPAs (Manufacturing & Wholesale, Transportation & Utilities)
metrics_dict[runid,metric_id,'PPA_MWTEMPN_jobs',y2,dbp] = parcel_sum_df.loc[parcel_sum_df['pba50chcat'].str.contains('ppa', na=False), 'MWTEMPN_2050'].sum()
metrics_dict[runid,metric_id,'PPA_MWTEMPN_jobs',y1,dbp] = parcel_sum_df.loc[parcel_sum_df['pba50chcat'].str.contains('ppa', na=False), 'MWTEMPN_2015'].sum()
metrics_dict[runid,metric_id,'jobs_growth_MWTEMPN_PPA',y_diff,dbp] = metrics_dict[runid,metric_id,'PPA_MWTEMPN_jobs',y2,dbp]/metrics_dict[runid,metric_id,'PPA_MWTEMPN_jobs',y1,dbp] - 1
print('Number of MWTEMPN Jobs in PPAs 2050 %s' % dbp,metrics_dict[runid,metric_id,'PPA_MWTEMPN_jobs',y2,dbp])
print('Number of MWTEMPN Jobs in PPAs 2015 %s' % dbp,metrics_dict[runid,metric_id,'PPA_MWTEMPN_jobs',y1,dbp])
print('Job Growth MWTEMPN in PPAs from 2015 to 2050 %s' % dbp,metrics_dict[runid,metric_id,'jobs_growth_MWTEMPN_PPA',y_diff,dbp])
def calculate_travelmodel_metrics_change(list_tm_runid_blueprintonly, metrics_dict):
for tm_runid in list_tm_runid_blueprintonly:
year = tm_runid[:4]
if "Basic" in tm_runid:
dbp = "Basic"
elif "Plus" in tm_runid:
dbp = "Plus"
#elif "PlusCrossing_01" in tm_runid:
# dbp = "Plus_01"
#elif "PlusFixItFirst" in tm_runid:
# dbp = "PlusFixItFirst"
else:
dbp = "Unknown"
metric_id = "A1"
# Tolls
metrics_dict[tm_runid,metric_id,'tolls_per_HH_change_2015',year,dbp] = metrics_dict[tm_runid,metric_id,'tolls_per_HH',year,dbp] / metrics_dict[tm_2015_runid,metric_id,'tolls_per_HH',y1,'2015'] - 1
metrics_dict[tm_runid,metric_id,'tolls_per_HH_change_2050noproject',year,dbp] = metrics_dict[tm_runid,metric_id,'tolls_per_HH',year,dbp] / metrics_dict[tm_2050_DBP_NoProject_runid,metric_id,'tolls_per_HH',y2,"NoProject"] - 1
metrics_dict[tm_runid,metric_id,'tolls_per_LIHH_change_2015',year,dbp] = metrics_dict[tm_runid,metric_id,'tolls_per_LIHH',year,dbp] / metrics_dict[tm_2015_runid,metric_id,'tolls_per_LIHH',y1,'2015'] - 1
metrics_dict[tm_runid,metric_id,'tolls_per_LIHH_change_2050noproject',year,dbp] = metrics_dict[tm_runid,metric_id,'tolls_per_LIHH',year,dbp] / metrics_dict[tm_2050_DBP_NoProject_runid,metric_id,'tolls_per_LIHH',y2,"NoProject"] - 1
metrics_dict[tm_runid,metric_id,'tolls_per_inc1HH_change_2015',year,dbp] = metrics_dict[tm_runid,metric_id,'tolls_per_inc1HH',year,dbp] / metrics_dict[tm_2015_runid,metric_id,'tolls_per_inc1HH',y1,'2015'] - 1
metrics_dict[tm_runid,metric_id,'tolls_per_inc1HH_change_2050noproject',year,dbp] = metrics_dict[tm_runid,metric_id,'tolls_per_inc1HH',year,dbp] / metrics_dict[tm_2050_DBP_NoProject_runid,metric_id,'tolls_per_inc1HH',y2,"NoProject"] - 1
# Transit Fares
metrics_dict[tm_runid,metric_id,'fares_per_HH_change_2015',year,dbp] = metrics_dict[tm_runid,metric_id,'fares_per_HH',year,dbp] / metrics_dict[tm_2015_runid,metric_id,'fares_per_HH',y1,'2015'] - 1
metrics_dict[tm_runid,metric_id,'fares_per_HH_change_2050noproject',year,dbp] = metrics_dict[tm_runid,metric_id,'fares_per_HH',year,dbp] / metrics_dict[tm_2050_DBP_NoProject_runid,metric_id,'fares_per_HH',y2,"NoProject"] - 1
metrics_dict[tm_runid,metric_id,'fares_per_LIHH_change_2015',year,dbp] = metrics_dict[tm_runid,metric_id,'fares_per_LIHH',year,dbp] / metrics_dict[tm_2015_runid,metric_id,'fares_per_LIHH',y1,'2015'] - 1
metrics_dict[tm_runid,metric_id,'fares_per_LIHH_change_2050noproject',year,dbp] = metrics_dict[tm_runid,metric_id,'fares_per_LIHH',year,dbp] / metrics_dict[tm_2050_DBP_NoProject_runid,metric_id,'fares_per_LIHH',y2,"NoProject"] - 1
metrics_dict[tm_runid,metric_id,'fares_per_inc1HH_change_2015',year,dbp] = metrics_dict[tm_runid,metric_id,'fares_per_inc1HH',year,dbp] / metrics_dict[tm_2015_runid,metric_id,'fares_per_inc1HH',y1,'2015'] - 1
metrics_dict[tm_runid,metric_id,'fares_per_inc1HH_change_2050noproject',year,dbp] = metrics_dict[tm_runid,metric_id,'fares_per_inc1HH',year,dbp] / metrics_dict[tm_2050_DBP_NoProject_runid,metric_id,'fares_per_inc1HH',y2,"NoProject"] - 1
metric_id = "C2"
# Highway corridor travel times
for route in ['Antioch_SF','Vallejo_SF','SanJose_SF','Oakland_SanJose','Oakland_SF']:
metrics_dict[tm_runid,metric_id,'travel_time_AM_change_2015_%s' % route,year,dbp] = metrics_dict[tm_runid,metric_id,'travel_time_AM_%s' % route,year,dbp] / metrics_dict[tm_2015_runid,metric_id,'travel_time_AM_%s' % route,y1,'2015'] - 1
metrics_dict[tm_runid,metric_id,'travel_time_AM_change_2050noproject_%s' % route,year,dbp] = metrics_dict[tm_runid,metric_id,'travel_time_AM_%s' % route,year,dbp] / metrics_dict[tm_2050_DBP_NoProject_runid,metric_id,'travel_time_AM_%s' % route,y2,'NoProject'] - 1
# Transit Crowding by operator
for operator in ['AC Transit Local','AC Transit Transbay','SFMTA LRT','SFMTA Bus','VTA Bus Local','VTA LRT','BART','Caltrain','SamTrans Local','GGT Express','WETA']:
try:
metrics_dict[tm_runid,metric_id,'crowded_pct_personhrs_AM_change_2015_%s' % operator,year,dbp] = metrics_dict[tm_runid,metric_id,'crowded_pct_personhrs_AM_%s' % operator,year,dbp] / metrics_dict[tm_2015_runid,metric_id,'crowded_pct_personhrs_AM_%s' % operator,y1,'2015'] - 1
except:
metrics_dict[tm_runid,metric_id,'crowded_pct_personhrs_AM_change_2015_%s' % operator,year,dbp] = 0
try:
metrics_dict[tm_runid,metric_id,'crowded_pct_personhrs_AM_change_2050noproject_%s' % operator,year,dbp] = metrics_dict[tm_runid,metric_id,'crowded_pct_personhrs_AM_%s' % operator,year,dbp] / metrics_dict[tm_2050_DBP_NoProject_runid,metric_id,'crowded_pct_personhrs_AM_%s' % operator,y2,'NoProject'] - 1
except:
metrics_dict[tm_runid,metric_id,'crowded_pct_personhrs_AM_change_2050noproject_%s' % operator,year,dbp] = 0
# Transit travel times by operator
for operator in ['AC Transit Local','AC Transit Transbay','SFMTA LRT','SFMTA Bus','VTA Bus Local','VTA LRT','BART','Caltrain','SamTrans Local']:
metrics_dict[tm_runid,metric_id,'time_per_dist_AM_change_2015_%s' % operator,year,dbp] = metrics_dict[tm_runid,metric_id,'time_per_dist_AM_%s' % operator,year,dbp] / metrics_dict[tm_2015_runid,metric_id,'time_per_dist_AM_%s' % operator,y1,'2015'] - 1
metrics_dict[tm_runid,metric_id,'time_per_dist_AM_change_2050noproject_%s' % operator,year,dbp] = metrics_dict[tm_runid,metric_id,'time_per_dist_AM_%s' % operator,year,dbp] / metrics_dict[tm_2050_DBP_NoProject_runid,metric_id,'time_per_dist_AM_%s' % operator,y2,'NoProject'] - 1
# Transit travel times by mode
for mode_name in ['Local','Express','Ferry','Light Rail','Heavy Rail','Commuter Rail']:
metrics_dict[tm_runid,metric_id,'time_per_dist_AM_change_2015_%s' % mode_name,year,dbp] = metrics_dict[tm_runid,metric_id,'time_per_dist_AM_%s' % mode_name,year,dbp] / metrics_dict[tm_2015_runid,metric_id,'time_per_dist_AM_%s' % mode_name,y1,'2015'] - 1
metrics_dict[tm_runid,metric_id,'time_per_dist_AM_change_2050noproject_%s' % mode_name,year,dbp] = metrics_dict[tm_runid,metric_id,'time_per_dist_AM_%s' % mode_name,year,dbp] / metrics_dict[tm_2050_DBP_NoProject_runid,metric_id,'time_per_dist_AM_%s' % mode_name,y2,'NoProject'] - 1
def parcel_building_output_sum(urbansim_runid):
#################### creating parcel level df from buildings output
building_output_2050 = pd.read_csv((urbansim_runid+'_building_data_2050.csv'))
building_output_2015 = pd.read_csv((urbansim_runid+'_building_data_2015.csv'))
parcel_building_output_2050 = building_output_2050[['parcel_id','residential_units','deed_restricted_units']].groupby(['parcel_id']).sum()
parcel_building_output_2015 = building_output_2015[['parcel_id','residential_units','deed_restricted_units']].groupby(['parcel_id']).sum()
parcel_building_output_2050 = parcel_building_output_2050.add_suffix('_2050')
parcel_building_output_2015 = parcel_building_output_2015.add_suffix('_2015')
return pd.merge(left=parcel_building_output_2050, right=parcel_building_output_2015, left_on="parcel_id", right_on="parcel_id", how="left")
def calc_pba40urbansim():
urbansim_runid = 'C:/Users/{}/Box/Modeling and Surveys/Share Data/plan-bay-area-2040/RTP17 UrbanSim Output/r7224c/run7224'.format(os.getenv('USERNAME'))
runid = "plan-bay-area-2040/RTP17 UrbanSim Output/r7224c/run7224"
dbp = "PBA40"
metric_id = "Overall"
year2 = "2040"
year1 = "2010"
yeardiff = "2040"
parcel_geo_df = pd.read_csv(parcel_geography_file)
################## Creating parcel summary
hhq_list = ['hhq1','hhq2','hhq3','hhq4']
emp_list = ['AGREMPN','MWTEMPN','RETEMPN','FPSEMPN','HEREMPN','OTHEMPN']
parcel_output_2040_df = pd.read_csv((urbansim_runid+'_parcel_data_2040.csv'))
parcel_output_2040_df['tothh'] = parcel_output_2040_df[hhq_list].sum(axis=1, skipna=True)
parcel_output_2040_df['totemp'] = parcel_output_2040_df[emp_list].sum(axis=1, skipna=True)
parcel_output_2010_df = pd.read_csv((urbansim_runid+'_parcel_data_2010.csv'))
parcel_output_2010_df['tothh'] = parcel_output_2010_df[hhq_list].sum(axis=1, skipna=True)
parcel_output_2010_df['totemp'] = parcel_output_2010_df[emp_list].sum(axis=1, skipna=True)
# keeping essential columns / renaming columns
parcel_output_2040_df.drop(['x','y','zoned_du','zoned_du_underbuild', 'zoned_du_underbuild_nodev', 'first_building_type_id'], axis=1, inplace=True)
parcel_output_2010_df.drop(['x','y','zoned_du','zoned_du_underbuild', 'zoned_du_underbuild_nodev', 'first_building_type_id'], axis=1, inplace=True)
parcel_output_2040_df = parcel_output_2040_df.add_suffix('_2040')
parcel_output_2010_df = parcel_output_2010_df.add_suffix('_2010')
# creating parcel summaries with 2040 and 2010 outputs, and parcel geographic categories
parcel_sum_df =
|
pd.merge(left=parcel_output_2040_df, right=parcel_output_2010_df, left_on="parcel_id_2040", right_on="parcel_id_2010", how="left")
|
pandas.merge
|
import pandas as pd
import numpy as np
import datetime
class Durations(object):
@classmethod
def set(cls, X, extract_cols, dataset):
print("... ... Durations")
all_df = dataset["all_df"]
# duration from first action to clickout
dffac_df = all_df[["session_id", "timestamp", "timestamp_dt"]].groupby(
"session_id").first().reset_index()
dffac_df = dffac_df[["session_id", "timestamp_dt"]]
dffac_df.columns = ["session_id", "first_timestamp_dt"]
X = pd.merge(X, dffac_df, on="session_id", how="left")
X["session_duration"] = X.apply(lambda x: (x.timestamp_dt - x.first_timestamp_dt).seconds, axis=1)
extract_cols = extract_cols + ["session_duration"]
del dffac_df
# duration from last destination to clickout
dflsc_df = all_df[["session_id", "_session_id", "timestamp", "timestamp_dt"]].groupby(
"_session_id").first().reset_index()
dflsc_df = dflsc_df[dflsc_df._session_id.isin(X._session_id)]
dflsc_df = dflsc_df[["session_id", "timestamp_dt"]]
dflsc_df.columns = ["session_id", "step_first_timestamp_dt"]
X = pd.merge(X, dflsc_df, on="session_id", how="left")
X["step_duration"] = X.apply(lambda x: (x.timestamp_dt - x.step_first_timestamp_dt).seconds, axis=1)
extract_cols = extract_cols + ["step_duration"]
del dflsc_df
return (X, extract_cols)
class JustClickout(object):
@classmethod
def set(cls, X, extract_cols):
print("... ... JustClickout")
# append current filters
def get_cf_features(x):
sbp = 1 if "Sort by Price" in x.current_filters else 0
sbd = 1 if "Sort By Distance" in x.current_filters else 0
sbr = 1 if "Sort By Rating" in x.current_filters else 0
fod = 1 if "Focus on Distance" in x.current_filters else 0
fsr = 1 if "Focus on Rating" in x.current_filters else 0
bev = 1 if "Best Value" in x.current_filters else 0
return pd.Series({'cf_sbp': sbp
, 'cf_sbd': sbd
, 'cf_sbr': sbr
, 'cf_fod': fod
, 'cf_fsr': fsr
, 'cf_bev': bev})
X["current_filters"] = X["current_filters"].fillna("")
curf_df = X[["current_filters"]].apply(lambda x: get_cf_features(x), axis=1)
X = pd.concat([X, curf_df], axis=1)
extract_cols = extract_cols + list(curf_df.columns)
del curf_df
return (X, extract_cols)
class JustBeforeClickout(object):
@classmethod
def set(cls, X, dataset):
print("... ... JustBeforeClickout")
all_df = dataset["all_df"]
# last action_type
lasttype_df = all_df[["session_id", "action_type", "is_y"]].copy()
lasttype_df["lat"] = lasttype_df["action_type"].shift(1)
lasttype_df["last_session_id"] = lasttype_df["session_id"].shift(1)
lasttype_df = lasttype_df[lasttype_df.is_y == 1]
lasttype_df = lasttype_df[lasttype_df.session_id == lasttype_df.last_session_id]
lasttype_df = lasttype_df[["session_id", "lat"]]
onehot_lat = pd.get_dummies(lasttype_df, columns=['lat'])
X = pd.merge(X, onehot_lat, on="session_id", how="left")
lat_cols = list(onehot_lat.columns)
lat_cols.remove("session_id")
for lat_col in lat_cols:
X[lat_col] = X[lat_col].fillna(0)
del lasttype_df
del onehot_lat
return X
class Record2Impression(object):
@classmethod
def expand(cls, X, extract_cols, dataset):
print("... ... Record2Impression")
# create expanded
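# Each clickout row carries pipe-delimited "impressions" and "prices"; the block below explodes them into
# one row per displayed item (keeping its 0-based list position) so that features can be built per impression.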
X = X.reset_index()
X["gid"] = X.index
X["n_imps"] = X[["impressions"]].apply(lambda x: len(str(x.impressions).split("|")), axis=1)
X["price_mean"] = X[["prices"]].apply(lambda x: np.mean(np.array(str(x.prices).split("|")).astype(int)), axis=1)
X["price_std"] = X[["prices"]].apply(lambda x: np.std(np.array(str(x.prices).split("|")).astype(int)), axis=1)
X["impression"] = X[["impressions"]].apply(lambda x: str(x.impressions).split("|"), axis=1)
X["price"] = X[["prices"]].apply(lambda x: str(x.prices).split("|"), axis=1)
X_impression = X[["gid", "impression"]].set_index('gid').impression.apply(pd.Series).stack().reset_index(
level=0).rename(columns={0: 'impression'})
X_price = X[["gid", "price"]].set_index('gid').price.apply(pd.Series).stack().reset_index(level=0).rename(
columns={0: 'price'})
X_position = X[["gid", "impression"]].set_index('gid').impression.apply(
lambda x: pd.Series(range(len(x)))).stack().reset_index(level=0).rename(columns={0: 'position'})
X_expanded = pd.concat([X_impression, X_price], axis=1)
X_expanded = pd.concat([X_expanded, X_position], axis=1)
X_expanded.columns = ["gid", "impression", "gid2", "price", "gid3", "position"]
X_expanded = X_expanded[["gid", "impression", "price", "position"]]
# join expanded
X = pd.merge(X_expanded, X[["gid", "n_imps", "price_mean", "price_std"] + extract_cols], on="gid", how="left")
# to normalize position and price
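# pos_rate is the item's relative position within the impression list, and price_norm is the item's price
# z-scored against the mean/std of prices within its own impression list.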
X["pos_rate"] = X["position"] / X["n_imps"]
X["pos"] = X["position"] + 1
X["price_norm"] = (X["price"].astype(float) - X["price_mean"].astype(float)) / X["price_std"].astype(float)
# join price_norm rank
pnorm_rank_df = X[["session_id", "price_norm"]].copy()
pnorm_rank_df = pnorm_rank_df[["session_id", "price_norm"]].groupby("session_id").rank(ascending=False)
pnorm_rank_df.columns = ["price_norm_rank"]
X = pd.concat([X, pnorm_rank_df], axis=1)
del pnorm_rank_df
# calc discount rate
X["price"] = X["price"].astype(float)
prices_df = X[["impression", "price"]].groupby("impression").agg({'price': np.mean}).reset_index()
prices_df.columns = ["impression", "item_price_mean"]
X = pd.merge(X, prices_df, on="impression", how="left")
X["discount_rate"] = X["price"] / X["item_price_mean"]
del prices_df
# append some important props and other props with over 0.2 coverage
sum_item_props_df = dataset["sum_item_props_df"]
item_props = dataset["item_props"]
prop_cols = ["pGood Rating"
, "pVery Good Rating"
, "pExcellent Rating"
, "pSatisfactory Rating"
, "p1 Star"
, "p2 Star"
, "p3 Star"
, "p4 Star"
, "p5 Star"
, "pBusiness Centre"
, "pBusiness Hotel"
, "pConference Rooms"]
c02over_prop_cols = sum_item_props_df[sum_item_props_df.coverage >= 0.2]["prop"].tolist()
prop_cols = prop_cols + c02over_prop_cols
prop_cols = list(set(prop_cols))
X = pd.merge(X, item_props[["item_id"] + prop_cols], left_on="impression", right_on="item_id", how="left")
X[prop_cols] = X[prop_cols].fillna(0)
return (X, extract_cols)
class DecisionMakingProcess(object):
@classmethod
def detect(cls, X, dataset):
print("... ... Decision Making Process")
print("... ... ... Attention and Perceptual Encoding")
print("... ... ... Information Acquisition and Evaluation")
all_df = dataset["all_df"]
# join clickout position stats
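# co_pos is the 1-based position of each previously clicked item within its own impression list
# (defaulting to 1 when the reference is not listed); per-session min/mean/max are joined back
# and compared against the current impression's position.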
copos_df = all_df[all_df.action_type == "clickout item"][
["session_id", "reference", "impressions", "is_y"]].copy()
copos_df = copos_df[copos_df.is_y == 0]
copos_df["impression"] = copos_df[["impressions"]].apply(lambda x: str(x.impressions).split("|"), axis=1)
copos_df["co_pos"] = copos_df[["impression", "reference"]].apply(
lambda x: x.impression.index(x.reference) + 1 if x.reference in x.impression else 1, axis=1)
copos_df_stats = copos_df[["session_id", "co_pos"]].groupby("session_id").agg(
{'co_pos': [np.min, np.max, np.mean]}).reset_index()
copos_df_stats.columns = ["session_id", "co_pos_min", "co_pos_max", "co_pos_mean"]
X = pd.merge(X, copos_df_stats, on="session_id", how="left")
X["co_pos_min"] = X["co_pos_min"].fillna(1)
X["co_pos_mean"] = X["co_pos_mean"].fillna(1)
X["co_pos_max"] = X["co_pos_max"].fillna(1)
X["co_pos_min_diff"] = X["pos"] - X["co_pos_min"]
X["co_pos_mean_diff"] = X["pos"] - X["co_pos_mean"]
X["clickouted_pos_max_diff"] = X["co_pos_max"] - X["pos"]
del copos_df
del copos_df_stats
# is_last and is_last_elapsed_time
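# is_last flags the impression equal to the item referenced by the action immediately preceding the clickout,
# and elapsed_time_between_is_last is the raw timestamp gap between that action and the clickout,
# broadcast back to every row of the session.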
action_types = ["interaction item rating"
, "interaction item info"
, "interaction item image"
, "interaction item deals"
, "search for item"
, "clickout item"]
lastref_df = all_df[["session_id", "action_type", "reference", "timestamp", "is_y"]].copy()
lastref_df["is_target"] = 0
lastref_df.loc[lastref_df.is_y == 1, "is_target"] = 1
lastref_df = lastref_df[lastref_df.action_type.isin(action_types)]
lastref_df["last_session_id"] = lastref_df["session_id"].shift(1)
lastref_df["last_reference"] = lastref_df["reference"].shift(1)
lastref_df["last_timestamp"] = lastref_df["timestamp"].shift(1)
lastref_df = lastref_df[lastref_df.session_id == lastref_df.last_session_id]
lastref_df = lastref_df[lastref_df.is_target == 1][["session_id", "last_reference", "last_timestamp"]]
X = pd.merge(X, lastref_df, on="session_id", how="left")
X[["last_reference"]] = X[["last_reference"]].fillna("-1")
X[["last_timestamp"]] = X[["last_timestamp"]].fillna(-1)
X["is_last"] = X[["impression", "last_reference"]].apply(lambda x: 1 if x.impression == x.last_reference else 0,
axis=1)
X["elapsed_time_between_is_last"] = X[["impression", "last_reference", "timestamp", "last_timestamp"]].apply(
lambda x: int(x.timestamp) - int(x.last_timestamp) if x.impression == x.last_reference else np.nan, axis=1)
lastdur_df = X[["session_id", "elapsed_time_between_is_last"]].copy()
lastdur_df = lastdur_df.dropna(axis=0, how='any')
X.drop("elapsed_time_between_is_last", axis=1, inplace=True)
X = pd.merge(X, lastdur_df, on="session_id", how="left")
del lastref_df
del lastdur_df
# join is_last_last
lastref_df = all_df[["session_id", "action_type", "reference", "is_y"]].copy()
lastref_df["last_last_session_id"] = lastref_df["session_id"].shift(2)
lastref_df["last_last_reference"] = lastref_df["reference"].shift(2)
lastref_df = lastref_df[lastref_df.is_y == 1]
lastref_df = lastref_df[lastref_df.session_id == lastref_df.last_last_session_id]
lastref_df = lastref_df[["session_id", "last_last_reference"]]
lastref_df = lastref_df[~lastref_df.duplicated()]
X = pd.merge(X, lastref_df, on="session_id", how="left")
X[["last_last_reference"]] = X[["last_last_reference"]].fillna("-1")
X["is_last_last"] = X[["impression", "last_last_reference"]].apply(
lambda x: 1 if x.impression == x.last_last_reference else 0, axis=1)
del lastref_df
# elapsed-next mean by item (note: this is effectively future information)
action_types = ["interaction item rating"
, "interaction item info"
, "interaction item image"
, "interaction item deals"
, "search for item"
, "clickout item"]
isnext_df = all_df[["session_id", "action_type", "reference", "timestamp", "is_y"]].copy()
isnext_df["next_session_id"] = isnext_df["session_id"].shift(-1)
isnext_df["next_timestamp"] = isnext_df["timestamp"].shift(-1)
isnext_df = isnext_df[isnext_df.session_id == isnext_df.next_session_id]
isnext_df["elapsed_next"] = isnext_df["next_timestamp"] - isnext_df["timestamp"]
isnext_df = isnext_df[isnext_df.action_type.isin(action_types)]
isnext_df = isnext_df[isnext_df.is_y == 0]
isnext_gp_df = isnext_df[["reference", "elapsed_next"]].groupby("reference").agg(
{"elapsed_next": np.mean}).reset_index()
isnext_gp_df.columns = ["impression", "next_elapsed_time"]
X = pd.merge(X, isnext_gp_df, on="impression", how="left")
del isnext_gp_df
isnext_gp_df = isnext_df[isnext_df.action_type == "clickout item"][["reference", "elapsed_next"]].groupby(
"reference").agg({"elapsed_next": np.mean}).reset_index()
isnext_gp_df.columns = ["impression", "next_elapsed_time_byco"]
X = pd.merge(X, isnext_gp_df, on="impression", how="left")
del isnext_df
del isnext_gp_df
# clickouted item during session
couted_df = all_df[["action_type", "session_id", "reference", "is_y"]].copy()
couted_df = couted_df[couted_df.action_type == "clickout item"]
couted_df = couted_df[couted_df.is_y == 0] # to prevent leakage
couted_df = couted_df[["session_id", "reference"]]
couted_df.columns = ["session_id", "impression"]
couted_df = couted_df[~couted_df.duplicated()]
couted_df["clickouted"] = 1
X = pd.merge(X, couted_df, on=["session_id", "impression"], how="left")
X["clickouted"] = X["clickouted"].fillna(0)
X["clickouted"] = X["clickouted"].astype(int)
# diff between clickouted price mean
co_price_df = all_df[all_df.action_type == "clickout item"][
["session_id", "reference", "prices", "impressions", "is_y"]].copy()
co_price_df = co_price_df[co_price_df.is_y == 0] # to prevent leakage
def get_price(reference, impressions, prices):
imps = str(impressions).split("|")
prs = str(prices).split("|")
if reference in imps:
return prs[imps.index(reference)]
else:
return 0
co_price_df["price"] = co_price_df.apply(lambda x: get_price(x.reference, x.impressions, x.prices), axis=1)
co_price_df["price"] = co_price_df["price"].astype(float)
co_price_df = co_price_df.groupby("session_id").agg({'price': np.mean}).reset_index()
co_price_df.columns = ["session_id", "couted_price_mean"]
X = pd.merge(X, co_price_df, on="session_id", how="left")
X["couted_price_mean"] = X["couted_price_mean"].fillna(-1)
X["clickouted_price_diff"] = X["price"].astype(float) / X["couted_price_mean"]
X.loc[X.clickouted_price_diff < 0, "clickouted_price_diff"] = 0
del co_price_df
# flag the two items displayed above and the five below the clicked / last item
u_cols = []
def set_undert_the_clickouted_and_islast(X, target_col, nu=5):
u_col = target_col + "_u"
X[u_col] = X["session_id"]
X.loc[X[target_col] != 1, u_col] = ""
for u in [_ for _ in range(-2, nu + 1, 1) if _ != 0]:
new_col = u_col + str(u).replace("-", "p")
X[new_col] = X[u_col].shift(u)
X[new_col] = X[new_col].fillna("")
X.loc[X[new_col] == X["session_id"], new_col] = "1"
X.loc[X[new_col] != "1", new_col] = 0
X.loc[X[new_col] == "1", new_col] = 1
u_cols.append(new_col)
X.drop(u_col, axis=1, inplace=True)
set_undert_the_clickouted_and_islast(X, "clickouted", 5)
set_undert_the_clickouted_and_islast(X, "is_last", 5)
# sum of the neighbouring displayed-item flags
u_coted_cols = [col for col in u_cols if "clickouted" in col]
u_islast_col = [col for col in u_cols if "is_last" in col]
X["clickouted_sum"] = X[u_coted_cols].sum(axis=1)
X["is_last_sum"] = X[u_islast_col].sum(axis=1)
# step_elapsed_mean, which represents the velocity of the user's activity
selapsed_df = all_df[["session_id", "step", "timestamp", "timestamp_dt", "action_type", "reference"]].copy()
selapsed_df["pre_timestamp"] = selapsed_df["timestamp"].shift(1)
selapsed_df["pre_timestamp_dt"] = selapsed_df["timestamp_dt"].shift(1)
selapsed_df["pre_session_id"] = selapsed_df["session_id"].shift(1)
selapsed_df = selapsed_df[selapsed_df.session_id == selapsed_df.pre_session_id]
selapsed_df["elapsed"] = selapsed_df["timestamp"] - selapsed_df["pre_timestamp"]
selapsed_df = selapsed_df[["session_id", "elapsed"]]
selapsed_df = selapsed_df[selapsed_df.elapsed.notna()]
selapsed_df = selapsed_df[selapsed_df.elapsed > 0]
selapsed_df = selapsed_df.groupby("session_id").agg({"elapsed": np.mean}).reset_index()
selapsed_df.columns = ["session_id", "step_elapsed_mean"]
X = pd.merge(X, selapsed_df, on="session_id", how="left")
del selapsed_df
# last duration over all action types (possibly the same as is_last_elapsed_time)
lduration_all_df = all_df[["session_id", "action_type", "timestamp", "is_y"]].copy()
lduration_all_df["pre_timestamp"] = lduration_all_df["timestamp"].shift(1)
lduration_all_df["pre_session_id"] = lduration_all_df["session_id"].shift(1)
lduration_all_df = lduration_all_df[lduration_all_df.session_id == lduration_all_df.pre_session_id]
lduration_all_df["elapsed_time"] = lduration_all_df["timestamp"] - lduration_all_df["pre_timestamp"]
lduration_all_df = lduration_all_df[lduration_all_df.is_y == 1]
lduration_all_df = lduration_all_df[["session_id", "elapsed_time"]]
X = pd.merge(X, lduration_all_df, on="session_id", how="left")
del lduration_all_df
# first action_type
firsta_df = all_df[["session_id", "_session_id", "action_type", "is_y"]].copy()
firsta_df = firsta_df[firsta_df.is_y == 0] # to prevent leakage
firsta_df = firsta_df.groupby("_session_id").first().reset_index()
firsta_df = firsta_df.groupby("session_id").last().reset_index()
firsta_df.loc[firsta_df["action_type"] == "search for destination", "action_type"] = "fa_sfd"
firsta_df.loc[firsta_df["action_type"] == "interaction item image", "action_type"] = "fa_iii"
firsta_df.loc[firsta_df["action_type"] == "clickout item", "action_type"] = "fa_coi"
firsta_df.loc[firsta_df["action_type"] == "search for item", "action_type"] = "fa_sfi"
firsta_df.loc[firsta_df["action_type"] == "search for poi", "action_type"] = "fa_sfp"
firsta_df.loc[firsta_df["action_type"] == "change of sort order", "action_type"] = "fa_coso"
firsta_df.loc[firsta_df["action_type"] == "filter selection", "action_type"] = "fa_fis"
firsta_df.loc[firsta_df["action_type"] == "interaction item info", "action_type"] = "fa_iiinfo"
firsta_df.loc[firsta_df["action_type"] == "interaction item rating", "action_type"] = "fa_iirat"
firsta_df.loc[firsta_df["action_type"] == "interaction item deals", "action_type"] = "fa_iidea"
firsta_df = firsta_df[["session_id", "action_type"]]
firsta_df.columns = ["session_id", "at"]
onehot_firsta = pd.get_dummies(firsta_df, columns=['at'])
firsta_cols = list(onehot_firsta.columns)
firsta_cols.remove("session_id")
X = pd.merge(X, onehot_firsta, on="session_id", how="left")
for firsta_col in firsta_cols:
X[firsta_col] = X[firsta_col].fillna(0)
del firsta_df
del onehot_firsta
# price norm by item rating prop
X["r6"] = 0
X["r7"] = 0
X["r8"] = 0
X["r9"] = 0
X.loc[X["pSatisfactory Rating"] == 1, "r6"] = 6
X.loc[X["pGood Rating"] == 1, "r7"] = 7
X.loc[X["pVery Good Rating"] == 1, "r8"] = 8
X.loc[X["pExcellent Rating"] == 1, "r9"] = 9
X["rating"] = X[["r6", "r7", "r8", "r9"]].apply(
lambda x: np.mean(np.trim_zeros(np.array([x.r6, x.r7, x.r8, x.r9]))), axis=1)
X["rating"] = X["rating"].fillna(-1)
pns_df = X[["session_id", "rating", "price"]].groupby(["session_id", "rating"]).agg(
{'price': [np.mean, np.std]}).reset_index()
pns_df.columns = ["session_id", "rating", "r_price_mean", "r_price_std"]
pns_df["r_price_std"] = pns_df["r_price_std"].fillna(1)
X = pd.merge(X, pns_df, on=["session_id", "rating"], how="left")
X["r_price_norm"] = (X["price"].astype(float) - X["r_price_mean"].astype(float)) / X["r_price_std"].astype(
float)
del pns_df
# price norm by star
X["star"] = -1
X.loc[X["p1 Star"] == 1, "star"] = 1
X.loc[X["p2 Star"] == 1, "star"] = 2
X.loc[X["p3 Star"] == 1, "star"] = 3
X.loc[X["p4 Star"] == 1, "star"] = 4
X.loc[X["p5 Star"] == 1, "star"] = 5
pns_df = X[["session_id", "star", "price"]].groupby(["session_id", "star"]).agg(
{'price': [np.mean, np.std]}).reset_index()
pns_df.columns = ["session_id", "star", "s_price_mean", "s_price_std"]
pns_df["s_price_std"] = pns_df["s_price_std"].fillna(1)
X = pd.merge(X, pns_df, on=["session_id", "star"], how="left")
X["s_price_norm"] = (X["price"].astype(float) - X["s_price_mean"].astype(float)) / X["s_price_std"].astype(
float)
del pns_df
return X
class ByItem(object):
@classmethod
def set(cls, X, dataset):
print("... ... ByItem")
all_df = dataset["all_df"]
# imps score
impscore_df = dataset["impscore_df"]
item_props = dataset["item_props"]
X = pd.merge(X, impscore_df, on="impression", how="left")
X["impsocre"] = X["impsocre"].fillna(0)
# # append some important props and other props with over 0.2 coverage
# sum_item_props_df = dataset["sum_item_props_df"]
# prop_cols = ["pGood Rating"
# , "pVery Good Rating"
# , "pExcellent Rating"
# , "pSatisfactory Rating"
# , "p1 Star"
# , "p2 Star"
# , "p3 Star"
# , "p4 Star"
# , "p5 Star"
# , "pBusiness Centre"
# , "pBusiness Hotel"
# , "pConference Rooms"]
# c02over_prop_cols = sum_item_props_df[sum_item_props_df.coverage >= 0.2]["prop"].tolist()
# prop_cols = prop_cols + c02over_prop_cols
# prop_cols = list(set(prop_cols))
# X = pd.merge(X, item_props[["item_id"] + prop_cols], left_on="impression", right_on="item_id", how="left")
# X[prop_cols] = X[prop_cols].fillna(0)
# append item svd n_components=10
item_props_svd = dataset["item_props_svd"]
prop_svd_cols = list(item_props_svd.columns)
prop_svd_cols.remove("item_id")
X = pd.merge(X, item_props_svd, left_on="impression", right_on="item_id", how="left")
X[prop_svd_cols] = X[prop_svd_cols].fillna(0)
# # price norm by item rating prop
# X["r6"] = 0
# X["r7"] = 0
# X["r8"] = 0
# X["r9"] = 0
# X.loc[X["pSatisfactory Rating"] == 1, "r6"] = 6
# X.loc[X["pGood Rating"] == 1, "r7"] = 7
# X.loc[X["pVery Good Rating"] == 1, "r8"] = 8
# X.loc[X["pExcellent Rating"] == 1, "r9"] = 9
# X["rating"] = X[["r6", "r7", "r8", "r9"]].apply(
# lambda x: np.mean(np.trim_zeros(np.array([x.r6, x.r7, x.r8, x.r9]))), axis=1)
# X["rating"] = X["rating"].fillna(-1)
# pns_df = X[["session_id", "rating", "price"]].groupby(["session_id", "rating"]).agg(
# {'price': [np.mean, np.std]}).reset_index()
# pns_df.columns = ["session_id", "rating", "r_price_mean", "r_price_std"]
# pns_df["r_price_std"] = pns_df["r_price_std"].fillna(1)
# X = pd.merge(X, pns_df, on=["session_id", "rating"], how="left")
# X["r_price_norm"] = (X["price"].astype(float) - X["r_price_mean"].astype(float)) / X["r_price_std"].astype(
# float)
# del pns_df
#
# # price norm by star
# X["star"] = -1
# X.loc[X["p1 Star"] == 1, "star"] = 1
# X.loc[X["p2 Star"] == 1, "star"] = 2
# X.loc[X["p3 Star"] == 1, "star"] = 3
# X.loc[X["p4 Star"] == 1, "star"] = 4
# X.loc[X["p5 Star"] == 1, "star"] = 5
# pns_df = X[["session_id", "star", "price"]].groupby(["session_id", "star"]).agg(
# {'price': [np.mean, np.std]}).reset_index()
# pns_df.columns = ["session_id", "star", "s_price_mean", "s_price_std"]
# pns_df["s_price_std"] = pns_df["s_price_std"].fillna(1)
# X = pd.merge(X, pns_df, on=["session_id", "star"], how="left")
# X["s_price_norm"] = (X["price"].astype(float) - X["s_price_mean"].astype(float)) / X["s_price_std"].astype(
# float)
# del pns_df
# item ctr
ctrbyitem_df = all_df[all_df.action_type == "clickout item"][["session_id", "reference", "is_y"]].copy()
ctrbyitem_df = ctrbyitem_df[ctrbyitem_df.is_y == 0]
ref_df = ctrbyitem_df[["reference"]].groupby(["reference"]).size().reset_index()
ref_df.columns = ["impression", "rcnt"]
ref_df["ctrbyitem"] = ref_df["rcnt"].astype(float) / ref_df.shape[0]
ref_df = ref_df[["impression", "ctrbyitem"]]
X = pd.merge(X, ref_df, on="impression", how="left")
X["ctrbyitem"] = X["ctrbyitem"].fillna(0)
del ctrbyitem_df
del ref_df
# item ctr by city
cr_tmp_df = all_df[all_df.action_type == "clickout item"].copy()
cr_tmp_df = cr_tmp_df[cr_tmp_df.is_y == 0] # to prevent leakage
city_df = cr_tmp_df[["city"]].groupby(["city"]).size().reset_index()
city_df.columns = ["city", "ccnt"]
cityref_df = cr_tmp_df[["city", "reference"]].groupby(["city", "reference"]).size().reset_index()
cityref_df.columns = ["city", "impression", "rcnt"]
cityref_df = pd.merge(cityref_df, city_df, on="city", how="left")
cityref_df["ctrbycity"] = cityref_df["rcnt"].astype(float) / cityref_df["ccnt"].astype(float)
cityref_df = cityref_df[["city", "impression", "ctrbycity"]]
X = pd.merge(X, cityref_df, on=["city", "impression"], how="left")
X["ctrbycity"] = X["ctrbycity"].fillna(0)
del cr_tmp_df
del city_df
del cityref_df
# item ctr by city rank
ctrbycity_rank_df = X[["session_id", "ctrbycity"]].copy()
ctrbycity_rank_df = ctrbycity_rank_df[["session_id", "ctrbycity"]].groupby("session_id").rank(ascending=False)
ctrbycity_rank_df.columns = ["ctrbycity_rank"]
X = pd.concat([X, ctrbycity_rank_df], axis=1)
del ctrbycity_rank_df
# bayes likelihood by item
bayes_likelihood = dataset["bayes_likelihood"]
X["rlr"] = X["impression"].astype(str) + X["last_reference"].astype(str)
def set_bayes_li(rlr):
if rlr in bayes_likelihood:
return bayes_likelihood[rlr]
return 0.0
X["bayes_li"] = X[["rlr"]].apply(lambda x: set_bayes_li(x.rlr), axis=1)
# clickouted item 2 item during session
v2v_counter = dataset["v2v_counter"]
def extract_sv2v_counter(iids):
v = {}
for iid in iids:
if iid in v2v_counter:
for s in v2v_counter[iid]:
if not s in v:
v[s] = v2v_counter[iid][s]
return v
couted_df = all_df[["action_type", "session_id", "reference", "is_y"]].copy()
couted_df = couted_df[couted_df.action_type == "clickout item"]
couted_df = couted_df[couted_df.is_y == 0] # to prevent leakage
couted_df = couted_df[["session_id", "reference"]]
couted_df.columns = ["session_id", "impression"]
couted_df = couted_df[~couted_df.duplicated()]
couted_df["clickouted"] = 1
sv2v_df = couted_df.groupby("session_id").apply(
lambda x: extract_sv2v_counter(list(x.impression))).reset_index()
sv2v_df.columns = ["session_id", "sv2v"]
X = pd.merge(X, sv2v_df, on="session_id", how="left")
X["sv2v"] = X["sv2v"].fillna("{}")
X["sv2v_score"] = X[["impression", "sv2v"]].apply(
lambda x: x.sv2v[x.impression] if x.impression in x.sv2v else np.nan, axis=1)
X.drop("sv2v", axis=1, inplace=True)
sv2vs_stats = X.groupby("session_id").agg({"sv2v_score": [np.mean, np.std]}).reset_index()
sv2vs_stats.columns = ["session_id", "sv2v_score_mean", "sv2v_score_std"]
X = pd.merge(X, sv2vs_stats, on="session_id", how="left")
X["sv2v_score_norm"] = X["sv2v_score"] - X["sv2v_score_mean"] / X["sv2v_score_std"]
del couted_df
del sv2v_df
del sv2vs_stats
# whether certain action_types were already performed on each item during the session
couted_df = all_df[["action_type", "session_id", "reference"]].copy()
action_types = ["interaction item rating"
, "interaction item info"
, "interaction item image"
, "interaction item deals"
, "search for item"]
ated_cols = ["iired"
, "iifed"
, "iiied"
, "iided"
, "sfied"]
for i, action_type in enumerate(action_types):
at_df = couted_df[couted_df.action_type == action_type].copy()
at_df = at_df[["session_id", "reference"]]
at_df.columns = ["session_id", "impression"]
at_df = at_df[~at_df.duplicated()]
at_df[ated_cols[i]] = 1
X = pd.merge(X, at_df, on=["session_id", "impression"], how="left")
X[ated_cols[i]] = X[ated_cols[i]].fillna(0)
X[ated_cols[i]] = X[ated_cols[i]].astype(int)
del at_df
del couted_df
# dropout rate by each item during each session
dropout_df = all_df[["session_id", "action_type", "reference", "is_y"]].copy()
dropout_df = dropout_df[dropout_df.action_type.isin(["interaction item image", "clickout item"])]
dropout_df = dropout_df[dropout_df.is_y == 0] # to prevent leakage
dropout_df.loc[dropout_df["action_type"] == "interaction item image", "iii"] = 1
dropout_df["iii"] = dropout_df["iii"].fillna(0)
dropout_df.loc[dropout_df["action_type"] == "clickout item", "cko"] = 1
dropout_df["cko"] = dropout_df["cko"].fillna(0)
def is_dropout(iii, cko):
if iii != 0 and cko != 0:
return 0
elif iii != 0 and cko == 0:
return 1
else:
return -1
dropout_df = dropout_df[["session_id", "reference", "iii", "cko"]].groupby(["session_id", "reference"]).apply(
lambda x: is_dropout(np.sum(x.iii), np.sum(x.cko))).reset_index()
dropout_df.columns = ["session_id", "reference", "dropout"]
dropout_df = dropout_df[dropout_df.dropout != -1]
dropout_df = dropout_df[["reference", "dropout"]].groupby("reference").apply(
lambda x: np.sum(x.dropout).astype(float) / len(x.dropout)).reset_index()
dropout_df.columns = ["impression", "dropout_rate"]
X = pd.merge(X, dropout_df, on="impression", how="left")
del dropout_df
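# Illustrative sketch (not part of the original pipeline; the helper name is
# hypothetical and it is never called): an item counts as a session-level "dropout"
# when its images were interacted with but it was never clicked out.
def _sketch_is_dropout():
    assert is_dropout(2, 1) == 0   # viewed and clicked -> kept
    assert is_dropout(3, 0) == 1   # viewed but never clicked -> dropout
    assert is_dropout(0, 1) == -1  # never viewed -> excluded from the rate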
# dropout rate by each item during all sessions
action_types = ["interaction item rating"
, "interaction item info"
, "interaction item image"
, "interaction item deals"
, "search for item"]
dropout_df = all_df[["session_id", "action_type", "reference", "is_y"]].copy()
dropout_df = dropout_df[dropout_df.action_type.isin(action_types + ["clickout item"])]
dropout_df = dropout_df[dropout_df.is_y == 0] # to prevent leakage
dropout_df.loc[dropout_df.action_type.isin(action_types), "iii"] = 1
dropout_df["iii"] = dropout_df["iii"].fillna(0)
dropout_df.loc[dropout_df["action_type"] == "clickout item", "cko"] = 1
dropout_df["cko"] = dropout_df["cko"].fillna(0)
dropout_df = dropout_df[["session_id", "reference", "iii", "cko"]].groupby(["session_id", "reference"]).apply(
lambda x: is_dropout(np.sum(x.iii), np.sum(x.cko))).reset_index()
dropout_df.columns = ["session_id", "reference", "dropout"]
dropout_df = dropout_df[dropout_df.dropout != -1]
dropout_df = dropout_df[["reference", "dropout"]].groupby("reference").apply(
lambda x: np.sum(x.dropout).astype(float) / len(x.dropout)).reset_index()
dropout_df.columns = ["impression", "all_dropout_rate"]
X = pd.merge(X, dropout_df, on="impression", how="left")
del dropout_df
# action_type rate by each item
action_types = ["interaction item rating"
, "interaction item info"
, "interaction item image"
, "interaction item deals"
, "search for item"
, "clickout item"]
atstats_df = all_df[["session_id", "action_type", "reference", "is_y"]].copy()
atstats_df = atstats_df[atstats_df.action_type.isin(action_types)]
atstats_df = atstats_df[atstats_df.is_y == 0] # to prevent leakage
atstats_df = atstats_df[["reference", "action_type"]].groupby(["reference", "action_type"]).size().reset_index()
atstats_df.columns = ["reference", "action_type", "at_cnt"]
atstats_refcnt_df = atstats_df[["reference", "at_cnt"]].groupby("reference").sum().reset_index()
atstats_refcnt_df.columns = ["reference", "rf_cnt"]
atstats_df = pd.merge(atstats_df, atstats_refcnt_df, on="reference", how="left")
atstats_df["at_rate"] = atstats_df["at_cnt"].astype(float) / atstats_df["rf_cnt"]
atstats_df = atstats_df.pivot(index='reference', columns='action_type', values='at_rate').reset_index()
at_rate_cols = ["co_at_rate", "iid_at_rate", "iii_at_rate", "iif_at_rate", "iir_at_rate", "sfi_at_rate"]
atstats_df.columns = ["impression"] + at_rate_cols
atstats_df = atstats_df.fillna(0)
X = pd.merge(X, atstats_df, on="impression", how="left")
for at_rate_col in at_rate_cols:
X[at_rate_col] = X[at_rate_col].fillna(0)
del atstats_df
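# Illustrative sketch (not part of the original pipeline; the helper name is
# hypothetical and it is never called): DataFrame.pivot reshapes the long
# (reference, action_type, at_rate) table into one wide row per item.
def _sketch_at_rate_pivot():
    import pandas as pd
    long_df = pd.DataFrame({
        "reference": ["i1", "i1", "i2"],
        "action_type": ["clickout item", "search for item", "clickout item"],
        "at_rate": [0.75, 0.25, 1.0],
    })
    # i2 gets NaN for "search for item", which is later filled with 0
    return long_df.pivot(index="reference", columns="action_type", values="at_rate").reset_index()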
# action_type rate in-session rank by each item
at_rate_cols = ["co_at_rate"
, "iid_at_rate"
, "iii_at_rate"
, "iif_at_rate"
, "iir_at_rate"
, "sfi_at_rate"]
at_rank_cols = []
for at_rate_col in at_rate_cols:
at_rank_col = at_rate_col + "_rank"
at_rank_cols.append(at_rank_col)
at_rank_df = X[["session_id", at_rate_col]].copy()
at_rank_df = at_rank_df[["session_id", at_rate_col]].groupby("session_id").rank(ascending=False)
at_rank_df.columns = [at_rank_col]
X = pd.concat([X, at_rank_df], axis=1)
del at_rank_df
# reference_elapsed_mean and by action_type
action_types = ["interaction item rating"
, "interaction item info"
, "interaction item image"
, "interaction item deals"
, "search for item"
, "clickout item"]
relapsed_df = all_df[
["session_id", "step", "timestamp", "timestamp_dt", "action_type", "reference", "is_y"]].copy()
relapsed_df["pre_timestamp"] = relapsed_df["timestamp"].shift(1)
relapsed_df["pre_timestamp_dt"] = relapsed_df["timestamp_dt"].shift(1)
relapsed_df["pre_session_id"] = relapsed_df["session_id"].shift(1)
relapsed_df = relapsed_df[relapsed_df.session_id == relapsed_df.pre_session_id]
relapsed_df["elapsed"] = relapsed_df["timestamp"] - relapsed_df["pre_timestamp"]
relapsed_df = relapsed_df[relapsed_df.action_type.isin(action_types)]
relapsed_df = relapsed_df[relapsed_df.is_y == 0] # to prevent leakage
relapsed_df = relapsed_df[relapsed_df.elapsed.notna()]
relapsed_df = relapsed_df[relapsed_df.elapsed > 0]
r_relapsed_df = relapsed_df[["reference", "elapsed"]].groupby("reference").agg(
{"elapsed": np.mean}).reset_index()
r_relapsed_rate_cols = ["ref_elapsed_mean"]
r_relapsed_df.columns = ["impression"] + r_relapsed_rate_cols
a_relapsed_df = relapsed_df[["reference", "action_type", "elapsed"]].groupby(["reference", "action_type"]).agg(
{"elapsed": np.mean}).reset_index()
a_relapsed_df.columns = ["reference", "action_type", "at_elapsed_mean"]
a_relapsed_df = a_relapsed_df.pivot(index='reference', columns='action_type',
values='at_elapsed_mean').reset_index()
a_relapsed_rate_cols = ["co_ref_elapsed_mean", "iid_ref_elapsed_mean", "iii_ref_elapsed_mean",
"iif_ref_elapsed_mean", "iir_ref_elapsed_mean", "sfi_ref_elapsed_mean"]
a_relapsed_df.columns = ["impression"] + a_relapsed_rate_cols
X = pd.merge(X, r_relapsed_df, on="impression", how="left")
X = pd.merge(X, a_relapsed_df, on="impression", how="left")
del relapsed_df
del r_relapsed_df
del a_relapsed_df
# tsh "time split by hour" item ctr
tsh_df = all_df[all_df.action_type == "clickout item"][
["session_id", "action_type", "reference", "timestamp_dt", "is_y"]].copy()
tsh_df["tsh24"] = -1
X["tsh24"] = -1
ts_min = tsh_df["timestamp_dt"].min()
ts_max = tsh_df["timestamp_dt"].max()
def set_tscol(hours):
tscol = "tsh" + str(hours)
ts_start = ts_min
ts_end = ts_start + datetime.timedelta(hours=hours)
ts_bin = 1
while True:
tsh_df.loc[(tsh_df.timestamp_dt >= ts_start) & (tsh_df.timestamp_dt < ts_end), tscol] = ts_bin
X.loc[(X.timestamp_dt >= ts_start) & (X.timestamp_dt < ts_end), tscol] = ts_bin
ts_start = ts_end
ts_end = ts_start + datetime.timedelta(hours=hours)
if ts_start > ts_max:
break
ts_bin += 1
set_tscol(24)
tsh_df = tsh_df[tsh_df.is_y == 0]
tsh24_df = tsh_df[["tsh24"]].groupby(["tsh24"]).size().reset_index()
tsh24_df.columns = ["tsh24", "allcnt"]
tsh24ref_df = tsh_df[["tsh24", "reference"]].groupby(["tsh24", "reference"]).size().reset_index()
tsh24ref_df.columns = ["tsh24", "impression", "rcnt"]
tsh24ref_df = pd.merge(tsh24ref_df, tsh24_df, on="tsh24", how="left")
tsh24ref_df["ctrbytsh24"] = tsh24ref_df["rcnt"].astype(float) / tsh24ref_df["allcnt"].astype(float)
tsh24ref_df = tsh24ref_df[["tsh24", "impression", "ctrbytsh24"]]
X = pd.merge(X, tsh24ref_df, on=["tsh24", "impression"], how="left")
X["ctrbytsh24"] = X["ctrbytsh24"].fillna(0)
del tsh_df
del tsh24_df
del tsh24ref_df
# item ctr by some props
ctrbyprops_df = all_df[all_df.action_type == "clickout item"][["session_id", "reference", "is_y"]].copy()
ctrbyprops_df.columns = ["session_id", "item_id", "is_y"]
star_cols = ["p1 Star", "p2 Star", "p3 Star", "p4 Star", "p5 Star"]
rating_cols = ["pSatisfactory Rating", "pGood Rating", "pVery Good Rating", "pExcellent Rating"]
ctrbyprops_df = pd.merge(ctrbyprops_df, item_props[["item_id"] + star_cols + rating_cols], on="item_id",
how="left")
ctrbyprops_df["star"] = -1
ctrbyprops_df.loc[ctrbyprops_df["p1 Star"] == 1, "star"] = 1
ctrbyprops_df.loc[ctrbyprops_df["p2 Star"] == 1, "star"] = 2
ctrbyprops_df.loc[ctrbyprops_df["p3 Star"] == 1, "star"] = 3
ctrbyprops_df.loc[ctrbyprops_df["p4 Star"] == 1, "star"] = 4
ctrbyprops_df.loc[ctrbyprops_df["p5 Star"] == 1, "star"] = 5
ctrbyprops_df["r6"] = 0
ctrbyprops_df["r7"] = 0
ctrbyprops_df["r8"] = 0
ctrbyprops_df["r9"] = 0
ctrbyprops_df.loc[ctrbyprops_df["pSatisfactory Rating"] == 1, "r6"] = 6
ctrbyprops_df.loc[ctrbyprops_df["pGood Rating"] == 1, "r7"] = 7
ctrbyprops_df.loc[ctrbyprops_df["pVery Good Rating"] == 1, "r8"] = 8
ctrbyprops_df.loc[ctrbyprops_df["pExcellent Rating"] == 1, "r9"] = 9
ctrbyprops_df["rating"] = ctrbyprops_df[["r6", "r7", "r8", "r9"]].apply(
lambda x: np.mean(np.trim_zeros(np.array([x.r6, x.r7, x.r8, x.r9]))), axis=1)
ctrbyprops_df["rating"] = ctrbyprops_df["rating"].fillna(-1)
ctrbyprops_df["star_rating"] = "sr_" + ctrbyprops_df["star"].astype(str) + "_" + ctrbyprops_df["rating"].astype(
str)
ctrbyprops_df = ctrbyprops_df[["session_id", "star_rating", "item_id", "is_y"]]
ctrbyprops_df = ctrbyprops_df[ctrbyprops_df.is_y == 0] # to prevent leakage
ctrbyprops_df = ctrbyprops_df[["item_id", "star_rating"]]
ctrbyprops_df.columns = ["impression", "star_rating"]
prop_df = ctrbyprops_df[["star_rating"]].groupby(["star_rating"]).size().reset_index()
prop_df.columns = ["star_rating", "allcnt"]
propref_df = ctrbyprops_df[["star_rating", "impression"]].groupby(
["star_rating", "impression"]).size().reset_index()
propref_df.columns = ["star_rating", "impression", "rcnt"]
propref_df = pd.merge(propref_df, prop_df, on="star_rating", how="left")
propref_df["ctrbyprops"] = propref_df["rcnt"].astype(float) / propref_df["allcnt"].astype(float)
propref_df = propref_df[["star_rating", "impression", "ctrbyprops"]]
X["star_rating"] = "sr_" + X["star"].astype(str) + "_" + X["rating"].astype(str)
X = pd.merge(X, propref_df, on=["star_rating", "impression"], how="left")
X["ctrbyprops"] = X["ctrbyprops"].fillna(0)
del ctrbyprops_df
del prop_df
del propref_df
# is no-search item (sessions whose first action is a clickout)
action_types = ["clickout item"]
is_nosi_df = all_df[["session_id", "action_type", "reference", "is_y"]].copy()
is_nosi_df = is_nosi_df.groupby("session_id").first().reset_index()
is_nosi_df = is_nosi_df[(is_nosi_df.action_type.isin(action_types)) & (is_nosi_df.is_y == 0)]
is_nosi_df = is_nosi_df[["reference"]].groupby("reference").size().reset_index()
is_nosi_df.columns = ["impression", "nosearch_cnt"]
X = pd.merge(X, is_nosi_df, on="impression", how="left")  # api: pandas.merge
"""
Misc tools for implementing data structures
"""
try:
import cPickle as pickle
except ImportError: # pragma: no cover
import pickle
try:
from io import BytesIO
except ImportError: # pragma: no cover
# Python < 2.6
from cStringIO import StringIO as BytesIO
import itertools
from cStringIO import StringIO
from numpy.lib.format import read_array, write_array
import numpy as np
import pandas._tseries as lib
from pandas.util import py3compat
import codecs
import csv
# XXX: HACK for NumPy 1.5.1 to suppress warnings
try:
np.seterr(all='ignore')
np.set_printoptions(suppress=True)
except Exception: # pragma: no cover
pass
class PandasError(Exception):
pass
class AmbiguousIndexError(PandasError, KeyError):
pass
def isnull(obj):
'''
Replacement for numpy.isnan / -numpy.isfinite which is suitable
for use on object arrays.
Parameters
----------
arr: ndarray or object value
Returns
-------
boolean ndarray or boolean
'''
if np.isscalar(obj) or obj is None:
return lib.checknull(obj)
from pandas.core.generic import PandasObject
from pandas import Series
if isinstance(obj, np.ndarray):
if obj.dtype.kind in ('O', 'S'):
# Working around NumPy ticket 1542
shape = obj.shape
result = np.empty(shape, dtype=bool)
vec = lib.isnullobj(obj.ravel())
result[:] = vec.reshape(shape)
if isinstance(obj, Series):
result = Series(result, index=obj.index, copy=False)
elif obj.dtype == np.datetime64:
# this is the NaT pattern
result = obj.view('i8') == lib.NaT
else:
result = -np.isfinite(obj)
return result
elif isinstance(obj, PandasObject):
# TODO: optimize for DataFrame, etc.
return obj.apply(isnull)
else:
return obj is None
def notnull(obj):
'''
Replacement for numpy.isfinite / -numpy.isnan which is suitable
for use on object arrays.
Parameters
----------
arr: ndarray or object value
Returns
-------
boolean ndarray or boolean
'''
res = isnull(obj)
if np.isscalar(res):
return not res
return -res
def _pickle_array(arr):
arr = arr.view(np.ndarray)
buf = BytesIO()
write_array(buf, arr)
return buf.getvalue()
def _unpickle_array(bytes):
arr = read_array(BytesIO(bytes))
return arr
def _take_1d_datetime(arr, indexer, out, fill_value=np.nan):
view = arr.view(np.int64)
outview = out.view(np.int64)
lib.take_1d_bool(view, indexer, outview, fill_value=fill_value)
def _take_2d_axis0_datetime(arr, indexer, out, fill_value=np.nan):
view = arr.view(np.int64)
outview = out.view(np.int64)
lib.take_1d_bool(view, indexer, outview, fill_value=fill_value)
def _take_2d_axis1_datetime(arr, indexer, out, fill_value=np.nan):
view = arr.view(np.uint8)
outview = out.view(np.uint8)
lib.take_1d_bool(view, indexer, outview, fill_value=fill_value)
def _view_wrapper(f, wrap_dtype, na_override=None):
def wrapper(arr, indexer, out, fill_value=np.nan):
if na_override is not None and np.isnan(fill_value):
fill_value = na_override
view = arr.view(wrap_dtype)
outview = out.view(wrap_dtype)
f(view, indexer, outview, fill_value=fill_value)
return wrapper
_take1d_dict = {
'float64' : lib.take_1d_float64,
'int32' : lib.take_1d_int32,
'int64' : lib.take_1d_int64,
'object' : lib.take_1d_object,
'bool' : _view_wrapper(lib.take_1d_bool, np.uint8),
'datetime64[us]' : _view_wrapper(lib.take_1d_int64, np.int64,
na_override=lib.NaT),
}
_take2d_axis0_dict = {
'float64' : lib.take_2d_axis0_float64,
'int32' : lib.take_2d_axis0_int32,
'int64' : lib.take_2d_axis0_int64,
'object' : lib.take_2d_axis0_object,
'bool' : _view_wrapper(lib.take_2d_axis0_bool, np.uint8),
'datetime64[us]' : _view_wrapper(lib.take_2d_axis0_int64, np.int64,
na_override=lib.NaT),
}
_take2d_axis1_dict = {
'float64' : lib.take_2d_axis1_float64,
'int32' : lib.take_2d_axis1_int32,
'int64' : lib.take_2d_axis1_int64,
'object' : lib.take_2d_axis1_object,
'bool' : _view_wrapper(lib.take_2d_axis1_bool, np.uint8),
'datetime64[us]' : _view_wrapper(lib.take_2d_axis1_int64, np.int64,
na_override=lib.NaT),
}
def _get_take2d_function(dtype_str, axis=0):
if axis == 0:
return _take2d_axis0_dict[dtype_str]
else:
return _take2d_axis1_dict[dtype_str]
def take_1d(arr, indexer, out=None, fill_value=np.nan):
"""
Specialized Cython take which sets NaN values in one pass
"""
dtype_str = arr.dtype.name
n = len(indexer)
if not isinstance(indexer, np.ndarray):
# Cython methods expects 32-bit integers
indexer = np.array(indexer, dtype=np.int32)
indexer = _ensure_int32(indexer)
out_passed = out is not None
take_f = _take1d_dict.get(dtype_str)
if dtype_str in ('int32', 'int64', 'bool'):
try:
if out is None:
out = np.empty(n, dtype=arr.dtype)
take_f(arr, indexer, out=out, fill_value=fill_value)
except ValueError:
mask = indexer == -1
if len(arr) == 0:
if not out_passed:
out = np.empty(n, dtype=arr.dtype)
else:
out = arr.take(indexer, out=out)
if mask.any():
if out_passed:
raise Exception('out with dtype %s does not support NA' %
out.dtype)
out = _maybe_upcast(out)
np.putmask(out, mask, fill_value)
elif dtype_str in ('float64', 'object', 'datetime64[us]'):
if out is None:
out = np.empty(n, dtype=arr.dtype)
take_f(arr, indexer, out=out, fill_value=fill_value)
else:
out = arr.take(indexer, out=out)
mask = indexer == -1
if mask.any():
if out_passed:
raise Exception('out with dtype %s does not support NA' %
out.dtype)
out = _maybe_upcast(out)
np.putmask(out, mask, fill_value)
return out
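# Usage sketch (not part of the original module; assumes the legacy pandas._tseries
# extension and the module's remaining helpers are importable): -1 entries in the
# indexer mark missing positions, and integer input is upcast to float so the NaN
# fill value fits.
def _example_take_1d():
    arr = np.array([10, 20, 30], dtype=np.int64)
    # roughly: array([10., 30., nan]) -- the -1 slot is filled with NaN
    return take_1d(arr, np.array([0, 2, -1], dtype=np.int32))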
def take_2d(arr, indexer, out=None, mask=None, needs_masking=None, axis=0,
fill_value=np.nan):
"""
Specialized Cython take which sets NaN values in one pass
"""
dtype_str = arr.dtype.name
out_shape = list(arr.shape)
out_shape[axis] = len(indexer)
out_shape = tuple(out_shape)
if not isinstance(indexer, np.ndarray):
# Cython methods expects 32-bit integers
indexer = np.array(indexer, dtype=np.int32)
indexer = _ensure_int32(indexer)
if dtype_str in ('int32', 'int64', 'bool'):
if mask is None:
mask = indexer == -1
needs_masking = mask.any()
if needs_masking:
# upcasting may be required
result = arr.take(indexer, axis=axis, out=out)
result = _maybe_mask(result, mask, needs_masking, axis=axis,
out_passed=out is not None,
fill_value=fill_value)
return result
else:
if out is None:
out = np.empty(out_shape, dtype=arr.dtype)
take_f = _get_take2d_function(dtype_str, axis=axis)
take_f(arr, indexer, out=out, fill_value=fill_value)
return out
elif dtype_str in ('float64', 'object', 'datetime64[us]'):
if out is None:
out = np.empty(out_shape, dtype=arr.dtype)
take_f = _get_take2d_function(dtype_str, axis=axis)
take_f(arr, indexer, out=out, fill_value=fill_value)
return out
else:
if mask is None:
mask = indexer == -1
needs_masking = mask.any()
# GH #486
if out is not None and arr.dtype != out.dtype:
arr = arr.astype(out.dtype)
result = arr.take(indexer, axis=axis, out=out)
result = _maybe_mask(result, mask, needs_masking, axis=axis,
out_passed=out is not None,
fill_value=fill_value)
return result
def mask_out_axis(arr, mask, axis, fill_value=np.nan):
indexer = [slice(None)] * arr.ndim
indexer[axis] = mask
arr[tuple(indexer)] = fill_value
def take_fast(arr, indexer, mask, needs_masking, axis=0, out=None,
fill_value=np.nan):
if arr.ndim == 2:
return take_2d(arr, indexer, out=out, mask=mask,
needs_masking=needs_masking,
axis=axis, fill_value=fill_value)
result = arr.take(indexer, axis=axis, out=out)
result = _maybe_mask(result, mask, needs_masking, axis=axis,
out_passed=out is not None, fill_value=fill_value)
return result
def _maybe_mask(result, mask, needs_masking, axis=0, out_passed=False,
fill_value=np.nan):
if needs_masking:
if out_passed and _need_upcast(result):
raise Exception('incompatible type for NAs')
else:
# a bit spaghettified
result = _maybe_upcast(result)
mask_out_axis(result, mask, axis, fill_value)
return result
def _maybe_upcast(values):
if issubclass(values.dtype.type, np.integer):
values = values.astype(float)
elif issubclass(values.dtype.type, np.bool_):
values = values.astype(object)
return values
def _need_upcast(values):
if issubclass(values.dtype.type, (np.integer, np.bool_)):
return True
return False
def _interp_wrapper(f, wrap_dtype, na_override=None):
def wrapper(arr, mask, limit=None):
view = arr.view(wrap_dtype)
f(view, mask, limit=limit)
return wrapper
_pad_1d_datetime = _interp_wrapper(lib.pad_inplace_int64, np.int64)
_pad_2d_datetime = _interp_wrapper(lib.pad_2d_inplace_int64, np.int64)
_backfill_1d_datetime = _interp_wrapper(lib.backfill_inplace_int64, np.int64)
_backfill_2d_datetime = _interp_wrapper(lib.backfill_2d_inplace_int64, np.int64)
def pad_1d(values, limit=None):
if is_float_dtype(values):
_method = lib.pad_inplace_float64
elif is_datetime64_dtype(values):
_method = _pad_1d_datetime
elif values.dtype == np.object_:
_method = lib.pad_inplace_object
else: # pragma: no cover
raise ValueError('Invalid dtype for padding')
_method(values, isnull(values).view(np.uint8), limit=limit)
def backfill_1d(values, limit=None):
if is_float_dtype(values):
_method = lib.backfill_inplace_float64
elif is_datetime64_dtype(values):
_method = _backfill_1d_datetime
elif values.dtype == np.object_:
_method = lib.backfill_inplace_object
else: # pragma: no cover
raise ValueError('Invalid dtype for padding')
_method(values, isnull(values).view(np.uint8), limit=limit)
def pad_2d(values, limit=None):
if is_float_dtype(values):
_method = lib.pad_2d_inplace_float64
elif is_datetime64_dtype(values):
_method = _pad_2d_datetime
elif values.dtype == np.object_:
_method = lib.pad_2d_inplace_object
else: # pragma: no cover
raise ValueError('Invalid dtype for padding')
_method(values, isnull(values).view(np.uint8), limit=limit)
def backfill_2d(values, limit=None):
if is_float_dtype(values):
_method = lib.backfill_2d_inplace_float64
elif is_datetime64_dtype(values):
_method = _backfill_2d_datetime
elif values.dtype == np.object_:
_method = lib.backfill_2d_inplace_object
else: # pragma: no cover
raise ValueError('Invalid dtype for padding')
_method(values, isnull(values).view(np.uint8), limit=limit)
def _consensus_name_attr(objs):
name = objs[0].name
for obj in objs[1:]:
if obj.name != name:
return None
return name
#----------------------------------------------------------------------
# Lots of little utilities
def _infer_dtype(value):
if isinstance(value, (float, np.floating)):
return np.float_
elif isinstance(value, (bool, np.bool_)):
return np.bool_
elif isinstance(value, (int, np.integer)):
return np.int_
else:
return np.object_
def _possibly_cast_item(obj, item, dtype):
chunk = obj[item]
if chunk.values.dtype != dtype:
if dtype in (np.object_, np.bool_):
obj[item] = chunk.astype(np.object_)
elif not issubclass(dtype, (np.integer, np.bool_)): # pragma: no cover
raise ValueError("Unexpected dtype encountered: %s" % dtype)
def _is_bool_indexer(key):
if isinstance(key, np.ndarray) and key.dtype == np.object_:
if not lib.is_bool_array(key):
if isnull(key).any():
raise ValueError('cannot index with vector containing '
'NA / NaN values')
return False
return True
elif isinstance(key, np.ndarray) and key.dtype == np.bool_:
return True
elif isinstance(key, list):
try:
return np.asarray(key).dtype == np.bool_
except TypeError: # pragma: no cover
return False
return False
def _default_index(n):
from pandas.core.index import Index
return Index(np.arange(n))
def ensure_float(arr):
if issubclass(arr.dtype.type, np.integer):
arr = arr.astype(float)
return arr
def _mut_exclusive(arg1, arg2):
if arg1 is not None and arg2 is not None:
raise Exception('mutually exclusive arguments')
elif arg1 is not None:
return arg1
else:
return arg2
def _any_none(*args):
for arg in args:
if arg is None:
return True
return False
def _all_not_none(*args):
for arg in args:
if arg is None:
return False
return True
def _try_sort(iterable):
listed = list(iterable)
try:
return sorted(listed)
except Exception:
return listed
def _count_not_none(*args):
return sum(x is not None for x in args)
#------------------------------------------------------------------------------
# miscellaneous python tools
def rands(n):
"""Generates a random alphanumeric string of length *n*"""
from random import Random
import string
return ''.join(Random().sample(string.ascii_letters+string.digits, n))
def adjoin(space, *lists):
"""
Glues together two sets of strings using the amount of space requested.
The idea is to prettify.
"""
out_lines = []
newLists = []
lengths = [max(map(len, x)) + space for x in lists[:-1]]
# not the last one
lengths.append(max(map(len, lists[-1])))
maxLen = max(map(len, lists))
for i, lst in enumerate(lists):
nl = [x.ljust(lengths[i]) for x in lst]
nl.extend([' ' * lengths[i]] * (maxLen - len(lst)))
newLists.append(nl)
toJoin = zip(*newLists)
for lines in toJoin:
out_lines.append(_join_unicode(lines))
return _join_unicode(out_lines, sep='\n')
def _join_unicode(lines, sep=''):
try:
return sep.join(lines)
except UnicodeDecodeError:
sep = unicode(sep)
return sep.join([x.decode('utf-8') if isinstance(x, str) else x
for x in lines])
def iterpairs(seq):
"""
Parameters
----------
seq: sequence
Returns
-------
iterator returning overlapping pairs of elements
Example
-------
>>> iterpairs([1, 2, 3, 4])
[(1, 2), (2, 3), (3, 4)]
"""
# input may not be sliceable
seq_it = iter(seq)
seq_it_next = iter(seq)
_ = seq_it_next.next()
return itertools.izip(seq_it, seq_it_next)
def indent(string, spaces=4):
dent = ' ' * spaces
return '\n'.join([dent + x for x in string.split('\n')])
def banner(message):
"""
Return 80-char width message declaration with = bars on top and bottom.
"""
bar = '=' * 80
return '%s\n%s\n%s' % (bar, message, bar)
class groupby(dict):
"""
A simple groupby different from the one in itertools.
Does not require the sequence elements to be sorted by keys,
however it is slower.
"""
def __init__(self, seq, key=lambda x:x):
for value in seq:
k = key(value)
self.setdefault(k, []).append(value)
try:
__iter__ = dict.iteritems
except AttributeError: # pragma: no cover
# Python 3
def __iter__(self):
return iter(dict.items(self))
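# Usage sketch (not part of the original module): unlike itertools.groupby, this class
# does not require the input to be sorted by key, e.g.
#   groupby([1, 2, 3, 4, 5], key=lambda x: x % 2)  ->  {1: [1, 3, 5], 0: [2, 4]}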
def map_indices_py(arr):
"""
Returns a dictionary with (element, index) pairs for each element in the
given array/list
"""
return dict([(x, i) for i, x in enumerate(arr)])
def union(*seqs):
result = set([])
for seq in seqs:
if not isinstance(seq, set):
seq = set(seq)
result |= seq
return type(seqs[0])(list(result))
def difference(a, b):
return type(a)(list(set(a) - set(b)))
def intersection(*seqs):
result = set(seqs[0])
for seq in seqs:
if not isinstance(seq, set):
seq = set(seq)
result &= seq
return type(seqs[0])(list(result))
def _asarray_tuplesafe(values, dtype=None):
if not isinstance(values, (list, tuple, np.ndarray)):
values = list(values)
if isinstance(values, list) and dtype in [np.object_, object]:
return lib.list_to_object_array(values)  # api: pandas._tseries.list_to_object_array
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# @Time : 2022/1/8 4:54 PM
# @Author: zhoumengjie
# @File : BondUtils.py
import json
import logging
import random
import time
import urllib
import pandas as pd
import requests
from wxcloudrun.common import akclient
header = {'Accept': '*/*',
'Connection': 'keep-alive',
'Content-type': 'application/json;charset=utf-8',
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.71 Safari/537.36'
}
cookie = requests.cookies.RequestsCookieJar()
cookie.set('kbzw__user_login', '7Obd08_P1ebax9aXzaPEpdCYrqXR0dTn8OTb3crUja2aqtqr2cPSkavfqKHcnKiYppOprtXdxtPGqqyon7ClmJ2j1uDb0dWMppOkqqefmqekt7e_1KLA59vZzeDapJ6nnJeKw8La4OHs0OPJr5m-1-3R44LDwtqXwsuByIGlqdSarsuui5ai5-ff3bjVw7_i6Ziun66QqZeXn77Atb2toJnh0uTRl6nbxOLmnJik2NPj5tqYsqSlkqSVrqyrppmggcfa28rr1aaXqZilqqk.;')
cookie.set('kbz_newcookie', '1;')
cookie.set('kbzw_r_uname', 'VANDY;')
cookie.set('kbzw__Session', 'fcqdk3pa4tlatoh6c338e19ju2;')
log = logging.getLogger('log')
jisilu_host = 'https://www.jisilu.cn'
cninfo_webapi_host = 'http://webapi.cninfo.com.cn'
east_host = 'https://emweb.securities.eastmoney.com'
zsxg_host = 'https://zsxg.cn'
cninfo_host = 'http://www.cninfo.com.cn'
cninfo_static_host = 'http://static.cninfo.com.cn/'
image_host = 'https://dficimage.toutiao.com'
code_suff_cache = {}
def format_func(num):
return '{:g}'.format(float(num))
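# Usage sketch (not part of the original module): '{:g}' drops insignificant trailing
# zeros, e.g. format_func('12.50') == '12.5' and format_func(3.0) == '3'.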
class Crawler:
def __init__(self, timeout=10):
self.__timeout = timeout
def query_list(self):
r""" 查询待发可转债列表
:return:
"""
# timestamp
now = time.time()  # raw epoch time
timestamp = int(round(now * 1000))
param = {"___jsl": "LST___t=" + str(timestamp)}
r = requests.post(jisilu_host + "/data/cbnew/pre_list/", params=param, headers=header, cookies=cookie)
if r.status_code != 200:
log.info("查询待发可转债列表失败:status_code = " + str(r.status_code))
return None
return r.json()['rows']
def user_info(self):
r = requests.post(jisilu_host + '/webapi/account/userinfo/', headers=header, cookies=cookie)
if r.status_code != 200:
print("查询集思录用户信息失败:status_code = " + str(r.status_code))
return False
data = r.json()
return data['code'] == 200 and data['data'] is not None
def login(self):
h = {
'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
'X-Requested-With': 'XMLHttpRequest',
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.110 Safari/537.36'
}
data = 'return_url=' + 'https://www.jisilu.cn/web/data/cb/list' + '&user_name=<PASSWORD>&password=<PASSWORD>&net_auto_login=1&agreement_chk=agree&_post_type=ajax&aes=1'
r = requests.post(jisilu_host + '/account/ajax/login_process/', data=data, headers=h, cookies=cookie)
if r.status_code != 200:
log.info("登录失败:status_code = " + str(r.status_code))
return False
# reset the cookies from the response
cookies = r.cookies.get_dict()
for key in cookies.keys():
cookie.set(key, cookies[key])
# follow-up request with referer to refresh cookies
h = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.110 Safari/537.36'
}
r = requests.get(jisilu_host + '/', cookies=cookie, headers=h)
if r.status_code != 200:
log.info("登录重定向失败:status_code = " + str(r.status_code))
return False
cookies = r.cookies.get_dict()
for key in cookies.keys():
cookie.set(key, cookies[key])
return True
def query_all_bond_list(self) -> pd.DataFrame:
r"""
Query all convertible bonds that are already listed
:return:
"""
# check login status first; log in if not yet logged in
is_login = self.user_info()
if not is_login:
print('jisilu no login...')
is_login = self.login()
print('jisilu login result:{}'.format(is_login))
h = {
'Content-Type': 'application/json; charset=utf-8',
'Init': '1',
'Referer': 'https://www.jisilu.cn/web/data/cb/list',
'Columns': '1,70,2,3,5,6,11,12,14,15,16,29,30,32,34,35,75,44,46,47,52,53,54,56,57,58,59,60,62,63,67',
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/100.0.4896.75 Safari/537.36'
}
# data = 'btype=C&listed=Y&qflag=N'
r = requests.get(jisilu_host + "/webapi/cb/list_new/", headers=h, cookies=cookie)
if r.status_code != 200:
print("查询所有可转债列表失败:status_code = " + str(r.status_code))
return None
rows = r.json()['data']
df = pd.DataFrame(rows)
return df
def query_industry_list(self, industry_code):
r""" 查询行业可转债列表
:return:
"""
# check login status first; log in if not yet logged in
is_login = self.user_info()
if not is_login:
log.info('jisilu no login...')
is_login = self.login()
log.info('jisilu login result:{}'.format(is_login))
h = {
'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.110 Safari/537.36'
}
# listed=Y: only already-listed bonds
data = 'sw_cd=' + industry_code + '&listed=Y'
# fprice=&tprice=&curr_iss_amt=&volume=&svolume=&premium_rt=&ytm_rt=&rating_cd=&is_search=Y&market_cd%5B%5D=shmb&market_cd%5B%5D=shkc&market_cd%5B%5D=szmb&market_cd%5B%5D=szcy&btype=C&listed=N&qflag=N&sw_cd=630303&bond_ids=&rp=50
r = requests.post(jisilu_host + "/data/cbnew/cb_list_new/", data=data, headers=h, cookies=cookie)
if r.status_code != 200:
log.info("查询行业可转债列表失败:status_code = " + str(r.status_code))
return None
return r.json()['rows']
def query_announcement_list(self) -> pd.DataFrame:
r"""
Query the latest convertible bond announcements
:return:
"""
h = {
'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.110 Safari/537.36'
}
data = 'code=&title=&tp[0]=Y'
r = requests.post(jisilu_host + "/webapi/cb/announcement_list/", data=data, headers=h)
if r.status_code != 200:
print("查询转债的最新公告失败:status_code = " + str(r.status_code))
return None
rows = r.json()['data']
df = pd.DataFrame(rows)
return df
def query_bond_data(self) -> pd.DataFrame:
r""" 查询交易中的可转债数据
:return:
"""
# check login status first; log in if not yet logged in
is_login = self.user_info()
if not is_login:
log.info('jisilu no login...')
is_login = self.login()
log.info('jisilu login result:{}'.format(is_login))
h = {
'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.110 Safari/537.36',
'Referer': 'https://www.jisilu.cn/data/cbnew/'
}
# listed=Y: only already-listed bonds
data = 'btype=C&listed=Y&qflag=N'
# fprice=&tprice=&curr_iss_amt=&volume=&svolume=&premium_rt=&ytm_rt=&rating_cd=&is_search=Y&market_cd%5B%5D=shmb&market_cd%5B%5D=shkc&market_cd%5B%5D=szmb&market_cd%5B%5D=szcy&btype=C&listed=N&qflag=N&sw_cd=630303&bond_ids=&rp=50
r = requests.post(jisilu_host + "/data/cbnew/cb_list_new/", data=data, headers=h, cookies=cookie)
if r.status_code != 200:
log.info("查询交易中的可转债数据:status_code = " + str(r.status_code))
return None
rows = r.json()['rows']
cells = list(map(lambda x: x['cell'], rows))
df = pd.DataFrame(cells)  # api: pandas.DataFrame
import pandas as pd
import pandas.api.types as types
from Levenshtein.StringMatcher import distance
""" Find and replace """
def find_replace(dataframe: pd.DataFrame, column_name: str, to_replace, value) -> pd.DataFrame:
dataframe[column_name].replace(to_replace, value, inplace=True)
return dataframe
def find_replace_regex(dataframe: pd.DataFrame, column_name: str, to_replace: str, value: str) -> pd.DataFrame:
if types.is_string_dtype(dataframe[column_name]):
dataframe[column_name].replace(to_replace, value, inplace=True, regex=True)
return dataframe
def find_replace_all(dataframe: pd.DataFrame, to_replace, value) -> pd.DataFrame:
dataframe.replace(to_replace, value, inplace=True)
return dataframe
""" Normalization """
def normalize(dataframe: pd.DataFrame, column_name: str) -> pd.DataFrame:
col = dataframe[column_name]
if not types.is_numeric_dtype(col):
return dataframe
dataframe[column_name] = (col - col.min()) / (col.max() - col.min())
return dataframe
def normalize_all(dataframe: pd.DataFrame) -> pd.DataFrame:
func = {False: lambda col: col,
True: lambda col: (col - col.min()) / (col.max() - col.min())}
return dataframe.transform(lambda col: func[types.is_numeric_dtype(dataframe[col.name])](col))
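# Usage sketch (not part of the original module; the helper name is hypothetical and
# it is never called): min-max scaling maps a numeric column onto [0, 1], while
# non-numeric columns are returned untouched.
def _example_normalize():
    df = pd.DataFrame({"price": [10.0, 20.0, 30.0], "label": ["a", "b", "c"]})
    return normalize(df, "price")  # price becomes [0.0, 0.5, 1.0], label unchanged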
""" Outliers """
def remove_outliers(dataframe: pd.DataFrame, column_name: str, outside_range: float) -> pd.DataFrame:
col = dataframe[column_name]
if not types.is_numeric_dtype(col):
return dataframe
return dataframe[(col - col.mean()).abs() <= (col.std() * outside_range)]
def remove_all_outliers(dataframe: pd.DataFrame, outside_range: float) -> pd.DataFrame:
return dataframe[dataframe.apply(lambda col:
not types.is_numeric_dtype(dataframe[col.name])  # api: pandas.api.types.is_numeric_dtype
import pprint
pp = pprint.PrettyPrinter(indent=4)
import numpy as np
import pandas as pd
import joblib
import autopluspy
import streamlit as st
# Function :
def display_results(results):
## Display results
st.title(f'Predicted price : {results} euros')
# Config
reg_mdl_filename = 'final_reg.sav'
std_mdl_filename = 'final_std.sav'
features_filename = 'final_features.sav'
data_dict_path = 'data_dict.txt'
# Call saved file
loaded_model = joblib.load(reg_mdl_filename)
loaded_std = joblib.load(std_mdl_filename)
features_list = joblib.load(features_filename)
data_dict = autopluspy.data_eng.read_data_dict(data_dict_path)
features_name = list(data_dict.keys())
features_name = features_name[1:]
# Start the app
st.title('Car price estimator')
st.write(' ')
st.write(' ')
d = {}
temp = {}
quanti_features = []
quality_features = []
for feature in features_list:
d[feature] = 0
for feature in features_name:
if data_dict[feature]['type'] == 'numerical':
key = feature
value = st.number_input(f'Insert {feature}:')
temp[key] = value
quanti_features.append(feature)
if data_dict[feature]['type'] == 'categorical':
key = feature
value = st.selectbox(f'Select {feature}: ', tuple(data_dict[feature]['unique_value']))
temp[key] = value
quality_features.append(feature)
## Running Prediction
if st.button('Run'):
x_temp = pd.DataFrame(data=temp, index=[0])  # api: pandas.DataFrame
import numpy as np
from numpy.random import randn
import pytest
import pandas.util._test_decorators as td
import pandas as pd
from pandas import DataFrame, Series, isna, notna
import pandas._testing as tm
import pandas.tseries.offsets as offsets
def _check_moment_func(
static_comp,
name,
raw,
has_min_periods=True,
has_center=True,
has_time_rule=True,
fill_value=None,
zero_min_periods_equal=True,
series=None,
frame=None,
**kwargs,
):
def get_result(obj, window, min_periods=None, center=False):
r = obj.rolling(window=window, min_periods=min_periods, center=center)
return getattr(r, name)(**kwargs)
series_result = get_result(series, window=50)
assert isinstance(series_result, Series)
tm.assert_almost_equal(series_result.iloc[-1], static_comp(series[-50:]))
frame_result = get_result(frame, window=50)
assert isinstance(frame_result, DataFrame)
tm.assert_series_equal(
frame_result.iloc[-1, :],
frame.iloc[-50:, :].apply(static_comp, axis=0, raw=raw),
check_names=False,
)
# check time_rule works
if has_time_rule:
win = 25
minp = 10
ser = series[::2].resample("B").mean()
frm = frame[::2].resample("B").mean()
if has_min_periods:
series_result = get_result(ser, window=win, min_periods=minp)
frame_result = get_result(frm, window=win, min_periods=minp)
else:
series_result = get_result(ser, window=win, min_periods=0)
frame_result = get_result(frm, window=win, min_periods=0)
last_date = series_result.index[-1]
prev_date = last_date - 24 * offsets.BDay()
trunc_series = series[::2].truncate(prev_date, last_date)
trunc_frame = frame[::2].truncate(prev_date, last_date)
tm.assert_almost_equal(series_result[-1], static_comp(trunc_series))
tm.assert_series_equal(
frame_result.xs(last_date),
trunc_frame.apply(static_comp, raw=raw),
check_names=False,
)
# excluding NaNs correctly
obj = Series(randn(50))
obj[:10] = np.NaN
obj[-10:] = np.NaN
if has_min_periods:
result = get_result(obj, 50, min_periods=30)
tm.assert_almost_equal(result.iloc[-1], static_comp(obj[10:-10]))
# min_periods is working correctly
result = get_result(obj, 20, min_periods=15)
assert isna(result.iloc[23])
assert not isna(result.iloc[24])
assert not isna(result.iloc[-6])
assert isna(result.iloc[-5])
obj2 = Series(randn(20))
result = get_result(obj2, 10, min_periods=5)
assert isna(result.iloc[3])
assert notna(result.iloc[4])
if zero_min_periods_equal:
# min_periods=0 may be equivalent to min_periods=1
result0 = get_result(obj, 20, min_periods=0)
result1 = get_result(obj, 20, min_periods=1)
tm.assert_almost_equal(result0, result1)
else:
result = get_result(obj, 50)
tm.assert_almost_equal(result.iloc[-1], static_comp(obj[10:-10]))
# window larger than series length (#7297)
if has_min_periods:
for minp in (0, len(series) - 1, len(series)):
result = get_result(series, len(series) + 1, min_periods=minp)
expected = get_result(series, len(series), min_periods=minp)
nan_mask = isna(result)
tm.assert_series_equal(nan_mask, isna(expected))
nan_mask = ~nan_mask
tm.assert_almost_equal(result[nan_mask], expected[nan_mask])
else:
result = get_result(series, len(series) + 1, min_periods=0)
expected = get_result(series, len(series), min_periods=0)
nan_mask = isna(result)
tm.assert_series_equal(nan_mask, isna(expected))
nan_mask = ~nan_mask
tm.assert_almost_equal(result[nan_mask], expected[nan_mask])
# check center=True
if has_center:
if has_min_periods:
result = get_result(obj, 20, min_periods=15, center=True)
expected = get_result(
pd.concat([obj, Series([np.NaN] * 9)]), 20, min_periods=15
)[9:].reset_index(drop=True)
else:
result = get_result(obj, 20, min_periods=0, center=True)
print(result)
expected = get_result(
pd.concat([obj, Series([np.NaN] * 9)]), 20, min_periods=0
)[9:].reset_index(drop=True)
tm.assert_series_equal(result, expected)
# shifter index
s = [f"x{x:d}" for x in range(12)]
if has_min_periods:
minp = 10
series_xp = (
get_result(
series.reindex(list(series.index) + s), window=25, min_periods=minp
)
.shift(-12)
.reindex(series.index)
)
frame_xp = (
get_result(
frame.reindex(list(frame.index) + s), window=25, min_periods=minp
)
.shift(-12)
.reindex(frame.index)
)
series_rs = get_result(series, window=25, min_periods=minp, center=True)
frame_rs = get_result(frame, window=25, min_periods=minp, center=True)
else:
series_xp = (
get_result(
series.reindex(list(series.index) + s), window=25, min_periods=0
)
.shift(-12)
.reindex(series.index)
)
frame_xp = (
get_result(
frame.reindex(list(frame.index) + s), window=25, min_periods=0
)
.shift(-12)
.reindex(frame.index)
)
series_rs = get_result(series, window=25, min_periods=0, center=True)
frame_rs = get_result(frame, window=25, min_periods=0, center=True)
if fill_value is not None:
series_xp = series_xp.fillna(fill_value)
frame_xp = frame_xp.fillna(fill_value)
tm.assert_series_equal(series_xp, series_rs)
tm.assert_frame_equal(frame_xp, frame_rs)
def test_centered_axis_validation():
# ok
Series(np.ones(10)).rolling(window=3, center=True, axis=0).mean()
# bad axis
msg = "No axis named 1 for object type Series"
with pytest.raises(ValueError, match=msg):
Series(np.ones(10)).rolling(window=3, center=True, axis=1).mean()
# ok ok
DataFrame(np.ones((10, 10))).rolling(window=3, center=True, axis=0).mean()
DataFrame(np.ones((10, 10))).rolling(window=3, center=True, axis=1).mean()
# bad axis
msg = "No axis named 2 for object type DataFrame"
with pytest.raises(ValueError, match=msg):
(DataFrame(np.ones((10, 10))).rolling(window=3, center=True, axis=2).mean())
@td.skip_if_no_scipy
def test_cmov_mean():
# GH 8238
vals = np.array([6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, 9.48, 10.63, 14.48])
result = Series(vals).rolling(5, center=True).mean()
expected_values = [
np.nan,
np.nan,
9.962,
11.27,
11.564,
12.516,
12.818,
12.952,
np.nan,
np.nan,
]
expected = Series(expected_values)
tm.assert_series_equal(expected, result)
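# Worked check: with window=5 and center=True the first defined value sits at index 2
# and averages vals[0:5]: (6.95 + 15.21 + 4.72 + 9.12 + 13.81) / 5 = 49.81 / 5 = 9.962,
# matching expected_values[2] above.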
@td.skip_if_no_scipy
def test_cmov_window():
# GH 8238
vals = np.array([6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, 9.48, 10.63, 14.48])
result = Series(vals).rolling(5, win_type="boxcar", center=True).mean()
expected_values = [
np.nan,
np.nan,
9.962,
11.27,
11.564,
12.516,
12.818,
12.952,
np.nan,
np.nan,
]
expected = Series(expected_values)
tm.assert_series_equal(expected, result)
@td.skip_if_no_scipy
def test_cmov_window_corner():
# GH 8238
# all nan
vals = pd.Series([np.nan] * 10)
result = vals.rolling(5, center=True, win_type="boxcar").mean()
assert np.isnan(result).all()
# empty
vals = pd.Series([], dtype=object)
result = vals.rolling(5, center=True, win_type="boxcar").mean()
assert len(result) == 0
# shorter than window
vals = pd.Series(np.random.randn(5))
result = vals.rolling(10, win_type="boxcar").mean()
assert np.isnan(result).all()
assert len(result) == 5
@td.skip_if_no_scipy
@pytest.mark.parametrize(
"f,xp",
[
(
"mean",
[
[np.nan, np.nan],
[np.nan, np.nan],
[9.252, 9.392],
[8.644, 9.906],
[8.87, 10.208],
[6.81, 8.588],
[7.792, 8.644],
[9.05, 7.824],
[np.nan, np.nan],
[np.nan, np.nan],
],
),
(
"std",
[
[np.nan, np.nan],
[np.nan, np.nan],
[3.789706, 4.068313],
[3.429232, 3.237411],
[3.589269, 3.220810],
[3.405195, 2.380655],
[3.281839, 2.369869],
[3.676846, 1.801799],
[np.nan, np.nan],
[np.nan, np.nan],
],
),
(
"var",
[
[np.nan, np.nan],
[np.nan, np.nan],
[14.36187, 16.55117],
[11.75963, 10.48083],
[12.88285, 10.37362],
[11.59535, 5.66752],
[10.77047, 5.61628],
[13.51920, 3.24648],
[np.nan, np.nan],
[np.nan, np.nan],
],
),
(
"sum",
[
[np.nan, np.nan],
[np.nan, np.nan],
[46.26, 46.96],
[43.22, 49.53],
[44.35, 51.04],
[34.05, 42.94],
[38.96, 43.22],
[45.25, 39.12],
[np.nan, np.nan],
[np.nan, np.nan],
],
),
],
)
def test_cmov_window_frame(f, xp):
# GH 8238
df = DataFrame(
np.array(
[
[12.18, 3.64],
[10.18, 9.16],
[13.24, 14.61],
[4.51, 8.11],
[6.15, 11.44],
[9.14, 6.21],
[11.31, 10.67],
[2.94, 6.51],
[9.42, 8.39],
[12.44, 7.34],
]
)
)
xp = DataFrame(np.array(xp))
roll = df.rolling(5, win_type="boxcar", center=True)
rs = getattr(roll, f)()
tm.assert_frame_equal(xp, rs)
@td.skip_if_no_scipy
def test_cmov_window_na_min_periods():
# min_periods
vals = Series(np.random.randn(10))
vals[4] = np.nan
vals[8] = np.nan
xp = vals.rolling(5, min_periods=4, center=True).mean()
rs = vals.rolling(5, win_type="boxcar", min_periods=4, center=True).mean()
tm.assert_series_equal(xp, rs)
@td.skip_if_no_scipy
def test_cmov_window_regular(win_types):
# GH 8238
vals = np.array([6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, 9.48, 10.63, 14.48])
xps = {
"hamming": [
np.nan,
np.nan,
8.71384,
9.56348,
12.38009,
14.03687,
13.8567,
11.81473,
np.nan,
np.nan,
],
"triang": [
np.nan,
np.nan,
9.28667,
10.34667,
12.00556,
13.33889,
13.38,
12.33667,
np.nan,
np.nan,
],
"barthann": [
np.nan,
np.nan,
8.4425,
9.1925,
12.5575,
14.3675,
14.0825,
11.5675,
np.nan,
np.nan,
],
"bohman": [
np.nan,
np.nan,
7.61599,
9.1764,
12.83559,
14.17267,
14.65923,
11.10401,
np.nan,
np.nan,
],
"blackmanharris": [
np.nan,
np.nan,
6.97691,
9.16438,
13.05052,
14.02156,
15.10512,
10.74574,
np.nan,
np.nan,
],
"nuttall": [
np.nan,
np.nan,
7.04618,
9.16786,
13.02671,
14.03559,
15.05657,
10.78514,
np.nan,
np.nan,
],
"blackman": [
np.nan,
np.nan,
7.73345,
9.17869,
12.79607,
14.20036,
14.57726,
11.16988,
np.nan,
np.nan,
],
"bartlett": [
np.nan,
np.nan,
8.4425,
9.1925,
12.5575,
14.3675,
14.0825,
11.5675,
np.nan,
np.nan,
],
}
xp =
|
Series(xps[win_types])
|
pandas.Series
|
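A hedged sketch (editorial, not one of the dataset rows): the completion above only wraps the expected values in pandas.Series; the behaviour exercised earlier in the same row is that a scipy-backed "boxcar" window matches a plain centered rolling mean.
import numpy as np
import pandas as pd

vals = pd.Series([6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, 9.48, 10.63, 14.48])
plain = vals.rolling(5, center=True).mean()                      # unweighted mean
boxcar = vals.rolling(5, win_type="boxcar", center=True).mean()  # requires scipy
print(np.allclose(plain, boxcar, equal_nan=True))                # expected: True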
#!/usr/bin/env python3
import unittest
from unittest.mock import patch
import pandas as pd
import numpy as np
from tmc import points
from tmc.utils import load, get_out, patch_helper
module_name="src.operations_on_series"
create_series = load(module_name, "create_series")
modify_series = load(module_name, "modify_series")
main = load(module_name, "main")
ph = patch_helper(module_name)
@points('p03-14.1')
class OperationsOnSeries(unittest.TestCase):
def test_creation(self):
self.assertEqual("", "")
L1=[2,3,4]
L2=[9,8,7]
indices=list("abc")
# with patch(patch_name(module_name, "pd.core.series.Series"), wraps=pd.core.series.Series) as ps:
with patch(ph("pd.Series"), wraps=pd.Series) as ps:
ret = create_series(L1, L2)
self.assertEqual(len(ret), 2, msg="Expected a pair of Series as a return value from function create_series!")
s1, s2 = ret
#ps.assert_called()
self.assertEqual(ps.call_count, 2, msg="Expected the constructor pd.Series to be called exactly twice!")
np.testing.assert_array_equal(s1.values, L1,
err_msg="Expected values of first series to be %s" % L1)
np.testing.assert_array_equal(s2.values, L2,
err_msg="Expected values of second series to be %s" % L2)
np.testing.assert_array_equal(s1.index, indices,
err_msg="Expected the index of first series to be %s" % indices)
np.testing.assert_array_equal(s2.index, indices,
err_msg="Expected the index of second series to be %s" % indices)
def test_modification(self):
indices=list("abc")
s1 = pd.Series([0,1,2], index=indices)
s2 =
|
pd.Series([3,4,5], index=indices)
|
pandas.Series
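A hedged sketch (editorial): the tested src.operations_on_series module is not included in the row above, so this is only a guess at its contract, reconstructed from the assertions.
import pandas as pd

def create_series(l1, l2):
    # Hypothetical implementation consistent with the test: two Series sharing the index a/b/c.
    idx = list("abc")
    return pd.Series(l1, index=idx), pd.Series(l2, index=idx)

s1, s2 = create_series([2, 3, 4], [9, 8, 7])
print(s1 + s2)  # element-wise sum, aligned on the shared index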
|
"""
Data structures for sparse float data. Life is made simpler by dealing only
with float64 data
"""
# pylint: disable=E1101,E1103,W0231,E0202
from numpy import nan
import numpy as np
from pandas.core.common import _pickle_array, _unpickle_array, _try_sort
from pandas.core.index import Index, MultiIndex, _ensure_index
from pandas.core.indexing import _check_slice_bounds, _maybe_convert_indices
from pandas.core.series import Series
from pandas.core.frame import (DataFrame, extract_index, _prep_ndarray,
_default_index)
from pandas.util.decorators import cache_readonly
import pandas.core.common as com
import pandas.core.datetools as datetools
from pandas.sparse.series import SparseSeries
from pandas.util.decorators import Appender
import pandas.lib as lib
class _SparseMockBlockManager(object):
def __init__(self, sp_frame):
self.sp_frame = sp_frame
def get(self, item):
return self.sp_frame[item].values
def iget(self, i):
return self.get(self.sp_frame.columns[i])
@property
def shape(self):
x, y = self.sp_frame.shape
return y, x
@property
def axes(self):
return [self.sp_frame.columns, self.sp_frame.index]
@property
def blocks(self):
""" return our series in the column order """
return [ self.iget(i) for i, c in enumerate(self.sp_frame.columns) ]
def get_numeric_data(self):
# does not check, but assuming all numeric for now
return self.sp_frame
def get_bool_data(self):
raise NotImplementedError
class SparseDataFrame(DataFrame):
"""
DataFrame containing sparse floating point data in the form of SparseSeries
objects
Parameters
----------
data : same types as can be passed to DataFrame
index : array-like, optional
columns : array-like, optional
default_kind : {'block', 'integer'}, default 'block'
Default sparse kind for converting Series to SparseSeries. Will not
override SparseSeries passed into constructor
default_fill_value : float
Default fill_value for converting Series to SparseSeries. Will not
override SparseSeries passed in
"""
_columns = None
_series = None
_is_mixed_type = False
_col_klass = SparseSeries
ndim = 2
def __init__(self, data=None, index=None, columns=None,
default_kind='block', default_fill_value=None):
if default_fill_value is None:
default_fill_value = np.nan
self.default_kind = default_kind
self.default_fill_value = default_fill_value
if isinstance(data, dict):
sdict, columns, index = self._init_dict(data, index, columns)
elif isinstance(data, (np.ndarray, list)):
sdict, columns, index = self._init_matrix(data, index, columns)
elif isinstance(data, DataFrame):
sdict, columns, index = self._init_dict(data, data.index,
data.columns)
elif data is None:
sdict = {}
if index is None:
index = Index([])
else:
index = _ensure_index(index)
if columns is None:
columns = Index([])
else:
for c in columns:
sdict[c] = SparseSeries(np.nan, index=index,
kind=self.default_kind,
fill_value=self.default_fill_value)
self._series = sdict
self.columns = columns
self.index = index
def _from_axes(self, data, axes):
columns, index = axes
return self._constructor(data, index=index, columns=columns)
@cache_readonly
def _data(self):
return _SparseMockBlockManager(self)
def _consolidate_inplace(self):
# do nothing when DataFrame calls this method
pass
def convert_objects(self, convert_dates=True):
# XXX
return self
@property
def _constructor(self):
def wrapper(data, index=None, columns=None, copy=False):
sf = SparseDataFrame(data, index=index, columns=columns,
default_fill_value=self.default_fill_value,
default_kind=self.default_kind)
if copy:
sf = sf.copy()
return sf
return wrapper
def _init_dict(self, data, index, columns, dtype=None):
# pre-filter out columns if we passed it
if columns is not None:
columns = _ensure_index(columns)
data = dict((k, v) for k, v in data.iteritems() if k in columns)
else:
columns = Index(_try_sort(data.keys()))
if index is None:
index = extract_index(data.values())
sp_maker = lambda x: SparseSeries(x, index=index,
kind=self.default_kind,
fill_value=self.default_fill_value,
copy=True)
sdict = {}
for k, v in data.iteritems():
if isinstance(v, Series):
# Force alignment, no copy necessary
if not v.index.equals(index):
v = v.reindex(index)
if not isinstance(v, SparseSeries):
v = sp_maker(v)
else:
if isinstance(v, dict):
v = [v.get(i, nan) for i in index]
v = sp_maker(v)
sdict[k] = v
# TODO: figure out how to handle this case, all nan's?
# add in any other columns we want to have (completeness)
nan_vec = np.empty(len(index))
nan_vec.fill(nan)
for c in columns:
if c not in sdict:
sdict[c] = sp_maker(nan_vec)
return sdict, columns, index
def _init_matrix(self, data, index, columns, dtype=None):
data = _prep_ndarray(data, copy=False)
N, K = data.shape
if index is None:
index = _default_index(N)
if columns is None:
columns = _default_index(K)
if len(columns) != K:
raise Exception('Column length mismatch: %d vs. %d' %
(len(columns), K))
if len(index) != N:
raise Exception('Index length mismatch: %d vs. %d' %
(len(index), N))
data = dict([(idx, data[:, i]) for i, idx in enumerate(columns)])
return self._init_dict(data, index, columns, dtype)
def __array_wrap__(self, result):
return SparseDataFrame(result, index=self.index, columns=self.columns,
default_kind=self.default_kind,
default_fill_value=self.default_fill_value)
def __getstate__(self):
series = dict((k, (v.sp_index, v.sp_values))
for k, v in self.iteritems())
columns = self.columns
index = self.index
return (series, columns, index, self.default_fill_value,
self.default_kind)
def __setstate__(self, state):
series, cols, idx, fv, kind = state
if not isinstance(cols, Index): # pragma: no cover
columns =
|
_unpickle_array(cols)
|
pandas.core.common._unpickle_array
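A hedged sketch (editorial): _pickle_array/_unpickle_array are legacy private pandas helpers that no longer exist, so the only transferable piece is the __getstate__/__setstate__ protocol the SparseDataFrame above customizes, shown here on an unrelated toy class.
import pickle

class Point:
    """Toy class using the same pickling protocol as the legacy SparseDataFrame above."""
    def __init__(self, x, y):
        self.x, self.y = x, y
    def __getstate__(self):
        return (self.x, self.y)   # what gets written to the pickle stream
    def __setstate__(self, state):
        self.x, self.y = state    # how the object is rebuilt on load

restored = pickle.loads(pickle.dumps(Point(1, 2)))
print(restored.x, restored.y)     # 1 2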
|
"""
Download, transform and simulate various binary datasets.
"""
# Author: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# License: MIT
from re import sub
from collections import Counter
from itertools import product
from urllib.parse import urljoin
from string import ascii_lowercase
from zipfile import ZipFile
from io import BytesIO, StringIO
import requests
import numpy as np
import pandas as pd
from sklearn.utils import check_X_y
from imblearn.datasets import make_imbalance
from .base import Datasets, FETCH_URLS, RANDOM_STATE
class ImbalancedBinaryDatasets(Datasets):
"""Class to download, transform and save binary class imbalanced
datasets."""
MULTIPLICATION_FACTORS = [2, 3]
@staticmethod
def _calculate_ratio(multiplication_factor, y):
"""Calculate ratio based on IRs multiplication factor."""
ratio = Counter(y).copy()
ratio[1] = int(ratio[1] / multiplication_factor)
return ratio
def _make_imbalance(self, data, multiplication_factor):
"""Undersample the minority class."""
X_columns = [col for col in data.columns if col != "target"]
X, y = check_X_y(data.loc[:, X_columns], data.target)
if multiplication_factor > 1.0:
sampling_strategy = self._calculate_ratio(multiplication_factor, y)
X, y = make_imbalance(
X, y, sampling_strategy=sampling_strategy, random_state=RANDOM_STATE
)
data = pd.DataFrame(np.column_stack((X, y)))
data.iloc[:, -1] = data.iloc[:, -1].astype(int)
return data
def download(self):
"""Download the datasets and append undersampled versions of them."""
super(ImbalancedBinaryDatasets, self).download()
undersampled_datasets = []
for (name, data), factor in list(
product(self.content_, self.MULTIPLICATION_FACTORS)
):
ratio = self._calculate_ratio(factor, data.target)
if ratio[1] >= 15:
data = self._make_imbalance(data, factor)
undersampled_datasets.append((f"{name} ({factor})", data))
self.content_ += undersampled_datasets
return self
def fetch_breast_tissue(self):
"""Download and transform the Breast Tissue Data Set.
The minority class is identified as the `car` and `fad`
labels and the majority class as the rest of the labels.
http://archive.ics.uci.edu/ml/datasets/breast+tissue
"""
data = pd.read_excel(FETCH_URLS["breast_tissue"], sheet_name="Data")
data = data.drop(columns="Case #").rename(columns={"Class": "target"})
data["target"] = data["target"].isin(["car", "fad"]).astype(int)
return data
def fetch_ecoli(self):
"""Download and transform the Ecoli Data Set.
The minority class is identified as the `pp` label
and the majority class as the rest of the labels.
https://archive.ics.uci.edu/ml/datasets/ecoli
"""
data =
|
pd.read_csv(FETCH_URLS["ecoli"], header=None, delim_whitespace=True)
|
pandas.read_csv
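A hedged sketch (editorial): the rows below are invented and only mimic the whitespace-separated, header-less layout of the UCI ecoli file; newer pandas prefers sep=r"\s+" over delim_whitespace=True, but the call mirrors the completion above.
from io import StringIO
import pandas as pd

raw = "AAT_ECOLI 0.49 0.29 0.48 cp\nANXB_ECOLI 0.07 0.40 0.48 pp\n"
data = pd.read_csv(StringIO(raw), header=None, delim_whitespace=True)
data["target"] = (data.iloc[:, -1] == "pp").astype(int)  # binarize the label, as fetch_ecoli does
print(data)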
|
import random
import pickle
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
random.seed(1234)
class dataPreprocess(object):
def __init__(self, filepath):
'''
Description:
Initialization/Constructor function
'''
self.filepath = filepath
def preprocess(self):
'''
Description:
Function to preprocess the data and store as '.pkl' files
INPUT:
Path to the dataset folder (keep the dataset as 'ratings_data.txt' for ratings data and 'trust_data.txt' for social trust data)
OUTPUT:
This function doesn't return anything but stores the data as '.pkl' files in the same folder.
'''
ratingsData = np.loadtxt(self.filepath+'/ratings_data.txt', dtype = np.int32)
trustData = np.loadtxt(self.filepath+'/trust_data.txt', dtype = np.int32)
ratingsList = []
trustList = []
users = set()
items = set()
for row in ratingsData:
userId = row[0]
itemId = row[1]
rating = row[2]
if userId not in users:
users.add(userId)
if itemId not in items:
items.add(itemId)
ratingsList.append([userId,itemId,rating])
userCount = len(users)
itemCount = len(items)
for row in trustData:
user1 = row[0]
user2 = row[1]
trust = row[2]
trustList.append([user1, user2, trust])
newDF = pd.DataFrame(ratingsList, columns=['userId','itemId','rating'])
X = np.array([newDF['userId'],newDF['itemId']]).T
y = np.array([newDF['rating']]).T
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0, stratify=y)
train = pd.DataFrame(X_train,columns = ['userId','itemId'])
train['rating'] =
|
pd.DataFrame(y_train)
|
pandas.DataFrame
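A hedged sketch (editorial) of the split-and-assemble pattern used by preprocess(); the (user, item, rating) rows are invented and stratify is dropped because the toy sample is too small for it.
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split

X = np.array([[1, 10], [1, 11], [2, 10], [2, 12], [3, 11], [3, 12]])
y = np.array([[5], [4], [3], [5], [4], [3]])
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)
train = pd.DataFrame(X_train, columns=["userId", "itemId"])
train["rating"] = pd.DataFrame(y_train)  # same column-assignment pattern as the completion above
print(train)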
|
# -*- coding: utf-8 -*-
# Copyright © 2017 Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can
# be found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
# This software may be modified and distributed under the terms
# of the BSD license. See the LICENSE file for details.
from __future__ import print_function as _
from __future__ import division as _
from __future__ import absolute_import as _
from ..data_structures.sarray import SArray
from ..data_structures.sframe import SFrame
from ..data_structures.sarray import load_sarray
from .._cython.cy_flexible_type import GMT
from . import util
import pandas as pd
import numpy as np
import unittest
import random
import datetime as dt
import copy
import os
import math
import shutil
import array
import time
import warnings
import functools
import tempfile
import sys
import six
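# --- Editor's sketch (not part of the quoted test module) ---------------------
# The __test_creation_pd helpers below funnel every input through pandas.Series
# before handing it to SArray; this only shows that wrapping step on the
# container types used in setUp.
print(pd.Series([1.0, 2.0, 3.0]).dtype)                     # float64, from a plain list
print(pd.Series(array.array('d', [1.0, 2.0, 3.0])).dtype)   # float64, from array.array
print(pd.Series(np.arange(3)).dtype)                        # platform int dtype, from an ndarray
# ------------------------------------------------------------------------------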
class SArrayTest(unittest.TestCase):
def setUp(self):
self.int_data = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.bool_data = [x % 2 == 0 for x in range(10)]
self.datetime_data = [dt.datetime(2013, 5, 7, 10, 4, 10),
dt.datetime(1902, 10, 21, 10, 34, 10).replace(tzinfo=GMT(0.0)),None]
self.datetime_data2 = [dt.datetime(2013, 5, 7, 10, 4, 10, 109321),
dt.datetime(1902, 10, 21, 10, 34, 10, 991111).replace(tzinfo=GMT(0.0)),None]
self.float_data = [1., 2., 3., 4., 5., 6., 7., 8., 9., 10.]
self.string_data = ["abc", "def", "hello", "world", "pika", "chu", "hello", "world"]
self.vec_data = [array.array('d', [i, i+1]) for i in self.int_data]
self.np_array_data = [np.array(x) for x in self.vec_data]
self.empty_np_array_data = [np.array([])]
self.np_matrix_data = [np.matrix(x) for x in self.vec_data]
self.list_data = [[i, str(i), i * 1.0] for i in self.int_data]
self.dict_data = [{str(i): i, i : float(i)} for i in self.int_data]
self.url = "http://s3-us-west-2.amazonaws.com/testdatasets/a_to_z.txt.gz"
def __test_equal(self, _sarray, _data, _type):
self.assertEqual(_sarray.dtype, _type)
self.assertEqual(len(_sarray), len(_data))
sarray_contents = list(_sarray.head(len(_sarray)))
if _type == np.ndarray:
# Special case for np.ndarray elements, which assertSequenceEqual
# does not handle.
np.testing.assert_array_equal(sarray_contents, _data)
else:
# Use unittest methods when possible for better consistency.
self.assertSequenceEqual(sarray_contents, _data)
def __test_almost_equal(self, _sarray, _data, _type):
self.assertEqual(_sarray.dtype, _type)
self.assertEqual(len(_sarray), len(_data))
l = list(_sarray)
for i in range(len(l)):
if type(l[i]) in (list, array.array):
for j in range(len(l[i])):
self.assertAlmostEqual(l[i][j], _data[i][j])
else:
self.assertAlmostEqual(l[i], _data[i])
def __test_creation_raw(self, data, dtype, expected):
s = SArray(data, dtype)
self.__test_equal(s, expected, dtype)
def __test_creation_pd(self, data, dtype, expected):
s = SArray(pd.Series(data), dtype)
self.__test_equal(s, expected, dtype)
def __test_creation(self, data, dtype, expected):
"""
Create sarray from data with dtype, and test it equals to
expected.
"""
self.__test_creation_raw(data, dtype, expected)
self.__test_creation_pd(data, dtype, expected)
def __test_creation_type_inference_raw(self, data, expected_dtype, expected):
s = SArray(data)
self.__test_equal(s, expected, expected_dtype)
def __test_creation_type_inference_pd(self, data, expected_dtype, expected):
s = SArray(pd.Series(data))
self.__test_equal(s, expected, expected_dtype)
def __test_creation_type_inference(self, data, expected_dtype, expected):
"""
Create sarray from data with dtype, and test it equals to
expected.
"""
self.__test_creation_type_inference_raw(data, expected_dtype, expected)
self.__test_creation_type_inference_pd(data, expected_dtype, expected)
def test_creation(self):
self.__test_creation(self.int_data, int, self.int_data)
self.__test_creation(self.int_data, float, [float(x) for x in self.int_data])
self.__test_creation(self.int_data, str, [str(x) for x in self.int_data])
self.__test_creation(self.float_data, float, self.float_data)
self.assertRaises(TypeError, self.__test_creation, [self.float_data, int])
self.__test_creation(self.string_data, str, self.string_data)
self.assertRaises(TypeError, self.__test_creation, [self.string_data, int])
self.assertRaises(TypeError, self.__test_creation, [self.string_data, float])
expected_output = [chr(x) for x in range(ord('a'), ord('a') + 26)]
self.__test_equal(SArray(self.url, str), expected_output, str)
self.__test_creation(self.vec_data, array.array, self.vec_data)
self.__test_creation(self.np_array_data, np.ndarray, self.np_array_data)
self.__test_creation(self.empty_np_array_data, np.ndarray,
self.empty_np_array_data)
self.__test_creation(self.np_matrix_data, np.ndarray, self.np_matrix_data)
self.__test_creation(self.list_data, list, self.list_data)
self.__test_creation(self.dict_data, dict, self.dict_data)
# test with map/filter type
self.__test_creation_raw(map(lambda x: x + 10, self.int_data),
int,
[x + 10 for x in self.int_data])
self.__test_creation_raw(map(lambda x: x * 10, self.int_data),
float,
[float(x) * 10 for x in self.int_data])
self.__test_creation_raw(map(lambda x: x * 10, self.string_data),
str,
[x * 10 for x in self.string_data])
self.__test_creation_raw(filter(lambda x: x < 5, self.int_data),
int,
list(filter(lambda x: x < 5, self.int_data)))
self.__test_creation_raw(filter(lambda x: x > 5, self.float_data),
float,
list(filter(lambda x: x > 5, self.float_data)))
self.__test_creation_raw(filter(lambda x: len(x) > 3, self.string_data),
str,
list(filter(lambda x: len(x) > 3, self.string_data)))
self.__test_creation_pd(map(lambda x: x + 10, self.int_data),
int,
[x + 10 for x in self.int_data])
self.__test_creation_pd(map(lambda x: x * 10, self.int_data),
float,
[float(x) * 10 for x in self.int_data])
self.__test_creation_pd(map(lambda x: x * 10, self.string_data),
str,
[x * 10 for x in self.string_data])
# test with type inference
self.__test_creation_type_inference(self.int_data, int, self.int_data)
self.__test_creation_type_inference(self.float_data, float, self.float_data)
self.__test_creation_type_inference(self.bool_data, int, [int(x) for x in self.bool_data])
self.__test_creation_type_inference(self.string_data, str, self.string_data)
self.__test_creation_type_inference(self.vec_data, array.array, self.vec_data)
self.__test_creation_type_inference(self.np_array_data, np.ndarray,
self.np_array_data)
self.__test_creation_type_inference(self.empty_np_array_data,
np.ndarray,
self.empty_np_array_data)
self.__test_creation_type_inference(self.np_matrix_data, np.ndarray,
self.np_matrix_data)
self.__test_creation_type_inference([np.bool_(True),np.bool_(False)],int,[1,0])
self.__test_creation((1,2,3,4), int, [1,2,3,4])
self.__test_creation_type_inference_raw(map(lambda x: x + 10, self.int_data),
int,
[x + 10 for x in self.int_data])
self.__test_creation_type_inference_raw(map(lambda x: x * 10, self.float_data),
float,
[x * 10 for x in self.float_data])
self.__test_creation_type_inference_raw(map(lambda x: x * 10, self.string_data),
str,
[x * 10 for x in self.string_data])
self.__test_creation_type_inference_pd(map(lambda x: x + 10, self.int_data),
int,
[x + 10 for x in self.int_data])
self.__test_creation_type_inference_pd(map(lambda x: x * 10, self.float_data),
float,
[float(x) * 10 for x in self.float_data])
self.__test_creation_type_inference_pd(map(lambda x: x * 10, self.string_data),
str,
[x * 10 for x in self.string_data])
self.__test_creation_type_inference_raw(filter(lambda x: x < 5, self.int_data),
int,
list(filter(lambda x: x < 5, self.int_data)))
self.__test_creation_type_inference_raw(filter(lambda x: x > 5, self.float_data),
float,
list(filter(lambda x: x > 5, self.float_data)))
self.__test_creation_type_inference_raw(filter(lambda x: len(x) > 3, self.string_data),
str,
list(filter(lambda x: len(x) > 3, self.string_data)))
# generators
def __generator_parrot(data):
for ii in data:
yield ii
self.__test_creation_raw(__generator_parrot(self.int_data), int, self.int_data)
self.__test_creation_raw(__generator_parrot(self.float_data), float, self.float_data)
self.__test_creation_raw(__generator_parrot(self.string_data), str, self.string_data)
self.__test_creation_pd(__generator_parrot(self.int_data), int, self.int_data)
self.__test_creation_pd(__generator_parrot(self.float_data), float, self.float_data)
self.__test_creation_pd(__generator_parrot(self.string_data), str, self.string_data)
self.__test_creation_type_inference_raw(__generator_parrot(self.int_data), int, self.int_data)
self.__test_creation_type_inference_raw(__generator_parrot(self.float_data), float, self.float_data)
self.__test_creation_type_inference_raw(__generator_parrot(self.string_data), str, self.string_data)
self.__test_creation_type_inference_pd(__generator_parrot(self.int_data), int, self.int_data)
self.__test_creation_type_inference_pd(__generator_parrot(self.float_data), float, self.float_data)
self.__test_creation_type_inference_pd(__generator_parrot(self.string_data), str, self.string_data)
# Test numpy types, which are not compatible with the pd.Series path in
# __test_creation and __test_creation_type_inference
self.__test_equal(SArray(np.array(self.vec_data), array.array),
self.vec_data, array.array)
self.__test_equal(SArray(np.matrix(self.vec_data), array.array),
self.vec_data, array.array)
self.__test_equal(SArray(np.array(self.vec_data)),
self.vec_data, array.array)
self.__test_equal(SArray(np.matrix(self.vec_data)),
self.vec_data, array.array)
# Test python 3
self.__test_equal(SArray(filter(lambda x: True, self.int_data)), self.int_data, int)
self.__test_equal(SArray(map(lambda x: x, self.int_data)), self.int_data, int)
def test_list_with_none_creation(self):
tlist=[[2,3,4],[5,6],[4,5,10,None]]
g=SArray(tlist)
self.assertEqual(len(g), len(tlist))
for i in range(len(tlist)):
self.assertEqual(g[i], tlist[i])
def test_list_with_array_creation(self):
import array
t = array.array('d',[1.1,2,3,4,5.5])
g=SArray(t)
self.assertEqual(len(g), len(t))
self.assertEqual(g.dtype, float)
glist = list(g)
for i in range(len(glist)):
self.assertAlmostEqual(glist[i], t[i])
t = array.array('i',[1,2,3,4,5])
g=SArray(t)
self.assertEqual(len(g), len(t))
self.assertEqual(g.dtype, int)
glist = list(g)
for i in range(len(glist)):
self.assertEqual(glist[i], t[i])
def test_in(self):
sint = SArray(self.int_data, int)
self.assertTrue(5 in sint)
self.assertFalse(20 in sint)
sstr = SArray(self.string_data, str)
self.assertTrue("abc" in sstr)
self.assertFalse("zzzzzz" in sstr)
self.assertFalse("" in sstr)
self.__test_equal(sstr.contains("ll"), ["ll" in i for i in self.string_data], int)
self.__test_equal(sstr.contains("a"), ["a" in i for i in self.string_data], int)
svec = SArray([[1.0,2.0],[2.0,3.0],[3.0,4.0],[4.0,5.0]], array.array)
self.__test_equal(svec.contains(1.0), [1,0,0,0], int)
self.__test_equal(svec.contains(0.0), [0,0,0,0], int)
self.__test_equal(svec.contains(2), [1,1,0,0], int)
slist = SArray([[1,"22"],[2,"33"],[3,"44"],[4,None]], list)
self.__test_equal(slist.contains(1.0), [1,0,0,0], int)
self.__test_equal(slist.contains(3), [0,0,1,0], int)
self.__test_equal(slist.contains("33"), [0,1,0,0], int)
self.__test_equal(slist.contains("3"), [0,0,0,0], int)
self.__test_equal(slist.contains(None), [0,0,0,1], int)
sdict = SArray([{1:"2"},{2:"3"},{3:"4"},{"4":"5"}], dict)
self.__test_equal(sdict.contains(1.0), [1,0,0,0], int)
self.__test_equal(sdict.contains(3), [0,0,1,0], int)
self.__test_equal(sdict.contains("4"), [0,0,0,1], int)
self.__test_equal(sdict.contains("3"), [0,0,0,0], int)
self.__test_equal(SArray(['ab','bc','cd']).is_in('abc'), [1,1,0], int)
self.__test_equal(SArray(['a','b','c']).is_in(['a','b']), [1,1,0], int)
self.__test_equal(SArray([1,2,3]).is_in(array.array('d',[1.0,2.0])), [1,1,0], int)
self.__test_equal(SArray([1,2,None]).is_in([1, None]), [1,0,1], int)
self.__test_equal(SArray([1,2,None]).is_in([1]), [1,0,0], int)
def test_save_load(self):
# Check top level load function
with util.TempDirectory() as f:
sa = SArray(self.float_data)
sa.save(f)
sa2 = load_sarray(f)
self.__test_equal(sa2, self.float_data, float)
# Make sure these files don't exist before testing
self._remove_sarray_files("intarr")
self._remove_sarray_files("fltarr")
self._remove_sarray_files("strarr")
self._remove_sarray_files("vecarr")
self._remove_sarray_files("listarr")
self._remove_sarray_files("dictarr")
sint = SArray(self.int_data, int)
sflt = SArray([float(x) for x in self.int_data], float)
sstr = SArray([str(x) for x in self.int_data], str)
svec = SArray(self.vec_data, array.array)
slist = SArray(self.list_data, list)
sdict = SArray(self.dict_data, dict)
sint.save('intarr.sidx')
sflt.save('fltarr.sidx')
sstr.save('strarr.sidx')
svec.save('vecarr.sidx')
slist.save('listarr.sidx')
sdict.save('dictarr.sidx')
sint2 = SArray('intarr.sidx')
sflt2 = SArray('fltarr.sidx')
sstr2 = SArray('strarr.sidx')
svec2 = SArray('vecarr.sidx')
slist2 = SArray('listarr.sidx')
sdict2 = SArray('dictarr.sidx')
self.assertRaises(IOError, lambda: SArray('__no_such_file__.sidx'))
self.__test_equal(sint2, self.int_data, int)
self.__test_equal(sflt2, [float(x) for x in self.int_data], float)
self.__test_equal(sstr2, [str(x) for x in self.int_data], str)
self.__test_equal(svec2, self.vec_data, array.array)
self.__test_equal(slist2, self.list_data, list)
self.__test_equal(sdict2, self.dict_data, dict)
#cleanup
del sint2
del sflt2
del sstr2
del svec2
del slist2
del sdict2
self._remove_sarray_files("intarr")
self._remove_sarray_files("fltarr")
self._remove_sarray_files("strarr")
self._remove_sarray_files("vecarr")
self._remove_sarray_files("listarr")
self._remove_sarray_files("dictarr")
def test_save_load_text(self):
self._remove_single_file('txt_int_arr.txt')
sint = SArray(self.int_data, int)
sint.save('txt_int_arr.txt')
self.assertTrue(os.path.exists('txt_int_arr.txt'))
f = open('txt_int_arr.txt')
lines = f.readlines()
for i in range(len(sint)):
self.assertEqual(int(lines[i]), sint[i])
self._remove_single_file('txt_int_arr.txt')
self._remove_single_file('txt_int_arr')
sint.save('txt_int_arr', format='text')
self.assertTrue(os.path.exists('txt_int_arr'))
f = open('txt_int_arr')
lines = f.readlines()
for i in range(len(sint)):
self.assertEqual(int(lines[i]), sint[i])
self._remove_single_file('txt_int_arr')
def _remove_single_file(self, filename):
try:
os.remove(filename)
except:
pass
def _remove_sarray_files(self, prefix):
filelist = [ f for f in os.listdir(".") if f.startswith(prefix) ]
for f in filelist:
shutil.rmtree(f)
def test_transform(self):
sa_char = SArray(self.url, str)
sa_int = sa_char.apply(lambda char: ord(char), int)
expected_output = [x for x in range(ord('a'), ord('a') + 26)]
self.__test_equal(sa_int, expected_output, int)
# Test randomness across segments, randomized sarray should have different elements.
sa_random = SArray(range(0, 16), int).apply(lambda x: random.randint(0, 1000), int)
vec = list(sa_random.head(len(sa_random)))
self.assertFalse(all([x == vec[0] for x in vec]))
# test transform with missing values
sa = SArray([1,2,3,None,4,5])
sa1 = sa.apply(lambda x : x + 1)
self.__test_equal(sa1, [2,3,4,None,5,6], int)
def test_transform_with_multiple_lambda(self):
sa_char = SArray(self.url, str)
sa_int = sa_char.apply(lambda char: ord(char), int)
sa2_int = sa_int.apply(lambda val: val + 1, int)
expected_output = [x for x in range(ord('a') + 1, ord('a') + 26 + 1)]
self.__test_equal(sa2_int, expected_output, int)
def test_transform_with_exception(self):
sa_char = SArray(['a' for i in range(10000)], str)
# # type mismatch exception
self.assertRaises(TypeError, lambda: sa_char.apply(lambda char: char, int).head(1))
# # divide by 0 exception
self.assertRaises(ZeroDivisionError, lambda: sa_char.apply(lambda char: ord(char) / 0, float))
def test_transform_with_type_inference(self):
sa_char = SArray(self.url, str)
sa_int = sa_char.apply(lambda char: ord(char))
expected_output = [x for x in range(ord('a'), ord('a') + 26)]
self.__test_equal(sa_int, expected_output, int)
sa_bool = sa_char.apply(lambda char: ord(char) > ord('c'))
expected_output = [int(x > ord('c')) for x in range(ord('a'), ord('a') + 26)]
self.__test_equal(sa_bool, expected_output, int)
# # divide by 0 exception
self.assertRaises(ZeroDivisionError, lambda: sa_char.apply(lambda char: ord(char) / 0))
# Test randomness across segments, randomized sarray should have different elements.
sa_random = SArray(range(0, 16), int).apply(lambda x: random.randint(0, 1000))
vec = list(sa_random.head(len(sa_random)))
self.assertFalse(all([x == vec[0] for x in vec]))
def test_transform_on_lists(self):
sa_int = SArray(self.int_data, int)
sa_vec2 = sa_int.apply(lambda x: [x, x+1, str(x)])
expected = [[i, i + 1, str(i)] for i in self.int_data]
self.__test_equal(sa_vec2, expected, list)
sa_int_again = sa_vec2.apply(lambda x: int(x[0]))
self.__test_equal(sa_int_again, self.int_data, int)
# transform from vector to vector
sa_vec = SArray(self.vec_data, array.array)
sa_vec2 = sa_vec.apply(lambda x: x)
self.__test_equal(sa_vec2, self.vec_data, array.array)
# transform on list
sa_list = SArray(self.list_data, list)
sa_list2 = sa_list.apply(lambda x: x)
self.__test_equal(sa_list2, self.list_data, list)
# transform dict to list
sa_dict = SArray(self.dict_data, dict)
# Python 3 doesn't return keys in same order from identical dictionaries.
sort_by_type = lambda x : str(type(x))
sa_list = sa_dict.apply(lambda x: sorted(list(x), key = sort_by_type))
self.__test_equal(sa_list, [sorted(list(x), key = sort_by_type) for x in self.dict_data], list)
def test_transform_dict(self):
# lambda accesses dict
sa_dict = SArray([{'a':1}, {1:2}, {'c': 'a'}, None], dict)
sa_bool_r = sa_dict.apply(lambda x: 'a' in x if x is not None else None, skip_na=False)
expected_output = [1, 0, 0, None]
self.__test_equal(sa_bool_r, expected_output, int)
# lambda returns dict
expected_output = [{'a':1}, {1:2}, None, {'c': 'a'}]
sa_dict = SArray(expected_output, dict)
lambda_out = sa_dict.apply(lambda x: x)
self.__test_equal(lambda_out, expected_output, dict)
def test_filter_dict(self):
expected_output = [{'a':1}]
sa_dict = SArray(expected_output, dict)
ret = sa_dict.filter(lambda x: 'a' in x)
self.__test_equal(ret, expected_output, dict)
# try second time to make sure the lambda system still works
expected_output = [{1:2}]
sa_dict = SArray(expected_output, dict)
lambda_out = sa_dict.filter(lambda x: 1 in x)
self.__test_equal(lambda_out, expected_output, dict)
def test_filter(self):
# test empty
s = SArray([], float)
no_change = s.filter(lambda x : x == 0)
self.assertEqual(len(no_change), 0)
# test normal case
s = SArray(self.int_data, int)
middle_of_array = s.filter(lambda x: x > 3 and x < 8)
self.assertEqual(list(middle_of_array.head(10)), [x for x in range(4,8)])
# test normal string case
s = SArray(self.string_data, str)
exp_val_list = [x for x in self.string_data if x != 'world']
# Remove all words whose second letter is not in the first half of the alphabet
second_letter = s.filter(lambda x: len(x) > 1 and (ord(x[1]) > ord('a')) and (ord(x[1]) < ord('n')))
self.assertEqual(list(second_letter.head(10)), exp_val_list)
# test not-a-lambda
def a_filter_func(x):
return ((x > 4.4) and (x < 6.8))
s = SArray(self.int_data, float)
another = s.filter(a_filter_func)
self.assertEqual(list(another.head(10)), [5.,6.])
sa = SArray(self.float_data)
# filter by self
sa2 = sa[sa]
self.assertEqual(list(sa.head(10)), list(sa2.head(10)))
# filter by zeros
sa_filter = SArray([0,0,0,0,0,0,0,0,0,0])
sa2 = sa[sa_filter]
self.assertEqual(len(sa2), 0)
# filter by wrong size
sa_filter = SArray([0,2,5])
with self.assertRaises(IndexError):
sa2 = sa[sa_filter]
def test_any_all(self):
s = SArray([0,1,2,3,4,5,6,7,8,9], int)
self.assertEqual(s.any(), True)
self.assertEqual(s.all(), False)
s = SArray([0,0,0,0,0], int)
self.assertEqual(s.any(), False)
self.assertEqual(s.all(), False)
s = SArray(self.string_data, str)
self.assertEqual(s.any(), True)
self.assertEqual(s.all(), True)
s = SArray(self.int_data, int)
self.assertEqual(s.any(), True)
self.assertEqual(s.all(), True)
# test empty
s = SArray([], int)
self.assertEqual(s.any(), False)
self.assertEqual(s.all(), True)
s = SArray([[], []], array.array)
self.assertEqual(s.any(), False)
self.assertEqual(s.all(), False)
s = SArray([[],[1.0]], array.array)
self.assertEqual(s.any(), True)
self.assertEqual(s.all(), False)
def test_astype(self):
# test empty
s = SArray([], int)
as_out = s.astype(float)
self.assertEqual(as_out.dtype, float)
# test float -> int
s = SArray(list(map(lambda x: x+0.2, self.float_data)), float)
as_out = s.astype(int)
self.assertEqual(list(as_out.head(10)), self.int_data)
# test int->string
s = SArray(self.int_data, int)
as_out = s.astype(str)
self.assertEqual(list(as_out.head(10)), list(map(lambda x: str(x), self.int_data)))
i_out = as_out.astype(int)
self.assertEqual(list(i_out.head(10)), list(s.head(10)))
s = SArray(self.vec_data, array.array)
with self.assertRaises(RuntimeError):
s.astype(int)
with self.assertRaises(RuntimeError):
s.astype(float)
s = SArray(["a","1","2","3"])
with self.assertRaises(RuntimeError):
s.astype(int)
self.assertEqual(list(s.astype(int,True).head(4)), [None,1,2,3])
s = SArray(["[1 2 3]","[4;5]"])
ret = list(s.astype(array.array).head(2))
self.assertEqual(ret, [array.array('d',[1,2,3]),array.array('d',[4,5])])
s = SArray(["[1,\"b\",3]","[4,5]"])
ret = list(s.astype(list).head(2))
self.assertEqual(ret, [[1,"b",3],[4,5]])
s = SArray(["{\"a\":2,\"b\":3}","{}"])
ret = list(s.astype(dict).head(2))
self.assertEqual(ret, [{"a":2,"b":3},{}])
s = SArray(["[1abc]"])
ret = list(s.astype(list).head(1))
self.assertEqual(ret, [["1abc"]])
s = SArray(["{1xyz:1a,2b:2}"])
ret = list(s.astype(dict).head(1))
self.assertEqual(ret, [{"1xyz":"1a","2b":2}])
# astype between list and array
s = SArray([array.array('d',[1.0,2.0]), array.array('d',[2.0,3.0])])
ret = list(s.astype(list))
self.assertEqual(ret, [[1.0, 2.0], [2.0,3.0]])
ret = list(s.astype(list).astype(array.array))
self.assertEqual(list(s), list(ret))
with self.assertRaises(RuntimeError):
ret = list(SArray([["a",1.0],["b",2.0]]).astype(array.array))
badcast = list(SArray([["a",1.0],["b",2.0]]).astype(array.array, undefined_on_failure=True))
self.assertEqual(badcast, [None, None])
with self.assertRaises(TypeError):
s.astype(None)
def test_clip(self):
# invalid types
s = SArray(self.string_data, str)
with self.assertRaises(RuntimeError):
s.clip(25,26)
with self.assertRaises(RuntimeError):
s.clip_lower(25)
with self.assertRaises(RuntimeError):
s.clip_upper(26)
# int w/ int, test lower and upper functions too
# int w/float, no change
s = SArray(self.int_data, int)
clip_out = s.clip(3,7).head(10)
# test that our list isn't cast to float if nothing happened
clip_out_nc = s.clip(0.2, 10.2).head(10)
lclip_out = s.clip_lower(3).head(10)
rclip_out = s.clip_upper(7).head(10)
self.assertEqual(len(clip_out), len(self.int_data))
self.assertEqual(len(lclip_out), len(self.int_data))
self.assertEqual(len(rclip_out), len(self.int_data))
for i in range(0,len(clip_out)):
if i < 2:
self.assertEqual(clip_out[i], 3)
self.assertEqual(lclip_out[i], 3)
self.assertEqual(rclip_out[i], self.int_data[i])
self.assertEqual(clip_out_nc[i], self.int_data[i])
elif i > 6:
self.assertEqual(clip_out[i], 7)
self.assertEqual(lclip_out[i], self.int_data[i])
self.assertEqual(rclip_out[i], 7)
self.assertEqual(clip_out_nc[i], self.int_data[i])
else:
self.assertEqual(clip_out[i], self.int_data[i])
self.assertEqual(clip_out_nc[i], self.int_data[i])
# int w/float, change
# float w/int
# float w/float
clip_out = s.clip(2.8, 7.2).head(10)
fs = SArray(self.float_data, float)
ficlip_out = fs.clip(3, 7).head(10)
ffclip_out = fs.clip(2.8, 7.2).head(10)
for i in range(0,len(clip_out)):
if i < 2:
self.assertAlmostEqual(clip_out[i], 2.8)
self.assertAlmostEqual(ffclip_out[i], 2.8)
self.assertAlmostEqual(ficlip_out[i], 3.)
elif i > 6:
self.assertAlmostEqual(clip_out[i], 7.2)
self.assertAlmostEqual(ffclip_out[i], 7.2)
self.assertAlmostEqual(ficlip_out[i], 7.)
else:
self.assertAlmostEqual(clip_out[i], self.float_data[i])
self.assertAlmostEqual(ffclip_out[i], self.float_data[i])
self.assertAlmostEqual(ficlip_out[i], self.float_data[i])
vs = SArray(self.vec_data, array.array)
clipvs = vs.clip(3, 7).head(100)
self.assertEqual(len(clipvs), len(self.vec_data))
for i in range(0, len(clipvs)):
a = clipvs[i]
b = self.vec_data[i]
self.assertEqual(len(a), len(b))
for j in range(0, len(b)):
if b[j] < 3:
b[j] = 3
elif b[j] > 7:
b[j] = 7
self.assertEqual(a, b)
def test_missing(self):
s=SArray(self.int_data, int)
self.assertEqual(s.countna(), 0)
s=SArray(self.int_data + [None], int)
self.assertEqual(s.countna(), 1)
s=SArray(self.float_data, float)
self.assertEqual(s.countna(), 0)
s=SArray(self.float_data + [None], float)
self.assertEqual(s.countna(), 1)
s=SArray(self.string_data, str)
self.assertEqual(s.countna(), 0)
s=SArray(self.string_data + [None], str)
self.assertEqual(s.countna(), 1)
s=SArray(self.vec_data, array.array)
self.assertEqual(s.countna(), 0)
s=SArray(self.vec_data + [None], array.array)
self.assertEqual(s.countna(), 1)
def test_nonzero(self):
# test empty
s = SArray([],int)
nz_out = s.nnz()
self.assertEqual(nz_out, 0)
# test all nonzero
s = SArray(self.float_data, float)
nz_out = s.nnz()
self.assertEqual(nz_out, len(self.float_data))
# test all zero
s = SArray([0 for x in range(0,10)], int)
nz_out = s.nnz()
self.assertEqual(nz_out, 0)
# test strings
str_list = copy.deepcopy(self.string_data)
str_list.append("")
s = SArray(str_list, str)
nz_out = s.nnz()
self.assertEqual(nz_out, len(self.string_data))
def test_std_var(self):
# test empty
s = SArray([], int)
self.assertTrue(s.std() is None)
self.assertTrue(s.var() is None)
# increasing ints
s = SArray(self.int_data, int)
self.assertAlmostEqual(s.var(), 8.25)
self.assertAlmostEqual(s.std(), 2.8722813)
# increasing floats
s = SArray(self.float_data, float)
self.assertAlmostEqual(s.var(), 8.25)
self.assertAlmostEqual(s.std(), 2.8722813)
# vary ddof
self.assertAlmostEqual(s.var(ddof=3), 11.7857143)
self.assertAlmostEqual(s.var(ddof=6), 20.625)
self.assertAlmostEqual(s.var(ddof=9), 82.5)
self.assertAlmostEqual(s.std(ddof=3), 3.4330328)
self.assertAlmostEqual(s.std(ddof=6), 4.5414755)
self.assertAlmostEqual(s.std(ddof=9), 9.08295106)
# bad ddof
with self.assertRaises(RuntimeError):
s.var(ddof=11)
with self.assertRaises(RuntimeError):
s.std(ddof=11)
# bad type
s = SArray(self.string_data, str)
with self.assertRaises(RuntimeError):
s.std()
with self.assertRaises(RuntimeError):
s.var()
# overflow test
huge_int = 9223372036854775807
s = SArray([1, huge_int], int)
self.assertAlmostEqual(s.var(), 21267647932558653957237540927630737409.0)
self.assertAlmostEqual(s.std(), 4611686018427387900.0)
def test_tail(self):
# test empty
s = SArray([], int)
self.assertEqual(len(s.tail()), 0)
# test standard tail
s = SArray([x for x in range(0,40)], int)
self.assertEqual(list(s.tail()), [x for x in range(30,40)])
# smaller amount
self.assertEqual(list(s.tail(3)), [x for x in range(37,40)])
# larger amount
self.assertEqual(list(s.tail(40)), [x for x in range(0,40)])
# too large
self.assertEqual(list(s.tail(81)), [x for x in range(0,40)])
def test_max_min_sum_mean(self):
# negative and positive
s = SArray([-2,-1,0,1,2], int)
self.assertEqual(s.max(), 2)
self.assertEqual(s.min(), -2)
self.assertEqual(s.sum(), 0)
self.assertAlmostEqual(s.mean(), 0.)
# test valid and invalid types
s = SArray(self.string_data, str)
with self.assertRaises(RuntimeError):
s.max()
with self.assertRaises(RuntimeError):
s.min()
with self.assertRaises(RuntimeError):
s.sum()
with self.assertRaises(RuntimeError):
s.mean()
s = SArray(self.int_data, int)
self.assertEqual(s.max(), 10)
self.assertEqual(s.min(), 1)
self.assertEqual(s.sum(), 55)
self.assertAlmostEqual(s.mean(), 5.5)
s = SArray(self.float_data, float)
self.assertEqual(s.max(), 10.)
self.assertEqual(s.min(), 1.)
self.assertEqual(s.sum(), 55.)
self.assertAlmostEqual(s.mean(), 5.5)
# test all negative
s = SArray(list(map(lambda x: x*-1, self.int_data)), int)
self.assertEqual(s.max(), -1)
self.assertEqual(s.min(), -10)
self.assertEqual(s.sum(), -55)
self.assertAlmostEqual(s.mean(), -5.5)
# test empty
s = SArray([], float)
self.assertTrue(s.max() is None)
self.assertTrue(s.min() is None)
self.assertTrue(s.mean() is None)
# test sum
t = SArray([], float).sum()
self.assertTrue(type(t) == float)
self.assertTrue(t == 0.0)
t = SArray([], int).sum()
self.assertTrue(type(t) == int or type(t) == long)
self.assertTrue(t == 0)
self.assertTrue(SArray([], array.array).sum() == array.array('d',[]))
# test big ints
huge_int = 9223372036854775807
s = SArray([1, huge_int], int)
self.assertEqual(s.max(), huge_int)
self.assertEqual(s.min(), 1)
# yes, we overflow
self.assertEqual(s.sum(), (huge_int+1)*-1)
# ...but not here
self.assertAlmostEqual(s.mean(), 4611686018427387904.)
a = SArray([[1,2],[1,2],[1,2]], array.array)
self.assertEqual(a.sum(), array.array('d', [3,6]))
self.assertEqual(a.mean(), array.array('d', [1,2]))
with self.assertRaises(RuntimeError):
a.max()
with self.assertRaises(RuntimeError):
a.min()
a = SArray([[1,2],[1,2],[1,2,3]], array.array)
with self.assertRaises(RuntimeError):
a.sum()
with self.assertRaises(RuntimeError):
a.mean()
def test_max_min_sum_mean_missing(self):
# negative and positive
s = SArray([-2,0,None,None,None], int)
self.assertEqual(s.max(), 0)
self.assertEqual(s.min(), -2)
self.assertEqual(s.sum(), -2)
self.assertAlmostEqual(s.mean(), -1)
s = SArray([None,None,None], int)
self.assertEqual(s.max(), None)
self.assertEqual(s.min(), None)
self.assertEqual(s.sum(), 0)
self.assertEqual(s.mean(), None)
def test_python_special_functions(self):
s = SArray([], int)
self.assertEqual(len(s), 0)
self.assertEqual(str(s), '[]')
self.assertRaises(ValueError, lambda: bool(s))
# increasing ints
s = SArray(self.int_data, int)
self.assertEqual(len(s), len(self.int_data))
self.assertEqual(list(s), self.int_data)
self.assertRaises(ValueError, lambda: bool(s))
realsum = sum(self.int_data)
sum1 = sum([x for x in s])
sum2 = s.sum()
sum3 = s.apply(lambda x:x, int).sum()
self.assertEqual(sum1, realsum)
self.assertEqual(sum2, realsum)
self.assertEqual(sum3, realsum)
# abs
s=np.array(range(-10, 10))
t = SArray(s, int)
self.__test_equal(abs(t), list(abs(s)), int)
t = SArray(s, float)
self.__test_equal(abs(t), list(abs(s)), float)
t = SArray([s], array.array)
self.__test_equal(SArray(abs(t)[0]), list(abs(s)), float)
def test_scalar_operators(self):
s=np.array([1,2,3,4,5,6,7,8,9,10])
t = SArray(s, int)
self.__test_equal(t + 1, list(s + 1), int)
self.__test_equal(t - 1, list(s - 1), int)
# we handle division differently. All divisions cast to float
self.__test_equal(t / 2, list(s / 2.0), float)
self.__test_equal(t * 2, list(s * 2), int)
self.__test_equal(t ** 2, list(s ** 2), float)
self.__test_almost_equal(t ** 0.5, list(s ** 0.5), float)
self.__test_equal(((t ** 2) ** 0.5 + 1e-8).astype(int), list(s), int)
self.__test_equal(t < 5, list(s < 5), int)
self.__test_equal(t > 5, list(s > 5), int)
self.__test_equal(t <= 5, list(s <= 5), int)
self.__test_equal(t >= 5, list(s >= 5), int)
self.__test_equal(t == 5, list(s == 5), int)
self.__test_equal(t != 5, list(s != 5), int)
self.__test_equal(t % 5, list(s % 5), int)
self.__test_equal(t // 5, list(s // 5), int)
self.__test_equal(t + 1, list(s + 1), int)
self.__test_equal(+t, list(+s), int)
self.__test_equal(-t, list(-s), int)
self.__test_equal(1.5 - t, list(1.5 - s), float)
self.__test_equal(2.0 / t, list(2.0 / s), float)
self.__test_equal(2 / t, list(2.0 / s), float)
self.__test_equal(2.5 * t, list(2.5 * s), float)
self.__test_equal(2**t, list(2**s), float)
s_neg = np.array([-1,-2,-3,5,6,7,8,9,10])
t_neg = SArray(s_neg, int)
self.__test_equal(t_neg // 5, list(s_neg // 5), int)
self.__test_equal(t_neg % 5, list(s_neg % 5), int)
s=["a","b","c"]
t = SArray(s, str)
self.__test_equal(t + "x", [i + "x" for i in s], str)
with self.assertRaises(RuntimeError):
t - 'x'
with self.assertRaises(RuntimeError):
t * 'x'
with self.assertRaises(RuntimeError):
t / 'x'
s = SArray(self.vec_data, array.array)
self.__test_equal(s + 1, [array.array('d', [float(j) + 1 for j in i]) for i in self.vec_data], array.array)
self.__test_equal(s - 1, [array.array('d', [float(j) - 1 for j in i]) for i in self.vec_data], array.array)
self.__test_equal(s * 2, [array.array('d', [float(j) * 2 for j in i]) for i in self.vec_data], array.array)
self.__test_equal(s / 2, [array.array('d', [float(j) / 2 for j in i]) for i in self.vec_data], array.array)
s = SArray([1,2,3,4,None])
self.__test_equal(s == None, [0, 0, 0, 0, 1], int)
self.__test_equal(s != None, [1, 1, 1, 1, 0], int)
def test_modulus_operator(self):
l = [-5,-4,-3,-2,-1,0,1,2,3,4,5]
t = SArray(l, int)
self.__test_equal(t % 2, [i % 2 for i in l], int)
self.__test_equal(t % -2, [i % -2 for i in l], int)
def test_vector_operators(self):
s=np.array([1,2,3,4,5,6,7,8,9,10])
s2=np.array([5,4,3,2,1,10,9,8,7,6])
t = SArray(s, int)
t2 = SArray(s2, int)
self.__test_equal(t + t2, list(s + s2), int)
self.__test_equal(t - t2, list(s - s2), int)
# we handle division differently. All divisions cast to float
self.__test_equal(t / t2, list(s.astype(float) / s2), float)
self.__test_equal(t * t2, list(s * s2), int)
self.__test_equal(t ** t2, list(s ** s2), float)
self.__test_almost_equal(t ** (1.0 / t2), list(s ** (1.0 / s2)), float)
self.__test_equal(t > t2, list(s > s2), int)
self.__test_equal(t <= t2, list(s <= s2), int)
self.__test_equal(t >= t2, list(s >= s2), int)
self.__test_equal(t == t2, list(s == s2), int)
self.__test_equal(t != t2, list(s != s2), int)
s = SArray(self.vec_data, array.array)
self.__test_almost_equal(s + s, [array.array('d', [float(j) + float(j) for j in i]) for i in self.vec_data], array.array)
self.__test_almost_equal(s - s, [array.array('d', [float(j) - float(j) for j in i]) for i in self.vec_data], array.array)
self.__test_almost_equal(s * s, [array.array('d', [float(j) * float(j) for j in i]) for i in self.vec_data], array.array)
self.__test_almost_equal(s / s, [array.array('d', [float(j) / float(j) for j in i]) for i in self.vec_data], array.array)
self.__test_almost_equal(s ** s, [array.array('d', [float(j) ** float(j) for j in i]) for i in self.vec_data], array.array)
self.__test_almost_equal(s // s, [array.array('d', [float(j) // float(j) for j in i]) for i in self.vec_data], array.array)
t = SArray(self.float_data, float)
self.__test_almost_equal(s + t, [array.array('d', [float(j) + i[1] for j in i[0]]) for i in zip(self.vec_data, self.float_data)], array.array)
self.__test_almost_equal(s - t, [array.array('d', [float(j) - i[1] for j in i[0]]) for i in zip(self.vec_data, self.float_data)], array.array)
self.__test_almost_equal(s * t, [array.array('d', [float(j) * i[1] for j in i[0]]) for i in zip(self.vec_data, self.float_data)], array.array)
self.__test_almost_equal(s / t, [array.array('d', [float(j) / i[1] for j in i[0]]) for i in zip(self.vec_data, self.float_data)], array.array)
self.__test_almost_equal(s ** t, [array.array('d', [float(j) ** i[1] for j in i[0]]) for i in zip(self.vec_data, self.float_data)], array.array)
self.__test_almost_equal(s // t, [array.array('d', [float(j) // i[1] for j in i[0]]) for i in zip(self.vec_data, self.float_data)], array.array)
self.__test_almost_equal(+s, [array.array('d', [float(j) for j in i]) for i in self.vec_data], array.array)
self.__test_almost_equal(-s, [array.array('d', [-float(j) for j in i]) for i in self.vec_data], array.array)
neg_float_data = [-v for v in self.float_data]
t = SArray(neg_float_data, float)
self.__test_almost_equal(s + t, [array.array('d', [float(j) + i[1] for j in i[0]]) for i in zip(self.vec_data, neg_float_data)], array.array)
self.__test_almost_equal(s - t, [array.array('d', [float(j) - i[1] for j in i[0]]) for i in zip(self.vec_data, neg_float_data)], array.array)
self.__test_almost_equal(s * t, [array.array('d', [float(j) * i[1] for j in i[0]]) for i in zip(self.vec_data, neg_float_data)], array.array)
self.__test_almost_equal(s / t, [array.array('d', [float(j) / i[1] for j in i[0]]) for i in zip(self.vec_data, neg_float_data)], array.array)
self.__test_almost_equal(s ** t, [array.array('d', [float(j) ** i[1] for j in i[0]]) for i in zip(self.vec_data, neg_float_data)], array.array)
self.__test_almost_equal(s // t, [array.array('d', [float(j) // i[1] for j in i[0]]) for i in zip(self.vec_data, neg_float_data)], array.array)
self.__test_almost_equal(t // s, [array.array('d', [i[1] // float(j) for j in i[0]]) for i in zip(self.vec_data, neg_float_data)], array.array)
s = SArray([1,2,3,4,None])
self.assertTrue((s==s).all())
s = SArray([1,2,3,4,None])
self.assertFalse((s!=s).any())
def test_div_corner(self):
def try_eq_sa_val(left_val, right_val):
if type(left_val) is list:
left_val = array.array('d', left_val)
if type(right_val) is list:
right_val = array.array('d', right_val)
left_type = type(left_val)
v1 = (SArray([left_val], left_type) // right_val)[0]
if type(right_val) is array.array:
if type(left_val) is array.array:
v2 = array.array('d', [lv // rv for lv, rv in zip(left_val, right_val)])
else:
v2 = array.array('d', [left_val // rv for rv in right_val])
else:
if type(left_val) is array.array:
v2 = array.array('d', [lv // right_val for lv in left_val])
else:
v2 = left_val // right_val
if type(v1) in six.integer_types:
self.assertTrue(type(v2) in six.integer_types)
else:
self.assertEqual(type(v1), type(v2))
self.assertEqual(v1, v2)
try_eq_sa_val(1, 2)
try_eq_sa_val(1.0, 2)
try_eq_sa_val(1, 2.0)
try_eq_sa_val(1.0, 2.0)
try_eq_sa_val(-1, 2)
try_eq_sa_val(-1.0, 2)
try_eq_sa_val(-1, 2.0)
try_eq_sa_val(-1.0, 2.0)
try_eq_sa_val([1, -1], 2)
try_eq_sa_val([1, -1], 2.0)
try_eq_sa_val(2,[3, -3])
try_eq_sa_val(2.0,[3, -3])
def test_floodiv_corner(self):
def try_eq_sa_val(left_val, right_val):
if type(left_val) is list:
left_val = array.array('d', left_val)
if type(right_val) is list:
right_val = array.array('d', right_val)
left_type = type(left_val)
v1 = (SArray([left_val], left_type) // right_val)[0]
if type(right_val) is array.array:
if type(left_val) is array.array:
v2 = array.array('d', [lv // rv for lv, rv in zip(left_val, right_val)])
else:
v2 = array.array('d', [left_val // rv for rv in right_val])
else:
if type(left_val) is array.array:
v2 = array.array('d', [lv // right_val for lv in left_val])
else:
v2 = left_val // right_val
if type(v1) in six.integer_types:
self.assertTrue(type(v2) in six.integer_types)
else:
self.assertEqual(type(v1), type(v2))
self.assertEqual(v1, v2)
try_eq_sa_val(1, 2)
try_eq_sa_val(1.0, 2)
try_eq_sa_val(1, 2.0)
try_eq_sa_val(1.0, 2.0)
try_eq_sa_val(-1, 2)
try_eq_sa_val(-1.0, 2)
try_eq_sa_val(-1, 2.0)
try_eq_sa_val(-1.0, 2.0)
try_eq_sa_val([1, -1], 2)
try_eq_sa_val([1, -1], 2.0)
try_eq_sa_val(2,[3, -3])
try_eq_sa_val(2.0,[3, -3])
from math import isnan
def try_eq_sa_correct(left_val, right_val, correct):
if type(left_val) is list:
left_val = array.array('d', left_val)
if type(right_val) is list:
right_val = array.array('d', right_val)
left_type = type(left_val)
v1 = (SArray([left_val], left_type) // right_val)[0]
if type(correct) is not list:
v1 = [v1]
correct = [correct]
for v, c in zip(v1, correct):
if type(v) is float and isnan(v):
assert isnan(c)
else:
self.assertEqual(type(v), type(c))
self.assertEqual(v, c)
try_eq_sa_correct(1, 0, None)
try_eq_sa_correct(0, 0, None)
try_eq_sa_correct(-1, 0, None)
try_eq_sa_correct(1.0, 0, float('inf'))
try_eq_sa_correct(0.0, 0, float('nan'))
try_eq_sa_correct(-1.0, 0, float('-inf'))
try_eq_sa_correct([1.0,0,-1], 0, [float('inf'), float('nan'), float('-inf')])
try_eq_sa_correct(1, [1.0, 0], [1., float('inf')])
try_eq_sa_correct(-1, [1.0, 0], [-1., float('-inf')])
try_eq_sa_correct(0, [1.0, 0], [0., float('nan')])
def test_logical_ops(self):
s=np.array([0,0,0,0,1,1,1,1])
s2=np.array([0,1,0,1,0,1,0,1])
t = SArray(s, int)
t2 = SArray(s2, int)
self.__test_equal(t & t2, list(((s & s2) > 0).astype(int)), int)
self.__test_equal(t | t2, list(((s | s2) > 0).astype(int)), int)
def test_logical_ops_missing_value_propagation(self):
s=[0, 0,0,None, None, None,1,1, 1]
s2=[0,None,1,0, None, 1, 0,None,1]
t = SArray(s, int)
t2 = SArray(s2, int)
and_result = [0,0,0,0,None,None,0,None,1]
or_result = [0,None,1,None,None,1,1,1,1]
self.__test_equal(t & t2, and_result, int)
self.__test_equal(t | t2, or_result, int)
def test_string_operators(self):
s=["a","b","c","d","e","f","g","h","i","j"]
s2=["e","d","c","b","a","j","i","h","g","f"]
t = SArray(s, str)
t2 = SArray(s2, str)
self.__test_equal(t + t2, ["".join(x) for x in zip(s,s2)], str)
self.__test_equal(t + "x", [x + "x" for x in s], str)
self.__test_equal(t < t2, [x < y for (x,y) in zip(s,s2)], int)
self.__test_equal(t > t2, [x > y for (x,y) in zip(s,s2)], int)
self.__test_equal(t == t2, [x == y for (x,y) in zip(s,s2)], int)
self.__test_equal(t != t2, [x != y for (x,y) in zip(s,s2)], int)
self.__test_equal(t <= t2, [x <= y for (x,y) in zip(s,s2)], int)
self.__test_equal(t >= t2, [x >= y for (x,y) in zip(s,s2)], int)
def test_vector_operator_missing_propagation(self):
t = SArray([1,2,3,4,None,6,7,8,9,None], float) # missing 4th and 9th
t2 = SArray([None,4,3,2,np.nan,10,9,8,7,6], float) # missing 0th and 4th
self.assertEqual(len((t + t2).dropna()), 7)
self.assertEqual(len((t - t2).dropna()), 7)
self.assertEqual(len((t * t2).dropna()), 7)
def test_dropna(self):
no_nas = ['strings', 'yeah', 'nan', 'NaN', 'NA', 'None']
t = SArray(no_nas)
self.assertEqual(len(t.dropna()), 6)
self.assertEqual(list(t.dropna()), no_nas)
t2 = SArray([None,np.nan])
self.assertEqual(len(t2.dropna()), 0)
self.assertEqual(list(SArray(self.int_data).dropna()), self.int_data)
self.assertEqual(list(SArray(self.float_data).dropna()), self.float_data)
def test_fillna(self):
# fillna shouldn't fill anything
no_nas = ['strings', 'yeah', 'nan', 'NaN', 'NA', 'None']
t = SArray(no_nas)
out = t.fillna('hello')
self.assertEqual(list(out), no_nas)
# Normal integer case (float auto casted to int)
t = SArray([53,23,None,np.nan,5])
self.assertEqual(list(t.fillna(-1.0)), [53,23,-1,-1,5])
# dict type
t = SArray(self.dict_data+[None])
self.assertEqual(list(t.fillna({1:'1'})), self.dict_data+[{1:'1'}])
# list type
t = SArray(self.list_data+[None])
self.assertEqual(list(t.fillna([0,0,0])), self.list_data+[[0,0,0]])
# vec type
t = SArray(self.vec_data+[None])
self.assertEqual(list(t.fillna(array.array('f',[0.0,0.0]))), self.vec_data+[array.array('f',[0.0,0.0])])
# empty sarray
t = SArray()
self.assertEqual(len(t.fillna(0)), 0)
def test_sample(self):
sa = SArray(data=self.int_data)
sa_sample = sa.sample(.5, 9)
sa_sample2 = sa.sample(.5, 9)
self.assertEqual(list(sa_sample.head()), list(sa_sample2.head()))
for i in sa_sample:
self.assertTrue(i in self.int_data)
with self.assertRaises(ValueError):
sa.sample(3)
sa_sample = SArray().sample(.5, 9)
self.assertEqual(len(sa_sample), 0)
self.assertEqual(len(SArray.from_sequence(100).sample(0.5, 1, exact=True)), 50)
self.assertEqual(len(SArray.from_sequence(100).sample(0.5, 2, exact=True)), 50)
def test_hash(self):
a = SArray([0,1,0,1,0,1,0,1], int)
b = a.hash()
zero_hash = b[0]
one_hash = b[1]
self.assertTrue((b[a] == one_hash).all())
self.assertTrue((b[1-a] == zero_hash).all())
# I can hash other stuff too
# does not throw
a.astype(str).hash().__materialize__()
a.apply(lambda x: [x], list).hash().__materialize__()
# Nones hash too!
a = SArray([None, None, None], int).hash()
self.assertTrue(a[0] is not None)
self.assertTrue((a == a[0]).all())
# different seeds give different hash values
self.assertTrue((a.hash(seed=0) != a.hash(seed=1)).all())
def test_random_integers(self):
a = SArray.random_integers(0)
self.assertEqual(len(a), 0)
a = SArray.random_integers(1000)
self.assertEqual(len(a), 1000)
def test_vector_slice(self):
d=[[1],[1,2],[1,2,3]]
g=SArray(d, array.array)
self.assertEqual(list(g.vector_slice(0).head()), [1,1,1])
self.assertEqual(list(g.vector_slice(0,2).head()), [None,array.array('d', [1,2]),array.array('d', [1,2])])
self.assertEqual(list(g.vector_slice(0,3).head()), [None,None,array.array('d', [1,2,3])])
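# rows shorter than the requested slice come back as None rather than a truncated vector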
g=SArray(self.vec_data, array.array)
self.__test_equal(g.vector_slice(0), self.float_data, float)
self.__test_equal(g.vector_slice(0, 2), self.vec_data, array.array)
def _my_element_slice(self, arr, start=None, stop=None, step=1):
return arr.apply(lambda x: x[slice(start, stop, step)], arr.dtype)
def _slice_equality_test(self, arr, start=None, stop=None, step=1):
self.assertEqual(
list(arr.element_slice(start, stop, step)),
list(self._my_element_slice(arr,start,stop,step)))
def test_element_slice(self):
#string slicing
g=SArray(range(1,1000, 10)).astype(str)
self._slice_equality_test(g, 0, 2)
self._slice_equality_test(g, 0, -1, 2)
self._slice_equality_test(g, -1, -3)
self._slice_equality_test(g, -1, -2, -1)
self._slice_equality_test(g, None, None, -1)
self._slice_equality_test(g, -100, -1)
#list slicing
g=SArray(range(1,10)).apply(lambda x: list(range(x)), list)
self._slice_equality_test(g, 0, 2)
self._slice_equality_test(g, 0, -1, 2)
self._slice_equality_test(g, -1, -3)
self._slice_equality_test(g, -1, -2, -1)
self._slice_equality_test(g, None, None, -1)
self._slice_equality_test(g, -100, -1)
#array slicing
import array
g=SArray(range(1,10)).apply(lambda x: array.array('d', range(x)))
self._slice_equality_test(g, 0, 2)
self._slice_equality_test(g, 0, -1, 2)
self._slice_equality_test(g, -1, -3)
self._slice_equality_test(g, -1, -2, -1)
self._slice_equality_test(g, None, None, -1)
self._slice_equality_test(g, -100, -1)
#this should fail
with self.assertRaises(TypeError):
g=SArray(range(1,1000)).element_slice(1)
with self.assertRaises(TypeError):
g=SArray(range(1,1000)).astype(float).element_slice(1)
def test_lazy_eval(self):
sa = SArray(range(-10, 10))
sa = sa + 1
sa1 = sa >= 0
sa2 = sa <= 0
sa3 = sa[sa1 & sa2]
item_count = len(sa3)
self.assertEqual(item_count, 1)
def __test_append(self, data1, data2, dtype):
sa1 = SArray(data1, dtype)
sa2 = SArray(data2, dtype)
sa3 = sa1.append(sa2)
self.__test_equal(sa3, data1 + data2, dtype)
sa3 = sa2.append(sa1)
self.__test_equal(sa3, data2 + data1, dtype)
def test_append(self):
n = len(self.int_data)
m = n // 2
self.__test_append(self.int_data[0:m], self.int_data[m:n], int)
self.__test_append(self.bool_data[0:m], self.bool_data[m:n], int)
self.__test_append(self.string_data[0:m], self.string_data[m:n], str)
self.__test_append(self.float_data[0:m], self.float_data[m:n], float)
self.__test_append(self.vec_data[0:m], self.vec_data[m:n], array.array)
self.__test_append(self.dict_data[0:m], self.dict_data[m:n], dict)
def test_append_exception(self):
val1 = [i for i in range(1, 1000)]
val2 = [str(i) for i in range(-10, 1)]
sa1 = SArray(val1, int)
sa2 = SArray(val2, str)
with self.assertRaises(RuntimeError):
sa3 = sa1.append(sa2)
def test_word_count(self):
sa = SArray(["This is someurl http://someurl!!",
"中文 应该也 行",
'Сблъсъкът между'])
expected = [{"this": 1, "http://someurl!!": 1, "someurl": 1, "is": 1},
{"中文": 1, "应该也": 1, "行": 1},
{"Сблъсъкът": 1, "между": 1}]
expected2 = [{"This": 1, "http://someurl!!": 1, "someurl": 1, "is": 1},
{"中文": 1, "应该也": 1, "行": 1},
{"Сблъсъкът": 1, "между": 1}]
sa1 = sa._count_words()
self.assertEqual(sa1.dtype, dict)
self.__test_equal(sa1, expected, dict)
sa1 = sa._count_words(to_lower=False)
self.assertEqual(sa1.dtype, dict)
self.__test_equal(sa1, expected2, dict)
#should fail if the input type is not string
sa = SArray([1, 2, 3])
with self.assertRaises(TypeError):
sa._count_words()
def test_word_count2(self):
sa = SArray(["This is some url http://www.someurl.com!!", "Should we? Yes, we should."])
#TODO: Get some weird unicode whitespace in the Chinese and Russian tests
expected1 = [{"this": 1, "is": 1, "some": 1, "url": 1, "http://www.someurl.com!!": 1},
{"should": 1, "we?": 1, "we": 1, "yes,": 1, "should.": 1}]
expected2 = [{"this is some url http://www.someurl.com": 1},
{"should we": 1, " yes": 1, " we should.": 1}]
word_counts1 = sa._count_words()
word_counts2 = sa._count_words(delimiters=["?", "!", ","])
self.assertEqual(word_counts1.dtype, dict)
self.__test_equal(word_counts1, expected1, dict)
self.assertEqual(word_counts2.dtype, dict)
self.__test_equal(word_counts2, expected2, dict)
def test_ngram_count(self):
sa_word = SArray(["I like big dogs. They are fun. I LIKE BIG DOGS", "I like.", "I like big"])
sa_character = SArray(["Fun. is. fun","Fun is fun.","fu", "fun"])
# Testing word n-gram functionality
result = sa_word._count_ngrams(3)
result2 = sa_word._count_ngrams(2)
result3 = sa_word._count_ngrams(3,"word", to_lower=False)
result4 = sa_word._count_ngrams(2,"word", to_lower=False)
expected = [{'fun i like': 1, 'i like big': 2, 'they are fun': 1, 'big dogs they': 1, 'like big dogs': 2, 'are fun i': 1, 'dogs they are': 1}, {}, {'i like big': 1}]
expected2 = [{'i like': 2, 'dogs they': 1, 'big dogs': 2, 'are fun': 1, 'like big': 2, 'they are': 1, 'fun i': 1}, {'i like': 1}, {'i like': 1, 'like big': 1}]
expected3 = [{'I like big': 1, 'fun I LIKE': 1, 'I LIKE BIG': 1, 'LIKE BIG DOGS': 1, 'They are fun': 1, 'big dogs They': 1, 'like big dogs': 1, 'are fun I': 1, 'dogs They are': 1}, {}, {'I like big': 1}]
expected4 = [{'I like': 1, 'like big': 1, 'I LIKE': 1, 'BIG DOGS': 1, 'are fun': 1, 'LIKE BIG': 1, 'big dogs': 1, 'They are': 1, 'dogs They': 1, 'fun I': 1}, {'I like': 1}, {'I like': 1, 'like big': 1}]
self.assertEqual(result.dtype, dict)
self.__test_equal(result, expected, dict)
self.assertEqual(result2.dtype, dict)
self.__test_equal(result2, expected2, dict)
self.assertEqual(result3.dtype, dict)
self.__test_equal(result3, expected3, dict)
self.assertEqual(result4.dtype, dict)
self.__test_equal(result4, expected4, dict)
#Testing character n-gram functionality
result5 = sa_character._count_ngrams(3, "character")
result6 = sa_character._count_ngrams(2, "character")
result7 = sa_character._count_ngrams(3, "character", to_lower=False)
result8 = sa_character._count_ngrams(2, "character", to_lower=False)
result9 = sa_character._count_ngrams(3, "character", to_lower=False, ignore_space=False)
result10 = sa_character._count_ngrams(2, "character", to_lower=False, ignore_space=False)
result11 = sa_character._count_ngrams(3, "character", to_lower=True, ignore_space=False)
result12 = sa_character._count_ngrams(2, "character", to_lower=True, ignore_space=False)
expected5 = [{'fun': 2, 'nis': 1, 'sfu': 1, 'isf': 1, 'uni': 1}, {'fun': 2, 'nis': 1, 'sfu': 1, 'isf': 1, 'uni': 1}, {}, {'fun': 1}]
expected6 = [{'ni': 1, 'is': 1, 'un': 2, 'sf': 1, 'fu': 2}, {'ni': 1, 'is': 1, 'un': 2, 'sf': 1, 'fu': 2}, {'fu': 1}, {'un': 1, 'fu': 1}]
expected7 = [{'sfu': 1, 'Fun': 1, 'uni': 1, 'fun': 1, 'nis': 1, 'isf': 1}, {'sfu': 1, 'Fun': 1, 'uni': 1, 'fun': 1, 'nis': 1, 'isf': 1}, {}, {'fun': 1}]
expected8 = [{'ni': 1, 'Fu': 1, 'is': 1, 'un': 2, 'sf': 1, 'fu': 1}, {'ni': 1, 'Fu': 1, 'is': 1, 'un': 2, 'sf': 1, 'fu': 1}, {'fu': 1}, {'un': 1, 'fu': 1}]
expected9 = [{' fu': 1, ' is': 1, 's f': 1, 'un ': 1, 'Fun': 1, 'n i': 1, 'fun': 1, 'is ': 1}, {' fu': 1, ' is': 1, 's f': 1, 'un ': 1, 'Fun': 1, 'n i': 1, 'fun': 1, 'is ': 1}, {}, {'fun': 1}]
expected10 = [{' f': 1, 'fu': 1, 'n ': 1, 'is': 1, ' i': 1, 'un': 2, 's ': 1, 'Fu': 1}, {' f': 1, 'fu': 1, 'n ': 1, 'is': 1, ' i': 1, 'un': 2, 's ': 1, 'Fu': 1}, {'fu': 1}, {'un': 1, 'fu': 1}]
expected11 = [{' fu': 1, ' is': 1, 's f': 1, 'un ': 1, 'n i': 1, 'fun': 2, 'is ': 1}, {' fu': 1, ' is': 1, 's f': 1, 'un ': 1, 'n i': 1, 'fun': 2, 'is ': 1}, {}, {'fun': 1}]
expected12 = [{' f': 1, 'fu': 2, 'n ': 1, 'is': 1, ' i': 1, 'un': 2, 's ': 1}, {' f': 1, 'fu': 2, 'n ': 1, 'is': 1, ' i': 1, 'un': 2, 's ': 1}, {'fu': 1}, {'un': 1, 'fu': 1}]
self.assertEqual(result5.dtype, dict)
self.__test_equal(result5, expected5, dict)
self.assertEqual(result6.dtype, dict)
self.__test_equal(result6, expected6, dict)
self.assertEqual(result7.dtype, dict)
self.__test_equal(result7, expected7, dict)
self.assertEqual(result8.dtype, dict)
self.__test_equal(result8, expected8, dict)
self.assertEqual(result9.dtype, dict)
self.__test_equal(result9, expected9, dict)
self.assertEqual(result10.dtype, dict)
self.__test_equal(result10, expected10, dict)
self.assertEqual(result11.dtype, dict)
self.__test_equal(result11, expected11, dict)
self.assertEqual(result12.dtype, dict)
self.__test_equal(result12, expected12, dict)
sa = SArray([1, 2, 3])
with self.assertRaises(TypeError):
#should fail if the input type is not string
sa._count_ngrams()
with self.assertRaises(TypeError):
#should fail if n is not of type 'int'
sa_word._count_ngrams(1.01)
with self.assertRaises(ValueError):
#should fail with invalid method
sa_word._count_ngrams(3,"bla")
with self.assertRaises(ValueError):
#should fail with n <0
sa_word._count_ngrams(0)
with warnings.catch_warnings(record=True) as context:
warnings.simplefilter("always")
sa_word._count_ngrams(10)
assert len(context) == 1
def test_dict_keys(self):
# self.dict_data = [{str(i): i, i : float(i)} for i in self.int_data]
sa = SArray(self.dict_data)
sa_keys = sa.dict_keys()
self.assertEqual([set(i) for i in sa_keys], [{str(i), i} for i in self.int_data])
# na value
d = [{'a': 1}, {None: 2}, {"b": None}, None]
sa = SArray(d)
sa_keys = sa.dict_keys()
self.assertEqual(list(sa_keys), [['a'], [None], ['b'], None])
#empty SArray
sa = SArray()
with self.assertRaises(RuntimeError):
sa.dict_keys()
# empty SArray with type
sa = SArray([], dict)
self.assertEqual(list(sa.dict_keys().head(10)), [], list)
def test_dict_values(self):
# self.dict_data = [{str(i): i, i : float(i)} for i in self.int_data]
sa = SArray(self.dict_data)
sa_values = sa.dict_values()
self.assertEqual(list(sa_values), [[i, float(i)] for i in self.int_data])
# na value
d = [{'a': 1}, {None: 'str'}, {"b": None}, None]
sa = SArray(d)
sa_values = sa.dict_values()
self.assertEqual(list(sa_values), [[1], ['str'], [None], None])
#empty SArray
sa = SArray()
with self.assertRaises(RuntimeError):
sa.dict_values()
# empty SArray with type
sa = SArray([], dict)
self.assertEqual(list(sa.dict_values().head(10)), [], list)
def test_dict_trim_by_keys(self):
# self.dict_data = [{str(i): i, i : float(i)} for i in self.int_data]
d = [{'a':1, 'b': [1,2]}, {None: 'str'}, {"b": None, "c": 1}, None]
sa = SArray(d)
sa_values = sa.dict_trim_by_keys(['a', 'b'])
self.assertEqual(list(sa_values), [{}, {None: 'str'}, {"c": 1}, None])
#empty SArray
sa = SArray()
with self.assertRaises(RuntimeError):
sa.dict_trim_by_keys([])
sa = SArray([], dict)
self.assertEqual(list(sa.dict_trim_by_keys([]).head(10)), [], list)
def test_dict_trim_by_values(self):
# self.dict_data = [{str(i): i, i : float(i)} for i in self.int_data]
d = [{'a':1, 'b': 20, 'c':None}, {"b": 4, None: 5}, None]
sa = SArray(d)
sa_values = sa.dict_trim_by_values(5,10)
self.assertEqual(list(sa_values), [{'c':None}, {None:5}, None])
# no upper key
sa_values = sa.dict_trim_by_values(2)
self.assertEqual(list(sa_values), [{'b': 20, 'c':None}, {"b": 4, None:5}, None])
# no param
sa_values = sa.dict_trim_by_values()
self.assertEqual(list(sa_values), [{'a':1, 'b': 20, 'c':None}, {"b": 4, None: 5}, None])
# no lower key
sa_values = sa.dict_trim_by_values(upper=7)
self.assertEqual(list(sa_values), [{'a':1, 'c':None}, {"b": 4, None: 5}, None])
#empty SArray
sa = SArray()
with self.assertRaises(RuntimeError):
sa.dict_trim_by_values()
sa = SArray([], dict)
self.assertEqual(list(sa.dict_trim_by_values().head(10)), [], list)
def test_dict_has_any_keys(self):
d = [{'a':1, 'b': 20, 'c':None}, {"b": 4, None: 5}, None, {'a':0}]
sa = SArray(d)
sa_values = sa.dict_has_any_keys([])
self.assertEqual(list(sa_values), [0,0,None,0])
sa_values = sa.dict_has_any_keys(['a'])
self.assertEqual(list(sa_values), [1,0,None,1])
# one value is auto convert to list
sa_values = sa.dict_has_any_keys("a")
self.assertEqual(list(sa_values), [1,0,None,1])
sa_values = sa.dict_has_any_keys(['a', 'b'])
self.assertEqual(list(sa_values), [1,1,None,1])
with self.assertRaises(TypeError):
sa.dict_has_any_keys()
#empty SArray
sa = SArray()
with self.assertRaises(TypeError):
sa.dict_has_any_keys()
sa = SArray([], dict)
self.assertEqual(list(sa.dict_has_any_keys([]).head(10)), [], list)
def test_dict_has_all_keys(self):
d = [{'a':1, 'b': 20, 'c':None}, {"b": 4, None: 5}, None, {'a':0}]
sa = SArray(d)
sa_values = sa.dict_has_all_keys([])
self.assertEqual(list(sa_values), [1,1,None,1])
sa_values = sa.dict_has_all_keys(['a'])
self.assertEqual(list(sa_values), [1,0,None,1])
# one value is auto convert to list
sa_values = sa.dict_has_all_keys("a")
self.assertEqual(list(sa_values), [1,0,None,1])
sa_values = sa.dict_has_all_keys(['a', 'b'])
self.assertEqual(list(sa_values), [1,0,None,0])
sa_values = sa.dict_has_all_keys([None, "b"])
self.assertEqual(list(sa_values), [0,1,None,0])
with self.assertRaises(TypeError):
sa.dict_has_all_keys()
#empty SArray
sa = SArray()
with self.assertRaises(TypeError):
sa.dict_has_all_keys()
sa = SArray([], dict)
self.assertEqual(list(sa.dict_has_all_keys([]).head(10)), [], list)
def test_save_load_cleanup_file(self):
# similarly for SArray
with util.TempDirectory() as f:
sa = SArray(range(1,1000000))
sa.save(f)
# 17 for each sarray, 1 object.bin, 1 ini
file_count = len(os.listdir(f))
self.assertTrue(file_count > 2)
# sf1 now references the on disk file
sa1 = SArray(f)
# create another SFrame and save to the same location
sa2 = SArray([str(i) for i in range(1,100000)])
sa2.save(f)
file_count = len(os.listdir(f))
self.assertTrue(file_count > 2)
# now sf1 should still be accessible
self.__test_equal(sa1, list(sa), int)
# and sf2 is correct too
sa3 = SArray(f)
self.__test_equal(sa3, list(sa2), str)
# when sf1 goes out of scope, the tmp files should be gone
sa1 = 1
time.sleep(1) # give time for the files being deleted
file_count = len(os.listdir(f))
self.assertTrue(file_count > 2)
# list_to_compare must have all unique values for this to work
def __generic_unique_test(self, list_to_compare):
test = SArray(list_to_compare + list_to_compare)
self.assertEqual(sorted(list(test.unique())), sorted(list_to_compare))
def test_unique(self):
# Test empty SArray
test = SArray([])
self.assertEqual(list(test.unique()), [])
# Test one value
test = SArray([1])
self.assertEqual(list(test.unique()), [1])
# Test many of one value
test = SArray([1,1,1,1,1,1,1,1,1,1,1,1,1,1,1])
self.assertEqual(list(test.unique()), [1])
# Test all unique values
test = SArray(self.int_data)
self.assertEqual(sorted(list(test.unique())), self.int_data)
# Test an interesting sequence
interesting_ints = [4654,4352436,5453,7556,45435,4654,5453,4654,5453,1,1,1,5,5,5,8,66,7,7,77,90,-34]
test = SArray(interesting_ints)
u = test.unique()
self.assertEqual(len(u), 13)
# We do not preserve order
self.assertEqual(sorted(list(u)), sorted(np.unique(interesting_ints)))
# Test other types
self.__generic_unique_test(self.string_data[0:6])
# only works reliably because these are values that floats can perform
# reliable equality tests
self.__generic_unique_test(self.float_data)
self.__generic_unique_test(self.list_data)
self.__generic_unique_test(self.vec_data)
with self.assertRaises(TypeError):
SArray(self.dict_data).unique()
def test_item_len(self):
# empty SArray
test = SArray([])
with self.assertRaises(TypeError):
test.item_length()
# wrong type
test = SArray([1,2,3])
with self.assertRaises(TypeError):
test.item_length()
test = SArray(['1','2','3'])
with self.assertRaises(TypeError):
test.item_length()
# vector type
test = SArray([[], [1], [1,2], [1,2,3], None])
item_length = test.item_length()
self.assertEqual(list(item_length), list([0, 1,2,3,None]))
# dict type
test = SArray([{}, {'key1': 1}, {'key2':1, 'key1':2}, None])
self.assertEqual(list(test.item_length()), list([0, 1,2,None]))
# list type
test = SArray([[], [1,2], ['str', 'str2'], None])
self.assertEqual(list(test.item_length()), list([0, 2,2,None]))
def test_random_access(self):
t = list(range(0,100000))
s = SArray(t)
# simple slices
self.__test_equal(s[1:10000], t[1:10000], int)
self.__test_equal(s[0:10000:3], t[0:10000:3], int)
self.__test_equal(s[1:10000:3], t[1:10000:3], int)
self.__test_equal(s[2:10000:3], t[2:10000:3], int)
self.__test_equal(s[3:10000:101], t[3:10000:101], int)
# negative slices
self.__test_equal(s[-5:], t[-5:], int)
self.__test_equal(s[-1:], t[-1:], int)
self.__test_equal(s[-100:-10], t[-100:-10], int)
self.__test_equal(s[-100:-10:2], t[-100:-10:2], int)
# single element reads
self.assertEqual(s[511], t[511])
self.assertEqual(s[1912], t[1912])
self.assertEqual(s[-1], t[-1])
self.assertEqual(s[-10], t[-10])
# A cache boundary
self.assertEqual(s[32*1024-1], t[32*1024-1])
self.assertEqual(s[32*1024], t[32*1024])
# totally different
self.assertEqual(s[19312], t[19312])
# edge case oddities
self.__test_equal(s[10:100:100], t[10:100:100], int)
self.__test_equal(s[-100:len(s):10], t[-100:len(t):10], int)
self.__test_equal(s[-1:-2], t[-1:-2], int)
self.__test_equal(s[-1:-1000:2], t[-1:-1000:2], int)
with self.assertRaises(IndexError):
s[len(s)]
# with caching abilities; these should be fast, as 32K
# elements are cached.
for i in range(0, 100000, 100):
self.assertEqual(s[i], t[i])
for i in range(0, 100000, 100):
self.assertEqual(s[-i], t[-i])
def test_sort(self):
test = SArray([1,2,3,5,1,4])
ascending = SArray([1,1,2,3,4,5])
descending = SArray([5,4,3,2,1,1])
result = test.sort()
self.assertEqual(list(result), list(ascending))
result = test.sort(ascending = False)
self.assertEqual(list(result), list(descending))
with self.assertRaises(TypeError):
SArray([[1,2], [2,3]]).sort()
def test_unicode_encode_should_not_fail(self):
g=SArray([{'a':u'\u2019'}])
g=SArray([u'123',u'\u2019'])
g=SArray(['123',u'\u2019'])
def test_from_const(self):
g = SArray.from_const('a', 100)
self.assertEqual(len(g), 100)
self.assertEqual(list(g), ['a']*100)
g = SArray.from_const(dt.datetime(2013, 5, 7, 10, 4, 10),10)
self.assertEqual(len(g), 10)
self.assertEqual(list(g), [dt.datetime(2013, 5, 7, 10, 4, 10)]*10)
g = SArray.from_const(0, 0)
self.assertEqual(len(g), 0)
g = SArray.from_const(None, 100)
self.assertEqual(list(g), [None] * 100)
self.assertEqual(g.dtype, float)
g = SArray.from_const(None, 100, str)
self.assertEqual(list(g), [None] * 100)
self.assertEqual(g.dtype, str)
g = SArray.from_const(0, 100, float)
self.assertEqual(list(g), [0.0] * 100)
self.assertEqual(g.dtype, float)
g = SArray.from_const(0.0, 100, int)
self.assertEqual(list(g), [0] * 100)
self.assertEqual(g.dtype, int)
g = SArray.from_const(None, 100, float)
self.assertEqual(list(g), [None] * 100)
self.assertEqual(g.dtype, float)
g = SArray.from_const(None, 100, int)
self.assertEqual(list(g), [None] * 100)
self.assertEqual(g.dtype, int)
g = SArray.from_const(None, 100, list)
self.assertEqual(list(g), [None] * 100)
self.assertEqual(g.dtype, list)
g = SArray.from_const([1], 100, list)
self.assertEqual(list(g), [[1]] * 100)
self.assertEqual(g.dtype, list)
def test_from_sequence(self):
with self.assertRaises(TypeError):
g = SArray.from_sequence()
g = SArray.from_sequence(100)
self.assertEqual(list(g), list(range(100)))
g = SArray.from_sequence(10, 100)
self.assertEqual(list(g), list(range(10, 100)))
g = SArray.from_sequence(100, 10)
self.assertEqual(list(g), list(range(100, 10)))
def test_datetime(self):
sa = SArray(self.datetime_data)
self.__test_equal(sa ,self.datetime_data,dt.datetime)
sa = SArray(self.datetime_data2)
self.__test_equal(sa ,self.datetime_data2,dt.datetime)
ret = sa.split_datetime(limit=['year','month','day','hour','minute',
'second','us','weekday', 'isoweekday','tmweekday'])
self.assertEqual(ret.num_columns(), 10)
self.__test_equal(ret['X.year'] , [2013, 1902, None], int)
self.__test_equal(ret['X.month'] , [5, 10, None], int)
self.__test_equal(ret['X.day'] , [7, 21, None], int)
self.__test_equal(ret['X.hour'] , [10, 10, None], int)
self.__test_equal(ret['X.minute'] , [4, 34, None], int)
self.__test_equal(ret['X.second'] , [10, 10, None], int)
self.__test_equal(ret['X.us'] , [109321, 991111, None], int)
self.__test_equal(ret['X.weekday'] , [1, 1, None], int)
self.__test_equal(ret['X.isoweekday'] , [2, 2, None], int)
self.__test_equal(ret['X.tmweekday'] , [2, 2, None], int)
def test_datetime_difference(self):
sa = SArray(self.datetime_data)
sa2 = SArray(self.datetime_data2)
res = sa2 - sa
expected = [float(x.microsecond) / 1000000.0 if x is not None else x for x in self.datetime_data2]
self.assertEqual(len(res), len(expected))
for i in range(len(res)):
if res[i] is None:
self.assertEqual(res[i], expected[i])
else:
self.assertAlmostEqual(res[i], expected[i], places=6)
def test_datetime_lambda(self):
data = [dt.datetime(2013, 5, 7, 10, 4, 10, 109321),
dt.datetime(1902, 10, 21, 10, 34, 10, 991111,
tzinfo=GMT(1))]
g=SArray(data)
gstr=g.apply(lambda x:str(x))
self.__test_equal(gstr, [str(x) for x in g], str)
gident=g.apply(lambda x:x)
self.__test_equal(gident, list(g), dt.datetime)
def test_datetime_to_str(self):
sa = SArray(self.datetime_data)
sa_string_back = sa.datetime_to_str()
self.__test_equal(sa_string_back,['2013-05-07T10:04:10', '1902-10-21T10:34:10GMT+00', None],str)
sa = SArray([None,None,None],dtype=dt.datetime)
sa_string_back = sa.datetime_to_str()
self.__test_equal(sa_string_back,[None,None,None],str)
sa = SArray(dtype=dt.datetime)
sa_string_back = sa.datetime_to_str()
self.__test_equal(sa_string_back,[],str)
sa = SArray([None,None,None])
self.assertRaises(TypeError,sa.datetime_to_str)
sa = SArray()
self.assertRaises(TypeError,sa.datetime_to_str)
def test_str_to_datetime(self):
sa_string = SArray(['2013-05-07T10:04:10', '1902-10-21T10:34:10GMT+00', None])
sa_datetime_back = sa_string.str_to_datetime()
expected = self.datetime_data
self.__test_equal(sa_datetime_back,expected,dt.datetime)
sa_string = SArray([None,None,None],str)
sa_datetime_back = sa_string.str_to_datetime()
self.__test_equal(sa_datetime_back,[None,None,None],dt.datetime)
sa_string = SArray(dtype=str)
sa_datetime_back = sa_string.str_to_datetime()
self.__test_equal(sa_datetime_back,[],dt.datetime)
sa = SArray([None,None,None])
self.assertRaises(TypeError,sa.str_to_datetime)
sa = SArray()
self.assertRaises(TypeError,sa.str_to_datetime)
# hour without leading zero
sa = SArray(['10/30/2014 9:01'])
sa = sa.str_to_datetime('%m/%d/%Y %H:%M')
expected = [dt.datetime(2014, 10, 30, 9, 1)]
self.__test_equal(sa,expected,dt.datetime)
# without delimiters
sa = SArray(['10302014 0901', '10302014 2001'])
sa = sa.str_to_datetime('%m%d%Y %H%M')
expected = [dt.datetime(2014, 10, 30, 9, 1),
dt.datetime(2014, 10, 30, 20, 1)]
self.__test_equal(sa,expected,dt.datetime)
# another without delimiter test
sa = SArray(['20110623T191001'])
sa = sa.str_to_datetime("%Y%m%dT%H%M%S%F%q")
expected = [dt.datetime(2011, 6, 23, 19, 10, 1)]
self.__test_equal(sa,expected,dt.datetime)
# am pm
sa = SArray(['10/30/2014 9:01am', '10/30/2014 9:01pm'])
sa = sa.str_to_datetime('%m/%d/%Y %H:%M%p')
expected = [dt.datetime(2014, 10, 30, 9, 1),
dt.datetime(2014, 10, 30, 21, 1)]
self.__test_equal(sa,expected,dt.datetime)
sa = SArray(['10/30/2014 9:01AM', '10/30/2014 9:01PM'])
sa = sa.str_to_datetime('%m/%d/%Y %H:%M%P')
expected = [dt.datetime(2014, 10, 30, 9, 1),
dt.datetime(2014, 10, 30, 21, 1)]
self.__test_equal(sa,expected,dt.datetime)
# failure 13pm
sa = SArray(['10/30/2014 13:01pm'])
with self.assertRaises(RuntimeError):
sa.str_to_datetime('%m/%d/%Y %H:%M%p')
# failure hour 13 when %l should only have up to hour 12
sa = SArray(['10/30/2014 13:01'])
with self.assertRaises(RuntimeError):
sa.str_to_datetime('%m/%d/%Y %l:%M')
with self.assertRaises(RuntimeError):
sa.str_to_datetime('%m/%d/%Y %L:%M')
sa = SArray(['2013-05-07T10:04:10',
'1902-10-21T10:34:10UTC+05:45'])
expected = [dt.datetime(2013, 5, 7, 10, 4, 10),
dt.datetime(1902, 10, 21, 10, 34, 10).replace(tzinfo=GMT(5.75))]
self.__test_equal(sa.str_to_datetime() ,expected,dt.datetime)
def test_apply_with_partial(self):
sa = SArray([1, 2, 3, 4, 5])
def concat_fn(character, number):
return '%s%d' % (character, number)
my_partial_fn = functools.partial(concat_fn, 'x')
sa_transformed = sa.apply(my_partial_fn)
self.assertEqual(list(sa_transformed), ['x1', 'x2', 'x3', 'x4', 'x5'])
def test_apply_with_functor(self):
sa = SArray([1, 2, 3, 4, 5])
class Concatenator(object):
def __init__(self, character):
self.character = character
def __call__(self, number):
return '%s%d' % (self.character, number)
concatenator = Concatenator('x')
sa_transformed = sa.apply(concatenator)
self.assertEqual(list(sa_transformed), ['x1', 'x2', 'x3', 'x4', 'x5'])
def test_argmax_argmin(self):
sa = SArray([1,4,-1,10,3,5,8])
index = [sa.argmax(),sa.argmin()]
expected = [3,2]
self.assertEqual(index,expected)
sa = SArray([1,4.3,-1.4,0,3,5.6,8.9])
index = [sa.argmax(),sa.argmin()]
expected = [6,2]
self.assertEqual(index,expected)
#empty case
sa = SArray([])
index = [sa.argmax(),sa.argmin()]
expected = [None,None]
self.assertEqual(index,expected)
# non-numeric type
sa = SArray(["434","43"])
with self.assertRaises(TypeError):
sa.argmax()
with self.assertRaises(TypeError):
sa.argmin()
def test_apply_with_recursion(self):
sa = SArray(range(1000))
sastr = sa.astype(str)
rets = sa.apply(lambda x:sastr[x])
self.assertEqual(list(rets), list(sastr))
def test_save_sarray(self):
'''save lazily evaluated SArray should not materialize to target folder
'''
data = SArray(range(1000))
data = data[data > 50]
#lazy and good
tmp_dir = tempfile.mkdtemp()
data.save(tmp_dir)
shutil.rmtree(tmp_dir)
print(data)
def test_to_numpy(self):
X = SArray(range(100))
import numpy as np
import numpy.testing as nptest
Y = np.array(range(100))
nptest.assert_array_equal(X.to_numpy(), Y)
X = X.astype(str)
Y = np.array([str(i) for i in range(100)])
nptest.assert_array_equal(X.to_numpy(), Y)
def test_rolling_mean(self):
data = SArray(range(1000))
neg_data = SArray(range(-100,100,2))
### Small backward window including current
res = data.rolling_mean(-3,0)
expected = [None for i in range(3)] + [i + .5 for i in range(1,998)]
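# each full window holds the 4 consecutive integers [i-3, i], whose mean is i - 1.5, i.e. 1.5, 2.5, ..., 997.5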
self.__test_equal(res,expected,float)
# Test float inputs as well
res = data.astype(float).rolling_mean(-3,0)
self.__test_equal(res,expected,float)
# Test min observations
res = data.rolling_mean(-3, 0, min_observations=5)
self.__test_equal(res,expected,float)
res = data.rolling_mean(-3, 0, min_observations=4)
self.__test_equal(res,expected,float)
res = data.rolling_mean(-3, 0, min_observations=3)
expected[2] = 1.0
self.__test_equal(res,expected,float)
res = data.rolling_mean(-3, 0, min_observations=2)
expected[1] = 0.5
self.__test_equal(res,expected,float)
res = data.rolling_mean(-3, 0, min_observations=1)
expected[0] = 0.0
self.__test_equal(res,expected,float)
res = data.rolling_mean(-3, 0, min_observations=0)
self.__test_equal(res,expected,float)
with self.assertRaises(ValueError):
res = data.rolling_mean(-3,0,min_observations=-1)
res = neg_data.rolling_mean(-3,0)
expected = [None for i in range(3)] + [float(i) for i in range(-97,96,2)]
self.__test_equal(res,expected,float)
# Test float inputs as well
res = neg_data.astype(float).rolling_mean(-3,0)
self.__test_equal(res,expected,float)
# Test vector input
res = SArray(self.vec_data).rolling_mean(-3,0)
expected = [None for i in range(3)] + [array.array('d',[i+.5, i+1.5]) for i in range(2,9)]
self.__test_equal(res,expected,array.array)
### Small forward window including current
res = data.rolling_mean(0,4)
expected = [float(i) for i in range(2,998)] + [None for i in range(4)]
self.__test_equal(res,expected,float)
res = neg_data.rolling_mean(0,4)
expected = [float(i) for i in range(-96,95,2)] + [None for i in range(4)]
self.__test_equal(res,expected,float)
### Small backward window not including current
res = data.rolling_mean(-5,-1)
expected = [None for i in range(5)] + [float(i) for i in range(2,997)]
self.__test_equal(res,expected,float)
res = neg_data.rolling_mean(-5,-1)
expected = [None for i in range(5)] + [float(i) for i in range(-96,94,2)]
self.__test_equal(res,expected,float)
### Small forward window not including current
res = data.rolling_mean(1,5)
expected = [float(i) for i in range(3,998)] + [None for i in range(5)]
self.__test_equal(res,expected,float)
res = neg_data.rolling_mean(1,5)
expected = [float(i) for i in range(-94,96,2)] + [None for i in range(5)]
self.__test_equal(res,expected,float)
### "Centered" rolling aggregate
res = data.rolling_mean(-2,2)
expected = [None for i in range(2)] + [float(i) for i in range(2,998)] + [None for i in range(2)]
self.__test_equal(res,expected,float)
res = neg_data.rolling_mean(-2,2)
expected = [None for i in range(2)] + [float(i) for i in range(-96,96,2)] + [None for i in range(2)]
self.__test_equal(res,expected,float)
### Lopsided rolling aggregate
res = data.rolling_mean(-2,1)
expected = [None for i in range(2)] + [i + .5 for i in range(1,998)] + [None for i in range(1)]
self.__test_equal(res,expected,float)
res = neg_data.rolling_mean(-2,1)
expected = [None for i in range(2)] + [float(i) for i in range(-97,97,2)] + [None for i in range(1)]
self.__test_equal(res,expected,float)
### A very forward window
res = data.rolling_mean(500,502)
expected = [float(i) for i in range(501,999)] + [None for i in range(502)]
self.__test_equal(res,expected,float)
res = neg_data.rolling_mean(50,52)
expected = [float(i) for i in range(2,98,2)] + [None for i in range(52)]
self.__test_equal(res,expected,float)
### A very backward window
res = data.rolling_mean(-502,-500)
expected = [None for i in range(502)] + [float(i) for i in range(1,499)]
self.__test_equal(res,expected,float)
res = neg_data.rolling_mean(-52,-50)
expected = [None for i in range(52)] + [float(i) for i in range(-98,-2,2)]
self.__test_equal(res,expected,float)
### A window size much larger than anticipated segment size
res = data.rolling_mean(0,749)
expected = [i + .5 for i in range(374,625)] + [None for i in range(749)]
self.__test_equal(res,expected,float)
### A window size larger than the array
res = data.rolling_mean(0,1000)
expected = [None for i in range(1000)]
self.__test_equal(res,expected,type(None))
### A window size of 1
res = data.rolling_mean(0,0)
self.__test_equal(res, list(data), float)
res = data.rolling_mean(-2,-2)
expected = [None for i in range(2)] + list(data[0:998])
self.__test_equal(res, expected, float)
res = data.rolling_mean(3,3)
expected = list(data[3:1000]) + [None for i in range(3)]
self.__test_equal(res, expected, float)
### A negative window size
with self.assertRaises(RuntimeError):
res = data.rolling_mean(4,2)
### Non-numeric
with self.assertRaisesRegexp(RuntimeError, '.*support.*type.*'):
res = SArray(self.string_data).rolling_mean(0,1)
### Empty SArray
sa = SArray()
res = sa.rolling_mean(0,1)
self.__test_equal(res, [], type(None))
### Small SArray
sa = SArray([1,2,3])
res = sa.rolling_mean(0,1)
self.__test_equal(res, [1.5,2.5,None], float)
def test_rolling_sum(self):
data = SArray(range(1000))
neg_data = SArray(range(-100,100,2))
### Small backward window including current
res = data.rolling_sum(-3,0)
expected = [None for i in range(3)] + [i for i in range(6,3994,4)]
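# the sum of the 4 consecutive integers ending at i is 4*i - 6, giving 6, 10, ..., 3990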
self.__test_equal(res,expected,int)
# Test float inputs as well
res = data.astype(float).rolling_sum(-3,0)
self.__test_equal(res,expected,float)
# Test min observations
res = data.rolling_sum(-3, 0, min_observations=5)
self.__test_equal(res,expected,int)
res = data.rolling_sum(-3, 0, min_observations=4)
self.__test_equal(res,expected,int)
res = data.rolling_sum(-3, 0, min_observations=3)
expected[2] = 3
self.__test_equal(res,expected,int)
res = data.rolling_sum(-3, 0, min_observations=2)
expected[1] = 1
self.__test_equal(res,expected,int)
res = data.rolling_sum(-3, 0, min_observations=1)
expected[0] = 0
self.__test_equal(res,expected,int)
res = data.rolling_sum(-3, 0, min_observations=0)
self.__test_equal(res,expected,int)
with self.assertRaises(ValueError):
res = data.rolling_sum(-3,0,min_observations=-1)
res = neg_data.rolling_sum(-3,0)
expected = [None for i in range(3)] + [i for i in range(-388,388,8)]
self.__test_equal(res,expected,int)
# Test float inputs as well
res = neg_data.astype(float).rolling_sum(-3,0)
self.__test_equal(res,expected,float)
# Test vector input
res = SArray(self.vec_data).rolling_sum(-3,0)
expected = [None for i in range(3)] + [array.array('d',[i, i+4]) for i in range(10,38,4)]
self.__test_equal(res,expected,array.array)
### Small forward window including current
res = data.rolling_sum(0,4)
expected = [i for i in range(10,4990,5)] + [None for i in range(4)]
self.__test_equal(res,expected,int)
res = neg_data.rolling_sum(0,4)
expected = [i for i in range(-480,480,10)] + [None for i in range(4)]
self.__test_equal(res,expected,int)
### Small backward window not including current
res = data.rolling_sum(-5,-1)
expected = [None for i in range(5)] + [i for i in range(10,4985,5)]
self.__test_equal(res,expected,int)
res = neg_data.rolling_sum(-5,-1)
expected = [None for i in range(5)] + [i for i in range(-480,470,10)]
self.__test_equal(res,expected,int)
### Small forward window not including current
res = data.rolling_sum(1,5)
expected = [i for i in range(15,4990,5)] + [None for i in range(5)]
self.__test_equal(res,expected,int)
res = neg_data.rolling_sum(1,5)
expected = [i for i in range(-470,480,10)] + [None for i in range(5)]
self.__test_equal(res,expected,int)
### "Centered" rolling aggregate
res = data.rolling_sum(-2,2)
expected = [None for i in range(2)] + [i for i in range(10,4990,5)] + [None for i in range(2)]
self.__test_equal(res,expected,int)
res = neg_data.rolling_sum(-2,2)
expected = [None for i in range(2)] + [i for i in range(-480,480,10)] + [None for i in range(2)]
self.__test_equal(res,expected,int)
### Lopsided rolling aggregate
res = data.rolling_sum(-2,1)
expected = [None for i in range(2)] + [i for i in range(6,3994,4)] + [None for i in range(1)]
self.__test_equal(res,expected,int)
res = neg_data.rolling_sum(-2,1)
expected = [None for i in range(2)] + [i for i in range(-388,388,8)] + [None for i in range(1)]
self.__test_equal(res,expected,int)
### A very forward window
res = data.rolling_sum(500,502)
expected = [i for i in range(1503,2997,3)] + [None for i in range(502)]
self.__test_equal(res,expected,int)
res = neg_data.rolling_sum(50,52)
expected = [i for i in range(6,294,6)] + [None for i in range(52)]
self.__test_equal(res,expected,int)
### A very backward window
res = data.rolling_sum(-502,-500)
expected = [None for i in range(502)] + [i for i in range(3,1497,3)]
self.__test_equal(res,expected,int)
res = neg_data.rolling_sum(-52,-50)
expected = [None for i in range(52)] + [i for i in range(-294,-6,6)]
self.__test_equal(res,expected,int)
### A window size much larger than anticipated segment size
res = data.rolling_sum(0,749)
expected = [i for i in range(280875,469125,750)] + [None for i in range(749)]
self.__test_equal(res,expected,int)
### A window size larger than the array
res = data.rolling_sum(0,1000)
expected = [None for i in range(1000)]
self.__test_equal(res,expected,type(None))
### A window size of 1
res = data.rolling_sum(0,0)
self.__test_equal(res, list(data), int)
res = data.rolling_sum(-2,-2)
expected = [None for i in range(2)] + list(data[0:998])
self.__test_equal(res, expected, int)
res = data.rolling_sum(3,3)
expected = list(data[3:1000]) + [None for i in range(3)]
self.__test_equal(res, expected, int)
### A negative window size
with self.assertRaises(RuntimeError):
res = data.rolling_sum(4,2)
### Non-numeric
with self.assertRaisesRegexp(RuntimeError, '.*support.*type.*'):
res = SArray(self.string_data).rolling_sum(0,1)
### Empty SArray
sa = SArray()
res = sa.rolling_sum(0,1)
self.__test_equal(res, [], type(None))
### Small SArray
sa = SArray([1,2,3])
res = sa.rolling_sum(0,1)
self.__test_equal(res, [3,5,None], int)
def test_rolling_max(self):
data = SArray(range(1000))
### Small backward window including current
res = data.rolling_max(-3,0)
expected = [None for i in range(3)] + [i for i in range(3,1000)]
self.__test_equal(res,expected,int)
# Test float inputs as well
res = data.astype(float).rolling_max(-3,0)
self.__test_equal(res,expected,float)
# Test min observations
res = data.rolling_max(-3, 0, min_observations=5)
self.__test_equal(res,expected,int)
res = data.rolling_max(-3, 0, min_observations=4)
self.__test_equal(res,expected,int)
res = data.rolling_max(-3, 0, min_observations=3)
expected[2] = 2
self.__test_equal(res,expected,int)
with self.assertRaises(ValueError):
res = data.rolling_max(-3,0,min_observations=-1)
# Test vector input
with self.assertRaisesRegexp(RuntimeError, '.*support.*type.*'):
res = SArray(self.vec_data).rolling_max(-3,0)
### Small forward window including current
res = data.rolling_max(0,4)
expected = [float(i) for i in range(4,1000)] + [None for i in range(4)]
self.__test_equal(res,expected,int)
### A window size of 1
res = data.rolling_max(0,0)
self.__test_equal(res, list(data), int)
res = data.rolling_max(-2,-2)
expected = [None for i in range(2)] + list(data[0:998])
self.__test_equal(res, expected, int)
res = data.rolling_max(3,3)
expected = list(data[3:1000]) + [None for i in range(3)]
self.__test_equal(res, expected, int)
### A negative window size
with self.assertRaises(RuntimeError):
res = data.rolling_max(4,2)
### Non-numeric
with self.assertRaisesRegexp(RuntimeError, '.*support.*type.*'):
res = SArray(self.string_data).rolling_max(0,1)
### Empty SArray
sa = SArray()
res = sa.rolling_max(0,1)
self.__test_equal(res, [], type(None))
### Small SArray
sa = SArray([1,2,3])
res = sa.rolling_max(0,1)
self.__test_equal(res, [2,3,None], int)
def test_rolling_min(self):
data = SArray(range(1000))
### Small backward window including current
res = data.rolling_min(-3,0)
expected = [None for i in range(3)] + [i for i in range(0,997)]
self.__test_equal(res,expected,int)
# Test float inputs as well
res = data.astype(float).rolling_min(-3,0)
self.__test_equal(res,expected,float)
# Test min observations
res = data.rolling_min(-3, 0, min_observations=5)
self.__test_equal(res,expected,int)
res = data.rolling_min(-3, 0, min_observations=4)
self.__test_equal(res,expected,int)
res = data.rolling_min(-3, 0, min_observations=3)
expected[2] = 0
self.__test_equal(res,expected,int)
with self.assertRaises(ValueError):
res = data.rolling_min(-3,0,min_observations=-1)
# Test vector input
with self.assertRaisesRegexp(RuntimeError, '.*support.*type.*'):
res = SArray(self.vec_data).rolling_min(-3,0)
### Small forward window including current
res = data.rolling_min(0,4)
expected = [float(i) for i in range(0,996)] + [None for i in range(4)]
self.__test_equal(res,expected,int)
### A window size of 1
res = data.rolling_min(0,0)
self.__test_equal(res, list(data), int)
res = data.rolling_min(-2,-2)
expected = [None for i in range(2)] + list(data[0:998])
self.__test_equal(res, expected, int)
res = data.rolling_min(3,3)
expected = list(data[3:1000]) + [None for i in range(3)]
self.__test_equal(res, expected, int)
### A negative window size
with self.assertRaises(RuntimeError):
res = data.rolling_min(4,2)
### Non-numeric
with self.assertRaisesRegexp(RuntimeError, '.*support.*type.*'):
res = SArray(self.string_data).rolling_min(0,1)
### Empty SArray
sa = SArray()
res = sa.rolling_min(0,1)
self.__test_equal(res, [], type(None))
### Small SArray
sa = SArray([1,2,3])
res = sa.rolling_min(0,1)
self.__test_equal(res, [1,2,None], int)
def test_rolling_var(self):
data = SArray(range(1000))
### Small backward window including current
res = data.rolling_var(-3,0)
expected = [None for i in range(3)] + [1.25 for i in range(997)]
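# 1.25 is the population variance (ddof=0) of any 4 consecutive integers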
self.__test_equal(res,expected,float)
# Test float inputs as well
res = data.astype(float).rolling_var(-3,0)
self.__test_equal(res,expected,float)
# Test min observations
res = data.rolling_var(-3, 0, min_observations=5)
self.__test_equal(res,expected,float)
res = data.rolling_var(-3, 0, min_observations=4)
self.__test_equal(res,expected,float)
res = data.rolling_var(-3, 0, min_observations=3)
expected[2] = (2.0/3.0)
self.__test_equal(res,expected,float)
with self.assertRaises(ValueError):
res = data.rolling_var(-3,0,min_observations=-1)
# Test vector input
with self.assertRaisesRegexp(RuntimeError, '.*support.*type.*'):
res = SArray(self.vec_data).rolling_var(-3,0)
### Small forward window including current
res = data.rolling_var(0,4)
expected = [2 for i in range(996)] + [None for i in range(4)]
self.__test_equal(res,expected,float)
### A window size of 1
res = data.rolling_var(0,0)
self.__test_equal(res, [0 for i in range(1000)], float)
res = data.rolling_var(-2,-2)
self.__test_equal(res, [None,None] + [0 for i in range(998)], float)
### A negative window size
with self.assertRaises(RuntimeError):
res = data.rolling_var(4,2)
### Non-numeric
with self.assertRaisesRegexp(RuntimeError, '.*support.*type.*'):
res = SArray(self.string_data).rolling_var(0,1)
### Empty SArray
sa = SArray()
res = sa.rolling_var(0,1)
self.__test_equal(res, [], type(None))
### Small SArray
sa = SArray([1,2,3])
res = sa.rolling_var(0,1)
self.__test_equal(res, [.25,.25,None], float)
def test_rolling_stdv(self):
data = SArray(range(1000))
### Small backward window including current
res = data.rolling_stdv(-3,0)
expected = [None for i in range(3)] + [1.118033988749895 for i in range(997)]
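# 1.1180... is sqrt(1.25), the population standard deviation of any 4 consecutive integers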
self.__test_equal(res,expected,float)
# Test float inputs as well
res = data.astype(float).rolling_stdv(-3,0)
self.__test_equal(res,expected,float)
# Test min observations
res = data.rolling_stdv(-3, 0, min_observations=5)
self.__test_equal(res,expected,float)
res = data.rolling_stdv(-3, 0, min_observations=4)
self.__test_equal(res,expected,float)
res = data.rolling_stdv(-3, 0, min_observations=3)
expected[2] = math.sqrt(2.0/3.0)
self.__test_equal(res,expected,float)
with self.assertRaises(ValueError):
res = data.rolling_stdv(-3,0,min_observations=-1)
# Test vector input
with self.assertRaisesRegexp(RuntimeError, '.*support.*type.*'):
res = SArray(self.vec_data).rolling_stdv(-3,0)
### Small forward window including current
res = data.rolling_stdv(0,4)
expected = [math.sqrt(2) for i in range(996)] + [None for i in range(4)]
self.__test_equal(res,expected,float)
### A window size of 1
res = data.rolling_stdv(0,0)
self.__test_equal(res, [0 for i in range(1000)], float)
res = data.rolling_stdv(-2,-2)
self.__test_equal(res, [None,None] + [0 for i in range(998)], float)
### A negative window size
with self.assertRaises(RuntimeError):
res = data.rolling_stdv(4,2)
### Non-numeric
with self.assertRaisesRegexp(RuntimeError, '.*support.*type.*'):
res = SArray(self.string_data).rolling_stdv(0,1)
### Empty SArray
sa = SArray()
res = sa.rolling_stdv(0,1)
self.__test_equal(res, [], type(None))
### Small SArray
sa = SArray([1,2,3])
res = sa.rolling_stdv(0,1)
self.__test_equal(res, [.5,.5,None], float)
def test_rolling_count(self):
data = SArray(range(100))
### Small backward window including current
res = data.rolling_count(-3,0)
expected = [1,2,3] + [4 for i in range(97)]
self.__test_equal(res,expected,int)
# Test float inputs
res = data.astype(float).rolling_count(-3,0)
self.__test_equal(res,expected,int)
# Test vector input
res = SArray(self.vec_data).rolling_count(-3,0)
expected = [1,2,3] + [4 for i in range(7)]
self.__test_equal(res,expected,int)
### Test string input
res = SArray(self.string_data).rolling_count(-3,0)
self.__test_equal(res,expected[0:8],int)
### Small forward window including current
res = data.rolling_count(0,4)
expected = [5 for i in range(0,96)] + [4,3,2,1]
self.__test_equal(res,expected,int)
### A window size of 1
res = data.rolling_count(0,0)
self.__test_equal(res, [1 for i in range(100)], int)
res = data.rolling_count(-2,-2)
self.__test_equal(res, [0,0] + [1 for i in range(98)], int)
### A negative window size
with self.assertRaises(RuntimeError):
res = data.rolling_count(4,2)
### Empty SArray
sa = SArray()
res = sa.rolling_count(0,1)
self.__test_equal(res, [], type(None))
### Small SArray
sa = SArray([1,2,3])
res = sa.rolling_count(0,1)
self.__test_equal(res, [2,2,1], int)
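# missing values are excluded from the count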
sa = SArray([1,2,None])
res = sa.rolling_count(0,1)
self.__test_equal(res, [2,1,0], int)
def cumulative_aggregate_comparison(self, out, ans):
import array
self.assertEqual(out.dtype, ans.dtype)
self.assertEqual(len(out), len(ans))
for i in range(len(out)):
if out[i] is None:
self.assertTrue(ans[i] is None)
if ans[i] is None:
self.assertTrue(out[i] is None)
if type(out[i]) != array.array:
self.assertAlmostEqual(out[i], ans[i])
else:
self.assertEqual(len(out[i]), len(ans[i]))
oi = out[i]
ansi = ans[i]
for j in range(len(oi)):
self.assertAlmostEqual(oi[j], ansi[j])
def test_cumulative_sum(self):
def single_test(src, ans):
out = src.cumulative_sum()
self.cumulative_aggregate_comparison(out, ans)
with self.assertRaises(RuntimeError):
sa = SArray(["foo"]).cumulative_sum()
with self.assertRaises(RuntimeError):
sa = SArray([[1], ["foo"]]).cumulative_sum()
with self.assertRaises(RuntimeError):
sa = SArray([{"bar": 1}]).cumulative_sum()
with self.assertRaises(RuntimeError):
sa = SArray([[1], [1,1], [1], [1]]).cumulative_sum()
single_test(
SArray([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]),
SArray([0, 1, 3, 6, 10, 15, 21, 28, 36, 45, 55])
)
single_test(
SArray([0.1, 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1]),
SArray([0.1, 1.2, 3.3, 6.4, 10.5, 15.6, 21.7, 28.8])
)
single_test(
SArray([[11.0, 2.0], [22.0, 1.0], [3.0, 4.0], [4.0, 4.0]]),
SArray([[11.0, 2.0], [33.0, 3.0], [36.0, 7.0], [40.0, 11.0]])
)
single_test(
SArray([None, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]),
SArray([None, 1, 3, 6, 10, 15, 21, 28, 36, 45, 55])
)
single_test(
SArray([None, 1, None, 3, None, 5]),
SArray([None, 1, 1, 4, 4, 9])
)
single_test(
SArray([None, [33.0, 3.0], [3.0, 4.0], [4.0, 4.0]]),
SArray([None, [33.0, 3.0], [36.0, 7.0], [40.0, 11.0]])
)
single_test(
SArray([None, [33.0, 3.0], None, [4.0, 4.0]]),
SArray([None, [33.0, 3.0], [33.0, 3.0], [37.0, 7.0]])
)
def test_cumulative_mean(self):
def single_test(src, ans):
out = src.cumulative_mean()
self.cumulative_aggregate_comparison(out, ans)
with self.assertRaises(RuntimeError):
sa = SArray(["foo"]).cumulative_mean()
with self.assertRaises(RuntimeError):
sa = SArray([[1], ["foo"]]).cumulative_mean()
with self.assertRaises(RuntimeError):
sa = SArray([{"bar": 1}]).cumulative_mean()
with self.assertRaises(RuntimeError):
sa = SArray([[1], [1,1], [1], [1]]).cumulative_mean()
single_test(
SArray([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]),
SArray([0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0])
)
single_test(
SArray([0.1, 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1]),
SArray([0.1, 0.6, 1.1, 1.6, 2.1, 2.6, 3.1, 3.6])
)
single_test(
SArray([[11.0, 22.0], [33.0, 66.0], [4.0, 2.0], [4.0, 2.0]]),
SArray([[11.0, 22.0], [22.0, 44.0], [16.0, 30.0], [13.0, 23.0]])
)
single_test(
SArray([None, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]),
SArray([None, 0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0])
)
single_test(
SArray([None, 1, None, 3, None, 5]),
SArray([None, 1, 1.0, 2.0, 2.0, 3.0])
)
single_test(
SArray([None, [11.0, 22.0], [33.0, 66.0], [4.0, 2.0]]),
SArray([None, [11.0, 22.0], [22.0, 44.0], [16.0, 30.0]])
)
single_test(
SArray([None, [11.0, 22.0], None, [33.0, 66.0], [4.0, 2.0]]),
SArray([None, [11.0, 22.0], [11.0, 22.0], [22.0, 44.0], [16.0, 30.0]])
)
def test_cumulative_min(self):
def single_test(src, ans):
out = src.cumulative_min()
self.cumulative_aggregate_comparison(out, ans)
with self.assertRaises(RuntimeError):
sa = SArray(["foo"]).cumulative_min()
with self.assertRaises(RuntimeError):
sa = SArray([[1], ["foo"]]).cumulative_min()
with self.assertRaises(RuntimeError):
sa = SArray([{"bar": 1}]).cumulative_min()
with self.assertRaises(RuntimeError):
sa = SArray([[1], [1,1], [1], [1]]).cumulative_min()
with self.assertRaises(RuntimeError):
sa = SArray([[1], [1], [1], [1]]).cumulative_min()
single_test(
SArray([0, 1, 2, 3, 4, 5, -1, 7, 8, -2, 10]),
SArray([0, 0, 0, 0, 0, 0, -1, -1, -1, -2, -2])
)
single_test(
SArray([7.1, 6.1, 3.1, 3.9, 4.1, 2.1, 2.9, 0.1]),
SArray([7.1, 6.1, 3.1, 3.1, 3.1, 2.1, 2.1, 0.1])
)
single_test(
SArray([None, 8, 6, 3, 4, None, 6, 2, 8, 9, 1]),
SArray([None, 8, 6, 3, 3, 3, 3, 2, 2, 2, 1])
)
single_test(
SArray([None, 5, None, 3, None, 10]),
SArray([None, 5, 5, 3, 3, 3])
)
def test_cumulative_max(self):
def single_test(src, ans):
out = src.cumulative_max()
self.cumulative_aggregate_comparison(out, ans)
with self.assertRaises(RuntimeError):
sa = SArray(["foo"]).cumulative_max()
with self.assertRaises(RuntimeError):
sa = SArray([[1], ["foo"]]).cumulative_max()
with self.assertRaises(RuntimeError):
sa = SArray([{"bar": 1}]).cumulative_max()
with self.assertRaises(RuntimeError):
sa = SArray([[1], [1,1], [1], [1]]).cumulative_max()
with self.assertRaises(RuntimeError):
sa = SArray([[1], [1], [1], [1]]).cumulative_max()
single_test(
SArray([0, 1, 0, 3, 5, 4, 1, 7, 6, 2, 10]),
SArray([0, 1, 1, 3, 5, 5, 5, 7, 7, 7, 10])
)
single_test(
SArray([2.1, 6.1, 3.1, 3.9, 2.1, 8.1, 8.9, 10.1]),
SArray([2.1, 6.1, 6.1, 6.1, 6.1, 8.1, 8.9, 10.1])
)
single_test(
SArray([None, 1, 6, 3, 4, None, 4, 2, 8, 9, 1]),
SArray([None, 1, 6, 6, 6, 6, 6, 6, 8, 9, 9])
)
single_test(
SArray([None, 2, None, 3, None, 10]),
SArray([None, 2, 2, 3, 3, 10])
)
def test_cumulative_std(self):
def single_test(src, ans):
out = src.cumulative_std()
self.cumulative_aggregate_comparison(out, ans)
with self.assertRaises(RuntimeError):
sa = SArray(["foo"]).cumulative_std()
with self.assertRaises(RuntimeError):
sa = SArray([[1], ["foo"]]).cumulative_std()
with self.assertRaises(RuntimeError):
sa = SArray([{"bar": 1}]).cumulative_std()
with self.assertRaises(RuntimeError):
sa = SArray([[1], [1,1], [1], [1]]).cumulative_std()
with self.assertRaises(RuntimeError):
sa = SArray([[1], [1], [1], [1]]).cumulative_std()
single_test(
SArray([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]),
SArray([0.0, 0.5, 0.816496580927726, 1.118033988749895,
1.4142135623730951, 1.707825127659933, 2.0, 2.29128784747792,
2.581988897471611, 2.8722813232690143, 3.1622776601683795])
)
single_test(
SArray([0.1, 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1]),
SArray([0.0, 0.5, 0.81649658092772603, 1.1180339887498949,
1.4142135623730949, 1.707825127659933, 1.9999999999999998,
2.2912878474779195])
)
single_test(
SArray([None, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]),
SArray([None, 0.0, 0.5, 0.816496580927726, 1.118033988749895,
1.4142135623730951, 1.707825127659933, 2.0, 2.29128784747792,
2.581988897471611, 2.8722813232690143, 3.1622776601683795])
)
single_test(
SArray([None, 1, None, 3, None, 5]),
SArray([None, 0.0, 0.0, 1.0, 1.0, 1.6329931618554521])
)
def test_cumulative_var(self):
def single_test(src, ans):
out = src.cumulative_var()
self.cumulative_aggregate_comparison(out, ans)
with self.assertRaises(RuntimeError):
sa = SArray(["foo"]).cumulative_var()
with self.assertRaises(RuntimeError):
sa = SArray([[1], ["foo"]]).cumulative_var()
with self.assertRaises(RuntimeError):
sa = SArray([{"bar": 1}]).cumulative_var()
with self.assertRaises(RuntimeError):
sa = SArray([[1], [1,1], [1], [1]]).cumulative_var()
with self.assertRaises(RuntimeError):
sa = SArray([[1], [1], [1], [1]]).cumulative_var()
single_test(
SArray([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]),
SArray([0.0, 0.25, 0.6666666666666666, 1.25, 2.0, 2.9166666666666665,
4.0, 5.25, 6.666666666666667, 8.25, 10.0])
)
single_test(
SArray([0.1, 1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1]),
SArray( [0.0, 0.25000000000000006, 0.6666666666666666, 1.25,
1.9999999999999996, 2.916666666666666, 3.999999999999999,
5.249999999999998])
)
single_test(
SArray([None, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]),
SArray([None, 0.0, 0.25, 0.6666666666666666, 1.25, 2.0, 2.9166666666666665,
4.0, 5.25, 6.666666666666667, 8.25, 10.0])
)
single_test(
SArray([None, 1, None, 3, None, 5]),
SArray([None, 0.0, 0.0, 1.0, 1.0, 2.6666666666666665])
)
def test_numpy_datetime64(self):
# Attach UTC (GMT+0) to any naive datetimes so they compare equal to the parsed values
expected = [i.replace(tzinfo=GMT(0.0)) \
if i is not None and i.tzinfo is None else i for i in self.datetime_data]
# A regular list
iso_str_list = [np.datetime64('2013-05-07T10:04:10Z'),
np.datetime64('1902-10-21T10:34:10Z'),
None]
sa = SArray(iso_str_list)
self.__test_equal(sa,expected,dt.datetime)
iso_str_list[2] = np.datetime64('NaT')
sa = SArray(iso_str_list)
self.__test_equal(sa,expected,dt.datetime)
# A numpy array
np_ary = np.array(iso_str_list)
sa = SArray(np_ary)
self.__test_equal(sa,expected,dt.datetime)
### Every possible type of datetime64
test_str = '1969-12-31T23:59:56Z'
available_time_units = ['h','m','s','ms','us','ns','ps','fs','as']
expected = [dt.datetime(1969,12,31,23,59,56,tzinfo=GMT(0.0)) for i in range(7)]
expected.insert(0,dt.datetime(1969,12,31,23,59,0,tzinfo=GMT(0.0)))
expected.insert(0,dt.datetime(1969,12,31,23,0,0,tzinfo=GMT(0.0)))
for i in range(len(available_time_units)):
sa = SArray([np.datetime64(test_str,available_time_units[i])])
self.__test_equal(sa,[expected[i]],dt.datetime)
test_str = '1908-06-01'
available_date_units = ['Y','M','W','D']
expected = [dt.datetime(1908,6,1,0,0,0,tzinfo=GMT(0.0)) for i in range(4)]
expected[2] = dt.datetime(1908,5,28,0,0,0,tzinfo=GMT(0.0)) # numpy's 'W' unit counts weeks from the epoch (1970-01-01, a Thursday), so truncation lands on a Thursday
expected[0] = dt.datetime(1908,1,1,0,0,0,tzinfo=GMT(0.0))
for i in range(len(available_date_units)):
sa = SArray([np.datetime64(test_str,available_date_units[i])])
self.__test_equal(sa,[expected[i]],dt.datetime)
# Daylight savings time (Just to be safe. datetime64 deals in UTC, and
# we store times in UTC by default, so this shouldn't affect anything)
sa = SArray([np.datetime64('2015-03-08T02:38:00-08')])
expected = [dt.datetime(2015,3,8,10,38,tzinfo=GMT(0.0))]
self.__test_equal(sa, expected, dt.datetime)
# timezone considerations
sa = SArray([np.datetime64('2016-01-01T05:45:00+0545')])
expected = [dt.datetime(2016,1,1,0,0,0,tzinfo=GMT(0.0))]
self.__test_equal(sa, expected, dt.datetime)
### Out of our datetime range
with self.assertRaises(TypeError):
sa = SArray([np.datetime64('1066-10-14T09:00:00Z')])
def test_pandas_timestamp(self):
iso_str_list = [pd.Timestamp('2013-05-07T10:04:10'),
|
pd.Timestamp('1902-10-21T10:34:10Z')
|
pandas.Timestamp
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
''' Loop over all runs and find the optimal policy for each.
Plot them over time and include demand and order count. '''
for run in range(10):
p_df = pd.read_csv('./data/Pgrid_%d.csv' % run)
p_data = p_df.to_numpy()[:,1:]
o_df = pd.read_csv('./data/Ogrid_%d.csv' % run)
o_data = o_df.to_numpy()[:,1:]
d_df = pd.read_csv('./data/demand_time%d.csv' % run)
demand = d_df.to_numpy()[:,1]
''' Get the maximum profit and track the inventory path.
We use argmax to get the index of the maximum profit in a row. '''
print(p_data.shape)
max_idx = np.argmax(p_data[1,:])
print("Starting invantory size for max profit:", max_idx)
# Init empty arrays
inv_array = np.zeros(1000)
order_array = np.zeros(1000)
order_n = o_data[0, max_idx]
demand_array = np.zeros(1000)
demand_array[0] = demand[0]
inv_array[0] = max_idx - demand[0]
# Loop over all timesteps and track the optimal policy
order_array[0] = order_n
idx = max_idx
for n in range(1,1000):
idx = int(idx + order_n)
order_n = o_data[n, idx]
inv_array[n] = inv_array[n-1] - demand[n]
order_array[n] = order_n  # order placed at this timestep (order_array was zero-initialized)
demand_array[n] = demand_array[n-1] + demand[n]
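# inv_array now holds the remaining inventory over time, order_array the order placed at each step, and demand_array the cumulative demand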
# The code below is a somewhat verbose way of preparing the data for a single plot
time_array = np.linspace(0,1000, num=1000, dtype=int)
df_order = pd.DataFrame()
df_order['Amount'] = order_array
df_order['Timesteps'] = time_array
df_order['Feature'] = ["Order"]*1000
df_inv = pd.DataFrame()
df_inv['Amount'] = inv_array
df_inv['Timesteps'] = time_array
df_inv['Feature'] = ["Inventory"]*1000
df_demand = pd.DataFrame()
df_demand['Amount'] = demand_array
df_demand['Timesteps'] = time_array
df_demand['Feature'] = ["Demand"]*1000
# We concatenate the data to fit it in one plot
if run == 0:
df_plot = pd.concat([df_inv, df_order, df_demand])
else:
df_plot2 =
|
pd.concat([df_inv, df_order, df_demand])
|
pandas.concat
|
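# A minimal sketch (illustrative values, not from the runs above) of the pandas.concat
# pattern used to stack the Inventory/Order/Demand frames into one long-form table
# that seaborn can split by the 'Feature' column.
import pandas as pd
df_inv = pd.DataFrame({'Amount': [5, 4], 'Timesteps': [0, 1], 'Feature': ['Inventory', 'Inventory']})
df_order = pd.DataFrame({'Amount': [1, 2], 'Timesteps': [0, 1], 'Feature': ['Order', 'Order']})
df_plot = pd.concat([df_inv, df_order], ignore_index=True)
print(df_plot)
# e.g. sns.lineplot(data=df_plot, x='Timesteps', y='Amount', hue='Feature')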
"""InPhaDel: Genotypes and phase deletions on a single chromosome using a specific classification model
Trains models for phasing deletions using underlying WGS+HiC data
"""
import sys
import os
import pickle
import pandas as pd
import numpy as np
import warnings
from itertools import izip
from sklearn import svm
from sklearn import ensemble
from sklearn.cross_validation import StratifiedKFold
from sklearn.grid_search import GridSearchCV
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score
from svphase.utils.common import logger
from svphase.utils.config import RANDOM_STATE,DATA_PREFIX
from svphase.learn.cov import FileStructureDataWithTruth, RPKMAdaptor
from svphase.learn.evaluation import Evaluation
from svphase.learn.features import HicOnlySubset, WgsOnlySubset
from svphase.inphadel import default_arguments
class Model(object):
def __init__(self, model_pkl, random_state):
self.pkl = model_pkl
self.random_state = random_state
self.clf = None
self.params = {}
def clf_stats(self):
pass
class SVMModel(Model):
def __init__(self, model_pkl, random_state):
Model.__init__(self, model_pkl, random_state)
self.clf = svm.SVC(kernel='linear', probability=True, random_state=self.random_state)
self.params = {'C':[.1,1,10,100]}
class RFModel(Model):
def __init__(self, model_pkl, random_state):
Model.__init__(self, model_pkl, random_state)
self.clf = ensemble.RandomForestClassifier(oob_score=True, random_state=self.random_state)
self.params = {'n_estimators':[10,20,50,100], 'max_depth':[2,5,10,20]}
def clf_stats(self):
logger.info('RFModel: OOB_Score {0:0.4f}'.format(self.clf.oob_score_))
class KNNModel(Model):
def __init__(self, model_pkl, random_state):
Model.__init__(self, model_pkl, random_state)
self.clf = KNeighborsClassifier(n_neighbors=5, weights='uniform', algorithm='brute')
self.params = {'n_neighbors':[2,4,8,16,32]}
class Trainer(Evaluation):
def __init__(self, k=5, feature_subset=None):
Evaluation.__init__(self, feature_subset=feature_subset)
self.k = k
self.inner_models = []
self.inner_accuracies = []
self._cpred_index = map(lambda x: 'cpred:'+x, self.label_obj.classes)
self._tpred_index = map(lambda x: 'tpred:'+x, self.label_obj.classes)
def check_stable(self, scores, common_params_df):
thresh = 0.10
deviation = max(scores)-min(scores)
if deviation>thresh:
logger.warning('Model test accuracies deviate more than {thresh:0.1f}%, Deviation {dev:0.4f}'.format(thresh=thresh*100, dev=deviation))
if len(common_params_df.index)>1:
logger.warning('Model had unstable parameters\n{len:s}'.format(len=common_params_df))
def _to_series(self, outer_fold, inner_fold, test_accuracy, correct_preds, total_preds, params):
if correct_preds is None:
c = pd.Series([0,]*len(self.label_obj.classes), index=self._cpred_index, dtype=int)
else:
c = pd.Series(correct_preds, index=self._cpred_index, dtype=int)
if total_preds is None:
t = pd.Series([0,]*len(self.label_obj.classes), index=self._tpred_index, dtype=int)
else:
t = pd.Series(total_preds, index=self._tpred_index, dtype=int)
return pd.concat([
|
pd.Series([outer_fold, inner_fold, test_accuracy], index=['outer_fold','inner_fold', 'test_accuracy'])
|
pandas.Series
|
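# A minimal sketch (hypothetical class labels) of the pandas.Series construction used
# in Trainer._to_series above: per-class counts get prefixed indices, and the fold
# metadata and counts are concatenated into a single labelled row.
import pandas as pd
classes = ['pA', 'pB', 'nan']  # hypothetical class labels
correct = pd.Series([3, 1, 0], index=['cpred:' + c for c in classes], dtype=int)
meta = pd.Series([0, 2, 0.85], index=['outer_fold', 'inner_fold', 'test_accuracy'])
row = pd.concat([meta, correct])
print(row)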
import pandas as pd
import numpy as np
def clean_static_df(static_df):
static_df_clean = static_df
static_df_clean = pd.get_dummies(data=static_df_clean, columns=[
'sex']).drop('sex_MALE', axis=1)
static_df_clean.drop('discharge_destination', axis=1, inplace=True)
static_df_clean.loc[:, 'datetime_min'] = pd.to_datetime(static_df_clean['datetime_min'],
dayfirst=True)
first_lab_time_col = pd.to_datetime(
static_df_clean['datetime_min'], dayfirst=True)
static_df_clean.loc[:, 'icu_in_year'] = first_lab_time_col.dt.year
static_df_clean.loc[:, 'icu_in_month'] = first_lab_time_col.dt.month
static_df_clean.loc[:, 'icu_in_day'] = first_lab_time_col.dt.day
static_df_clean.drop('datetime_min', axis=1, inplace=True)
static_df_clean.drop('datetime_max', axis=1, inplace=True)
# TODO: why is there no hourly info?
static_df_clean = static_df_clean.set_index('patient_id')
return static_df_clean
def clean_dynamic_df(dynamic_df, static_df):
dynamic_df_times = dynamic_df.merge(static_df[['patient_id','datetime_min']],
how='inner', on='patient_id')
dynamic_df_times.loc[:, 'datetime_min'] = pd.to_datetime(dynamic_df_times['datetime_min'],
dayfirst=True)
dynamic_df_times.loc[:, 'datetime'] = pd.to_datetime(dynamic_df_times['datetime'],
dayfirst=True)
dynamic_df_times.loc[:, 'hours_in'] = \
(dynamic_df_times['datetime'] - dynamic_df_times['datetime_min'])
dynamic_df_times = dynamic_df_times.drop(['datetime','datetime_min'], axis=1)
# upsample to hourly
dynamic_df_hourly = dynamic_df_times.set_index('hours_in').groupby('patient_id').resample('H').mean()
dynamic_df_hourly = dynamic_df_hourly.drop('patient_id', axis=1).reset_index()
# dynamic_df_hourly = dynamic_df_hourly.set_index('patient_id')
dynamic_df_clean = dynamic_df_hourly.set_index(['patient_id','hours_in']).dropna(how='all')
return dynamic_df_clean
def clean_treat_df(treat_df, static_df):
treat_df_times = treat_df.merge(static_df[['patient_id','datetime_min']],
how='inner', on='patient_id')
treat_df_times.loc[:, 'datetime_min'] = pd.to_datetime(treat_df_times['datetime_min'],
dayfirst=True)
treat_df_times.loc[:, 'datetime'] = pd.to_datetime(treat_df_times['date'],
dayfirst=True)
treat_df_times.loc[:, 'hours_in'] = \
(treat_df_times['datetime'] - treat_df_times['datetime_min'])
treat_df_times = treat_df_times.drop(['datetime','datetime_min'], axis=1)
treat_df_hourly = treat_df_times.set_index('hours_in').groupby('patient_id').resample('H').mean()
treat_df_hourly = treat_df_hourly.drop('patient_id', axis=1).reset_index()
treat_df_clean = treat_df_hourly.set_index(['patient_id','hours_in']).dropna(how='all')
return treat_df_clean
def clean_outcome_df(static_df):
outcome_df = static_df[['patient_id','discharge_destination','datetime_min','datetime_max']]
outcome_df.loc[:,'datetime_min'] = pd.to_datetime(outcome_df['datetime_min'])
outcome_df.loc[:,'datetime_max'] = pd.to_datetime(outcome_df.loc[:,'datetime_max'])
outcome_df['hours_in'] = outcome_df['datetime_max'] - outcome_df['datetime_min']
outcome_df.loc[:, 'death'] = (outcome_df['discharge_destination'] == 'Fallecimiento') * 1.0
outcome_df = outcome_df.drop(['discharge_destination','datetime_min','datetime_max'], axis=1)
outcome_df = outcome_df.set_index('patient_id')
return outcome_df
def get_CXYT_for_modeling(static_df, dynamic_df, treat_df, outcome_df, output_filepath):
static_df = static_df.reset_index()
dynamic_df = dynamic_df.reset_index()
treat_df = treat_df.reset_index()
outcome_df = outcome_df.reset_index()
# upsample treatment to hourly before merging
treat_df.loc[:, 'hours_in'] = pd.to_timedelta(treat_df['hours_in'])
dynamic_df.loc[:, 'hours_in'] =
|
pd.to_timedelta(dynamic_df['hours_in'])
|
pandas.to_timedelta
|
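# A minimal sketch (made-up values) of the pandas.to_timedelta call above: the
# 'hours_in' offsets come back from CSV as strings, and converting them to Timedelta
# lets the hourly resample/merge steps treat them as real durations.
import pandas as pd
df = pd.DataFrame({'patient_id': [1, 1], 'hours_in': ['0 days 01:30:00', '0 days 03:00:00']})
df['hours_in'] = pd.to_timedelta(df['hours_in'])
print(df.dtypes)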
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# *****************************************************************************/
# * Authors: <NAME>
# *****************************************************************************/
"""transformCSV.py
This module contains the basic functions for creating the content of a configuration file from CSV.
Args:
--inFile: Path to the input CSV file containing the time series data values
--outFile: Path to the output INI configuration file built from those values
--debug: Boolean flag to activate verbose printing for debug use
Example:
Default usage:
$ python transformCSV.py
Specific usage:
$ python transformCSV.py
--inFile C:\raad\src\software\time-series.csv
--outFile C:\raad\src\software\time-series.ini
--debug True
"""
import sys
import datetime
import optparse
import traceback
import pandas
import numpy
import os
import pprint
import csv
if sys.version_info.major > 2:
import configparser as cF
else:
import ConfigParser as cF
class TransformMetaData(object):
debug = False
fileName = None
fileLocation = None
columnsList = None
analysisFrameFormat = None
uniqueLists = None
analysisFrame = None
def __init__(self, inputFileName=None, debug=False, transform=False, sectionName=None, outFolder=None,
outFile='time-series-madness.ini'):
if isinstance(debug, bool):
self.debug = debug
if inputFileName is None:
return
elif os.path.exists(os.path.abspath(inputFileName)):
self.fileName = inputFileName
self.fileLocation = os.path.abspath(inputFileName)
(analysisFrame, analysisFrameFormat, uniqueLists, columnNamesList) = self.CSVtoFrame(
inputFileName=self.fileName)
self.analysisFrame = analysisFrame
self.columnsList = columnNamesList
self.analysisFrameFormat = analysisFrameFormat
self.uniqueLists = uniqueLists
if transform:
passWrite = self.frameToINI(analysisFrame=analysisFrame, sectionName=sectionName, outFolder=outFolder,
outFile=outFile)
print(f"Pass Status is : {passWrite}")
return
def getColumnList(self):
return self.columnsList
def getAnalysisFrameFormat(self):
return self.analysisFrameFormat
def getuniqueLists(self):
return self.uniqueLists
def getAnalysisFrame(self):
return self.analysisFrame
@staticmethod
def getDateParser(formatString="%Y-%m-%d %H:%M:%S.%f"):
return (lambda x: pandas.datetime.strptime(x, formatString)) # 2020-06-09 19:14:00.000
def getHeaderFromFile(self, headerFilePath=None, method=1):
if headerFilePath is None:
return (None, None)
if method == 1:
fieldnames = pandas.read_csv(headerFilePath, index_col=0, nrows=0).columns.tolist()
elif method == 2:
with open(headerFilePath, 'r') as infile:
reader = csv.DictReader(infile)
fieldnames = list(reader.fieldnames)
elif method == 3:
fieldnames = list(pandas.read_csv(headerFilePath, nrows=1).columns)
else:
fieldnames = None
fieldDict = {}
for indexName, valueName in enumerate(fieldnames):
fieldDict[valueName] = pandas.StringDtype()
return (fieldnames, fieldDict)
def CSVtoFrame(self, inputFileName=None):
if inputFileName is None:
return (None, None)
# Load File
print("Processing File: {0}...\n".format(inputFileName))
self.fileLocation = inputFileName
# Create data frame
analysisFrame = pandas.DataFrame()
analysisFrameFormat = self._getDataFormat()
inputDataFrame = pandas.read_csv(filepath_or_buffer=inputFileName,
sep='\t',
names=self._getDataFormat(),
# dtype=self._getDataFormat()
# header=None
# float_precision='round_trip'
# engine='c',
# parse_dates=['date_column'],
# date_parser=True,
# na_values=['NULL']
)
if self.debug: # Preview data.
print(inputDataFrame.head(5))
# analysisFrame.astype(dtype=analysisFrameFormat)
# Cleanup data
analysisFrame = inputDataFrame.copy(deep=True)
analysisFrame.apply(pandas.to_numeric, errors='coerce') # Fill in bad data with Not-a-Number (NaN)
# Create lists of unique strings
uniqueLists = []
columnNamesList = []
for columnName in analysisFrame.columns:
if self.debug:
print('Column Name : ', columnName)
print('Column Contents : ', analysisFrame[columnName].values)
if pandas.api.types.is_string_dtype(analysisFrame[columnName]):
columnUniqueList = analysisFrame[columnName].unique().tolist()
else:
columnUniqueList = None
columnNamesList.append(columnName)
uniqueLists.append([columnName, columnUniqueList])
if self.debug: # Preview data.
print(analysisFrame.head(5))
return (analysisFrame, analysisFrameFormat, uniqueLists, columnNamesList)
def frameToINI(self, analysisFrame=None, sectionName='Unknown', outFolder=None, outFile='nil.ini'):
if analysisFrame is None:
return False
try:
if outFolder is None:
outFolder = os.getcwd()
configFilePath = os.path.join(outFolder, outFile)
configINI = cF.ConfigParser()
configINI.add_section(sectionName)
for (columnName, columnData) in analysisFrame.items():
if self.debug:
print('Column Name : ', columnName)
print('Column Contents : ', columnData.values)
print("Column Contents Length:", len(columnData.values))
print("Column Contents Type", type(columnData.values))
writeList = "["
for colIndex, colValue in enumerate(columnData):
writeList = f"{writeList}'{colValue}'"
if colIndex < len(columnData) - 1:
writeList = f"{writeList}, "
writeList = f"{writeList}]"
configINI.set(sectionName, columnName, writeList)
if not os.path.exists(configFilePath) or os.stat(configFilePath).st_size == 0:
with open(configFilePath, 'w') as configWritingFile:
configINI.write(configWritingFile)
noErrors = True
except ValueError as e:
errorString = ("ERROR in {__file__} @{framePrintNo} with {ErrorFound}".format(__file__=str(__file__),
framePrintNo=str(
sys._getframe().f_lineno),
ErrorFound=e))
print(errorString)
noErrors = False
return noErrors
@staticmethod
def _validNumericalFloat(inValue):
"""
Determines if the value is a valid numerical object.
Args:
inValue: floating-point value
Returns: Value in floating-point or Not-A-Number.
"""
try:
return numpy.float128(inValue)
except ValueError:
return numpy.nan
@staticmethod
def _calculateMean(x):
"""
Calculates the mean using a weights-based multiplication method, since a direct division can produce an infinity or NaN
Args:
x: Input data set. We use a data frame.
Returns: Calculated mean for a vector data frame.
"""
try:
mean = numpy.float128(numpy.average(x, weights=numpy.ones_like(numpy.float128(x)) / numpy.float128(x.size)))
except ValueError:
mean = 0
pass
return mean
def _calculateStd(self, data):
"""
Calculates the standard deviation using a multiplication method, since a direct division can produce an infinity or NaN
Args:
data: Input data set. We use a data frame.
Returns: Calculated standard deviation for a vector data frame.
"""
sd = 0
try:
n = numpy.float128(data.size)
if n <= 1:
return numpy.float128(0.0)
# Use multiplication version of mean since numpy bug causes infinity.
mean = self._calculateMean(data)
sd = numpy.float128(0.0)  # reset the accumulator before summing squared deviations
# Calculate standard deviation
for el in data:
diff = numpy.float128(el) - numpy.float128(mean)
sd += (diff) ** 2
points = numpy.float128(n - 1)
sd = numpy.float128(numpy.sqrt(numpy.float128(sd) / numpy.float128(points)))
except ValueError:
pass
return sd
def _determineQuickStats(self, dataAnalysisFrame, columnName=None, multiplierSigma=3.0):
"""
Determines stats based on a vector to get the data shape.
Args:
dataAnalysisFrame: Dataframe to do analysis on.
columnName: Column name of the data frame.
multiplierSigma: Sigma range for the stats.
Returns: Set of stats.
"""
meanValue = 0
sigmaValue = 0
sigmaRangeValue = 0
topValue = 0
try:
# Clean out anomalies due to random invalid inputs.
if (columnName is not None):
meanValue = self._calculateMean(dataAnalysisFrame[columnName])
if numpy.isnan(meanValue):
meanValue = numpy.float128(1)
sigmaValue = self._calculateStd(dataAnalysisFrame[columnName])
if numpy.isnan(float(sigmaValue)):
sigmaValue = numpy.float128(1)
multiplier = numpy.float128(multiplierSigma) # Stats: 1 sigma = 68%, 2 sigma = 95%, 3 sigma = 99.7
sigmaRangeValue = (sigmaValue * multiplier)
if numpy.isnan(float(sigmaRangeValue)):
sigmaRangeValue = numpy.float128(1)
topValue = numpy.float128(meanValue + sigmaRangeValue)
print("Name:{} Mean= {}, Sigma= {}, {}*Sigma= {}".format(columnName,
meanValue,
sigmaValue,
multiplier,
sigmaRangeValue))
except ValueError:
pass
return (meanValue, sigmaValue, sigmaRangeValue, topValue)
def _cleanZerosForColumnInFrame(self, dataAnalysisFrame, columnName='cycles'):
"""
Cleans the data frame with data values that are invalid. I.E. inf, NaN
Args:
dataAnalysisFrame: Dataframe to do analysis on.
columnName: Column name of the data frame.
Returns: Cleaned dataframe.
"""
dataAnalysisCleaned = None
try:
# Clean out anomalies due to random invalid inputs.
(meanValue, sigmaValue, sigmaRangeValue, topValue) = self._determineQuickStats(
dataAnalysisFrame=dataAnalysisFrame, columnName=columnName)
# dataAnalysisCleaned = dataAnalysisFrame[dataAnalysisFrame[columnName] != 0]
# When the cycles are negative or zero we missed cleaning up a row.
# logicVector = (dataAnalysisFrame[columnName] != 0)
# dataAnalysisCleaned = dataAnalysisFrame[logicVector]
logicVector = (dataAnalysisFrame[columnName] >= 1)
dataAnalysisCleaned = dataAnalysisFrame[logicVector]
# These timed out mean + 2 * sd
logicVector = (dataAnalysisCleaned[columnName] < topValue) # Data range
dataAnalysisCleaned = dataAnalysisCleaned[logicVector]
except ValueError:
pass
return dataAnalysisCleaned
def _cleanFrame(self, dataAnalysisTemp, cleanColumn=False, columnName='cycles'):
"""
Args:
dataAnalysisTemp: Dataframe to do analysis on.
cleanColumn: Flag to clean the data frame.
columnName: Column name of the data frame.
Returns: cleaned dataframe
"""
try:
replacementList = [pandas.NaT, numpy.Infinity, numpy.NINF, 'NaN', 'inf', '-inf', 'NULL']
if cleanColumn is True:
dataAnalysisTemp = self._cleanZerosForColumnInFrame(dataAnalysisTemp, columnName=columnName)
dataAnalysisTemp = dataAnalysisTemp.replace(to_replace=replacementList,
value=numpy.nan)
dataAnalysisTemp = dataAnalysisTemp.dropna()
except ValueError:
pass
return dataAnalysisTemp
@staticmethod
def _getDataFormat():
"""
Return the dataframe setup for the CSV file generated from server.
Returns: dictionary data format for pandas.
"""
dataFormat = {
"Serial_Number": pandas.StringDtype(),
"LogTime0": pandas.StringDtype(), # @todo force rename
"Id0": pandas.StringDtype(), # @todo force rename
"DriveId": pandas.StringDtype(),
"JobRunId": pandas.StringDtype(),
"LogTime1": pandas.StringDtype(), # @todo force rename
"Comment0": pandas.StringDtype(), # @todo force rename
"CriticalWarning": pandas.StringDtype(),
"Temperature": pandas.StringDtype(),
"AvailableSpare": pandas.StringDtype(),
"AvailableSpareThreshold": pandas.StringDtype(),
"PercentageUsed": pandas.StringDtype(),
"DataUnitsReadL": pandas.StringDtype(),
"DataUnitsReadU": pandas.StringDtype(),
"DataUnitsWrittenL": pandas.StringDtype(),
"DataUnitsWrittenU": pandas.StringDtype(),
"HostReadCommandsL": pandas.StringDtype(),
"HostReadCommandsU": pandas.StringDtype(),
"HostWriteCommandsL": pandas.StringDtype(),
"HostWriteCommandsU": pandas.StringDtype(),
"ControllerBusyTimeL": pandas.StringDtype(),
"ControllerBusyTimeU": pandas.StringDtype(),
"PowerCyclesL": pandas.StringDtype(),
"PowerCyclesU": pandas.StringDtype(),
"PowerOnHoursL": pandas.StringDtype(),
"PowerOnHoursU": pandas.StringDtype(),
"UnsafeShutdownsL": pandas.StringDtype(),
"UnsafeShutdownsU": pandas.StringDtype(),
"MediaErrorsL": pandas.StringDtype(),
"MediaErrorsU": pandas.StringDtype(),
"NumErrorInfoLogsL": pandas.StringDtype(),
"NumErrorInfoLogsU": pandas.StringDtype(),
"ProgramFailCountN": pandas.StringDtype(),
"ProgramFailCountR": pandas.StringDtype(),
"EraseFailCountN": pandas.StringDtype(),
"EraseFailCountR": pandas.StringDtype(),
"WearLevelingCountN": pandas.StringDtype(),
"WearLevelingCountR": pandas.StringDtype(),
"E2EErrorDetectCountN": pandas.StringDtype(),
"E2EErrorDetectCountR": pandas.StringDtype(),
"CRCErrorCountN": pandas.StringDtype(),
"CRCErrorCountR": pandas.StringDtype(),
"MediaWearPercentageN": pandas.StringDtype(),
"MediaWearPercentageR": pandas.StringDtype(),
"HostReadsN": pandas.StringDtype(),
"HostReadsR": pandas.StringDtype(),
"TimedWorkloadN": pandas.StringDtype(),
"TimedWorkloadR": pandas.StringDtype(),
"ThermalThrottleStatusN": pandas.StringDtype(),
"ThermalThrottleStatusR": pandas.StringDtype(),
"RetryBuffOverflowCountN": pandas.StringDtype(),
"RetryBuffOverflowCountR": pandas.StringDtype(),
"PLLLockLossCounterN": pandas.StringDtype(),
"PLLLockLossCounterR": pandas.StringDtype(),
"NandBytesWrittenN": pandas.StringDtype(),
"NandBytesWrittenR": pandas.StringDtype(),
"HostBytesWrittenN": pandas.StringDtype(),
"HostBytesWrittenR": pandas.StringDtype(),
"SystemAreaLifeRemainingN": pandas.StringDtype(),
"SystemAreaLifeRemainingR": pandas.StringDtype(),
"RelocatableSectorCountN": pandas.StringDtype(),
"RelocatableSectorCountR": pandas.StringDtype(),
"SoftECCErrorRateN": pandas.StringDtype(),
"SoftECCErrorRateR": pandas.StringDtype(),
"UnexpectedPowerLossN": pandas.StringDtype(),
"UnexpectedPowerLossR": pandas.StringDtype(),
"MediaErrorCountN": pandas.StringDtype(),
"MediaErrorCountR": pandas.StringDtype(),
"NandBytesReadN": pandas.StringDtype(),
"NandBytesReadR": pandas.StringDtype(),
"WarningCompTempTime": pandas.StringDtype(),
"CriticalCompTempTime": pandas.StringDtype(),
"TempSensor1": pandas.StringDtype(),
"TempSensor2": pandas.StringDtype(),
"TempSensor3": pandas.StringDtype(),
"TempSensor4": pandas.StringDtype(),
"TempSensor5": pandas.StringDtype(),
"TempSensor6": pandas.StringDtype(),
"TempSensor7": pandas.StringDtype(),
"TempSensor8": pandas.StringDtype(),
"ThermalManagementTemp1TransitionCount": pandas.StringDtype(),
"ThermalManagementTemp2TransitionCount": pandas.StringDtype(),
"TotalTimeForThermalManagementTemp1": pandas.StringDtype(),
"TotalTimeForThermalManagementTemp2": pandas.StringDtype(),
"Core_Num": pandas.StringDtype(),
"Id1": pandas.StringDtype(), # @todo force rename
"Job_Run_Id": pandas.StringDtype(),
"Stats_Time": pandas.StringDtype(),
"HostReads": pandas.StringDtype(),
"HostWrites": pandas.StringDtype(),
"NandReads": pandas.StringDtype(),
"NandWrites": pandas.StringDtype(),
"ProgramErrors": pandas.StringDtype(),
"EraseErrors": pandas.StringDtype(),
"ErrorCount": pandas.StringDtype(),
"BitErrorsHost1": pandas.StringDtype(),
"BitErrorsHost2": pandas.StringDtype(),
"BitErrorsHost3": pandas.StringDtype(),
"BitErrorsHost4": pandas.StringDtype(),
"BitErrorsHost5": pandas.StringDtype(),
"BitErrorsHost6": pandas.StringDtype(),
"BitErrorsHost7": pandas.StringDtype(),
"BitErrorsHost8": pandas.StringDtype(),
"BitErrorsHost9": pandas.StringDtype(),
"BitErrorsHost10": pandas.StringDtype(),
"BitErrorsHost11": pandas.StringDtype(),
"BitErrorsHost12": pandas.StringDtype(),
"BitErrorsHost13": pandas.StringDtype(),
"BitErrorsHost14": pandas.StringDtype(),
"BitErrorsHost15": pandas.StringDtype(),
"ECCFail": pandas.StringDtype(),
"GrownDefects": pandas.StringDtype(),
"FreeMemory": pandas.StringDtype(),
"WriteAllowance": pandas.StringDtype(),
"ModelString": pandas.StringDtype(),
"ValidBlocks": pandas.StringDtype(),
"TokenBlocks": pandas.StringDtype(),
"SpuriousPFCount": pandas.StringDtype(),
"SpuriousPFLocations1": pandas.StringDtype(),
"SpuriousPFLocations2": pandas.StringDtype(),
"SpuriousPFLocations3": pandas.StringDtype(),
"SpuriousPFLocations4": pandas.StringDtype(),
"SpuriousPFLocations5": pandas.StringDtype(),
"SpuriousPFLocations6": pandas.StringDtype(),
"SpuriousPFLocations7": pandas.StringDtype(),
"SpuriousPFLocations8": pandas.StringDtype(),
"BitErrorsNonHost1": pandas.StringDtype(),
"BitErrorsNonHost2": pandas.StringDtype(),
"BitErrorsNonHost3": pandas.StringDtype(),
"BitErrorsNonHost4": pandas.StringDtype(),
"BitErrorsNonHost5": pandas.StringDtype(),
"BitErrorsNonHost6": pandas.StringDtype(),
"BitErrorsNonHost7": pandas.StringDtype(),
"BitErrorsNonHost8": pandas.StringDtype(),
"BitErrorsNonHost9": pandas.StringDtype(),
"BitErrorsNonHost10": pandas.StringDtype(),
"BitErrorsNonHost11": pandas.StringDtype(),
"BitErrorsNonHost12": pandas.StringDtype(),
"BitErrorsNonHost13": pandas.StringDtype(),
"BitErrorsNonHost14": pandas.StringDtype(),
"BitErrorsNonHost15": pandas.StringDtype(),
"ECCFailNonHost": pandas.StringDtype(),
"NSversion": pandas.StringDtype(),
"numBands": pandas.StringDtype(),
"minErase": pandas.StringDtype(),
"maxErase": pandas.StringDtype(),
"avgErase": pandas.StringDtype(),
"minMVolt": pandas.StringDtype(),
"maxMVolt": pandas.StringDtype(),
"avgMVolt": pandas.StringDtype(),
"minMAmp": pandas.StringDtype(),
"maxMAmp": pandas.StringDtype(),
"avgMAmp": pandas.StringDtype(),
"comment1": pandas.StringDtype(), # @todo force rename
"minMVolt12v": pandas.StringDtype(),
"maxMVolt12v": pandas.StringDtype(),
"avgMVolt12v": pandas.StringDtype(),
"minMAmp12v": pandas.StringDtype(),
"maxMAmp12v": pandas.StringDtype(),
"avgMAmp12v": pandas.StringDtype(),
"nearMissSector": pandas.StringDtype(),
"nearMissDefect": pandas.StringDtype(),
"nearMissOverflow": pandas.StringDtype(),
"replayUNC": pandas.StringDtype(),
"Drive_Id": pandas.StringDtype(),
"indirectionMisses": pandas.StringDtype(),
"BitErrorsHost16": pandas.StringDtype(),
"BitErrorsHost17": pandas.StringDtype(),
"BitErrorsHost18": pandas.StringDtype(),
"BitErrorsHost19": pandas.StringDtype(),
"BitErrorsHost20": pandas.StringDtype(),
"BitErrorsHost21": pandas.StringDtype(),
"BitErrorsHost22": pandas.StringDtype(),
"BitErrorsHost23": pandas.StringDtype(),
"BitErrorsHost24": pandas.StringDtype(),
"BitErrorsHost25": pandas.StringDtype(),
"BitErrorsHost26": pandas.StringDtype(),
"BitErrorsHost27": pandas.StringDtype(),
"BitErrorsHost28": pandas.StringDtype(),
"BitErrorsHost29": pandas.StringDtype(),
"BitErrorsHost30": pandas.StringDtype(),
"BitErrorsHost31": pandas.StringDtype(),
"BitErrorsHost32": pandas.StringDtype(),
"BitErrorsHost33": pandas.StringDtype(),
"BitErrorsHost34": pandas.StringDtype(),
"BitErrorsHost35": pandas.StringDtype(),
"BitErrorsHost36": pandas.StringDtype(),
"BitErrorsHost37": pandas.StringDtype(),
"BitErrorsHost38": pandas.StringDtype(),
"BitErrorsHost39": pandas.StringDtype(),
"BitErrorsHost40": pandas.StringDtype(),
"XORRebuildSuccess": pandas.StringDtype(),
"XORRebuildFail": pandas.StringDtype(),
"BandReloForError": pandas.StringDtype(),
"mrrSuccess": pandas.StringDtype(),
"mrrFail": pandas.StringDtype(),
"mrrNudgeSuccess": pandas.StringDtype(),
"mrrNudgeHarmless": pandas.StringDtype(),
"mrrNudgeFail": pandas.StringDtype(),
"totalErases": pandas.StringDtype(),
"dieOfflineCount": pandas.StringDtype(),
"curtemp": pandas.StringDtype(),
"mintemp": pandas.StringDtype(),
"maxtemp": pandas.StringDtype(),
"oventemp": pandas.StringDtype(),
"allZeroSectors": pandas.StringDtype(),
"ctxRecoveryEvents": pandas.StringDtype(),
"ctxRecoveryErases": pandas.StringDtype(),
"NSversionMinor": pandas.StringDtype(),
"lifeMinTemp": pandas.StringDtype(),
"lifeMaxTemp": pandas.StringDtype(),
"powerCycles": pandas.StringDtype(),
"systemReads": pandas.StringDtype(),
"systemWrites": pandas.StringDtype(),
"readRetryOverflow": pandas.StringDtype(),
"unplannedPowerCycles": pandas.StringDtype(),
"unsafeShutdowns": pandas.StringDtype(),
"defragForcedReloCount": pandas.StringDtype(),
"bandReloForBDR": pandas.StringDtype(),
"bandReloForDieOffline": pandas.StringDtype(),
"bandReloForPFail": pandas.StringDtype(),
"bandReloForWL": pandas.StringDtype(),
"provisionalDefects": pandas.StringDtype(),
"uncorrectableProgErrors":
|
pandas.StringDtype()
|
pandas.StringDtype
|
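# A minimal sketch (hypothetical columns) of the pandas.StringDtype mapping built above:
# a dict of column -> StringDtype() can be handed to read_csv via dtype= (or used with
# astype) so every field loads as the nullable string dtype.
import io
import pandas as pd
schema = {"Serial_Number": pd.StringDtype(), "Temperature": pd.StringDtype()}
csv_text = "Serial_Number\tTemperature\nSN001\t35\n"
df = pd.read_csv(io.StringIO(csv_text), sep="\t", dtype=schema)
print(df.dtypes)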
from abc import abstractmethod
import datetime as dt
import numpy as np
from numpy.random import RandomState
from numpy.testing import assert_equal
from pandas import DataFrame, Series, Timedelta, date_range
import pytest
from arch import doc
from arch.univariate.base import implicit_constant
from arch.utility.array import (
ConcreteClassMeta,
DocStringInheritor,
cutoff_to_index,
date_to_index,
ensure1d,
ensure2d,
find_index,
parse_dataframe,
)
@pytest.fixture(scope="function")
def rng():
return RandomState(12345)
def test_ensure1d():
out = ensure1d(1.0, "y")
assert_equal(out, np.array([1.0]))
out = ensure1d(np.arange(5.0), "y")
assert_equal(out, np.arange(5.0))
out = ensure1d(np.arange(5.0)[:, None], "y")
assert_equal(out, np.arange(5.0))
in_array = np.reshape(np.arange(16.0), (4, 4))
with pytest.raises(ValueError):
ensure1d(in_array, "y")
y = Series(np.arange(5.0))
ys = ensure1d(y, "y")
assert isinstance(ys, np.ndarray)
ys = ensure1d(y, "y", True)
assert isinstance(ys, Series)
y = DataFrame(y)
ys = ensure1d(y, "y")
assert isinstance(ys, np.ndarray)
ys = ensure1d(y, "y", True)
assert isinstance(ys, Series)
y.columns = [1]
ys = ensure1d(y, "y", True)
assert isinstance(ys, Series)
assert ys.name == "1"
y = Series(np.arange(5.0), name="series")
ys = ensure1d(y, "y")
assert isinstance(ys, np.ndarray)
ys = ensure1d(y, "y", True)
assert isinstance(ys, Series)
y = DataFrame(y)
ys = ensure1d(y, "y")
assert isinstance(ys, np.ndarray)
ys = ensure1d(y, "y", True)
assert isinstance(ys, Series)
ys.name = 1
ys = ensure1d(ys, None, True)
assert isinstance(ys, Series)
assert ys.name == "1"
y = DataFrame(np.reshape(np.arange(10), (5, 2)))
with pytest.raises(ValueError):
ensure1d(y, "y")
def test_ensure2d():
s = Series([1, 2, 3], name="x")
df = ensure2d(s, "x")
assert isinstance(df, DataFrame)
df2 = ensure2d(df, "x")
assert df is df2
npa = ensure2d(s.values, "x")
assert isinstance(npa, np.ndarray)
assert npa.ndim == 2
npa = ensure2d(np.array(1.0), "x")
assert isinstance(npa, np.ndarray)
assert npa.ndim == 2
with pytest.raises(ValueError):
ensure2d(np.array([[[1]]]), "x")
with pytest.raises(TypeError):
ensure2d([1], "x")
def test_parse_dataframe():
s = Series(np.arange(10.0), name="variable")
out = parse_dataframe(s, "y")
assert_equal(out[1], np.arange(10.0))
assert_equal(out[0], ["variable"])
df = DataFrame(s)
out = parse_dataframe(df, "y")
assert_equal(out[1], np.arange(10.0))
assert_equal(out[0], ["variable"])
out = parse_dataframe(np.arange(10.0), "y")
assert_equal(out[1], np.arange(10.0))
assert_equal(out[0], ["y"])
out = parse_dataframe(None, "name")
assert out[0] == ["name"]
assert isinstance(out[1], np.ndarray)
assert out[1].shape == (0,)
def test_implicit_constant(rng):
x = rng.standard_normal((1000, 2))
assert not implicit_constant(x)
x[:, 0] = 1.0
assert implicit_constant(x)
x = rng.standard_normal((1000, 3))
x[:, 0] = x[:, 0] > 0
x[:, 1] = 1 - x[:, 0]
assert implicit_constant(x)
def test_docstring_inheritor():
class A(object, metaclass=DocStringInheritor):
"""
Docstring
"""
class B(A):
pass
assert_equal(B.__doc__, A.__doc__)
def test_date_to_index():
dr = date_range("20000101", periods=3000, freq="W")
y = Series(np.arange(3000.0), index=dr)
date_index = y.index
index = date_to_index(date_index[0], date_index)
assert_equal(index, 0)
index = date_to_index(date_index[-1], date_index)
assert_equal(index, date_index.shape[0] - 1)
index = date_to_index("2009-08-02", date_index)
assert_equal(index, 500)
index = date_to_index("2009-08-04", date_index)
assert_equal(index, 501)
index = date_to_index("2009-08-01", date_index)
assert_equal(index, 500)
index = date_to_index(dt.datetime(2009, 8, 1), date_index)
assert_equal(index, 500)
with pytest.raises(ValueError):
date_to_index(dt.date(2009, 8, 1), date_index)
z = y + 0.0
z.index = np.arange(3000)
num_index = z.index
with pytest.raises(ValueError):
date_to_index(dt.datetime(2009, 8, 1), num_index)
idx = date_range("1999-12-31", periods=3)
df = DataFrame([1, 2, 3], index=idx[::-1])
with pytest.raises(ValueError):
date_to_index(idx[0], df.index)
df = DataFrame([1, 2, 3], index=[idx[0]] * 3)
with pytest.raises(ValueError):
date_to_index(idx[0], df.index)
with pytest.raises(ValueError):
date_to_index("NaT", idx)
# check whether this also works for a localized datetimeindex
date_index = date_range("20000101", periods=3000, freq="W", tz="Europe/Berlin")
index = date_to_index(date_index[0], date_index)
assert_equal(index, 0)
def test_date_to_index_timestamp():
dr = date_range("20000101", periods=3000, freq="W")
y = Series(np.arange(3000.0), index=dr)
date_index = y.index
date = y.index[1000]
date_pydt = date.to_pydatetime()
date_npdt = date.to_datetime64()
date_str = date_pydt.strftime("%Y-%m-%d")
index = date_to_index(date, date_index)
index_pydt = date_to_index(date_pydt, date_index)
index_npdt = date_to_index(date_npdt, date_index)
index_str = date_to_index(date_str, date_index)
assert_equal(index, 1000)
assert_equal(index, index_npdt)
assert_equal(index, index_pydt)
assert_equal(index, index_str)
def test_():
dr = date_range("20000101", periods=3000, freq="W")
y = Series(np.arange(3000.0), index=dr)
date_index = y.index
date = date_index[1000] + Timedelta(1, "D")
date_pydt = date.to_pydatetime()
date_npdt = date.to_datetime64()
date_str = date_pydt.strftime("%Y-%m-%d")
index = date_to_index(date, date_index)
index_pydt = date_to_index(date_pydt, date_index)
index_npdt = date_to_index(date_npdt, date_index)
index_str = date_to_index(date_str, date_index)
assert_equal(index, 1001)
assert_equal(index, index_npdt)
assert_equal(index, index_pydt)
assert_equal(index, index_str)
date = date_index[0] - Timedelta(1, "D")
index = date_to_index(date, date_index)
assert_equal(index, 0)
date_pydt = date.to_pydatetime()
date_npdt = date.to_datetime64()
date_str = date_pydt.strftime("%Y-%m-%d")
index_pydt = date_to_index(date_pydt, date_index)
index_npdt = date_to_index(date_npdt, date_index)
index_str = date_to_index(date_str, date_index)
assert_equal(index, index_npdt)
assert_equal(index, index_pydt)
assert_equal(index, index_str)
def test_cutoff_to_index():
dr =
|
date_range("20000101", periods=3000, freq="W")
|
pandas.date_range
|
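# A minimal sketch of the pandas.date_range call these tests build on: a weekly
# DatetimeIndex starting 2000-01-01, which date_to_index then searches. The
# searchsorted call below only illustrates that lookup idea, not arch's internals.
import pandas as pd
dr = pd.date_range("20000101", periods=3000, freq="W")
pos = dr.searchsorted(pd.Timestamp("2009-08-02"))
print(dr[0], dr[-1], int(pos))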
import argparse
from datetime import datetime, timezone
from xml.etree import ElementTree as ET
import requests
import pandas as pd
from src import config
def get_time():
utc_dt = datetime.now(timezone.utc)
dt = utc_dt.astimezone()
return dt
def get_response_status_code(root):
status_code = '500'
if root.tag != 'response':
return status_code
for code in root.iter('resultCode'):
status_code = code.text
return status_code
def get_holiday_info(year, month, operation):
month_fill = '{0:02d}'.format(int(month))
url = '/'.join([config.PUBLIC_DATA_DOMAIN, config.PUBLIC_DATA_HOLIDAY_URI,
operation])
params = {
'solYear' : year,
'solMonth' : month_fill,
'ServiceKey' : config.PUBLIC_DATA_PORTAL_KEY,
'numOfRows' : config.DEFAULT_NUM_PAGE
}
res = requests.get(url=url, params=params)
return res
def parse_response(root, holiday_df):
total_count = root.find('body/totalCount')
if total_count is not None and int(total_count.text) > 0:
for item in root.findall('body/items/item'):
res = {'date' : item.find('locdate').text,
'name' : item.find('dateName').text,
'type' : item.find('dateKind').text,
'is_holiday' : item.find('isHoliday').text
}
holiday_df = holiday_df.append(res, ignore_index=True)
return holiday_df
def run(year):
holiday_df =
|
pd.DataFrame(columns=['date', 'name', 'type', 'is_holiday'])
|
pandas.DataFrame
|
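# A minimal sketch (fake holiday row) of the pandas.DataFrame pattern above: start from
# an empty frame with fixed columns, then add parsed rows. Note that DataFrame.append
# (used in parse_response) was removed in pandas 2.0; concat with a one-row frame, as
# shown here, is the drop-in alternative.
import pandas as pd
holiday_df = pd.DataFrame(columns=['date', 'name', 'type', 'is_holiday'])
row = {'date': '20240101', 'name': 'New Year', 'type': '01', 'is_holiday': 'Y'}
holiday_df = pd.concat([holiday_df, pd.DataFrame([row])], ignore_index=True)
print(holiday_df)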
# IMPORTING LIBRARIES -----------------------------------------------------------------------------------
#region
import pandas as pd
import numpy as np
import os
import re
import glob
import csv
import shutil
#endregion
# INPUT VARIABLES----------------------------------------------------------------------------------------
#region
# Directory folder of the csv files you want to process
Input_path_CSVs = 'C:/FILES/Input_CSV/'
# Can change to xlsx if needed; other changes to the code will be necessary
Extension = 'csv'
# CSV file separator for input and output files, generally (,) or (|)
Delimiter = '|'
# Directory folder of the TKC cross reference table
Input_path_TKC_files = 'D:/FILES/Input_TKC_files/'
# Directory excel file of the Sample Point Table
Input_path_SPT = 'C:/FILES/Sample Points_37883_20180926_134607.xlsx'
# Output folder of the CSV files
Output_path_processed_csv = 'C:/FILES/Output_CSV_Processed/'
# Output folder path of bad SPT CSV files
Output_path_badSPT = 'C:/FILES/Output_CSV_Bad_SPT/'
# Output folder path of TKC Unmapped Data
Output_path_badTKC = 'C:/FILES/Output_CSV_Bad_TKC/'
# Output folder path of Retest CSV files
Output_path_Retests = 'C:/FILES/Output_CSV_Retests/'
# Output folder path of CSV Files with Structure that can't be Analysed
Output_path_bad_structure = 'C:/FILES/Output_CSV_Bad_Column_Structure/'
# Output folder path of Report on Analysed files
Output_path_Report = 'C:/FILES/'
print('Directories loaded...')
#endregion
# READ AND PROCESS THE UNIQUE SAMPLE POINTS FILE----------------------------------------------------------------
#region
df_SPTs =
|
pd.read_excel(Input_path_SPT, sheet_name='Data', dtype={'Name': object, 'OldSiteCode_post2007': object})
|
pandas.read_excel
|
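# A minimal sketch of the pandas.read_excel call above, using a throwaway workbook
# rather than the script's real Sample Points file (writing/reading xlsx needs openpyxl):
# sheet_name selects the tab and dtype=object keeps code-like columns from losing
# leading zeros.
import pandas as pd
demo = pd.DataFrame({'Name': ['007'], 'Value': [1.5]})
demo.to_excel('demo_points.xlsx', sheet_name='Data', index=False)  # requires openpyxl
back = pd.read_excel('demo_points.xlsx', sheet_name='Data', dtype={'Name': object})
print(back.dtypes)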
import pytest
import pandas as pd
import pandas._testing as tm
@pytest.mark.parametrize(
"values, dtype",
[
([], "object"),
([1, 2, 3], "int64"),
([1.0, 2.0, 3.0], "float64"),
(["a", "b", "c"], "object"),
(["a", "b", "c"], "string"),
([1, 2, 3], "datetime64[ns]"),
([1, 2, 3], "datetime64[ns, CET]"),
([1, 2, 3], "timedelta64[ns]"),
(["2000", "2001", "2002"], "Period[D]"),
([1, 0, 3], "Sparse"),
([
|
pd.Interval(0, 1)
|
pandas.Interval
|
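# A minimal sketch of pandas.Interval, the object the parametrized case above feeds into
# an interval-dtype construction: an Interval is a closed/open range that supports
# containment checks and can back an IntervalIndex.
import pandas as pd
iv = pd.Interval(0, 1, closed='right')   # (0, 1]
print(0.5 in iv, 0 in iv)                # True False
idx = pd.IntervalIndex([iv, pd.Interval(1, 2)])
print(idx.dtype)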
import os
import html5lib
import pandas as pd
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from datetime import date, timedelta, datetime as dt
from pymongo import MongoClient
from itertools import cycle
import numpy as np
# from kdriver import RemoteDriverStartService
class RemoteDriverStartService():
options = webdriver.ChromeOptions()
# Set user app data to a new directory
options.add_argument("user-data-dir=C:\\Users\\Donley\\App Data\\Google\\Chrome\\Application\\User Data\\Kit")
options.add_experimental_option("Proxy", "null")
options.add_experimental_option("excludeSwitches", ["ignore-certificate-errors"])
# Create a download path for external data sources as default:
options.add_experimental_option("prefs", {
"download.default_directory": r"C:\Users\Donley\Documents\GA_TECH\SUBMISSIONS\PROJECT2-CHALLENGE\data\external",
"download.prompt_for_download": False,
"download.directory_upgrade": True,
"safebrowsing.enabled": True
}),
# Add those optional features to capabilities
caps = options.to_capabilities()
def start_driver(self):
return webdriver.Remote(command_executor='http://127.0.0.1:4444',
desired_capabilities=self.caps)
# Connect to MongoDB
client = MongoClient("mongodb://localhost:27017")
db = client['investopedia']
def invsto_scrape():
# Set class equal to new capabilities:
DesiredCapabilities = RemoteDriverStartService()
# Create variables for scraping:
investo = "https://www.investopedia.com/top-communications-stocks-4583180"
# Download data to paths, csv's, json, etc:
# for external data sources
external = "../data/external/"
# for processed data sources with ID's
processed = "../data/processed/"
# Locate Driver in system
current_path = os.getcwd()
# save the .exe file under the same directory of the web-scrape python script.
Path = os.path.join(current_path, "chromedriver")
# Initialize Chrome driver and start browser session controlled by automated test software under Kit profile.
caps = webdriver.DesiredCapabilities.CHROME.copy()
caps['acceptInsecureCerts'] = True
# caps = webdriver.DesiredCapabilities.CHROME.copy()
# caps['acceptInsecureCerts'] = True
# driver = webdriver.Chrome(options=options, desired_capabilities=caps)
driver = webdriver.Chrome(executable_path='chromedriver', desired_capabilities=caps)
##Step 3: Find the IDs of the items we want to scrape for [5]
# Start Grabbing Information from investopedia:
driver.get(investo)
driver.maximize_window()
timeout = 30
# Find an ID on the page and wait before executing anything until found:
try:
WebDriverWait(driver, timeout).until(EC.visibility_of_element_located((By.ID, "main_1-0")))
except TimeoutException:
driver.quit()
##Step 5: The full code that runs the scraper and saves the data to .csv files
itable = driver.find_element_by_id("main_1-0").get_attribute('outerHTML')
itables = pd.read_html(itable)
communications_bv = itables[0]
communications_bv.columns = ["Communications Best Value", "Price", "Market Cap", "12-Month Trailing P/E Ratio"]
communications_bv
# Locate column containing ticker symbols:
communications_bv_df = communications_bv.iloc[1:]
# Only keep tick information within parentheses:
communications_bv_ticks = [tick[tick.find("(")+1:tick.find(")")] for tick in communications_bv_df["Communications Best Value"]]
communications_bv_ticks
communications_fg = itables[1]
communications_fg.columns = ["Communications Fastest Growing", "Price", "Market Cap", "12-Month Trailing Total Return (%)"]
communications_fg_df = communications_fg.iloc[1:]
communications_fg_ticks = [tick[tick.find("(")+1:tick.find(")")] for tick in communications_fg_df["Communications Fastest Growing"]]
communications_fg_ticks
communications_mm = itables[2]
communications_mm.columns = ["Communications Most Momentum", "Price", "Market Cap", "12-Month Trailing Total Return (%)"]
communications_mm_df = communications_mm.iloc[1:]
communications_mm_ticks = [tick[tick.find("(")+1:tick.find(")")] for tick in communications_mm_df["Communications Most Momentum"]]
del communications_mm_ticks[-2:]
communications_mm_ticks
discretionary = driver.find_elements(By.CSS_SELECTOR, '#journey-nav__sublist_1-0 > li:nth-child(3) > a')
discretionary
discretionary[0].click()
dtable = driver.find_element_by_id("main_1-0").get_attribute('outerHTML')
dtables =
|
pd.read_html(dtable)
|
pandas.read_html
|
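# A minimal sketch (static HTML, no browser) of the pandas.read_html call used on the
# scraped 'outerHTML' above: it returns a list of DataFrames, one per <table> found in
# the markup, and needs an HTML parser such as lxml or the html5lib imported above.
import io
import pandas as pd
html = "<table><tr><th>Ticker</th><th>Price</th></tr><tr><td>ABC</td><td>10</td></tr></table>"
tables = pd.read_html(io.StringIO(html))
print(tables[0])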
import logging
from collections import defaultdict
import paramiko
import pandas as pd
from stat import S_ISDIR
from ipso_phen.ipapi.database.pandas_wrapper import PandasDbWrapper
from ipso_phen.ipapi.file_handlers.fh_phenopsys import FileHandlerPhenopsis
try:
from ipso_phen.ipapi.database.db_connect_data import db_connect_data as dbc
conf = dbc.get("phenopsis", {})
except Exception as e:
conf = {}
logger = logging.getLogger(__name__)
PHENOPSIS_ROOT_FOLDER = "./phenopsis"
FILES_PER_CONNEXION = 400
IMAGE_EXTENSIONS = (".jpg", ".tiff", ".png", ".bmp", ".tif", ".pim", ".csv")
def connect_to_phenodb():
p = paramiko.SSHClient()
p.set_missing_host_key_policy(paramiko.AutoAddPolicy)
p.connect(
conf["address"],
port=conf["port"],
username=conf["user"],
password=conf["password"],
)
return p
def get_pheno_db_ftp():
return connect_to_phenodb().open_sftp()
def get_phenopsis_exp_list() -> list:
assert conf, "Unable to connect to phenoserre"
try:
ftp = get_pheno_db_ftp()
exp_lst = sorted(ftp.listdir(path=PHENOPSIS_ROOT_FOLDER))
ftp.close()
except Exception as e:
logger.error("Unable to reach Phenopsis")
return []
else:
return [exp for exp in exp_lst if exp != "csv"]
def isdir(ftp, path):
try:
return S_ISDIR(ftp.stat(path).st_mode)
except IOError:
# Path does not exist, so by definition not a directory
return False
class PhenopsisDbWrapper(PandasDbWrapper):
def __init__(self, **kwargs) -> None:
super().__init__(**kwargs)
self.df_builder = self.get_exp_as_df
self.main_selector = {"view_option": "sw755"}
def get_all_files(
self,
ftp,
path,
extensions,
):
ret = []
sf = ftp.listdir_attr(path=path)
for fd in sf:
obj_path = f"{path}/{fd.filename}"
self._callback_undefined()
if isdir(ftp=ftp, path=obj_path):
ret.extend(
self.get_all_files(
ftp=ftp,
path=obj_path,
extensions=extensions,
)
)
else:
if obj_path.lower().endswith(extensions):
ret.append(FileHandlerPhenopsis(file_path=obj_path, database=None))
return ret
def get_local_df(self, exp_name) -> pd.DataFrame:
csv_path = (
"C:/Users/fmavianemac/Documents/Felicia/Python/database/data_out/phenopsis/"
+ f"{exp_name.lower()}.dst.csv"
)
dataframe = pd.read_csv(csv_path)
dataframe["Experiment"] = dataframe["experiment"].str.lower()
dataframe["Plant"] = dataframe["plant"].str.lower()
dataframe["date_time"] =
|
pd.to_datetime(dataframe["date_time"], utc=True)
|
pandas.to_datetime
|
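# A minimal sketch (made-up timestamps) of the pandas.to_datetime call above: utc=True
# parses offset-bearing or mixed strings into a single tz-aware (UTC) datetime64 column,
# which keeps later comparisons consistent.
import pandas as pd
s = pd.Series(['2021-06-01 12:00:00+02:00', '2021-06-01 10:00:00+00:00'])
print(pd.to_datetime(s, utc=True))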
# coding=utf-8
# pylint: disable-msg=E1101,W0612
""" test get/set & misc """
import pytest
from datetime import timedelta
import numpy as np
import pandas as pd
from pandas.core.dtypes.common import is_scalar
from pandas import (Series, DataFrame, MultiIndex,
Timestamp, Timedelta, Categorical)
from pandas.tseries.offsets import BDay
from pandas.compat import lrange, range
from pandas.util.testing import (assert_series_equal)
import pandas.util.testing as tm
from pandas.tests.series.common import TestData
JOIN_TYPES = ['inner', 'outer', 'left', 'right']
class TestMisc(TestData):
def test_getitem_setitem_ellipsis(self):
s = Series(np.random.randn(10))
np.fix(s)
result = s[...]
assert_series_equal(result, s)
s[...] = 5
assert (result == 5).all()
def test_pop(self):
# GH 6600
df = DataFrame({'A': 0, 'B': np.arange(5, dtype='int64'), 'C': 0, })
k = df.iloc[4]
result = k.pop('B')
assert result == 4
expected = Series([0, 0], index=['A', 'C'], name=4)
assert_series_equal(k, expected)
def test_getitem_get(self):
idx1 = self.series.index[5]
idx2 = self.objSeries.index[5]
assert self.series[idx1] == self.series.get(idx1)
assert self.objSeries[idx2] == self.objSeries.get(idx2)
assert self.series[idx1] == self.series[5]
assert self.objSeries[idx2] == self.objSeries[5]
assert self.series.get(-1) == self.series.get(self.series.index[-1])
assert self.series[5] == self.series.get(self.series.index[5])
# missing
d = self.ts.index[0] - BDay()
pytest.raises(KeyError, self.ts.__getitem__, d)
# None
# GH 5652
for s in [Series(), Series(index=list('abc'))]:
result = s.get(None)
assert result is None
def test_getitem_int64(self):
idx = np.int64(5)
assert self.ts[idx] == self.ts[5]
def test_getitem_fancy(self):
slice1 = self.series[[1, 2, 3]]
slice2 = self.objSeries[[1, 2, 3]]
assert self.series.index[2] == slice1.index[1]
assert self.objSeries.index[2] == slice2.index[1]
assert self.series[2] == slice1[1]
assert self.objSeries[2] == slice2[1]
def test_getitem_generator(self):
gen = (x > 0 for x in self.series)
result = self.series[gen]
result2 = self.series[iter(self.series > 0)]
expected = self.series[self.series > 0]
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
def test_type_promotion(self):
# GH12599
s = pd.Series()
s["a"] = pd.Timestamp("2016-01-01")
s["b"] = 3.0
s["c"] = "foo"
expected = Series([pd.Timestamp("2016-01-01"), 3.0, "foo"],
index=["a", "b", "c"])
assert_series_equal(s, expected)
@pytest.mark.parametrize(
'result_1, duplicate_item, expected_1',
[
[
pd.Series({1: 12, 2: [1, 2, 2, 3]}), pd.Series({1: 313}),
pd.Series({1: 12, }, dtype=object),
],
[
pd.Series({1: [1, 2, 3], 2: [1, 2, 2, 3]}),
pd.Series({1: [1, 2, 3]}), pd.Series({1: [1, 2, 3], }),
],
])
def test_getitem_with_duplicates_indices(
self, result_1, duplicate_item, expected_1):
# GH 17610
result = result_1.append(duplicate_item)
expected = expected_1.append(duplicate_item)
assert_series_equal(result[1], expected)
assert result[2] == result_1[2]
def test_getitem_out_of_bounds(self):
# don't segfault, GH #495
pytest.raises(IndexError, self.ts.__getitem__, len(self.ts))
# GH #917
s = Series([])
pytest.raises(IndexError, s.__getitem__, -1)
def test_getitem_setitem_integers(self):
# caused bug without test
s = Series([1, 2, 3], ['a', 'b', 'c'])
assert s.iloc[0] == s['a']
s.iloc[0] = 5
tm.assert_almost_equal(s['a'], 5)
def test_getitem_box_float64(self):
value = self.ts[5]
assert isinstance(value, np.float64)
def test_series_box_timestamp(self):
rng = pd.date_range('20090415', '20090519', freq='B')
ser = Series(rng)
assert isinstance(ser[5], pd.Timestamp)
rng = pd.date_range('20090415', '20090519', freq='B')
ser = Series(rng, index=rng)
assert isinstance(ser[5], pd.Timestamp)
assert isinstance(ser.iat[5], pd.Timestamp)
def test_getitem_ambiguous_keyerror(self):
s = Series(lrange(10), index=lrange(0, 20, 2))
pytest.raises(KeyError, s.__getitem__, 1)
pytest.raises(KeyError, s.loc.__getitem__, 1)
def test_getitem_unordered_dup(self):
obj = Series(lrange(5), index=['c', 'a', 'a', 'b', 'b'])
assert is_scalar(obj['c'])
assert obj['c'] == 0
def test_getitem_dups_with_missing(self):
# breaks reindex, so need to use .loc internally
# GH 4246
s = Series([1, 2, 3, 4], ['foo', 'bar', 'foo', 'bah'])
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
expected = s.loc[['foo', 'bar', 'bah', 'bam']]
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
result = s[['foo', 'bar', 'bah', 'bam']]
assert_series_equal(result, expected)
def test_getitem_dups(self):
s = Series(range(5), index=['A', 'A', 'B', 'C', 'C'], dtype=np.int64)
expected = Series([3, 4], index=['C', 'C'], dtype=np.int64)
result = s['C']
assert_series_equal(result, expected)
def test_getitem_dataframe(self):
rng = list(range(10))
s = pd.Series(10, index=rng)
df = pd.DataFrame(rng, index=rng)
pytest.raises(TypeError, s.__getitem__, df > 5)
def test_getitem_callable(self):
# GH 12533
s = pd.Series(4, index=list('ABCD'))
result = s[lambda x: 'A']
assert result == s.loc['A']
result = s[lambda x: ['A', 'B']]
tm.assert_series_equal(result, s.loc[['A', 'B']])
result = s[lambda x: [True, False, True, True]]
tm.assert_series_equal(result, s.iloc[[0, 2, 3]])
def test_setitem_ambiguous_keyerror(self):
s = Series(lrange(10), index=lrange(0, 20, 2))
# equivalent of an append
s2 = s.copy()
s2[1] = 5
expected = s.append(Series([5], index=[1]))
assert_series_equal(s2, expected)
s2 = s.copy()
s2.loc[1] = 5
expected = s.append(Series([5], index=[1]))
assert_series_equal(s2, expected)
def test_setitem_callable(self):
# GH 12533
s = pd.Series([1, 2, 3, 4], index=list('ABCD'))
s[lambda x: 'A'] = -1
tm.assert_series_equal(s, pd.Series([-1, 2, 3, 4], index=list('ABCD')))
def test_setitem_other_callable(self):
# GH 13299
inc = lambda x: x + 1
s = pd.Series([1, 2, -1, 4])
s[s < 0] = inc
expected = pd.Series([1, 2, inc, 4])
tm.assert_series_equal(s, expected)
def test_slice(self):
numSlice = self.series[10:20]
numSliceEnd = self.series[-10:]
objSlice = self.objSeries[10:20]
assert self.series.index[9] not in numSlice.index
assert self.objSeries.index[9] not in objSlice.index
assert len(numSlice) == len(numSlice.index)
assert self.series[numSlice.index[0]] == numSlice[numSlice.index[0]]
assert numSlice.index[1] == self.series.index[11]
assert tm.equalContents(numSliceEnd, np.array(self.series)[-10:])
# Test return view.
sl = self.series[10:20]
sl[:] = 0
assert (self.series[10:20] == 0).all()
def test_slice_can_reorder_not_uniquely_indexed(self):
s = Series(1, index=['a', 'a', 'b', 'b', 'c'])
s[::-1] # it works!
def test_setitem(self):
self.ts[self.ts.index[5]] = np.NaN
self.ts[[1, 2, 17]] = np.NaN
self.ts[6] = np.NaN
assert np.isnan(self.ts[6])
assert np.isnan(self.ts[2])
self.ts[np.isnan(self.ts)] = 5
assert not np.isnan(self.ts[2])
# caught this bug when writing tests
series = Series(
|
tm.makeIntIndex(20)
|
pandas.util.testing.makeIntIndex
|
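# A minimal sketch of what tm.makeIntIndex(20) supplies in these pandas tests: an integer
# index 0..19. The helper lives in pandas' private testing utilities and has been
# deprecated/removed in newer releases, so outside the test suite the plain Index below
# is the safer equivalent (a stand-in, not the tests' own code).
import pandas as pd
idx = pd.Index(range(20))            # stand-in for tm.makeIntIndex(20)
series = pd.Series(1.5, index=idx)
print(series.index[:3])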
from __future__ import division #brings in Python 3.0 mixed type calculation rules
import logging
import numpy as np
import pandas as pd
class TerrplantFunctions(object):
"""
Function class for Terrplant.
"""
def __init__(self):
"""Class representing the functions for Sip"""
super(TerrplantFunctions, self).__init__()
def run_dry(self):
"""
EEC for runoff for dry areas
"""
self.out_run_dry = (self.application_rate / self.incorporation_depth) * self.runoff_fraction
return self.out_run_dry
def run_semi(self):
"""
EEC for runoff to semi-aquatic areas
"""
self.out_run_semi = (self.application_rate / self.incorporation_depth) * self.runoff_fraction * 10
return self.out_run_semi
def spray(self):
"""
EEC for spray drift
"""
self.out_spray = self.application_rate * self.drift_fraction
return self.out_spray
def total_dry(self):
"""
EEC total for dry areas
"""
self.out_total_dry = self.out_run_dry + self.out_spray
return self.out_total_dry
def total_semi(self):
"""
EEC total for semi-aquatic areas
"""
self.out_total_semi = self.out_run_semi + self.out_spray
return self.out_total_semi
def nms_rq_dry(self):
"""
Risk Quotient for NON-LISTED MONOCOT seedlings exposed to Pesticide X in a DRY area
"""
self.out_nms_rq_dry = self.out_total_dry / self.ec25_nonlisted_seedling_emergence_monocot
return self.out_nms_rq_dry
def loc_nms_dry(self):
"""
Level of concern for non-listed monocot seedlings exposed to pesticide X in a dry area
"""
msg_pass = "The risk quotient for non-listed monocot seedlings exposed to the pesticide via runoff to dry areas indicates a potential risk."
msg_fail = "The risk quotient for non-listed monocot seedlings exposed to the pesticide via runoff to dry areas indicates that potential risk is minimal."
boo_ratios = [ratio >= 1.0 for ratio in self.out_nms_rq_dry]
self.out_nms_loc_dry = pd.Series([msg_pass if boo else msg_fail for boo in boo_ratios])
# exceed_boolean = self.out_nms_rq_dry >= 1.0
# self.out_nms_loc_dry = exceed_boolean.map(lambda x:
# 'The risk quotient for non-listed monocot seedlings exposed to the pesticide via runoff to dry areas indicates a potential risk.' if x == True
# else 'The risk quotient for non-listed monocot seedlings exposed to the pesticide via runoff to dry areas indicates that potential risk is minimal.')
return self.out_nms_loc_dry
def nms_rq_semi(self):
"""
Risk Quotient for NON-LISTED MONOCOT seedlings exposed to Pesticide X in a SEMI-AQUATIC area
"""
self.out_nms_rq_semi = self.out_total_semi / self.ec25_nonlisted_seedling_emergence_monocot
return self.out_nms_rq_semi
def loc_nms_semi(self):
"""
Level of concern for non-listed monocot seedlings exposed to pesticide X in a semi-aquatic area
"""
msg_pass = "The risk quotient for non-listed monocot seedlings exposed to the pesticide via runoff to semi-aquatic areas indicates a potential risk."
msg_fail = "The risk quotient for non-listed monocot seedlings exposed to the pesticide via runoff to semi-aquatic areas indicates that potential risk is minimal."
boo_ratios = [ratio >= 1.0 for ratio in self.out_nms_rq_semi]
self.out_nms_loc_semi = pd.Series([msg_pass if boo else msg_fail for boo in boo_ratios])
#exceed_boolean = self.out_nms_rq_semi >= 1.0
#self.out_nms_loc_semi = exceed_boolean.map(lambda x:
# 'The risk quotient for non-listed monocot seedlings exposed to the pesticide via runoff to semi-aquatic areas indicates a potential risk.' if x == True
# else 'The risk quotient for non-listed monocot seedlings exposed to the pesticide via runoff to semi-aquatic areas indicates that potential risk is minimal.')
return self.out_nms_loc_semi
def nms_rq_spray(self):
"""
Risk Quotient for NON-LISTED MONOCOT seedlings exposed to Pesticide X via SPRAY drift
"""
self.out_nms_rq_spray = self.out_spray / self.out_min_nms_spray
return self.out_nms_rq_spray
def loc_nms_spray(self):
"""
Level of concern for non-listed monocot seedlings exposed to pesticide via spray drift
"""
msg_pass = "The risk quotient for non-listed monocot seedlings exposed to the pesticide via spray drift indicates a potential risk."
msg_fail = "The risk quotient for non-listed monocot seedlings exposed to the pesticide via spray drift indicates that potential risk is minimal."
boo_ratios = [ratio >= 1.0 for ratio in self.out_nms_rq_spray]
self.out_nms_loc_spray = pd.Series([msg_pass if boo else msg_fail for boo in boo_ratios])
#exceed_boolean = self.out_nms_rq_spray >= 1.0
#self.out_nms_loc_spray = exceed_boolean.map(lambda x:
# 'The risk quotient for non-listed monocot seedlings exposed to the pesticide via spray drift indicates a potential risk.' if x == True
# else 'The risk quotient for non-listed monocot seedlings exposed to the pesticide via spray drift indicates that potential risk is minimal.')
return self.out_nms_loc_spray
def lms_rq_dry(self):
"""
Risk Quotient for LISTED MONOCOT seedlings exposed to Pesticide X in a DRY areas
"""
self.out_lms_rq_dry = self.out_total_dry / self.noaec_listed_seedling_emergence_monocot
return self.out_lms_rq_dry
def loc_lms_dry(self):
"""
Level of concern for listed monocot seedlings exposed to pesticide via runoff in a dry area
"""
msg_pass = "The risk quotient for listed monocot seedlings exposed to the pesticide via runoff to dry areas indicates a potential risk."
msg_fail = "The risk quotient for listed monocot seedlings exposed to the pesticide via runoff to dry areas indicates that potential risk is minimal."
boo_ratios = [ratio >= 1.0 for ratio in self.out_lms_rq_dry]
self.out_lms_loc_dry = pd.Series([msg_pass if boo else msg_fail for boo in boo_ratios])
#exceed_boolean = self.out_lms_rq_dry >= 1.0
#self.out_lms_loc_dry = exceed_boolean.map(lambda x:
# 'The risk quotient for listed monocot seedlings exposed to the pesticide via runoff to dry areas indicates a potential risk.' if x == True
# else 'The risk quotient for listed monocot seedlings exposed to the pesticide via runoff to dry areas indicates that potential risk is minimal.')
return self.out_lms_loc_dry
def lms_rq_semi(self):
"""
Risk Quotient for LISTED MONOCOT seedlings exposed to Pesticide X in a SEMI-AQUATIC area
"""
self.out_lms_rq_semi = self.out_total_semi / self.noaec_listed_seedling_emergence_monocot
return self.out_lms_rq_semi
def loc_lms_semi(self):
"""
Level of concern for listed monocot seedlings exposed to pesticide X in semi-aquatic areas
"""
msg_pass = "The risk quotient for listed monocot seedlings exposed to the pesticide via runoff to semi-aquatic areas indicates a potential risk."
msg_fail = "The risk quotient for listed monocot seedlings exposed to the pesticide via runoff to semi-aquatic areas indicates that potential risk is minimal."
boo_ratios = [ratio >= 1.0 for ratio in self.out_lms_rq_semi]
self.out_lms_loc_semi = pd.Series([msg_pass if boo else msg_fail for boo in boo_ratios])
#exceed_boolean = self.out_lms_rq_semi >= 1.0
#self.out_lms_loc_semi = exceed_boolean.map(lambda x:
# 'The risk quotient for listed monocot seedlings exposed to the pesticide via runoff to semi-aquatic areas indicates a potential risk.' if x == True
# else 'The risk quotient for listed monocot seedlings exposed to the pesticide via runoff to semi-aquatic areas indicates that potential risk is minimal.')
return self.out_lms_loc_semi
def lms_rq_spray(self):
"""
Risk Quotient for LISTED MONOCOT seedlings exposed to Pesticide X via SPRAY drift
"""
self.out_lms_rq_spray = self.out_spray / self.out_min_lms_spray
return self.out_lms_rq_spray
def loc_lms_spray(self):
"""
Level of concern for listed monocot seedlings exposed to pesticide X via spray drift
"""
msg_pass = "The risk quotient for listed monocot seedlings exposed to the pesticide via spray drift indicates a potential risk."
msg_fail = "The risk quotient for listed monocot seedlings exposed to the pesticide via spray drift indicates that potential risk is minimal."
boo_ratios = [ratio >= 1.0 for ratio in self.out_lms_rq_spray]
self.out_lms_loc_spray = pd.Series([msg_pass if boo else msg_fail for boo in boo_ratios])
#exceed_boolean = self.out_lms_rq_spray >= 1.0
#self.out_lms_loc_spray = exceed_boolean.map(lambda x:
# 'The risk quotient for listed monocot seedlings exposed to the pesticide via spray drift indicates a potential risk.' if x == True
# else 'The risk quotient for listed monocot seedlings exposed to the pesticide via spray drift indicates that potential risk is minimal.')
return self.out_lms_loc_spray
def nds_rq_dry(self):
"""
Risk Quotient for NON-LISTED DICOT seedlings exposed to Pesticide X in DRY areas
"""
self.out_nds_rq_dry = self.out_total_dry / self.ec25_nonlisted_seedling_emergence_dicot
return self.out_nds_rq_dry
def loc_nds_dry(self):
"""
Level of concern for non-listed dicot seedlings exposed to pesticide X in dry areas
"""
msg_pass = "The risk quotient for non-listed dicot seedlings exposed to the pesticide via runoff to dry areas indicates a potential risk."
msg_fail = "The risk quotient for non-listed dicot seedlings exposed to the pesticide via runoff to dry areas indicates that potential risk is minimal."
boo_ratios = [ratio >= 1.0 for ratio in self.out_nds_rq_dry]
self.out_nds_loc_dry = pd.Series([msg_pass if boo else msg_fail for boo in boo_ratios])
#exceed_boolean = self.out_nds_rq_dry >= 1.0
#self.out_nds_loc_dry = exceed_boolean.map(lambda x:
# 'The risk quotient for non-listed dicot seedlings exposed to the pesticide via runoff to dry areas indicates a potential risk.' if x == True
# else 'The risk quotient for non-listed dicot seedlings exposed to the pesticide via runoff to dry areas indicates that potential risk is minimal.')
return self.out_nds_loc_dry
def nds_rq_semi(self):
"""
Risk Quotient for NON-LISTED DICOT seedlings exposed to Pesticide X in SEMI-AQUATIC areas
"""
self.out_nds_rq_semi = self.out_total_semi / self.ec25_nonlisted_seedling_emergence_dicot
return self.out_nds_rq_semi
def loc_nds_semi(self):
"""
Level of concern for non-listed dicot seedlings exposed to pesticide X in semi-aquatic areas
"""
msg_pass = "The risk quotient for non-listed dicot seedlings exposed to the pesticide via runoff to semi-aquatic areas indicates a potential risk."
msg_fail = "The risk quotient for non-listed dicot seedlings exposed to the pesticide via runoff to semi-aquatic areas indicates that potential risk is minimal."
boo_ratios = [ratio >= 1.0 for ratio in self.out_nds_rq_semi]
self.out_nds_loc_semi = pd.Series([msg_pass if boo else msg_fail for boo in boo_ratios])
#exceed_boolean = self.out_nds_rq_semi >= 1.0
#self.out_nds_loc_semi = exceed_boolean.map(lambda x:
#'The risk quotient for non-listed dicot seedlings exposed to the pesticide via runoff to semi-aquatic areas indicates a potential risk.' if x == True
# else 'The risk quotient for non-listed dicot seedlings exposed to the pesticide via runoff to semi-aquatic areas indicates that potential risk is minimal.')
return self.out_nds_loc_semi
def nds_rq_spray(self):
"""
Risk Quotient for NON-LISTED DICOT seedlings exposed to Pesticide X via SPRAY drift
"""
self.out_nds_rq_spray = self.out_spray / self.out_min_nds_spray
return self.out_nds_rq_spray
def loc_nds_spray(self):
"""
Level of concern for non-listed dicot seedlings exposed to pesticide X via spray drift
"""
msg_pass = "The risk quotient for non-listed dicot seedlings exposed to the pesticide via spray drift indicates a potential risk."
msg_fail = "The risk quotient for non-listed dicot seedlings exposed to the pesticide via spray drift indicates that potential risk is minimal."
boo_ratios = [ratio >= 1.0 for ratio in self.out_nds_rq_spray]
self.out_nds_loc_spray = pd.Series([msg_pass if boo else msg_fail for boo in boo_ratios])
#exceed_boolean = self.out_nds_rq_spray >= 1.0
#self.out_nds_loc_spray = exceed_boolean.map(lambda x:
# 'The risk quotient for non-listed dicot seedlings exposed to the pesticide via spray drift indicates a potential risk.' if x == True
# else 'The risk quotient for non-listed dicot seedlings exposed to the pesticide via spray drift indicates that potential risk is minimal.')
return self.out_nds_loc_spray
def lds_rq_dry(self):
"""
Risk Quotient for LISTED DICOT seedlings exposed to Pesticide X in DRY areas
"""
self.out_lds_rq_dry = self.out_total_dry / self.noaec_listed_seedling_emergence_dicot
return self.out_lds_rq_dry
def loc_lds_dry(self):
"""
Level of concern for listed dicot seedlings exposed to pesticide X in dry areas
"""
msg_pass = "The risk quotient for listed dicot seedlings exposed to the pesticide via runoff to dry areas indicates a potential risk."
msg_fail = "The risk quotient for listed dicot seedlings exposed to the pesticide via runoff to dry areas indicates that potential risk is minimal."
boo_ratios = [ratio >= 1.0 for ratio in self.out_lds_rq_dry]
self.out_lds_loc_dry = pd.Series([msg_pass if boo else msg_fail for boo in boo_ratios])
#exceed_boolean = self.out_lds_rq_dry >= 1.0
#self.out_lds_loc_dry = exceed_boolean.map(lambda x:
# 'The risk quotient for listed dicot seedlings exposed to the pesticide via runoff to dry areas indicates a potential risk.' if x == True
# else 'The risk quotient for listed dicot seedlings exposed to the pesticide via runoff to dry areas indicates that potential risk is minimal.')
return self.out_lds_loc_dry
def lds_rq_semi(self):
"""
Risk Quotient for LISTED DICOT seedlings exposed to Pesticide X in SEMI-AQUATIC areas
"""
self.out_lds_rq_semi = self.out_total_semi / self.noaec_listed_seedling_emergence_dicot
return self.out_lds_rq_semi
def loc_lds_semi(self):
"""
Level of concern for listed dicot seedlings exposed to pesticide X in semi-aquatic areas
"""
msg_pass = "The risk quotient for listed dicot seedlings exposed to the pesticide via runoff to semi-aquatic areas indicates a potential risk."
msg_fail = "The risk quotient for listed dicot seedlings exposed to the pesticide via runoff to semi-aquatic areas indicates that potential risk is minimal."
boo_ratios = [ratio >= 1.0 for ratio in self.out_lds_rq_semi]
self.out_lds_loc_semi = pd.Series([msg_pass if boo else msg_fail for boo in boo_ratios])
return self.out_lds_loc_semi
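# Illustrative sketch (not from the original model code): every RQ/LOC pair above follows
# the same pattern - divide an exposure estimate by a toxicity endpoint and compare the
# ratio against the level of concern of 1.0. A minimal standalone version, assuming
# plain pandas Series inputs, might look like:
# import pandas as pd
# eec = pd.Series([0.5, 2.0])   # hypothetical exposure estimates
# endpoint = 1.0                # hypothetical NOAEC/EC25 endpoint
# rq = eec / endpoint
# loc = pd.Series(["potential risk" if r >= 1.0 else "minimal risk" for r in rq])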
from django.conf import settings
import pandas as pd
import os
info_path = os.path.join(settings.BASE_DIR, 'info.pkl')
fin_path = os.path.join(settings.BASE_DIR, 'fin.pkl')
mc_path = os.path.join(settings.BASE_DIR, 'mc.pkl')
info = pd.read_pickle(info_path)
fin = pd.read_pickle(fin_path)
mc = pd.read_pickle(mc_path)
def backtest(model, n):
bt = Backtest(model=model, bm=BM, fin=fin, mc=mc, info=info, n=n)
bt.run()
return bt
def get_fisyear(date):
if type(date)==str:
date = pd.Timestamp(date)
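# The remainder of get_fisyear is not shown in this snippet. As a purely hypothetical
# sketch (the April-March fiscal-year convention below is an assumption, not taken from
# the source), the conversion could continue along these lines:
# def get_fisyear_sketch(date):
#     if isinstance(date, str):
#         date = pd.Timestamp(date)
#     return date.year if date.month >= 4 else date.year - 1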
import glob, os, sys
import pandas as pd
def get_name(files):
name = []
for i in range(len(files)):
basename = os.path.basename(files[i])
split = os.path.splitext(basename)
name.append(split[0])
return name
if __name__ == '__main__':
# exception handling
os.makedirs('./result/data', exist_ok=True)
if (os.path.isdir('./output') == True):
files = glob.glob('./output/*.csv')
else:
print('Error: Not found ./output')
sys.exit()
if files == []:
print('Error: Not found csv file')
sys.exit()
# aggregate predict results
name = get_name(files)
ind = ['original', 'sm', 'b1', 'b2', 'enn', 'tom', 'ada', 'mnd']
col = ['os', 'Sensitivity', 'Specificity', 'G-mean', 'F-1', 'MCC', 'AUC']
for i in range(len(ind)):
svm = pd.DataFrame(index=[], columns=col)
tree = pd.DataFrame(index=[], columns=col)
knn = pd.DataFrame(index=[], columns=col)
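# Hypothetical continuation sketch (the rest of the loop is not shown in this snippet):
# per-classifier metric rows could be collected and written out roughly like this; the
# metric values and file names below are made up for illustration only.
# row = pd.DataFrame([[ind[i], 0.90, 0.80, 0.85, 0.82, 0.70, 0.88]], columns=col)
# svm = pd.concat([svm, row], ignore_index=True)
# svm.to_csv('./result/data/svm_' + ind[i] + '.csv', index=False)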
"""
Script to process the HDF5 files and convert them to netcdf
"""
#==============================================================================
__title__ = "Site Vegetation data"
__author__ = "<NAME>"
__version__ = "v1.0(04.04.2019)"
__email__ = "<EMAIL>"
#==============================================================================
# +++++ Check the paths and set ex path to fireflies folder +++++
import os
import sys
if not os.getcwd().endswith("fireflies"):
if "fireflies" in os.getcwd():
p1, p2, _ = os.getcwd().partition("fireflies")
os.chdir(p1+p2)
else:
raise OSError(
"This script was called from an unknown path. CWD can not be set"
)
sys.path.append(os.getcwd())
#==============================================================================
# Import packages
import numpy as np
import pandas as pd
import argparse
import datetime as dt
from collections import OrderedDict
import warnings as warn
from scipy import stats
import xarray as xr
from numba import jit
import bottleneck as bn
import scipy as sp
import glob
from dask.diagnostics import ProgressBar
import myfunctions.PlotFunctions as pf
import myfunctions.corefunctions as cf
from netCDF4 import Dataset, num2date, date2num
import shutil
# from netCDF4 import Dataset, num2date, date2num
# from scipy import stats
# import statsmodels.stats.multitest as smsM
# Import plotting and colorpackages
import matplotlib.pyplot as plt
import matplotlib.colors as mpc
import matplotlib as mpl
import palettable
import seaborn as sns
import cartopy.crs as ccrs
import cartopy.feature as cpf
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
# Import debugging packages
import ipdb
print("numpy version : ", np.__version__)
print("pandas version : ", pd.__version__)
print("xarray version : ", xr.__version__)
#==============================================================================
def main():
force = False
# ========== loop over the sensors and start years
for sen, yrst, dsn in zip(["terra", "aqua"], [2000, 2002], ["MOD13C1", "MYD13C1"]):
# ========== Make the path and tmp folder ==========
path = "/media/ubuntu/Seagate Backup Plus Drive/Data51/NDVI/5.MODIS/%s/" % sen
if sen == "aqua":
path += "5km/"
tmp = path + "tmp/"
cf.pymkdir(tmp)
prop = path+"processed/"
cf.pymkdir(prop)
# ========== Set out the file types ==========
nclist = ["complete", "monmax", "anmax"]
fncom = [prop + 'MODIS_%s_%s_5kmCMG_%s.nc' % (sen, dsn, ty) for ty in nclist]
# ipdb.set_trace()
if all([os.path.isfile(fn) for fn in fncom]) and not force:
# ds = xr.open_dataset(fncom)
ftmp = glob.glob("%s%s.A*.*.nc" % (path, dsn))
fmmtmp = glob.glob("%s/monmax/*.nc" % (path))
famtmp = glob.glob("%s/anmax/*.nc" % (path))
# ipdb.set_trace()
else:
# ========== Make a list of the temp files ==========
ftmp = []
fmmtmp = []
famtmp = []
# ========== Loop over each year ==========
for year in range(yrst, 2020):
# ========== get the list of files in a given year ==========
files = glob.glob("%s%s.A%d*.*.hdf" % (path, dsn, year))
fctmp = [filefix(sen, fname, tmp, year, force) for fname in files]
ftmp += fctmp
dsin = xr.open_mfdataset(fctmp)
# ========== Fix the date issues ==========
dsin = dsin.reindex(time=sorted(dsin.time.values))
mm, am = dsresample(dsin, sen, dsn, tmp, year, force)
fmmtmp.append(mm)
famtmp.append(am)
# ========== Loop over the configs ==========
for ty, fnl, fn in zip(nclist, [ftmp, fmmtmp, famtmp], fncom):
if not os.path.isfile(fn) or force:
print("stacking the %s MODIS %s data: " % (ty, sen), pd.Timestamp.now())
# ========== Create a new multifile dataset ==========
ds = xr.open_mfdataset(fnl)
# ========== Fix the date issues ==========
ds = ds.reindex(time=sorted(ds.time.values))
# # ========== Slice off the unnecessary data ==========
if ty == "anmax":
ds = ds.sel(time=slice(None,"2018-12-31"))
dts = datefixer(
|
pd.to_datetime(ds.time.values)
|
pandas.to_datetime
|
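# Note: datefixer() is defined elsewhere in the original script and is not shown here.
# As a rough, hypothetical illustration only, a helper with a similar purpose might
# normalise the parsed times and build CF-style time metadata for the netCDF output:
# def datefixer_sketch(times):
#     dates = pd.to_datetime(times).to_pydatetime()
#     units = "days since 1900-01-01 00:00:00"
#     return {"CFTime": date2num(dates, units, calendar="standard"), "units": units}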
"""Extract minimal growth media and growth rates."""
import pandas as pd
import micom
from micom import load_pickle
from micom.media import minimal_medium
from micom.workflows import workflow
from micom.logger import logger
logger = micom.logger.logger
try:
max_procs = snakemake.threads
except NameError:
max_procs = 20
def media_and_gcs(sam):
com = load_pickle("data/models/" + sam + ".pickle")
# Get growth rates
try:
sol = com.cooperative_tradeoff(fraction=0.5)
rates = sol.members["growth_rate"].copy()
rates["community"] = sol.growth_rate
rates.name = sam
except Exception:
logger.warning("Could not solve cooperative tradeoff for %s." % sam)
return None
# Get the minimal medium
med = minimal_medium(com, 0.95 * sol.growth_rate, exports=True)
med.name = sam
# Apply medium and reoptimize
com.medium = med[med > 0]
sol = com.cooperative_tradeoff(fraction=0.5, fluxes=True, pfba=False)
fluxes = sol.fluxes
fluxes["sample"] = sam
return {"medium": med, "gcs": rates, "fluxes": fluxes}
samples = pd.read_csv("data/recent.csv")
gcs = pd.DataFrame()
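# Hypothetical aggregation sketch (the remainder of the original script is not shown):
# the per-sample results of media_and_gcs could be collected and concatenated with plain
# pandas; the column name 'run_accession' below is an assumption about data/recent.csv.
# results = [media_and_gcs(s) for s in samples.run_accession]
# results = [r for r in results if r is not None]
# gcs = pd.concat([r["gcs"] for r in results], axis=1).T
# media = pd.concat([r["medium"] for r in results], axis=1).T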
"""
"""
"""
>>> # ---
>>> # SETUP
>>> # ---
>>> import os
>>> import logging
>>> logger = logging.getLogger('PT3S.Rm')
>>> # ---
>>> # path
>>> # ---
>>> if __name__ == "__main__":
... try:
... dummy=__file__
... logger.debug("{0:s}{1:s}{2:s}".format('DOCTEST: __main__ Context: ','path = os.path.dirname(__file__)'," ."))
... path = os.path.dirname(__file__)
... except NameError:
... logger.debug("{0:s}{1:s}{2:s}".format('DOCTEST: __main__ Context: ',"path = '.' because __file__ not defined and: "," from Rm import Rm"))
... path = '.'
... from Rm import Rm
... else:
... path = '.'
... logger.debug("{0:s}{1:s}".format('Not __main__ Context: ',"path = '.' ."))
>>> try:
... from PT3S import Mx
... except ImportError:
... logger.debug("{0:s}{1:s}".format("DOCTEST: from PT3S import Mx: ImportError: ","trying import Mx instead ... maybe pip install -e . is active ..."))
... import Mx
>>> try:
... from PT3S import Xm
... except ImportError:
... logger.debug("{0:s}{1:s}".format("DOCTEST: from PT3S import Xm: ImportError: ","trying import Xm instead ... maybe pip install -e . is active ..."))
... import Xm
>>> # ---
>>> # testDir
>>> # ---
>>> # globs={'testDir':'testdata'}
>>> try:
... dummy= testDir
... except NameError:
... testDir='testdata'
>>> # ---
>>> # dotResolution
>>> # ---
>>> # globs={'dotResolution':''}
>>> try:
... dummy= dotResolution
... except NameError:
... dotResolution=''
>>> import pandas as pd
>>> import matplotlib.pyplot as plt
>>> pd.set_option('display.max_columns',None)
>>> pd.set_option('display.width',666666666)
>>> # ---
>>> # LocalHeatingNetwork SETUP
>>> # ---
>>> xmlFile=os.path.join(os.path.join(path,testDir),'LocalHeatingNetwork.XML')
>>> xm=Xm.Xm(xmlFile=xmlFile)
>>> mx1File=os.path.join(path,os.path.join(testDir,'WDLocalHeatingNetwork\B1\V0\BZ1\M-1-0-1'+dotResolution+'.MX1'))
>>> mx=Mx.Mx(mx1File=mx1File,NoH5Read=True,NoMxsRead=True)
>>> mx.setResultsToMxsFile(NewH5Vec=True)
5
>>> xm.MxSync(mx=mx)
>>> rm=Rm(xm=xm,mx=mx)
>>> # ---
>>> # Plot 3Classes False
>>> # ---
>>> plt.close('all')
>>> ppi=72 # matplotlib default
>>> dpi_screen=2*ppi
>>> fig=plt.figure(dpi=dpi_screen,linewidth=1.)
>>> timeDeltaToT=mx.df.index[2]-mx.df.index[0]
>>> # 3Classes and FixedLimits are False by default; RefPerc is True by default
>>> # MCategory is always assigned according to FixedLimitsHigh/Low ...
>>> pFWVB=rm.pltNetDHUS(timeDeltaToT=timeDeltaToT,pFWVBMeasureCBFixedLimitHigh=0.80,pFWVBMeasureCBFixedLimitLow=0.66,pFWVBGCategory=['BLNZ1u5u7'],pVICsDf=pd.DataFrame({'Kundenname': ['VIC1'],'Knotenname': ['V-K007']}))
>>> # ---
>>> # Check pFWVB Return
>>> # ---
>>> f=lambda x: "{0:8.5f}".format(x)
>>> print(pFWVB[['Measure','MCategory','GCategory','VIC']].round(2).to_string(formatters={'Measure':f}))
Measure MCategory GCategory VIC
0 0.81000 Top BLNZ1u5u7 NaN
1 0.67000 Middle NaN
2 0.66000 Middle BLNZ1u5u7 NaN
3 0.66000 Bottom BLNZ1u5u7 VIC1
4 0.69000 Middle NaN
>>> # ---
>>> # Print
>>> # ---
>>> (wD,fileName)=os.path.split(xm.xmlFile)
>>> (base,ext)=os.path.splitext(fileName)
>>> plotFileName=wD+os.path.sep+base+'.'+'pdf'
>>> if os.path.exists(plotFileName):
... os.remove(plotFileName)
>>> plt.savefig(plotFileName,dpi=2*dpi_screen)
>>> os.path.exists(plotFileName)
True
>>> # ---
>>> # Plot 3Classes True
>>> # ---
>>> plt.close('all')
>>> # FixedLimits is automatically set to True when 3Classes is True ...
>>> pFWVB=rm.pltNetDHUS(timeDeltaToT=timeDeltaToT,pFWVBMeasure3Classes=True,pFWVBMeasureCBFixedLimitHigh=0.80,pFWVBMeasureCBFixedLimitLow=0.66)
>>> # ---
>>> # LocalHeatingNetwork Clean Up
>>> # ---
>>> if os.path.exists(mx.h5File):
... os.remove(mx.h5File)
>>> if os.path.exists(mx.mxsZipFile):
... os.remove(mx.mxsZipFile)
>>> if os.path.exists(mx.h5FileVecs):
... os.remove(mx.h5FileVecs)
>>> if os.path.exists(plotFileName):
... os.remove(plotFileName)
"""
__version__='172.16.58.3.dev1'
import warnings # 3.6
#...\Anaconda3\lib\site-packages\h5py\__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.
# from ._conv import register_converters as _register_converters
warnings.simplefilter(action='ignore', category=FutureWarning)
#C:\Users\Wolters\Anaconda3\lib\site-packages\matplotlib\cbook\deprecation.py:107: MatplotlibDeprecationWarning: Adding an axes using the same arguments as a previous axes currently reuses the earlier instance. In a future version, a new instance will always be created and returned. Meanwhile, this warning can be suppressed, and the future behavior ensured, by passing a unique label to each axes instance.
# warnings.warn(message, mplDeprecation, stacklevel=1)
import matplotlib.cbook
warnings.filterwarnings("ignore",category=matplotlib.cbook.mplDeprecation)
import os
import sys
import nbformat
from nbconvert.preprocessors import ExecutePreprocessor
from nbconvert.preprocessors.execute import CellExecutionError
import timeit
import xml.etree.ElementTree as ET
import re
import struct
import collections
import zipfile
import pandas as pd
import h5py
from collections import namedtuple
from operator import attrgetter
import subprocess
import warnings
import tables
import math
import matplotlib.pyplot as plt
from matplotlib import colors
from matplotlib.colorbar import make_axes
import matplotlib as mpl
import matplotlib.gridspec as gridspec
import matplotlib.dates as mdates
from matplotlib import markers
from matplotlib.path import Path
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib.backends.backend_pdf import PdfPages
import numpy as np
import scipy
import networkx as nx
from itertools import chain
from copy import deepcopy
from scipy.signal import savgol_filter
import logging
# ---
# --- PT3S Imports
# ---
logger = logging.getLogger('PT3S')
if __name__ == "__main__":
logger.debug("{0:s}{1:s}".format('in MODULEFILE: __main__ Context','.'))
else:
logger.debug("{0:s}{1:s}{2:s}{3:s}".format('in MODULEFILE: Not __main__ Context: ','__name__: ',__name__," ."))
try:
from PT3S import Mx
except ImportError:
logger.debug("{0:s}{1:s}".format('ImportError: ','from PT3S import Mx - trying import Mx instead ... maybe pip install -e . is active ...'))
import Mx
try:
from PT3S import Xm
except ImportError:
logger.debug("{0:s}{1:s}".format('ImportError: ','from PT3S import Xm - trying import Xm instead ... maybe pip install -e . is active ...'))
import Xm
try:
from PT3S import Am
except ImportError:
logger.debug("{0:s}{1:s}".format('ImportError: ','from PT3S import Am - trying import Am instead ... maybe pip install -e . is active ...'))
import Am
try:
from PT3S import Lx
except ImportError:
logger.debug("{0:s}{1:s}".format('ImportError: ','from PT3S import Lx - trying import Lx instead ... maybe pip install -e . is active ...'))
import Lx
# ---
# --- main Imports
# ---
import argparse
import unittest
import doctest
import math
from itertools import tee
# --- Parameter Allgemein
# -----------------------
DINA6 = (4.13 , 5.83)
DINA5 = (5.83 , 8.27)
DINA4 = (8.27 , 11.69)
DINA3 = (11.69 , 16.54)
DINA2 = (16.54 , 23.39)
DINA1 = (23.39 , 33.11)
DINA0 = (33.11 , 46.81)
DINA6q = ( 5.83, 4.13)
DINA5q = ( 8.27, 5.83)
DINA4q = ( 11.69, 8.27)
DINA3q = ( 16.54,11.69)
DINA2q = ( 23.39,16.54)
DINA1q = ( 33.11,23.39)
DINA0q = ( 46.81,33.11)
dpiSize=72
DINA4_x=8.2677165354
DINA4_y=11.6929133858
DINA3_x=DINA4_x*math.sqrt(2)
DINA3_y=DINA4_y*math.sqrt(2)
linestyle_tuple = [
('loosely dotted', (0, (1, 10))),
('dotted', (0, (1, 1))),
('densely dotted', (0, (1, 1))),
('loosely dashed', (0, (5, 10))),
('dashed', (0, (5, 5))),
('densely dashed', (0, (5, 1))),
('loosely dashdotted', (0, (3, 10, 1, 10))),
('dashdotted', (0, (3, 5, 1, 5))),
('densely dashdotted', (0, (3, 1, 1, 1))),
('dashdotdotted', (0, (3, 5, 1, 5, 1, 5))),
('loosely dashdotdotted', (0, (3, 10, 1, 10, 1, 10))),
('densely dashdotdotted', (0, (3, 1, 1, 1, 1, 1)))]
ylimpD=(-5,70)
ylimpDmlc=(600,1350) #(300,1050)
ylimQD=(-75,300)
ylim3rdD=(0,3)
yticks3rdD=[0,1,2,3]
yGridStepsD=30
yticksALD=[0,3,4,10,20,30,40]
ylimALD=(yticksALD[0],yticksALD[-1])
yticksRD=[0,2,4,10,15,30,45]
ylimRD=(-yticksRD[-1],yticksRD[-1])
ylimACD=(-5,5)
yticksACD=[-5,0,5]
yticksTVD=[0,100,135,180,200,300]
ylimTVD=(yticksTVD[0],yticksTVD[-1])
plotTVAmLabelD='TIMER u. AM [Sek. u. (N)m3*100]'
def getDerivative(df,col,shiftSize=1,windowSize=60,fct=None,savgol_polyorder=None):
"""
returns a df
df: the df
col: the col of df to be derived
shiftsize: the Difference between 2 indices for dValue and dt
windowSize: size for rolling mean or window_length of savgol_filter; the chosen filter technique is applied after fct
windowSize must be an even number
for savgol_filter windowSize-1 is used
fct: function to be applied on dValue/dt
savgol_polyorder: if not None savgol_filter is applied; pandas' rolling.mean() is applied otherwise
new cols:
dt (with shiftSize)
dValue (from col)
dValueDt (from col); fct applied
dValueDtFiltered; the chosen filter technique is applied
"""
mDf=df.dropna().copy(deep=True)
try:
dt=mDf.index.to_series().diff(periods=shiftSize)
mDf['dt']=dt
mDf['dValue']=mDf[col].diff(periods=shiftSize)
mDf=mDf.iloc[shiftSize:]
mDf['dValueDt']=mDf.apply(lambda row: row['dValue']/row['dt'].total_seconds(),axis=1)
if fct != None:
mDf['dValueDt']=mDf['dValueDt'].apply(fct)
if savgol_polyorder == None:
mDf['dValueDtFiltered']=mDf['dValueDt'].rolling(window=windowSize).mean()
mDf=mDf.iloc[windowSize-1:]
else:
mDf['dValueDtFiltered']=savgol_filter(mDf['dValueDt'].values,windowSize-1, savgol_polyorder)
mDf=mDf.iloc[windowSize//2+1+savgol_polyorder-1:] # integer division so that iloc receives an int position
#mDf=mDf.iloc[windowSize-1:]
except Exception as e:
raise e
finally:
return mDf
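# Minimal usage sketch (illustration only, assuming a DataFrame with a DatetimeIndex):
# idx = pd.date_range('2021-03-19 01:02:00', periods=180, freq='S')
# dfIn = pd.DataFrame({'p': np.linspace(0., 10., 180)}, index=idx)
# dfOut = getDerivative(dfIn, 'p', shiftSize=1, windowSize=60)
# dfOut[['dt', 'dValue', 'dValueDt', 'dValueDtFiltered']].head()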
def fCVDNodesFromName(x):
Nodes=x.replace('°','~')
Nodes=Nodes.split('~')
Nodes =[Node.lstrip().rstrip() for Node in Nodes if len(Node)>0]
return Nodes
def fgetMaxpMinFromName(CVDName,dfSegsNodesNDataDpkt):
"""
returns max. pMin for alle NODEs in CVDName
"""
nodeLst=fCVDNodesFromName(CVDName)
df=dfSegsNodesNDataDpkt[dfSegsNodesNDataDpkt['NODEsName'].isin(nodeLst)][['pMin','pMinMlc']]
s=df.max()
return s.pMin
# --- Funktionen Allgemein
# -----------------------
def pairwise(iterable):
"s -> (s0,s1), (s1,s2), (s2, s3), ..."
a, b = tee(iterable)
next(b, None)
return zip(a, b)
def genTimespans(timeStart
,timeEnd
,timeSpan=pd.Timedelta('12 Minutes')
,timeOverlap=pd.Timedelta('0 Seconds')
,timeStartPraefix=pd.Timedelta('0 Seconds')
,timeEndPostfix=pd.Timedelta('0 Seconds')
):
# generates timeSpan-Sections
# if timeStart is
# an int, it is considered as the number of desired Sections before timeEnd; timeEnd must be a time
# a time, it is considered as timeStart
# if timeEnd is
# an int, it is considered as the number of desired Sections after timeStart; timeStart must be a time
# a time, it is considered as timeEnd
# if timeSpan is
# an int, it is considered as the number of desired Sections
# a time, it is considered as timeSpan
# returns an array of tuples
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
xlims=[]
try:
if type(timeStart) == int:
numOfDesiredSections=timeStart
timeStartEff=timeEnd+timeEndPostfix-numOfDesiredSections*timeSpan+(numOfDesiredSections-1)*timeOverlap-timeStartPraefix
else:
timeStartEff=timeStart-timeStartPraefix
logger.debug("{0:s}timeStartEff: {1:s}".format(logStr,str(timeStartEff)))
if type(timeEnd) == int:
numOfDesiredSections=timeEnd
timeEndEff=timeStart-timeStartPraefix+numOfDesiredSections*timeSpan-(numOfDesiredSections-1)*timeOverlap+timeEndPostfix
else:
timeEndEff=timeEnd+timeEndPostfix
logger.debug("{0:s}timeEndEff: {1:s}".format(logStr,str(timeEndEff)))
if type(timeSpan) == int:
numOfDesiredSections=timeSpan
dt=timeEndEff-timeStartEff
timeSpanEff=dt/numOfDesiredSections+(numOfDesiredSections-1)*timeOverlap
else:
timeSpanEff=timeSpan
logger.debug("{0:s}timeSpanEff: {1:s}".format(logStr,str(timeSpanEff)))
logger.debug("{0:s}timeOverlap: {1:s}".format(logStr,str(timeOverlap)))
timeStartAct = timeStartEff
while timeStartAct < timeEndEff:
logger.debug("{0:s}timeStartAct: {1:s}".format(logStr,str(timeStartAct)))
timeEndAct=timeStartAct+timeSpanEff
xlim=(timeStartAct,timeEndAct)
xlims.append(xlim)
timeStartAct = timeEndAct - timeOverlap
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return xlims
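# Minimal usage sketch (illustration only): three 12-minute sections ending at a given
# time; an integer timeStart is interpreted as the number of desired sections.
# xlims = genTimespans(3, pd.Timestamp('2021-03-19 02:00:00'), timeSpan=pd.Timedelta('12 Minutes'))
# for t1, t2 in xlims:
#     print(t1, t2)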
def gen2Timespans(
timeStart # Anfang eines "Prozesses"
,timeEnd # Ende eines "Prozesses"
,timeSpan=pd.Timedelta('12 Minutes')
,timeStartPraefix=pd.Timedelta('0 Seconds')
,timeEndPostfix=pd.Timedelta('0 Seconds')
,roundStr=None # i.e. '5min': timeStart.round(roundStr) and timeEnd likewise
):
"""
generates 2 time ranges of equal length
1 around timeStart
1 around timeEnd
"""
#print("timeStartPraefix: {:s}".format(str(timeStartPraefix)))
#print("timeEndPostfix: {:s}".format(str(timeEndPostfix)))
xlims=[]
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
if roundStr != None:
timeStart=timeStart.round(roundStr)
timeEnd=timeEnd.round(roundStr)
xlims.append((timeStart-timeStartPraefix,timeStart-timeStartPraefix+timeSpan))
xlims.append((timeEnd+timeEndPostfix-timeSpan,timeEnd+timeEndPostfix))
return xlims
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return xlims
def fTotalTimeFromPairs(
x
,denominator=None # i.e. pd.Timedelta('1 minute') for totalTime in Minutes
,roundToInt=True # round and return as int if denominator is specified; otherwise td is rounded to 2 decimal places
):
tdTotal=pd.Timedelta('0 seconds')
for idx,tPairs in enumerate(x):
t1,t2=tPairs
if idx==0:
tLast=t2
else:
if t1 <= tLast:
print("Zeitpaar überlappt?!")
td=t2-t1
if td < pd.Timedelta('1 seconds'):
pass
#print("Zeitpaar < als 1 Sekunde?!")
tdTotal=tdTotal+td
if denominator==None:
return tdTotal
else:
td=tdTotal / denominator
if roundToInt:
td=int(round(td,0))
else:
td=round(td,2)
return td
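# Minimal usage sketch (illustration only): total duration of two time pairs in minutes.
# t0 = pd.Timestamp('2021-03-19 01:00:00')
# pairs = [(t0, t0 + pd.Timedelta('5 minutes')),
#          (t0 + pd.Timedelta('10 minutes'), t0 + pd.Timedelta('12 minutes'))]
# fTotalTimeFromPairs(pairs, denominator=pd.Timedelta('1 minute'))  # -> 7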
def findAllTimeIntervalls(
df
,fct=lambda row: True if row['col'] == 46 else False
,tdAllowed=None
):
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
tPairs=[]
try:
rows,cols=df.shape
if df.empty:
logger.debug("{:s}df ist leer".format(logStr))
elif rows == 1:
logger.debug("{:s}df hat nur 1 Zeile: {:s}".format(logStr,df.to_string()))
rowValue=fct(df.iloc[0])
if rowValue:
tPair=(df.index[0],df.index[0])
tPairs.append(tPair)
else:
pass
else:
tEin=None
# pairwise over all rows
for (i1, row1), (i2, row2) in pairwise(df.iterrows()):
row1Value=fct(row1)
row2Value=fct(row2)
# if row 1 is not x and row 2 is x: tEin=t2, condition "switches on"
if not row1Value and row2Value:
tEin=i2
# if row 1 is x and row 2 is not x: tAus=t2, condition "switches off"
elif row1Value and not row2Value:
if tEin != None:
# store the pair
tPair=(tEin,i1)
tPairs.append(tPair)
else:
pass # otherwise: the condition is now off and was never on
# the condition can only switch on in the first case
# if row 1 is x and row 2 is x
elif row1Value and row2Value:
if tEin != None:
pass
else:
# the range is already on in the first pair of values
tEin=i1
# last pair
if row1Value and row2Value:
if tEin != None:
tPair=(tEin,i2)
tPairs.append(tPair)
if tdAllowed != None:
tPairs=fCombineSubsequenttPairs(tPairs,tdAllowed)
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return tPairs
def findAllTimeIntervallsSeries(
s=pd.Series()
,fct=lambda x: True if x == 46 else False
,tdAllowed=None # if not None all subsequent TimePairs with TimeDifference <= tdAllowed are combined to one TimePair
,debugOutput=True
):
"""
# if fct:
# find all [time ranges] for which fct is true; these time ranges are returned; only pairs are returned; true solitaires are not lost but are returned as a pair (t,t)
# true solitaires are ONLY included if s contains just 1 value and that value is true; the 1 returned pair then contains the solitaire timestamp for both times
# tdAllowed can be specified
# afterwards the results are merged into time ranges that are no more than tdAllowed apart; these time ranges are then returned
# if fct is None:
# tdAllowed must be specified
# s is split into time ranges that are no more than the threshold tdAllowed apart; these time ranges are returned
# in general every returned time range has a start and an end (i.e. 2 times), even if the threshold has to be ignored once or several times for this
# because no time range contained in s should be lost
# if s contains just 1 value, 1 time pair with the same timestamp for both times is returned, provided the value is not None
# returns array of Time-Pair-Tuples
>>> import pandas as pd
>>> t=pd.Timestamp('2021-03-19 01:02:00')
>>> t1=t +pd.Timedelta('1 second')
>>> t2=t1+pd.Timedelta('1 second')
>>> t3=t2+pd.Timedelta('1 second')
>>> t4=t3+pd.Timedelta('1 second')
>>> t5=t4+pd.Timedelta('1 second')
>>> t6=t5+pd.Timedelta('1 second')
>>> t7=t6+pd.Timedelta('1 second')
>>> d = {t1: 46, t2: 0} # geht aus - kein Paar
>>> s1PaarGehtAus=pd.Series(data=d, index=[t1, t2])
>>> d = {t1: 0, t2: 46} # geht ein - kein Paar
>>> s1PaarGehtEin=pd.Series(data=d, index=[t1, t2])
>>> d = {t5: 46, t6: 0} # geht ausE - kein Paar
>>> s1PaarGehtAusE=pd.Series(data=d, index=[t5, t6])
>>> d = {t5: 0, t6: 46} # geht einE - kein Paar
>>> s1PaarGehtEinE=pd.Series(data=d, index=[t5, t6])
>>> d = {t1: 46, t2: 46} # geht aus - ein Paar
>>> s1PaarEin=pd.Series(data=d, index=[t1, t2])
>>> d = {t1: 0, t2: 0} # geht aus - kein Paar
>>> s1PaarAus=pd.Series(data=d, index=[t1, t2])
>>> s2PaarAus=pd.concat([s1PaarGehtAus,s1PaarGehtAusE])
>>> s2PaarEin=pd.concat([s1PaarGehtEin,s1PaarGehtEinE])
>>> s2PaarAusEin=pd.concat([s1PaarGehtAus,s1PaarGehtEinE])
>>> s2PaarEinAus=pd.concat([s1PaarGehtEin,s1PaarGehtAusE])
>>> # 1 Wert
>>> d = {t1: 46} # 1 Wert - Wahr
>>> s1WertWahr=pd.Series(data=d, index=[t1])
>>> d = {t1: 44} # 1 Wert - Falsch
>>> s1WertFalsch=pd.Series(data=d, index=[t1])
>>> d = {t1: None} # 1 Wert - None
>>> s1WertNone=pd.Series(data=d, index=[t1])
>>> ###
>>> # 46 0
>>> # 0 46
>>> # 0 0
>>> # 46 46 !1 Paar
>>> # 46 0 46 0
>>> # 46 0 0 46
>>> # 0 46 0 46
>>> # 0 46 46 0 !1 Paar
>>> ###
>>> findAllTimeIntervallsSeries(s1PaarGehtAus)
[]
>>> findAllTimeIntervallsSeries(s1PaarGehtEin)
[]
>>> findAllTimeIntervallsSeries(s1PaarEin)
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:02'))]
>>> findAllTimeIntervallsSeries(s1PaarAus)
[]
>>> findAllTimeIntervallsSeries(s2PaarAus)
[]
>>> findAllTimeIntervallsSeries(s2PaarEin)
[]
>>> findAllTimeIntervallsSeries(s2PaarAusEin)
[]
>>> findAllTimeIntervallsSeries(s2PaarEinAus)
[(Timestamp('2021-03-19 01:02:02'), Timestamp('2021-03-19 01:02:05'))]
>>> # 1 Wert
>>> findAllTimeIntervallsSeries(s1WertWahr)
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:01'))]
>>> findAllTimeIntervallsSeries(s1WertFalsch)
[]
>>> ###
>>> # 46 0 !1 Paar
>>> # 0 46 !1 Paar
>>> # 0 0 !1 Paar
>>> # 46 46 !1 Paar
>>> # 46 0 46 0 !2 Paare
>>> # 46 0 0 46 !2 Paare
>>> # 0 46 0 46 !2 Paare
>>> # 0 46 46 0 !2 Paare
>>> ###
>>> findAllTimeIntervallsSeries(s1PaarGehtAus,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:02'))]
>>> findAllTimeIntervallsSeries(s1PaarGehtEin,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:02'))]
>>> findAllTimeIntervallsSeries(s1PaarEin,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:02'))]
>>> findAllTimeIntervallsSeries(s1PaarAus,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:02'))]
>>> findAllTimeIntervallsSeries(s2PaarAus,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:02')), (Timestamp('2021-03-19 01:02:05'), Timestamp('2021-03-19 01:02:06'))]
>>> findAllTimeIntervallsSeries(s2PaarEin,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:02')), (Timestamp('2021-03-19 01:02:05'), Timestamp('2021-03-19 01:02:06'))]
>>> findAllTimeIntervallsSeries(s2PaarAusEin,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:02')), (Timestamp('2021-03-19 01:02:05'), Timestamp('2021-03-19 01:02:06'))]
>>> findAllTimeIntervallsSeries(s2PaarEinAus,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:02')), (Timestamp('2021-03-19 01:02:05'), Timestamp('2021-03-19 01:02:06'))]
>>> # 1 Wert
>>> findAllTimeIntervallsSeries(s1WertWahr,fct=None)
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:01'))]
>>> findAllTimeIntervallsSeries(s1WertNone,fct=None)
[]
>>> ###
>>> d = {t1: 0, t3: 0}
>>> s1PaarmZ=pd.Series(data=d, index=[t1, t3])
>>> findAllTimeIntervallsSeries(s1PaarmZ,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:03'))]
>>> d = {t4: 0, t5: 0}
>>> s1PaaroZ=pd.Series(data=d, index=[t4, t5])
>>> s2PaarmZoZ=pd.concat([s1PaarmZ,s1PaaroZ])
>>> findAllTimeIntervallsSeries(s2PaarmZoZ,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:05'))]
>>> ###
>>> d = {t1: 0, t2: 0}
>>> s1PaaroZ=pd.Series(data=d, index=[t1, t2])
>>> d = {t3: 0, t5: 0}
>>> s1PaarmZ=pd.Series(data=d, index=[t3, t5])
>>> s2PaaroZmZ=pd.concat([s1PaaroZ,s1PaarmZ])
>>> findAllTimeIntervallsSeries(s2PaaroZmZ,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:05'))]
>>> ###
>>> d = {t6: 0, t7: 0}
>>> s1PaaroZ2=pd.Series(data=d, index=[t6, t7])
>>> d = {t4: 0}
>>> solitaer=pd.Series(data=d, index=[t4])
>>> s5er=pd.concat([s1PaaroZ,solitaer,s1PaaroZ2])
>>> findAllTimeIntervallsSeries(s5er,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:02')), (Timestamp('2021-03-19 01:02:04'), Timestamp('2021-03-19 01:02:07'))]
>>> s3er=pd.concat([s1PaaroZ,solitaer])
>>> findAllTimeIntervallsSeries(s3er,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:04'))]
>>> s3er=pd.concat([solitaer,s1PaaroZ2])
>>> findAllTimeIntervallsSeries(s3er,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:04'), Timestamp('2021-03-19 01:02:07'))]
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
#logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
tPairs=[]
try:
if s.empty:
logger.debug("{:s}Series {!s:s} ist leer".format(logStr,s.name))
elif s.size == 1:
logger.debug("{:s}Series {!s:s} hat nur 1 Element: {:s}".format(logStr,s.name,s.to_string()))
if fct != None:
# 1 Paar mit selben Zeiten wenn das 1 Element Wahr
sValue=fct(s.iloc[0])
if sValue:
tPair=(s.index[0],s.index[0])
tPairs.append(tPair)
else:
pass
else:
# 1 Paar mit selben Zeiten wenn das 1 Element nicht None
sValue=s.iloc[0]
if sValue != None:
tPair=(s.index[0],s.index[0])
tPairs.append(tPair)
else:
pass
else:
tEin=None
if fct != None:
# paarweise über alle Zeiten
for idx,((i1, s1), (i2, s2)) in enumerate(pairwise(s.iteritems())):
s1Value=fct(s1)
s2Value=fct(s2)
# wenn 1 nicht x und 2 x tEin=t2 "geht Ein"
if not s1Value and s2Value:
tEin=i2
if idx > 0: # Info
pass
else:
# beim ersten Paar "geht Ein"
pass
# wenn 1 x und 2 nicht x tAus=t2 "geht Aus"
elif s1Value and not s2Value:
if tEin != None:
if tEin<i1:
# Paar speichern
tPair=(tEin,i1)
tPairs.append(tPair)
else:
# singulaeres Ereignis
# Paar mit selben Zeiten
tPair=(tEin,i1)
tPairs.append(tPair)
pass
else: # geht Aus ohne Ein zu sein
if idx > 0: # Info
pass
else:
# im ersten Paar
pass
# wenn 1 x und 2 x
elif s1Value and s2Value:
if tEin != None:
pass
else:
# im ersten Wertepaar ist der Bereich Ein
tEin=i1
# handle the last pair
# still on at the end of the Series: store the pair
if s1Value and s2Value:
if tEin != None:
tPair=(tEin,i2)
tPairs.append(tPair)
# handle tdAllowed
if tdAllowed != None:
if debugOutput:
logger.debug("{:s}Series {!s:s}: Intervalle werden mit {!s:s} zusammengefasst ...".format(logStr,s.name,tdAllowed))
tPairsOld=tPairs.copy()
tPairs=fCombineSubsequenttPairs(tPairs,tdAllowed,debugOutput=debugOutput)
if debugOutput:
tPairsZusammengefasst=sorted(list(set(tPairsOld) - set(tPairs)))
if len(tPairsZusammengefasst)>0:
logger.debug("{:s}Series {!s:s}: Intervalle wurden wg. {!s:s} zusammengefasst. Nachfolgend die zusgefassten Intervalle: {!s:s}. Sowie die entsprechenden neuen: {!s:s}".format(
logStr
,s.name
,tdAllowed
,tPairsZusammengefasst
,sorted(list(set(tPairs) - set(tPairsOld)))
))
else:
# paarweise über alle Zeiten
# neues Paar beginnen
anzInPair=1 # Anzahl der Zeiten in aktueller Zeitspanne
for (i1, s1), (i2, s2) in pairwise(s.iteritems()):
td=i2-i1
if td > tdAllowed: # Zeit zwischen 2 Zeiten > als Schwelle: Zeitspanne ist abgeschlossen
if tEin==None:
# the first pair is already more than the threshold apart
# closing the time span is ignored, since otherwise the span would contain only 1 value
# the current time span starts at the 1st value and runs past the threshold
tEin=i1
anzInPair=2
else:
if anzInPair>=2:
# Zeitspanne abschließen
tPair=(tEin,i1)
tPairs.append(tPair)
# neue Zeitspanne beginnen
tEin=i2
anzInPair=1
else:
# closing the time span is ignored, since otherwise the span would contain only 1 value
anzInPair=2
else: # Zeitspanne zugelassen, weiter ...
if tEin==None:
tEin=i1
anzInPair=anzInPair+1
# letztes Zeitpaar behandeln
if anzInPair>=2:
tPair=(tEin,i2)
tPairs.append(tPair)
else:
# a final value would be left over, so the last time span is extended ...
tPair=tPairs[-1]
tPair=(tPair[0],i2)
tPairs[-1]=tPair
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
#logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return tPairs
def fCombineSubsequenttPairs(
tPairs
,tdAllowed=pd.Timedelta('1 second') # all subsequent TimePairs with TimeDifference <= tdAllowed are combined to one TimePair
,debugOutput=False
):
# returns tPairs
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
#logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
for idx,(tp1,tp2) in enumerate(pairwise(tPairs)):
t1Ende=tp1[1]
t2Start=tp2[0]
if t2Start-t1Ende <= tdAllowed:
if debugOutput:
logger.debug("{:s} t1Ende: {!s:s} t2Start: {!s:s} Gap: {!s:s}".format(logStr,t1Ende,t2Start,t2Start-t1Ende))
tPairs[idx]=(tp1[0],tp2[1]) # merge the following pair into the previous pair
tPairs.remove(tp2) # delete the following pair
tPairs=fCombineSubsequenttPairs(tPairs,tdAllowed) # recursion
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
#logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return tPairs
class RmError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
AlarmEvent = namedtuple('alarmEvent','tA,tE,ZHKNR,LDSResBaseType')
# --- Parameter und Funktionen LDS Reports
# ----------------------------------------
def pltMakeCategoricalColors(color,nOfSubColorsReq=3,reversedOrder=False):
"""
Returns an array of rgb colors derived from color.
Parameter:
color: a rgb color
nOfSubColorsReq: number of SubColors requested
Raises:
RmError
>>> import matplotlib
>>> color='red'
>>> c=list(matplotlib.colors.to_rgb(color))
>>> import Rm
>>> Rm.pltMakeCategoricalColors(c)
array([[1. , 0. , 0. ],
[1. , 0.375, 0.375],
[1. , 0.75 , 0.75 ]])
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
rgb=None
try:
chsv = matplotlib.colors.rgb_to_hsv(color[:3])
arhsv = np.tile(chsv,nOfSubColorsReq).reshape(nOfSubColorsReq,3)
arhsv[:,1] = np.linspace(chsv[1],0.25,nOfSubColorsReq)
arhsv[:,2] = np.linspace(chsv[2],1,nOfSubColorsReq)
rgb = matplotlib.colors.hsv_to_rgb(arhsv)
if reversedOrder:
rgb=list(reversed(rgb))
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return rgb
# colors for pressures
SrcColorp='green'
SrcColorsp=pltMakeCategoricalColors(list(matplotlib.colors.to_rgb(SrcColorp)),nOfSubColorsReq=4,reversedOrder=False)
# the first color is the original color
SnkColorp='blue'
SnkColorsp=pltMakeCategoricalColors(list(matplotlib.colors.to_rgb(SnkColorp)),nOfSubColorsReq=4,reversedOrder=True)
# the last color is the original color
# colors for flows
SrcColorQ='red'
SrcColorsQ=pltMakeCategoricalColors(list(matplotlib.colors.to_rgb(SrcColorQ)),nOfSubColorsReq=4,reversedOrder=False)
# the first color is the original color
SnkColorQ='orange'
SnkColorsQ=pltMakeCategoricalColors(list(matplotlib.colors.to_rgb(SnkColorQ)),nOfSubColorsReq=4,reversedOrder=True)
# the last color is the original color
lwBig=4.5
lwSmall=2.5
attrsDct={ 'p Src':{'color':SrcColorp,'lw':lwBig,'where':'post'}
,'p Snk':{'color':SnkColorp,'lw':lwSmall+1.,'where':'post'}
,'p Snk 2':{'color':'mediumorchid','where':'post'}
,'p Snk 3':{'color':'darkviolet','where':'post'}
,'p Snk 4':{'color':'plum','where':'post'}
,'Q Src':{'color':SrcColorQ,'lw':lwBig,'where':'post'}
,'Q Snk':{'color':SnkColorQ,'lw':lwSmall+1.,'where':'post'}
,'Q Snk 2':{'color':'indianred','where':'post'}
,'Q Snk 3':{'color':'coral','where':'post'}
,'Q Snk 4':{'color':'salmon','where':'post'}
,'Q Src RTTM':{'color':SrcColorQ,'lw':matplotlib.rcParams['lines.linewidth']+1.,'ls':'dotted','where':'post'}
,'Q Snk RTTM':{'color':SnkColorQ,'lw':matplotlib.rcParams['lines.linewidth'] ,'ls':'dotted','where':'post'}
,'Q Snk 2 RTTM':{'color':'indianred','ls':'dotted','where':'post'}
,'Q Snk 3 RTTM':{'color':'coral','ls':'dotted','where':'post'}
,'Q Snk 4 RTTM':{'color':'salmon','ls':'dotted','where':'post'}
,'p ISrc 1':{'color':SrcColorsp[-1],'ls':'dashdot','where':'post'}
,'p ISrc 2':{'color':SrcColorsp[-2],'ls':'dashdot','where':'post'}
,'p ISrc 3':{'color':SrcColorsp[-2],'ls':'dashdot','where':'post'} # ab hier selbe Farbe
,'p ISrc 4':{'color':SrcColorsp[-2],'ls':'dashdot','where':'post'}
,'p ISrc 5':{'color':SrcColorsp[-2],'ls':'dashdot','where':'post'}
,'p ISrc 6':{'color':SrcColorsp[-2],'ls':'dashdot','where':'post'}
,'p ISnk 1':{'color':SnkColorsp[0],'ls':'dashdot','where':'post'}
,'p ISnk 2':{'color':SnkColorsp[1],'ls':'dashdot','where':'post'}
,'p ISnk 3':{'color':SnkColorsp[1],'ls':'dashdot','where':'post'} # ab hier selbe Farbe
,'p ISnk 4':{'color':SnkColorsp[1],'ls':'dashdot','where':'post'}
,'p ISnk 5':{'color':SnkColorsp[1],'ls':'dashdot','where':'post'}
,'p ISnk 6':{'color':SnkColorsp[1],'ls':'dashdot','where':'post'}
,'Q xSrc 1':{'color':SrcColorsQ[-1],'ls':'dashdot','where':'post'}
,'Q xSrc 2':{'color':SrcColorsQ[-2],'ls':'dashdot','where':'post'}
,'Q xSrc 3':{'color':SrcColorsQ[-3],'ls':'dashdot','where':'post'}
,'Q xSnk 1':{'color':SnkColorsQ[0],'ls':'dashdot','where':'post'}
,'Q xSnk 2':{'color':SnkColorsQ[1],'ls':'dashdot','where':'post'}
,'Q xSnk 3':{'color':SnkColorsQ[2],'ls':'dashdot','where':'post'}
,'Q (DE) Me':{'color': 'indigo','ls': 'dashdot','where': 'post','lw':1.5}
,'Q (DE) Re':{'color': 'cyan','ls': 'dashdot','where': 'post','lw':3.5}
,'p (DE) SS Me':{'color': 'magenta','ls': 'dashdot','where': 'post'}
,'p (DE) DS Me':{'color': 'darkviolet','ls': 'dashdot','where': 'post'}
,'p (DE) SS Re':{'color': 'magenta','ls': 'dotted','where': 'post'}
,'p (DE) DS Re':{'color': 'darkviolet','ls': 'dotted','where': 'post'}
,'p OPC LDSErgV':{'color':'olive'
,'lw':lwSmall-.5
,'ms':matplotlib.rcParams['lines.markersize']
,'marker':'x'
,'mec':'olive'
,'mfc':'olive'
,'where':'post'}
,'p OPC Src':{'color':SrcColorp
,'lw':0.05+2
,'ms':matplotlib.rcParams['lines.markersize']/2
,'marker':'D'
,'mec':SrcColorp
,'mfc':SrcColorQ
,'where':'post'}
,'p OPC Snk':{'color':SnkColorp
,'lw':0.05+2
,'ms':matplotlib.rcParams['lines.markersize']/2
,'marker':'D'
,'mec':SnkColorp
,'mfc':SnkColorQ
,'where':'post'}
,'Q OPC Src':{'color':SrcColorQ
,'lw':0.05+2
,'ms':matplotlib.rcParams['lines.markersize']/2
,'marker':'D'
,'mec':SrcColorQ
,'mfc':SrcColorp
,'where':'post'}
,'Q OPC Snk':{'color':SnkColorQ
,'lw':0.05+2
,'ms':matplotlib.rcParams['lines.markersize']/2
,'marker':'D'
,'mec':SnkColorQ
,'mfc':SnkColorp
,'where':'post'}
}
attrsDctLDS={
'Seg_AL_S_Attrs':{'color':'blue','lw':3.,'where':'post'}
,'Druck_AL_S_Attrs':{'color':'blue','lw':3.,'ls':'dashed','where':'post'}
,'Seg_MZ_AV_Attrs':{'color':'orange','zorder':3,'where':'post'}
,'Druck_MZ_AV_Attrs':{'color':'orange','zorder':3,'ls':'dashed','where':'post'}
,'Seg_LR_AV_Attrs':{'color':'green','zorder':1,'where':'post'}
,'Druck_LR_AV_Attrs':{'color':'green','zorder':1,'ls':'dashed','where':'post'}
,'Seg_LP_AV_Attrs':{'color':'turquoise','zorder':0,'lw':1.50,'where':'post'}
,'Druck_LP_AV_Attrs':{'color':'turquoise','zorder':0,'lw':1.50,'ls':'dashed','where':'post'}
,'Seg_NG_AV_Attrs':{'color':'red','zorder':2,'where':'post'}
,'Druck_NG_AV_Attrs':{'color':'red','zorder':2,'ls':'dashed','where':'post'}
,'Seg_SB_S_Attrs':{'color':'black','alpha':.5,'where':'post'}
,'Druck_SB_S_Attrs':{'color':'black','ls':'dashed','alpha':.75,'where':'post','lw':1.0}
,'Seg_AC_AV_Attrs':{'color':'indigo','where':'post'}
,'Druck_AC_AV_Attrs':{'color':'indigo','ls':'dashed','where':'post'}
,'Seg_ACF_AV_Attrs':{'color':'blueviolet','where':'post','lw':1.0}
,'Druck_ACF_AV_Attrs':{'color':'blueviolet','ls':'dashed','where':'post','lw':1.0}
,'Seg_ACC_Limits_Attrs':{'color':'indigo','ls':linestyle_tuple[2][1]} # 'densely dotted'
,'Druck_ACC_Limits_Attrs':{'color':'indigo','ls':linestyle_tuple[8][1]} # 'densely dashdotted'
,'Seg_TIMER_AV_Attrs':{'color':'chartreuse','where':'post'}
,'Druck_TIMER_AV_Attrs':{'color':'chartreuse','ls':'dashed','where':'post'}
,'Seg_AM_AV_Attrs':{'color':'chocolate','where':'post'}
,'Druck_AM_AV_Attrs':{'color':'chocolate','ls':'dashed','where':'post'}
#
,'Seg_DPDT_REF_Attrs':{'color':'violet','ls':linestyle_tuple[2][1]} # 'densely dotted'
,'Druck_DPDT_REF_Attrs':{'color':'violet','ls':linestyle_tuple[8][1]} # 'densely dashdotted'
,'Seg_DPDT_AV_Attrs':{'color':'fuchsia','where':'post','lw':2.0}
,'Druck_DPDT_AV_Attrs':{'color':'fuchsia','ls':'dashed','where':'post','lw':2.0}
,'Seg_QM16_AV_Attrs':{'color':'sandybrown','ls':linestyle_tuple[6][1],'where':'post','lw':1.0} # 'loosely dashdotted'
,'Druck_QM16_AV_Attrs':{'color':'sandybrown','ls':linestyle_tuple[10][1],'where':'post','lw':1.0} # 'loosely dashdotdotted'
}
pSIDEvents=re.compile('(?P<Prae>IMDI\.)?Objects\.(?P<colRegExMiddle>3S_FBG_ESCHIEBER|FBG_ESCHIEBER{1})\.(3S_)?(?P<colRegExSchieberID>[a-z,A-Z,0-9,_]+)\.(?P<colRegExEventID>(In\.ZUST|In\.LAEUFT|In\.LAEUFT_NICHT|In\.STOER|Out\.AUF|Out\.HALT|Out\.ZU)$)')
# evaluated are: colRegExSchieberID (which valve is concerned), colRegExMiddle (command or state) and colRegExEventID (which command or state)
# the commands and states (the values of colRegExEventID) must be defined below in order to define the marker (of the command or state)
eventCCmds={ 'Out.AUF':0
,'Out.ZU':1
,'Out.HALT':2}
eventCStats={'In.LAEUFT':3
,'In.LAEUFT_NICHT':4
,'In.ZUST':5
,'Out.AUF':6
,'Out.ZU':7
,'Out.HALT':8
,'In.STOER':9}
valRegExMiddleCmds='3S_FBG_ESCHIEBER' # colRegExMiddle value for commands (==> eventCCmds)
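# Illustration only (the item ID below is made up): the named groups of pSIDEvents
# identify the valve (colRegExSchieberID), whether the column is a command or a state
# (colRegExMiddle) and the event itself (colRegExEventID), e.g.:
# m = pSIDEvents.search('Objects.3S_FBG_ESCHIEBER.3S_XYZ_01.In.ZUST')
# m.group('colRegExSchieberID'), m.group('colRegExMiddle'), m.group('colRegExEventID')
# # -> ('XYZ_01', '3S_FBG_ESCHIEBER', 'In.ZUST')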
LDSParameter=[
'ACC_SLOWTRANSIENT'
,'ACC_TRANSIENT'
,'DESIGNFLOW'
,'DT'
,'FILTERWINDOW'
#,'L_PERCENT'
,'L_PERCENT_STDY'
,'L_PERCENT_STRAN'
,'L_PERCENT_TRANS'
,'L_SHUTOFF'
,'L_SLOWTRANSIENT'
,'L_SLOWTRANSIENTQP'
,'L_STANDSTILL'
,'L_STANDSTILLQP'
,'L_TRANSIENT'
,'L_TRANSIENTQP'
,'L_TRANSIENTVBIGF'
,'L_TRANSIENTPDNTF'
,'MEAN'
,'NAME'
,'ORDER'
,'TIMER'
,'TTIMERTOALARM'
,'TIMERTOLISS'
,'TIMERTOLIST'
]
LDSParameterDataD={
'ACC_SLOWTRANSIENT':0.1
,'ACC_TRANSIENT':0.8
,'DESIGNFLOW':250.
,'DT':1
,'FILTERWINDOW':180
#,'L_PERCENT':1.6
,'L_PERCENT_STDY':1.6
,'L_PERCENT_STRAN':1.6
,'L_PERCENT_TRANS':1.6
,'L_SHUTOFF':2.
,'L_SLOWTRANSIENT':4.
,'L_SLOWTRANSIENTQP':4.
,'L_STANDSTILL':2.
,'L_STANDSTILLQP':2.
,'L_TRANSIENT':10.
,'L_TRANSIENTQP':10.
,'L_TRANSIENTVBIGF':3.
,'L_TRANSIENTPDNTF':1.5
,'MEAN':1
,'ORDER':1
,'TIMER':180
,'TTIMERTOALARM':45 # TIMER/4
,'TIMERTOLISS':180
,'TIMERTOLIST':180
,'NAME':''
}
def fSEGNameFromPV_2(Beschr):
# fSEGNameFromSWVTBeschr
# 2,3,4,5
if Beschr in ['',None]:
return None
m=re.search(Lx.pID,Beschr)
if m == None:
return Beschr
return m.group('C2')+'_'+m.group('C3')+'_'+m.group('C4')+'_'+m.group('C5')
def fSEGNameFromPV_3(PV):
# fSEGNameFromPV
# ...
m=re.search(Lx.pID,PV)
return m.group('C3')+'_'+m.group('C4')+'_'+m.group('C5')+m.group('C6')
def fSEGNameFromPV_3m(PV):
# fSEGNameFromPV
# ...
m=re.search(Lx.pID,PV)
#print("C4: {:s} C6: {:s}".format(m.group('C4'),m.group('C6')))
if m.group('C4')=='AAD' and m.group('C6')=='_OHN':
return m.group('C3')+'_'+m.group('C4')+'_'+m.group('C5')+'_OHV1'
elif m.group('C4')=='OHN' and m.group('C6')=='_NGD':
return m.group('C3')+'_'+'OHV2'+'_'+m.group('C5')+m.group('C6')
else:
return m.group('C3')+'_'+m.group('C4')+'_'+m.group('C5')+m.group('C6')
# derive a DIVPipelineName from a PV
def fDIVNameFromPV(PV):
m=re.search(Lx.pID,PV)
return m.group('C2')+'-'+m.group('C4')
# derive a DIVPipelineName from a SEGName
def fDIVNameFromSEGName(SEGName):
if pd.isnull(SEGName):
return None
# dfSegsNodesNDataDpkt['DIVPipelineName']=dfSegsNodesNDataDpkt['SEGName'].apply(lambda x: re.search('(\d+)_(\w+)_(\w+)_(\w+)',x).group(1)+'_'+re.search('(\d+)_(\w+)_(\w+)_(\w+)',x).group(3) )
m=re.search('(\d+)_(\w+)_(\w+)_(\w+)',SEGName)
if m == None:
return SEGName
return m.group(1)+'_'+m.group(3)
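# Illustration only: for a SEGName such as '6_AAD_41_OHV1' the pattern above keeps the
# first and third groups, i.e. fDIVNameFromSEGName('6_AAD_41_OHV1') returns '6_41'.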
#def getNamesFromOPCITEM_ID(dfSegsNodesNDataDpkt
# ,OPCITEM_ID):
# """
# Returns tuple (DIVPipelineName,SEGName) from OPCITEM_ID PH
# """
# df=dfSegsNodesNDataDpkt[dfSegsNodesNDataDpkt['OPCITEM_ID']==OPCITEM_ID]
# if not df.empty:
# return (df['DIVPipelineName'].iloc[0],df['SEGName'].iloc[0])
def fGetBaseIDFromResID(
ID='Objects.3S_XXX_DRUCK.3S_6_BNV_01_PTI_01.In.MW.value'
):
"""
Returns 'Objects.3S_XXX_DRUCK.3S_6_BNV_01_PTI_01.In.'
works in principle for SEG and pressure results: every result PV of a vector yields the base valid for all result PVs of that vector
i.e. the result PVs of a vector differ only at the end
see also fGetSEGBaseIDFromSEGName
"""
if pd.isnull(ID):
return None
m=re.search(Lx.pID,ID)
if m == None:
return None
try:
base=m.group('A')+'.'+m.group('B')\
+'.'+m.group('C1')\
+'_'+m.group('C2')\
+'_'+m.group('C3')\
+'_'+m.group('C4')\
+'_'+m.group('C5')\
+m.group('C6')
#print(m.groups())
#print(m.groupdict())
if 'C7' in m.groupdict().keys():
if m.group('C7') != None:
base=base+m.group('C7')
base=base+'.'+m.group('D')\
+'.'
#print(base)
except:
base=m.group(0)+' (Fehler in fGetBaseIDFromResID)'
return base
def fGetSEGBaseIDFromSEGName(
SEGName='6_AAD_41_OHV1'
):
"""
Returns 'Objects.3S_FBG_SEG_INFO.3S_L_'+SEGName+'.In.'
In some cases SEGName is manipulated ...
see also fGetBaseIDFromResID
"""
if SEGName == '6_AAD_41_OHV1':
x='6_AAD_41_OHN'
elif SEGName == '6_OHV2_41_NGD':
x='6_OHN_41_NGD'
else:
x=SEGName
return 'Objects.3S_FBG_SEG_INFO.3S_L_'+x+'.In.'
def getNamesFromSEGResIDBase(dfSegsNodesNDataDpkt
,SEGResIDBase):
"""
Returns tuple (DIVPipelineName,SEGName) from SEGResIDBase
"""
df=dfSegsNodesNDataDpkt[dfSegsNodesNDataDpkt['SEGResIDBase']==SEGResIDBase]
if not df.empty:
return (df['DIVPipelineName'].iloc[0],df['SEGName'].iloc[0])
def getNamesFromDruckResIDBase(dfSegsNodesNDataDpkt
,DruckResIDBase):
"""
Returns tuple (DIVPipelineName,SEGName,SEGResIDBase,SEGOnlyInLDSPara) from DruckResIDBase
"""
df=dfSegsNodesNDataDpkt[dfSegsNodesNDataDpkt['DruckResIDBase']==DruckResIDBase]
if not df.empty:
#return (df['DIVPipelineName'].iloc[0],df['SEGName'].iloc[0],df['SEGResIDBase'].iloc[0])
tupleLst=[]
for index,row in df.iterrows():
tupleItem=(row['DIVPipelineName'],row['SEGName'],row['SEGResIDBase'],row['SEGOnlyInLDSPara'])
tupleLst.append(tupleItem)
return tupleLst
else:
return []
def fGetErgIDsFromBaseID(
baseID='Objects.3S_FBG_SEG_INFO.3S_L_6_BUV_01_BUA.In.'
,dfODI=pd.DataFrame() # df with ODI parameterization data
,strSep=' '
,patternPat='^IMDI.' #
,pattern=True # only return ergIDs for which patternPat matches
):
"""
returns string
IDs from dfODI separated by strSep which contain baseID (and, if pattern is True, match patternPat)
baseID (and group(0) of patternPat if pattern is True) are removed from the IDs
"""
if baseID in [None,'']:
return None
df=dfODI[dfODI.index.str.contains(baseID)]
if df.empty:
return None
if pattern:
ergIDs=''.join([e.replace(baseID,'').replace(re.search(patternPat,e).group(0),'')+' ' for e in df.index if re.search(patternPat,e) != None])
else:
ergIDs=''.join([e.replace(baseID,'')+' ' for e in df.index if re.search(patternPat,e) == None])
return ergIDs
def dfSegsNodesNDataDpkt(
VersionsDir=r"C:\3s\Projekte\Projekt\04 - Versionen\Version82.3"
,Model=r"MDBDOC\FBG.mdb" # a Access Model
,am=None # a Access Model already processed
,SEGsDefPattern='(?P<SEG_Ki>\S+)~(?P<SEG_Kk>\S+)$' # RSLW description: yields the node names of the segment definition ()
,RIDefPattern='(?P<Prae>\S+)\.(?P<Post>RICHT.S)$' # SWVT description (RICHT-DP): yields, among other things, SEGName
,fSEGNameFromPV_2=fSEGNameFromPV_2 # function that derives, among other things, SEGName from the SWVT description (RICHT-DP)
,fGetBaseIDFromResID=fGetBaseIDFromResID # function that derives the stem of the node results from the OPCITEM-ID of a node's PH channel
,fGetSEGBaseIDFromSEGName=fGetSEGBaseIDFromSEGName # function that derives the stem of the segment results from SEGName
,LDSPara=r"App LDS\Modelle\WDFBG\B1\V0\BZ1\LDS_Para.xml"
,LDSParaPT=r"App LDS\SirOPC\AppLDS_DPDTParams.csv"
,ODI=r"App LDS\SirOPC\AppLDS_ODI.csv"
,LDSParameter=LDSParameter
,LDSParameterDataD=LDSParameterDataD
):
"""
all segments with path data (edge sequences), with edge and node data as well as parameterization data
returns df:
DIVPipelineName
SEGName
SEGNodes (Ki~Kk; key in LDSPara)
SEGOnlyInLDSPara
NODEsRef
NODEsRef_max
NODEsSEGLfdNr
NODEsSEGLfdNrType
NODEsName
OBJTYPE
ZKOR
Blockname
ATTRTYPE (PH)
CLIENT_ID
OPCITEM_ID
NAME (der DPKT-Gruppe)
DruckResIDBase
SEGResIDBase
SEGResIDs
SEGResIDsIMDI
DruckResIDs
DruckResIDsIMDI
NODEsSEGDruckErgLfdNr
# LDSPara
ACC_SLOWTRANSIENT
ACC_TRANSIENT
DESIGNFLOW
DT
FILTERWINDOW
L_PERCENT_STDY
L_PERCENT_STRAN
L_PERCENT_TRANS
L_SHUTOFF
L_SLOWTRANSIENT
L_SLOWTRANSIENTQP
L_STANDSTILL
L_STANDSTILLQP
L_TRANSIENT
L_TRANSIENTPDNTF
L_TRANSIENTQP
L_TRANSIENTVBIGF
MEAN
ORDER
TIMER
TIMERTOLISS
TIMERTOLIST
TTIMERTOALARM
# LDSParaPT
#ID
pMin
DT_Vorhaltemass
TTimer_PMin
Faktor_PMin
MaxL_PMin
pMinMlc
pMinMlcMinSEG
pMinMlcMaxSEG
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
dfSegsNodesNDataDpkt=pd.DataFrame()
try:
###### --- LDSPara
LDSParaFile=os.path.join(VersionsDir,LDSPara)
logger.info("{:s}###### {:10s}: {:s}: Lesen und prüfen ...".format(logStr,'LDSPara',LDSPara))
with open(LDSParaFile) as f:
xml = f.read()
xmlWellFormed='<root>'+xml+'</root>'
root=ET.fromstring(xmlWellFormed)
LDSParameterData={}
for key in LDSParameterDataD.keys():
LDSParameterData[key]=[]
logger.debug("{:s}LDSParameter: {!s:s}.".format(logStr,LDSParameter))
for idx,element in enumerate(root.iter(tag='LDSI')):
attribKeysMute=[]
for key,value in element.attrib.items():
if key not in LDSParameter:
logger.warning("{:s}{:s}: Parameter: {:s} undefiniert.".format(logStr,element.attrib['NAME'],key))
attribKeysMute.append(key)
keysIst=element.attrib.keys()
keysSoll=set(LDSParameter)
keysExplizitFehlend=keysSoll-keysIst
LDSIParaDct=element.attrib
for key in keysExplizitFehlend:
if key=='ORDER':
LDSIParaDct[key]=LDSParameterDataD[key]
logger.debug("{:s}{:25s}: explizit fehlender Parameter: {:20s} ({!s:s}).".format(logStr,element.attrib['NAME'],key,LDSIParaDct[key]))
elif key=='TTIMERTOALARM':
LDSIParaDct[key]=int(LDSIParaDct['TIMER'])/4
logger.debug("{:s}{:25s}: explizit fehlender Parameter: {:20s} ({!s:s}).".format(logStr,element.attrib['NAME'],key,LDSIParaDct[key]))
else:
LDSIParaDct[key]=LDSParameterDataD[key]
logger.debug("{:s}{:25s}: explizit fehlender Parameter: {:20s} ({!s:s}).".format(logStr,element.attrib['NAME'],key,LDSIParaDct[key]))
keyListToProcess=[key for key in LDSIParaDct.keys() if key not in attribKeysMute]
for key in keyListToProcess:
LDSParameterData[key].append(LDSIParaDct[key])
df=pd.DataFrame.from_dict(LDSParameterData)
df=df.set_index('NAME').sort_index()
df.index.rename('SEGMENT', inplace=True)
df=df[sorted(df.columns.to_list())]
df = df.apply(pd.to_numeric)
#logger.debug("{:s}df: {:s}".format(logStr,df.to_string()))
logger.debug("{:s}Parameter, die nicht auf Standardwerten sind:".format(logStr))
for index, row in df.iterrows():
for colName, colValue in zip(df.columns.to_list(),row):
if colValue != LDSParameterDataD[colName]:
logger.debug("Segment: {:30s}: Parameter: {:20s} Wert: {:10s} (Standard: {:s})".format(index,colName,str(colValue),str(LDSParameterDataD[colName])))
dfPara=df
# --- Einlesen Modell
        if am is None:
accFile=os.path.join(VersionsDir,Model)
logger.info("{:s}###### {:10s}: {:s}: Lesen und verarbeiten ...".format(logStr,'Modell',Model))
am=Am.Am(accFile=accFile)
V_BVZ_RSLW=am.dataFrames['V_BVZ_RSLW']
V_BVZ_SWVT=am.dataFrames['V_BVZ_SWVT']
V3_KNOT=am.dataFrames['V3_KNOT']
V3_VBEL=am.dataFrames['V3_VBEL']
V3_DPKT=am.dataFrames['V3_DPKT']
V3_RSLW_SWVT=am.dataFrames['V3_RSLW_SWVT']
# --- Segmente ermitteln
# --- per Modell
SEGsDefinesPerRICHT=V3_RSLW_SWVT[
(V3_RSLW_SWVT['BESCHREIBUNG'].str.match(SEGsDefPattern).isin([True])) # Muster Ki~Kk ...
& #!
(V3_RSLW_SWVT['BESCHREIBUNG_SWVT'].str.match(RIDefPattern).isin([True])) # Muster Förderrichtungs-PV ...
].copy(deep=True)
SEGsDefinesPerRICHT=SEGsDefinesPerRICHT[['BESCHREIBUNG','BESCHREIBUNG_SWVT']]
# --- nur per LDS Para
lSEGOnlyInLDSPara=[str(SEGNodes) for SEGNodes in dfPara.index if str(SEGNodes) not in SEGsDefinesPerRICHT['BESCHREIBUNG'].values]
for SEGNodes in lSEGOnlyInLDSPara:
logger.warning("{:s}LDSPara SEG {:s} ist nicht Modell-definiert!".format(logStr,SEGNodes))
# --- zusammenfassen
SEGsDefines=pd.concat([SEGsDefinesPerRICHT,pd.DataFrame(lSEGOnlyInLDSPara,columns=['BESCHREIBUNG'])])
# Knotennamen der SEGDef ergänzen
df=SEGsDefines['BESCHREIBUNG'].str.extract(SEGsDefPattern,expand=True)
dfCols=df.columns.to_list()
SEGsDefines=pd.concat([SEGsDefines,df],axis=1)
# ausduennen
SEGsDefines=SEGsDefines[dfCols+['BESCHREIBUNG_SWVT','BESCHREIBUNG']]
# sortieren
SEGsDefines=SEGsDefines.sort_values(by=['BESCHREIBUNG_SWVT','BESCHREIBUNG']).reset_index(drop=True)
# SEGName
SEGsDefines['BESCHREIBUNG_SWVT']=SEGsDefines.apply(lambda row: row['BESCHREIBUNG_SWVT'] if not pd.isnull(row['BESCHREIBUNG_SWVT']) else row['BESCHREIBUNG'] ,axis=1)
#print(SEGsDefines)
SEGsDefines['SEGName']=SEGsDefines['BESCHREIBUNG_SWVT'].apply(lambda x: fSEGNameFromPV_2(x))
# --- Segmentkantenzuege ermitteln
dfSegsNodeLst={} # nur zu Kontrollzwecken
dfSegsNode=[]
for index,row in SEGsDefines[~SEGsDefines[dfCols[-1]].isnull()].iterrows():
df=Xm.Xm.constructShortestPathFromNodeList(df=V3_VBEL.reset_index()
,sourceCol='NAME_i'
,targetCol='NAME_k'
,nl=[row[dfCols[0]],row[dfCols[-1]]]
,weight=None,query=None,fmask=None,filterNonQ0Rows=True)
s=pd.concat([pd.Series([row[dfCols[0]]]),df['nextNODE']])
s.name=row['SEGName']
dfSegsNodeLst[row['SEGName']]=s.reset_index(drop=True)
df2=pd.DataFrame(s.reset_index(drop=True)).rename(columns={s.name:'NODEs'})
df2['SEGName']=s.name
df2=df2[['SEGName','NODEs']]
sObj=pd.concat([pd.Series(['None']),df['OBJTYPE']])
sObj.name='OBJTYPE'
df3=pd.concat([df2,pd.DataFrame(sObj.reset_index(drop=True))],axis=1)
df4=df3.reset_index().rename(columns={'index':'NODEsLfdNr','NODEs':'NODEsName'})[['SEGName','NODEsLfdNr','NODEsName','OBJTYPE']]
df4['NODEsType']=df4.apply(lambda row: row['NODEsLfdNr'] if row['NODEsLfdNr'] < df4.index[-1] else -1, axis=1)
df4=df4[['SEGName','NODEsLfdNr','NODEsType','NODEsName','OBJTYPE']]
df4['SEGNodes']=row[dfCols[0]]+'~'+row[dfCols[-1]]
dfSegsNode.append(df4)
dfSegsNodes=pd.concat(dfSegsNode).reset_index(drop=True)
# ---
dfSegsNodes['SEGOnlyInLDSPara']=dfSegsNodes.apply(lambda row: True if row['SEGNodes'] in lSEGOnlyInLDSPara else False,axis=1)
dfSegsNodes['NODEsRef']=dfSegsNodes.sort_values(
by=['NODEsName','SEGOnlyInLDSPara','NODEsType','SEGName']
,ascending=[True,True,False,True]).groupby(['NODEsName']).cumcount() + 1
dfSegsNodes=pd.merge(dfSegsNodes,dfSegsNodes.groupby(['NODEsName']).max(),left_on='NODEsName',right_index=True,suffixes=('','_max'))
dfSegsNodes=dfSegsNodes[['SEGName','SEGNodes','SEGOnlyInLDSPara'
,'NODEsRef'
,'NODEsRef_max'
,'NODEsLfdNr','NODEsType','NODEsName','OBJTYPE']]
dfSegsNodes=dfSegsNodes.rename(columns={'NODEsLfdNr':'NODEsSEGLfdNr','NODEsType':'NODEsSEGLfdNrType'})
### # ---
### dfSegsNodes['SEGOnlyInLDSPara']=dfSegsNodes.apply(lambda row: True if row['SEGNodes'] in lSEGOnlyInLDSPara else False,axis=1)
dfSegsNodes=dfSegsNodes[['SEGName','SEGNodes','SEGOnlyInLDSPara'
,'NODEsRef'
,'NODEsRef_max'
,'NODEsSEGLfdNr','NODEsSEGLfdNrType','NODEsName','OBJTYPE']]
# --- Knotendaten ergaenzen
dfSegsNodesNData=pd.merge(dfSegsNodes,V3_KNOT, left_on='NODEsName',right_on='NAME',suffixes=('','KNOT'))
dfSegsNodesNData=dfSegsNodesNData.filter(items=dfSegsNodes.columns.to_list()+['ZKOR','NAME_CONT','NAME_VKNO','pk'])
dfSegsNodesNData=dfSegsNodesNData.rename(columns={'NAME_CONT':'Blockname','NAME_VKNO':'Bl.Kn. fuer Block'})
# --- Knotendatenpunktdaten ergänzen
V3_DPKT_KNOT=pd.merge(V3_DPKT,V3_KNOT,left_on='fkOBJTYPE',right_on='pk',suffixes=('','_KNOT'))
V3_DPKT_KNOT_PH=V3_DPKT_KNOT[V3_DPKT_KNOT['ATTRTYPE'].isin(['PH'])]
# Mehrfacheintraege sollte es nicht geben ...
# V3_DPKT_KNOT_PH[V3_DPKT_KNOT_PH.duplicated(subset=['fkOBJTYPE'])]
df=pd.merge(dfSegsNodesNData,V3_DPKT_KNOT_PH,left_on='pk',right_on='fkOBJTYPE',suffixes=('','_DPKT'),how='left')
cols=dfSegsNodesNData.columns.to_list()
cols.remove('pk')
df=df.filter(items=cols+['ATTRTYPE','CLIENT_ID','OPCITEM_ID','NAME'])
dfSegsNodesNDataDpkt=df
#dfSegsNodesNDataDpkt
# ---
colList=dfSegsNodesNDataDpkt.columns.to_list()
dfSegsNodesNDataDpkt['DIVPipelineName']=dfSegsNodesNDataDpkt['SEGName'].apply(lambda x: fDIVNameFromSEGName(x))
### dfSegsNodesNDataDpkt['DIVPipelineName']=dfSegsNodesNDataDpkt['SEGName'].apply(lambda x: re.search('(\d+)_(\w+)_(\w+)_(\w+)',x).group(1)+'_'+re.search('(\d+)_(\w+)_(\w+)_(\w+)',x).group(3) )
dfSegsNodesNDataDpkt=dfSegsNodesNDataDpkt.filter(items=['DIVPipelineName']+colList)
dfSegsNodesNDataDpkt=dfSegsNodesNDataDpkt.sort_values(by=['DIVPipelineName','SEGName','NODEsSEGLfdNr']).reset_index(drop=True)
dfSegsNodesNDataDpkt['DruckResIDBase']=dfSegsNodesNDataDpkt['OPCITEM_ID'].apply(lambda x: fGetBaseIDFromResID(x) )
dfSegsNodesNDataDpkt['SEGResIDBase']=dfSegsNodesNDataDpkt['SEGName'].apply(lambda x: fGetSEGBaseIDFromSEGName(x) )
###### --- ODI
ODIFile=os.path.join(VersionsDir,ODI)
logger.info("{:s}###### {:10s}: {:s}: Lesen und prüfen ...".format(logStr,'ODI',ODI))
dfODI=Lx.getDfFromODI(ODIFile)
dfSegsNodesNDataDpkt['SEGResIDs']=dfSegsNodesNDataDpkt['SEGResIDBase'].apply(lambda x: fGetErgIDsFromBaseID(baseID=x,dfODI=dfODI,pattern=False))
dfSegsNodesNDataDpkt['SEGResIDsIMDI']=dfSegsNodesNDataDpkt['SEGResIDBase'].apply(lambda x: fGetErgIDsFromBaseID(baseID=x,dfODI=dfODI,pattern=True))
dfSegsNodesNDataDpkt['DruckResIDs']=dfSegsNodesNDataDpkt['DruckResIDBase'].apply(lambda x: fGetErgIDsFromBaseID(baseID=x,dfODI=dfODI,pattern=False))
dfSegsNodesNDataDpkt['DruckResIDsIMDI']=dfSegsNodesNDataDpkt['DruckResIDBase'].apply(lambda x: fGetErgIDsFromBaseID(baseID=x,dfODI=dfODI,pattern=True))
# --- lfd. Nr. der Druckmessstelle im Segment ermitteln
df=dfSegsNodesNDataDpkt[dfSegsNodesNDataDpkt['DruckResIDBase'].notnull()].copy()
df['NODEsSEGDruckErgLfdNr']=df.groupby('SEGName').cumcount() + 1
df['NODEsSEGDruckErgLfdNr']=df['NODEsSEGDruckErgLfdNr'].astype(int)
cols=dfSegsNodesNDataDpkt.columns.to_list()
cols.append('NODEsSEGDruckErgLfdNr')
dfSegsNodesNDataDpkt=pd.merge(dfSegsNodesNDataDpkt
,df
,left_index=True
,right_index=True
,how='left'
,suffixes=('','_df')
).filter(items=cols)
dfSegsNodesNDataDpkt['NODEsSEGDruckErgLfdNr']=dfSegsNodesNDataDpkt['NODEsSEGDruckErgLfdNr'].astype(int,errors='ignore')
# LDSPara ergaenzen
logger.debug("{:s}dfSegsNodesNDataDpkt: shape vorher: {!s:s}".format(logStr,dfSegsNodesNDataDpkt.shape))
dfSegsNodesNDataDpkt=pd.merge(dfSegsNodesNDataDpkt,dfPara,left_on='SEGNodes',right_index=True,suffixes=('','_LDSPara'),how='left')
logger.debug("{:s}dfSegsNodesNDataDpkt: shape nachher: {!s:s}".format(logStr,dfSegsNodesNDataDpkt.shape))
#for SEGNodes in [str(SEGNodes) for SEGNodes in df.index if str(SEGNodes) not in dfSegsNodesNDataDpkt['SEGNodes'].values]:
# logger.warning("{:s}LDSPara SEG {:s} ist nicht Modell-definiert!".format(logStr,SEGNodes))
###### --- LDSParaPT
LDSParaPTFile=os.path.join(VersionsDir,LDSParaPT)
if os.path.exists(LDSParaPTFile):
logger.info("{:s}###### {:10s}: {:s}: Lesen und prüfen ...".format(logStr,'LDSParaPT',LDSParaPT))
dfDPDTParams=pd.read_csv(LDSParaPTFile,delimiter=';',error_bad_lines=False,warn_bad_lines=True)
dfMehrfach=dfDPDTParams.groupby(by='#ID').filter(lambda x: len(x) > 1)
rows,cols=dfMehrfach.shape
if rows > 0:
logger.warning("{:s}Mehrfachkonfigurationen:".format(logStr))
logger.warning("{:s}".format(dfMehrfach.to_string()))
dfDPDTParams=dfDPDTParams.groupby(by='#ID').first()
# LDSParaPT ergaenzen
logger.debug("{:s}dfSegsNodesNDataDpkt: shape vorher: {!s:s}".format(logStr,dfSegsNodesNDataDpkt.shape))
dfSegsNodesNDataDpkt=pd.merge(dfSegsNodesNDataDpkt,dfDPDTParams,left_on='CLIENT_ID',right_on='#ID',how='left')
logger.debug("{:s}dfSegsNodesNDataDpkt: shape nachher: {!s:s}".format(logStr,dfSegsNodesNDataDpkt.shape))
            dfOhne=dfSegsNodesNDataDpkt[(~pd.isnull(dfSegsNodesNDataDpkt['CLIENT_ID'])) & (dfSegsNodesNDataDpkt['CLIENT_ID'].str.len()>0) & (pd.isnull(dfSegsNodesNDataDpkt['pMin'])) ][['DIVPipelineName','SEGName','NODEsName','ZKOR','CLIENT_ID']].reset_index(drop=True)
rows,cols=dfOhne.shape
if rows > 0:
logger.debug("{:s}Druckmessstellen ohne Mindestdruck:".format(logStr))
logger.debug("{:s}".format(dfOhne.to_string()))
dfSegsNodesNDataDpkt['pMinMlc']=dfSegsNodesNDataDpkt.apply(lambda row: row['ZKOR']+row['pMin']*100000/(794.*9.81),axis=1)
g=dfSegsNodesNDataDpkt.groupby(by='SEGName')
df=g.pMinMlc.agg(pMinMlcMinSEG=np.min,pMinMlcMaxSEG=np.max)
# pMinMlcMinSEG, pMinMlcMaxSEG ergaenzen
logger.debug("{:s}dfSegsNodesNDataDpkt: shape vorher: {!s:s}".format(logStr,dfSegsNodesNDataDpkt.shape))
dfSegsNodesNDataDpkt=pd.merge(dfSegsNodesNDataDpkt,df,left_on='SEGName',right_index=True,how='left')
logger.debug("{:s}dfSegsNodesNDataDpkt: shape nachher: {!s:s}".format(logStr,dfSegsNodesNDataDpkt.shape))
# Segmente ohne Mindestdruecke ausgeben
df=dfSegsNodesNDataDpkt.groupby(['SEGName']).first()
df=df[pd.isnull(df['pMinMlcMinSEG'])][['DIVPipelineName','SEGNodes']]
rows,cols=df.shape
if rows > 0:
logger.debug("{:s}ganze Segmente ohne Mindestdruck:".format(logStr))
logger.debug("{:s}".format(df.to_string()))
# Mindestdruecke ausgeben
            df=dfSegsNodesNDataDpkt[(~pd.isnull(dfSegsNodesNDataDpkt['CLIENT_ID'])) & (dfSegsNodesNDataDpkt['CLIENT_ID'].str.len()>0) & (~pd.isnull(dfSegsNodesNDataDpkt['pMin'])) ][['DIVPipelineName','SEGName','NODEsName','ZKOR','CLIENT_ID','pMin']].reset_index(drop=True)
logger.debug("{:s}dfSegsNodesNDataDpkt: Mindestdrücke: {!s:s}".format(logStr,df.to_string()))
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return dfSegsNodesNDataDpkt
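# Usage sketch (comment only, not executed; the directory and file names are the defaults of the
# function above and will differ per project):
#   dfSND=dfSegsNodesNDataDpkt(
#        VersionsDir=r"C:\3s\Projekte\Projekt\04 - Versionen\Version82.3"
#       ,Model=r"MDBDOC\FBG.mdb"
#       )
#   # one row per (segment, node); segment-wide LDSPara values and - if LDSParaPT exists -
#   # pMin etc. are repeated on every node row of the segment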
def fResValidSeriesSTAT_S(x): # STAT_S
if pd.isnull(x)==False:
if x >=0:
return True
else:
return False
else:
return False
def fResValidSeriesSTAT_S601(x): # STAT_S
if pd.isnull(x)==False:
if x==601:
return True
else:
return False
else:
return False
def fResValidSeriesAL_S(x,value=20): # AL_S
if pd.isnull(x)==False:
if x==value:
return True
else:
return False
else:
return False
def fResValidSeriesAL_S10(x):
return fResValidSeriesAL_S(x,value=10)
def fResValidSeriesAL_S4(x):
return fResValidSeriesAL_S(x,value=4)
def fResValidSeriesAL_S3(x):
return fResValidSeriesAL_S(x,value=3)
ResChannelFunctions=[fResValidSeriesSTAT_S,fResValidSeriesAL_S,fResValidSeriesSTAT_S601]
ResChannelResultNames=['Zustaendig','Alarm','Stoerung']
ResChannelTypes=['STAT_S','AL_S','STAT_S']
# (fast) alle verfuegbaren Erg-Kanaele
ResChannelTypesAll=['AL_S','STAT_S','SB_S','MZ_AV','LR_AV','NG_AV','LP_AV','AC_AV','ACCST_AV','ACCTR_AV','ACF_AV','TIMER_AV','AM_AV','DNTD_AV','DNTP_AV','DPDT_AV'
,'DPDT_REF_AV'
,'DPDT_REF' # Workaround
,'QM_AV','ZHKNR_S']
baseColorsSchieber=[ # Schieberfarben
'g' # 1
,'b' # 2
,'m' # 3
,'r' # 4
,'c' # 5
# alle Basisfarben außer y gelb
,'tab:blue' # 6
,'tab:orange' # 7
,'tab:green' # 8
,'tab:red' # 9
,'tab:purple' # 10
,'tab:brown' # 11
,'tab:pink' # 12
,'gold' # 13
,'fuchsia' # 14
,'coral' # 15
]
markerDefSchieber=[ # valve (Schieber) marker symbols
'^' # 0 Auf
,'v' # 1 Zu
,'>' # 2 Halt
# ab hier Zustaende
,'4' # 3 Laeuft
,'3' # 4 Laeuft nicht
,'P' # 5 Zust
,'1' # 6 Auf
,'2' # 7 Zu
,'+' # 8 Halt
,'x' # 9 Stoer
]
# --- Reports LDS: Funktionen und Hilfsfunktionen
# -----------------------------------------------
def getLDSResVecDf(
ResIDBase='ID.' # i.e. for Segs Objects.3S_XYZ_SEG_INFO.3S_L_6_EL1_39_TUD.In. / i.e. for Drks Objects.3S_XYZ_DRUCK.3S_6_EL1_39_PTI_02_E.In.
,LDSResBaseType='SEG' # or Druck
,lx=None
,timeStart=None,timeEnd=None
,ResChannelTypes=ResChannelTypesAll
,timeShiftPair=None
):
"""
    returns a df: the specified LDSResChannels (AL_S, ...) for a ResIDBase
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
dfResVec=pd.DataFrame()
try:
# zu lesende IDs basierend auf ResIDBase bestimmen
ErgIDs=[ResIDBase+ext for ext in ResChannelTypes]
IMDIErgIDs=['IMDI.'+ID for ID in ErgIDs]
ErgIDsAll=[*ErgIDs,*IMDIErgIDs]
# Daten lesen von TC-H5s
dfFiltered=lx.getTCsFromH5s(timeStart=timeStart,timeEnd=timeEnd,LDSResOnly=True,LDSResColsSpecified=ErgIDsAll,LDSResTypeSpecified=LDSResBaseType,timeShiftPair=timeShiftPair)
# Spalten umbenennen
colDct={}
for col in dfFiltered.columns:
m=re.search(Lx.pID,col)
colDct[col]=m.group('E')
dfResVec=dfFiltered.rename(columns=colDct)
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return dfResVec
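# Usage sketch (comment only, not executed; h5File and the ResIDBase are assumptions):
#   lx=Lx.AppLog(h5File='a.h5')
#   dfSegReprVec=getLDSResVecDf(
#        ResIDBase='Objects.3S_FBG_SEG_INFO.3S_L_6_BUV_01_BUA.In.'
#       ,LDSResBaseType='SEG'
#       ,lx=lx
#       )
#   # the columns of the returned df are the channel postfixes (AL_S, STAT_S, SB_S, ...)
#   # of the requested ResIDBase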
def fGetResTimes(
    ResIDBases=[] # list of the base IDs of the result vectors
    ,df=pd.DataFrame() # TCsLDSRes...
    ,ResChannelTypes=ResChannelTypes # ['STAT_S','AL_S','STAT_S'] # list of the result vector postfixes
    ,ResChannelFunctions=ResChannelFunctions # [fResValidSeriesSTAT_S,fResValidSeriesAL_S,fResValidSeriesSTAT_S601] # list of the result vector validity functions
    ,ResChannelResultNames=ResChannelResultNames # ['Zustaendig','Alarm','Stoerung'] # list of the result key names
    ,tdAllowed=pd.Timedelta('1 second') # allowed gap between clearing and re-occurrence (the two time ranges adjacent to such a gap are counted as one)
):
"""
Return: dct
key: ResIDBase
value: dct:
key: ResChannelResultName
            value: list of time pairs (or an empty list)
"""
resTimesDct={}
for ResIDBase in ResIDBases:
tPairsDct={}
for idx,ext in enumerate(ResChannelTypes):
ID=ResIDBase+ext
if ext == 'AL_S':
debugOutput=True
else:
debugOutput=False
if ID in df:
#print("{:s} in Ergliste".format(ID))
tPairs=findAllTimeIntervallsSeries(
s=df[ID].dropna() #!
,fct=ResChannelFunctions[idx]
,tdAllowed=tdAllowed#pd.Timedelta('1 second')
,debugOutput=debugOutput
)
else:
#print("{:s} nicht in Ergliste".format(ID))
tPairs=[]
tPairsDct[ResChannelResultNames[idx]]=tPairs
resTimesDct[ResIDBase]=tPairsDct
return resTimesDct
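# Usage sketch (comment only, not executed; TCsLDSRes1 stands for a result-vector frame such as
# the one returned by getAlarmStatistikData below):
#   base='Objects.3S_FBG_SEG_INFO.3S_L_6_BUV_01_BUA.In.'
#   resTimesDct=fGetResTimes(ResIDBases=[base],df=TCsLDSRes1)
#   # resTimesDct[base]['Zustaendig'/'Alarm'/'Stoerung'] are lists of (tA,tE) pairs;
#   # with the default tdAllowed of 1 second, intervals separated by at most 1 second
#   # are merged into one interval by findAllTimeIntervallsSeries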
def getAlarmStatistikData(
h5File='a.h5'
,dfSegsNodesNDataDpkt=pd.DataFrame()
,timeShiftPair=None # z.B. (1,'H') bei Replay
):
"""
Returns TCsLDSRes1,TCsLDSRes2,dfCVDataOnly
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
TCsLDSRes1=pd.DataFrame()
TCsLDSRes2=pd.DataFrame()
try:
# "connect" to the App Logs
lx=Lx.AppLog(h5File=h5File)
if hasattr(lx, 'h5FileLDSRes'):
logger.error("{0:s}{1:s}".format(logStr,'In den TCs nur Res und nicht Res1 und Res2?!'))
raise RmError
# zu lesende Daten ermitteln
l=dfSegsNodesNDataDpkt['DruckResIDBase'].unique()
l = l[~pd.isnull(l)]
DruckErgIDs=[*[ID+'AL_S' for ID in l],*[ID+'STAT_S' for ID in l],*[ID+'SB_S' for ID in l],*[ID+'ZHKNR_S' for ID in l]]
#
l=dfSegsNodesNDataDpkt['SEGResIDBase'].unique()
l = l[~pd.isnull(l)]
SEGErgIDs=[*[ID+'AL_S' for ID in l],*[ID+'STAT_S' for ID in l],*[ID+'SB_S' for ID in l],*[ID+'ZHKNR_S' for ID in l]]
ErgIDs=[*DruckErgIDs,*SEGErgIDs]
# Daten lesen
TCsLDSRes1,TCsLDSRes2=lx.getTCsFromH5s(LDSResOnly=True,LDSResColsSpecified=ErgIDs,timeShiftPair=timeShiftPair)
        (period,freq)=timeShiftPair
        timeDeltaStr="{:d} {:s}".format(period,freq)
timeDelta=pd.Timedelta(timeDeltaStr)
dfCVDataOnly=lx.getCVDFromH5(timeDelta=timeDelta,returnDfCVDataOnly=True)
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return TCsLDSRes1,TCsLDSRes2,dfCVDataOnly
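# Usage sketch (comment only, not executed; dfSND stands for the df returned by dfSegsNodesNDataDpkt):
#   TCsLDSRes1,TCsLDSRes2,dfCVDataOnly=getAlarmStatistikData(
#        h5File='a.h5'
#       ,dfSegsNodesNDataDpkt=dfSND
#       ,timeShiftPair=(1,'H') # e.g. for replay data (see the signature comment above)
#       )
#   # note: the function unpacks timeShiftPair unconditionally, so despite the default of None
#   # a pair appears to be required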
def processAlarmStatistikData(
TCsLDSRes1=pd.DataFrame()
,TCsLDSRes2=pd.DataFrame()
,dfSegsNodesNDataDpkt=pd.DataFrame()
,tdAllowed=None # pd.Timedelta('1 second')
    # alarm clears ... alarm re-occurs: if the gap in between is <= tdAllowed, both ranges are counted as one alarm period
    # i.e. they are treated as one and the same alarm
    # None counts the alarms strictly separately
):
"""
Returns: SEGResDct,DruckResDct
ResDct:
key: baseID (i.e. Objects.3S_FBG_SEG_INFO.<KEY>.
value: dct
key: Zustaendig: value: Zeitbereiche, in denen der Ergebnisvektor zustaendig ist (Liste von Zeitstempelpaaren)
key: Alarm: value: Zeitbereiche, in denen der Ergebnisvektor in Alarm war (Liste von Zeitstempelpaaren)
key: Stoerung: value: Zeitbereiche, in denen der Ergebnisvektor in Stoerung war (Liste von Zeitstempelpaaren)
key: AL_S_SB_S: value: Liste mit Listen (den verschiedenen SB_S pro Alarm in der zeitlichen Reihenfolge ihres Auftretens) (Länge der Liste == Länge der Liste von Alarm)
key: <KEY>S: value: Liste mit Listen (den verschiedenen ZHKNR_S pro Alarm in der zeitlichen Reihenfolge ihres Auftretens) (Länge der Liste == Länge der Liste von Alarm)
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
# Zeiten SEGErgs mit zustaendig und Alarm ...
l=[baseID for baseID in dfSegsNodesNDataDpkt[~dfSegsNodesNDataDpkt['SEGOnlyInLDSPara']]['SEGResIDBase'].unique() if not pd.isnull(baseID)]
SEGResDct=fGetResTimes(ResIDBases=l,df=TCsLDSRes1,tdAllowed=tdAllowed)
logger.debug("{:s}SEGResDct: {!s:s}".format(logStr,SEGResDct))
# Zeiten DruckErgs mit zustaendig und Alarm ...
l=[baseID for baseID in dfSegsNodesNDataDpkt[~dfSegsNodesNDataDpkt['SEGOnlyInLDSPara']]['DruckResIDBase'].unique() if not pd.isnull(baseID)]
DruckResDct=fGetResTimes(ResIDBases=l,df=TCsLDSRes2,tdAllowed=tdAllowed)
logger.debug("{:s}DruckResDct: {!s:s}".format(logStr,DruckResDct))
# verschiedene Auspraegungen pro Alarmzeit ermitteln
for ResDct, ResSrc, LDSResBaseType in zip([SEGResDct, DruckResDct],[TCsLDSRes1,TCsLDSRes2],['SEG','Druck']):
for idxID,(ID,IDDct) in enumerate(ResDct.items()):
# IDDct: das zu erweiternde Dct
# Alarme
tPairs=IDDct['Alarm']
                for keyStr, colExt in zip(['AL_S_SB_S','AL_S_ZHKNR_S'],['SB_S','ZHKNR_S']):
lGes=[]
if tPairs != []:
for tPair in tPairs:
col=ID+colExt
lSingle=ResSrc.loc[tPair[0]:tPair[1],col]
lSingle=[int(x) for x in lSingle if pd.isnull(x)==False]
lSingle=[lSingle[0]]+[lSingle[i] for i in range(1,len(lSingle)) if lSingle[i]!=lSingle[i-1]]
lGes.append(lSingle)
IDDct[keyStr]=lGes
# das erweiterte Dct zuweisen
ResDct[ID]=IDDct
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return SEGResDct,DruckResDct
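# Usage sketch (comment only, not executed; dfSND as above):
#   SEGResDct,DruckResDct=processAlarmStatistikData(
#        TCsLDSRes1
#       ,TCsLDSRes2
#       ,dfSND
#       ,tdAllowed=pd.Timedelta('1 second') # None: count alarms strictly separately
#       )
#   # each dct value then carries the 'Zustaendig','Alarm','Stoerung' time-pair lists plus the
#   # per-alarm SB_S/ZHKNR_S progressions ('AL_S_SB_S','AL_S_ZHKNR_S')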
def addNrToAlarmStatistikData(
SEGResDct={}
,DruckResDct={}
,dfAlarmEreignisse=pd.DataFrame()
):
"""
Returns: SEGResDct,DruckResDct added with key AL_S_NR
ResDct:
key: baseID (i.e. Objects.3S_FBG_SEG_INFO.3S_L_6_BUV_01_SPV.In.
value: dct
key: Zustaendig: value: Zeitbereiche, in denen der Ergebnisvektor zustaendig ist (Liste von Zeitstempelpaaren)
key: Alarm: value: Zeitbereiche, in denen der Ergebnisvektor in Alarm war (Liste von Zeitstempelpaaren)
key: Stoerung: value: Zeitbereiche, in denen der Ergebnisvektor in Stoerung war (Liste von Zeitstempelpaaren)
key: AL_S_SB_S: value: Liste mit Listen (den verschiedenen SB_S pro Alarm in der zeitlichen Reihenfolge ihres Auftretens) (Länge der Liste == Länge der Liste von Alarm)
key: AL_S_ZHKNR_S: value: Liste mit Listen (den verschiedenen ZHKNR_S pro Alarm in der zeitlichen Reihenfolge ihres Auftretens) (Länge der Liste == Länge der Liste von Alarm)
# ergänzt:
key: AL_S_NR: value: Liste mit der Nr. (aus dfAlarmEreignisse) pro Alarm (Länge der Liste == Länge der Liste von Alarm)
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
#
for ResDct, LDSResBaseType in zip([SEGResDct, DruckResDct],['SEG','Druck']):
for idxID,(ID,IDDct) in enumerate(ResDct.items()):
# IDDct: das zu erweiternde Dct
# Alarme
tPairs=IDDct['Alarm']
lNr=[]
if tPairs != []:
ZHKNRnListen=IDDct['AL_S_ZHKNR_S']
for idxAlarm,tPair in enumerate(tPairs):
ZHKNRnListe=ZHKNRnListen[idxAlarm]
ZHKNR=ZHKNRnListe[0]
ae=AlarmEvent(tPair[0],tPair[1],ZHKNR,LDSResBaseType)
Nr=dfAlarmEreignisse[dfAlarmEreignisse['AlarmEvent']==ae]['Nr'].iloc[0]
lNr.append(Nr)
IDDct['AL_S_NR']=lNr
# das erweiterte Dct zuweisen
ResDct[ID]=IDDct
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return SEGResDct,DruckResDct
def processAlarmStatistikData2(
DruckResDct=pd.DataFrame()
,TCsLDSRes2=pd.DataFrame()
,dfSegsNodesNDataDpkt=pd.DataFrame()
):
"""
Druckergebnisaussagen auf Segmentergebnisaussagen geeignet verdichtet
Returns: SEGDruckResDct
ResDct:
key: baseID
value: dct
sortiert und direkt angrenzende oder gar ueberlappende Zeiten aus Druckergebnissen zusammenfasst
key: Zustaendig: value: Zeitbereiche, in denen ein Druckergebnisvektor zustaendig ist (Liste von Zeitstempelpaaren)
key: Alarm: value: Zeitbereiche, in denen ein Druckergebnisvektor in Alarm war (Liste von Zeitstempelpaaren)
key: Stoerung: value: Zeitbereiche, in denen ein Druckergebnisvektor in Stoerung war (Liste von Zeitstempelpaaren)
voneiander verschiedene Ausprägungen (sortiert) aus Druckergebnissen
key: AL_S_SB_S: Liste
key: AL_S_ZHKNR_S: Liste
key: AL_S_NR: Liste
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
# Druckergebnisaussagen auf Segmentergebnisaussagen geeignet verdichtet
SEGDruckResDct={}
# merken, ob eine ID bereits bei einem SEG gezählt wurde; die Alarme einer ID sollen nur bei einem SEG gezaehlt werden
IDBereitsGezaehlt={}
# über alle DruckErgs
for idx,(ID,tPairsDct) in enumerate(DruckResDct.items()):
# SEG ermitteln
# ein DruckErg kann zu mehreren SEGs gehoeren z.B. gehoert ein Verzweigungsknoten i.d.R. zu 3 versch. SEGs
tupleLst=getNamesFromDruckResIDBase(dfSegsNodesNDataDpkt,ID)
for idxTuple,(DIVPipelineName,SEGName,SEGResIDBase,SEGOnlyInLDSPara) in enumerate(tupleLst):
# wenn internes SEG
if SEGOnlyInLDSPara:
logger.debug("{:s}ID {:35s} wird bei SEGName {:s} nicht gezaehlt da dieses SEG intern.".format(logStr,ID,SEGName))
continue
# ID wurde bereits gezählt
if ID in IDBereitsGezaehlt.keys():
logger.debug("{:s}ID {:35s} wird bei SEGName {:s} nicht gezaehlt sondern wurde bereits bei SEGName {:s} gezaehlt.".format(logStr,ID,SEGName,IDBereitsGezaehlt[ID]))
continue
else:
# ID wurde noch nicht gezaehlt
IDBereitsGezaehlt[ID]=SEGName
#if idxTuple>0:
# logger.debug("{:s}ID {:35s} wird bei SEGName {:s} nicht gezaehlt sondern nur bei SEGName {:s}.".format(logStr,ID,SEGName,tupleLst[0][1]))
# continue
if len(tPairsDct['Alarm'])>0:
logger.debug("{:s}SEGName {:20s}: durch ID {:40s} mit Alarm. Nr des Verweises von ID auf ein Segment: {:d}".format(logStr,SEGName,ID, idxTuple+1))
if SEGResIDBase not in SEGDruckResDct.keys():
# auf dieses SEG wurde noch nie verwiesen
SEGDruckResDct[SEGResIDBase]=deepcopy(tPairsDct) # das Segment erhält die Ergebnisse des ersten Druckvektors der zum Segment gehört
else:
# ergaenzen
# Zeitlisten ergänzen
for idx2,ext in enumerate(ResChannelTypes):
tPairs=tPairsDct[ResChannelResultNames[idx2]]
for idx3,tPair in enumerate(tPairs):
if True: #tPair not in SEGDruckResDct[SEGResIDBase][ResChannelResultNames[idx2]]: # keine identischen Zeiten mehrfach zaehlen
# die Ueberlappung von Zeiten wird weiter unten behandelt
SEGDruckResDct[SEGResIDBase][ResChannelResultNames[idx2]].append(tPair)
# weitere Listen ergaenzen
for ext in ['AL_S_SB_S','AL_S_ZHKNR_S','AL_S_NR']:
SEGDruckResDct[SEGResIDBase][ext]=SEGDruckResDct[SEGResIDBase][ext]+tPairsDct[ext]
# Ergebnis: sortieren und dann direkt angrenzende oder gar ueberlappende Zeiten zusammenfassen
for idx,(ID,tPairsDct) in enumerate(SEGDruckResDct.items()):
for idx2,ext in enumerate(tPairsDct.keys()):
if ext in ['AL_S_SB_S','AL_S_ZHKNR_S','AL_S_NR']: # keine Zeiten
pass
else:
tPairs=tPairsDct[ResChannelResultNames[idx2]]
tPairs=sorted(tPairs,key=lambda tup: tup[0])
tPairs=fCombineSubsequenttPairs(tPairs)
SEGDruckResDct[ID][ResChannelResultNames[idx2]]=tPairs
# voneiander verschiedene Ausprägungen (sortiert)
for idx,(ID,tPairsDct) in enumerate(SEGDruckResDct.items()):
for idx2,ext in enumerate(tPairsDct.keys()):
v=tPairsDct[ext]
if ext in ['AL_S_SB_S','AL_S_ZHKNR_S']: # Liste von Listen
l=[*{*chain.from_iterable(v)}]
l=sorted(pd.unique(l))
SEGDruckResDct[ID][ext]=l
elif ext in ['AL_S_NR']: # Liste
l=sorted(pd.unique(v))
SEGDruckResDct[ID][ext]=l
else:
pass
logger.debug("{:s}SEGDruckResDct: {!s:s}".format(logStr,SEGDruckResDct))
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return SEGDruckResDct
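# Usage sketch (comment only, not executed): condenses the pressure results onto segments; note
# that each DruckResIDBase is counted for one (non-internal) segment only, see IDBereitsGezaehlt
# in the function above:
#   SEGDruckResDct=processAlarmStatistikData2(DruckResDct,TCsLDSRes2,dfSND)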
def buildAlarmDataframes(
TCsLDSRes1=pd.DataFrame()
,TCsLDSRes2=pd.DataFrame()
,dfSegsNodesNDataDpkt=pd.DataFrame()
,dfCVDataOnly=pd.DataFrame()
,SEGResDct={}
,DruckResDct={}
,replaceTup=('2021-','')
,NrBy=['LDSResBaseType','SEGName','Ort','tA','ZHKNR']
,NrAsc=[False]+4*[True]
):
"""
Returns dfAlarmStatistik,dfAlarmEreignisse,SEGDruckResDct
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
dfAlarmStatistik=pd.DataFrame()
dfAlarmEreignisse=pd.DataFrame()
try:
# Ereignisse
dfAlarmEreignisse=buildDfAlarmEreignisse(
SEGResDct=SEGResDct
,DruckResDct=DruckResDct
,TCsLDSRes1=TCsLDSRes1
,TCsLDSRes2=TCsLDSRes2
,dfCVDataOnly=dfCVDataOnly
,dfSegsNodesNDataDpkt=dfSegsNodesNDataDpkt
,replaceTup=replaceTup
,NrBy=NrBy
,NrAsc=NrAsc
)
# in dfAlarmEreignisse erzeugte Alarm-Nr. an Dct merken
SEGResDct,DruckResDct=addNrToAlarmStatistikData(
SEGResDct
,DruckResDct
,dfAlarmEreignisse
)
# BZKat der Alarme
def fGetAlarmKat(row):
"""
"""
# baseID des Alarms
baseID=row['OrteIDs'][0]
# dct des Alarms
if row['LDSResBaseType']=='SEG':
dct=SEGResDct[baseID]
else:
dct=DruckResDct[baseID]
# Nrn der baseID
Nrn=dct['AL_S_NR']
# idx dieses Alarms innerhalb der Alarme der baseID
idxAl=Nrn.index(row['Nr'])
# Zustaende dieses alarms
SB_S=dct['AL_S_SB_S'][idxAl]
kat=''
if 3 in SB_S:
kat='instationär'
else:
if 2 in SB_S:
kat = 'schw. instationär'
else:
if 1 in SB_S:
kat = 'stat. Fluss'
elif 4 in SB_S:
kat = 'stat. Ruhe'
return kat
dfAlarmEreignisse['BZKat']=dfAlarmEreignisse.apply(lambda row: fGetAlarmKat(row),axis=1)
# Segment-verdichtete Druckergebnisse
SEGDruckResDct=processAlarmStatistikData2(
DruckResDct
,TCsLDSRes2
,dfSegsNodesNDataDpkt
)
# Alarmstatistik bilden
dfAlarmStatistik=dfSegsNodesNDataDpkt[~dfSegsNodesNDataDpkt['SEGOnlyInLDSPara']]
dfAlarmStatistik=dfAlarmStatistik[['DIVPipelineName','SEGName','SEGNodes','SEGResIDBase']].drop_duplicates(keep='first').reset_index(drop=True)
dfAlarmStatistik['Nr']=dfAlarmStatistik.apply(lambda row: "{:2d}".format(int(row.name)),axis=1)
# SEG
dfAlarmStatistik['FörderZeiten']=dfAlarmStatistik['SEGResIDBase'].apply(lambda x: SEGResDct[x]['Zustaendig'])
dfAlarmStatistik['FörderZeitenAl']=dfAlarmStatistik['SEGResIDBase'].apply(lambda x: SEGResDct[x]['Alarm'])
dfAlarmStatistik['FörderZeitenSt']=dfAlarmStatistik['SEGResIDBase'].apply(lambda x: SEGResDct[x]['Stoerung'])
dfAlarmStatistik['FörderZeitenAlAnz']=dfAlarmStatistik['FörderZeitenAl'].apply(lambda x: len(x))
dfAlarmStatistik['FörderZeitenAlSbs']=dfAlarmStatistik['SEGResIDBase'].apply(lambda x: SEGResDct[x]['AL_S_SB_S'])
dfAlarmStatistik['FörderZeitenAlNrn']=dfAlarmStatistik['SEGResIDBase'].apply(lambda x: SEGResDct[x]['AL_S_NR'])
# Druck (SEG-verdichtet)
dfAlarmStatistik['RuheZeiten']=dfAlarmStatistik['SEGResIDBase'].apply(lambda x: SEGDruckResDct[x]['Zustaendig'] if x in SEGDruckResDct.keys() else [])
dfAlarmStatistik['RuheZeitenAl']=dfAlarmStatistik['SEGResIDBase'].apply(lambda x: SEGDruckResDct[x]['Alarm'] if x in SEGDruckResDct.keys() else [])
dfAlarmStatistik['RuheZeitenSt']=dfAlarmStatistik['SEGResIDBase'].apply(lambda x: SEGDruckResDct[x]['Stoerung'] if x in SEGDruckResDct.keys() else [])
dfAlarmStatistik['RuheZeitenAlSbs']=dfAlarmStatistik['SEGResIDBase'].apply(lambda x: SEGDruckResDct[x]['AL_S_SB_S'] if x in SEGDruckResDct.keys() else [])
dfAlarmStatistik['RuheZeitenAlNrn']=dfAlarmStatistik['SEGResIDBase'].apply(lambda x: SEGDruckResDct[x]['AL_S_NR'] if x in SEGDruckResDct.keys() else [])
#dfAlarmStatistik['RuheZeitenAlAnz']=dfAlarmStatistik['RuheZeitenAl'].apply(lambda x: len(x))
dfAlarmStatistik['RuheZeitenAlAnz']=dfAlarmStatistik['RuheZeitenAlNrn'].apply(lambda x: len(x))
# je 3 Zeiten bearbeitet
dfAlarmStatistik['FörderZeit']=dfAlarmStatistik['FörderZeiten'].apply(lambda x: fTotalTimeFromPairs(x,pd.Timedelta('1 minute'),False))
dfAlarmStatistik['RuheZeit']=dfAlarmStatistik['RuheZeiten'].apply(lambda x: fTotalTimeFromPairs(x,pd.Timedelta('1 minute'),False))
dfAlarmStatistik['FörderZeitAl']=dfAlarmStatistik['FörderZeitenAl'].apply(lambda x: fTotalTimeFromPairs(x,pd.Timedelta('1 minute'),False))
dfAlarmStatistik['RuheZeitAl']=dfAlarmStatistik['RuheZeitenAl'].apply(lambda x: fTotalTimeFromPairs(x,pd.Timedelta('1 minute'),False))
dfAlarmStatistik['FörderZeitSt']=dfAlarmStatistik['FörderZeitenSt'].apply(lambda x: fTotalTimeFromPairs(x,pd.Timedelta('1 minute'),False))
dfAlarmStatistik['RuheZeitSt']=dfAlarmStatistik['RuheZeitenSt'].apply(lambda x: fTotalTimeFromPairs(x,pd.Timedelta('1 minute'),False))
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return dfAlarmEreignisse, dfAlarmStatistik,SEGDruckResDct
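# Usage sketch (comment only, not executed): typical order of the statistics pipeline
# (dfSND stands for the df returned by dfSegsNodesNDataDpkt and is an assumption here):
#   TCsLDSRes1,TCsLDSRes2,dfCVDataOnly=getAlarmStatistikData(h5File='a.h5',dfSegsNodesNDataDpkt=dfSND,timeShiftPair=(1,'H'))
#   SEGResDct,DruckResDct=processAlarmStatistikData(TCsLDSRes1,TCsLDSRes2,dfSND)
#   dfAlarmEreignisse,dfAlarmStatistik,SEGDruckResDct=buildAlarmDataframes(
#        TCsLDSRes1=TCsLDSRes1,TCsLDSRes2=TCsLDSRes2
#       ,dfSegsNodesNDataDpkt=dfSND,dfCVDataOnly=dfCVDataOnly
#       ,SEGResDct=SEGResDct,DruckResDct=DruckResDct)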
def plotDfAlarmStatistik(
dfAlarmStatistik=pd.DataFrame()
):
"""
Returns the plt.table
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
df=dfAlarmStatistik[[
'Nr'
,'DIVPipelineName'
,'SEGName'
,'FörderZeit'
,'FörderZeitenAlAnz'
,'FörderZeitAl'
,'FörderZeitSt'
,'RuheZeit'
,'RuheZeitenAlAnz'
,'RuheZeitAl'
,'RuheZeitSt'
]].copy()
# diese Zeiten um (Störzeiten) annotieren
df['FörderZeit']=df.apply(lambda row: "{!s:s} ({!s:s})".format(row['FörderZeit'],row['FörderZeitSt']) if row['FörderZeitSt'] > 0. else row['FörderZeit'] ,axis=1)
df['RuheZeit']=df.apply(lambda row: "{!s:s} ({!s:s})".format(row['RuheZeit'],row['RuheZeitSt']) if row['RuheZeitSt'] > 0. else row['RuheZeit'],axis=1)
# LfdNr. annotieren
df['LfdNr']=df.apply(lambda row: "{:2d} - {:s}".format(int(row.Nr)+1,str(row.DIVPipelineName)),axis=1)
# Zeiten Alarm um Alarm-Nrn annotieren
def fAddZeitMitNrn(zeit,lAlNr):
if len(lAlNr) > 0:
if len(lAlNr) <= 3:
return "{!s:s} (Nrn.: {!s:s})".format(zeit,lAlNr)
else:
# mehr als 3 Alarme...
return "{!s:s} (Nrn.: {!s:s}, ...)".format(zeit,lAlNr[0])
else:
return zeit
df['FörderZeitAl']=dfAlarmStatistik.apply(lambda row: fAddZeitMitNrn(row['FörderZeitAl'],row['FörderZeitenAlNrn']),axis=1)
df['RuheZeitAl']=dfAlarmStatistik.apply(lambda row: fAddZeitMitNrn(row['RuheZeitAl'],row['RuheZeitenAlNrn']),axis=1)
df=df[[
'LfdNr'
,'SEGName'
,'FörderZeit'
,'FörderZeitenAlAnz'
,'FörderZeitAl'
,'RuheZeit'
,'RuheZeitenAlAnz'
,'RuheZeitAl'
]]
try:
t=plt.table(cellText=df.values, colLabels=df.columns, loc='center')
cols=df.columns.to_list()
colIdxLfdNr=cols.index('LfdNr')
colIdxFoerderZeit=cols.index('FörderZeit')
colIdxFoerderZeitenAlAnz=cols.index('FörderZeitenAlAnz')
colIdxFoerderZeitAl=cols.index('FörderZeitAl')
colIdxRuheZeit=cols.index('RuheZeit')
colIdxRuheZeitenAlAnz=cols.index('RuheZeitenAlAnz')
colIdxRuheZeitAl=cols.index('RuheZeitAl')
cells = t.properties()["celld"]
for cellTup,cellObj in cells.items():
cellObj.set_text_props(ha='left')
row,col=cellTup # row: 0 fuer Ueberschrift bei Ueberschrift; col mit 0
if row == 0:
if col in [colIdxRuheZeit,colIdxRuheZeitenAlAnz,colIdxRuheZeitAl]:
pass
cellObj.set_text_props(backgroundcolor='plum')
elif col in [colIdxFoerderZeit,colIdxFoerderZeitenAlAnz,colIdxFoerderZeitAl]:
pass
cellObj.set_text_props(backgroundcolor='lightsteelblue')
if col == colIdxLfdNr:
if row==0:
continue
if 'color' in dfAlarmStatistik.columns.to_list():
color=dfAlarmStatistik['color'].iloc[row-1]
cellObj.set_text_props(backgroundcolor=color)
if col == colIdxFoerderZeit:
if row==0:
continue
if dfAlarmStatistik.loc[row-1,'FörderZeit']==0:
pass
else:
if dfAlarmStatistik.loc[row-1,'FörderZeitSt']/ dfAlarmStatistik.loc[row-1,'FörderZeit']*100>1:
cellObj.set_text_props(backgroundcolor='goldenrod')
if col == colIdxFoerderZeitenAlAnz:
if row==0:
continue
if dfAlarmStatistik.loc[row-1,'FörderZeit']==0:
cellObj.set_text_props(backgroundcolor='lightgrey')
else: # hat Förderzeit
if df.loc[row-1,'FörderZeitenAlAnz']==0:
cellObj.set_text_props(backgroundcolor='springgreen')
else:
cellObj.set_text_props(ha='center')
cellObj.set_text_props(backgroundcolor='navajowhite') # palegoldenrod
#if df.loc[row-1,'FörderZeitAl']/ dfAlarmStatistik.loc[row-1,'FörderZeit']*100>1:
if dfAlarmStatistik.loc[row-1,'FörderZeitAl']/ dfAlarmStatistik.loc[row-1,'FörderZeit']*100>1:
cellObj.set_text_props(backgroundcolor='tomato')
if col == colIdxRuheZeit:
if row==0:
continue
if dfAlarmStatistik.loc[row-1,'RuheZeit']==0:
pass
else:
if dfAlarmStatistik.loc[row-1,'RuheZeitSt']/ dfAlarmStatistik.loc[row-1,'RuheZeit']*100>1:
cellObj.set_text_props(backgroundcolor='goldenrod')
if col == colIdxRuheZeitenAlAnz:
if row==0:
continue
if dfAlarmStatistik.loc[row-1,'RuheZeit']==0:
cellObj.set_text_props(backgroundcolor='lightgrey')
else: # hat Ruhezeit
if df.loc[row-1,'RuheZeitenAlAnz']==0:
cellObj.set_text_props(backgroundcolor='springgreen')
else:
pass
cellObj.set_text_props(ha='center')
cellObj.set_text_props(backgroundcolor='navajowhite') # # palegoldenrod
#if df.loc[row-1,'RuheZeitAl']/ dfAlarmStatistik.loc[row-1,'RuheZeit']*100>1:
if dfAlarmStatistik.loc[row-1,'RuheZeitAl']/ dfAlarmStatistik.loc[row-1,'RuheZeit']*100>1:
cellObj.set_text_props(backgroundcolor='tomato')
plt.axis('off')
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return t
def fOrteStripped(LDSResBaseType,OrteIDs):
"""
returns Orte stripped
"""
if LDSResBaseType == 'SEG': # 'Objects.3S_FBG_SEG_INFO.3S_L_6_MHV_02_FUD.In.']
orteStripped=[]
for OrtID in OrteIDs:
pass
m=re.search(Lx.pID,OrtID+'dummy')
ortStripped=m.group('C3')+'_'+m.group('C4')+'_'+m.group('C5')+'_'+m.group('C6')
orteStripped.append(ortStripped)
return orteStripped
elif LDSResBaseType == 'Druck': # Objects.3S_FBG_DRUCK.3S_6_BNV_01_PTI_01.In
orteStripped=[]
for OrtID in OrteIDs:
pass
m=re.search(Lx.pID,OrtID+'dummy')
ortStripped=m.group('C2')+'_'+m.group('C3')+'_'+m.group('C4')+'_'+m.group('C5')+m.group('C6')
orteStripped.append(ortStripped)
return orteStripped
else:
return None
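# Usage sketch (comment only, not executed; the exact group composition depends on Lx.pID,
# so the results shown are assumptions):
#   fOrteStripped('SEG',['Objects.3S_FBG_SEG_INFO.3S_L_6_MHV_02_FUD.In.'])
#   # -> short SEG location(s) built from the ID parts, e.g. something like ['6_MHV_02_FUD']
#   fOrteStripped('Druck',['Objects.3S_FBG_DRUCK.3S_6_BNV_01_PTI_01.In.'])
#   # -> short pressure location(s), e.g. something like ['6_BNV_01_PTI01']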
def fCVDTime(row,dfSEG,dfDruck,replaceTup=('2021-','')):
"""
in:
dfSEG/dfDruck: TCsLDSRes1/TCsLDSRes2
row: Zeile aus dfAlarmEreignisse
von row verwendet:
LDSResBaseType: SEG (dfSEG) oder nicht (dfDruck)
OrteIDs: ==> ID von ZHKNR_S in dfSEG/dfDruck
ZHKNR: ZHKNR
returns:
string: xZeitA - ZeitEx
ZeitA: erste Zeit in der ZHKNR_S in dfSEG/dfDruck den Wert von ZHKNR trägt
ZeitE: letzte Zeit in der ZHKNR_S in dfSEG/dfDruck den Wert von ZHKNR trägt
xZeitA, wenn ZeitA die erste Zeit in dfSEG/dfDruck ist mit einem von Null verschiedenen Wert
xZeitE, wenn ZeitE die letzte Zeit in dfSEG/dfDruck ist mit einem von Null verschiedenen Wert
in Zeit wurde replaceTup angewendet
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
#logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
Time=""
ID=row['OrteIDs'][0]+'ZHKNR_S'
ZHKNR=row['ZHKNR']
if row['LDSResBaseType']=='SEG':
df=dfSEG
else:
df=dfDruck
s=df[df[ID]==ZHKNR][ID] # eine Spalte; Zeilen in denen ZHKNR_S den Wert von ZHKNR trägt
tA=s.index[0] # 1. Zeit
tE=s.index[-1] # letzte Zeit
Time=" {!s:s} - {!s:s} ".format(tA,tE)
try:
if tA==df[ID].dropna().index[0]:
Time='x'+Time.lstrip()
except:
logger.debug("{0:s}Time: {1:s}: x-tA Annotation Fehler; keine Annotation".format(logStr,Time))
try:
if tE==df[ID].dropna().index[-1]:
Time=Time.rstrip()+'x'
except:
logger.debug("{0:s}Time: {1:s}: x-tE Annotation Fehler; keine Annotation".format(logStr,Time))
Time=Time.replace(replaceTup[0],replaceTup[1])
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
#logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return Time
def buildDfAlarmEreignisse(
SEGResDct={}
,DruckResDct={}
,TCsLDSRes1=pd.DataFrame()
,TCsLDSRes2=pd.DataFrame()
,dfCVDataOnly=pd.DataFrame()
,dfSegsNodesNDataDpkt=pd.DataFrame()
,replaceTup=('2021-','')
,NrBy=['LDSResBaseType','SEGName','Ort','tA','ZHKNR'] # Sortierspalten für die Nr. der Ereignisse
,NrAsc=[False]+4*[True] # aufsteigend j/n für die o.g. Sortierspalten
):
"""
Returns dfAlarmEreignisse:
Nr: lfd. Nr (gebildet gem. NrBy und NrAsc)
tA: Anfangszeit
tE: Endezeit
tD: Dauer des Alarms
ZHKNR: ZHKNR (die zeitlich 1., wenn der Alarm sich über mehrere ZHKNRn erstreckt)
tD_ZHKNR: Lebenszeit der ZHKNR; x-Annotationen am Anfang/Ende, wenn ZHK beginnt bei Res12-Anfang / andauert bei Res12-Ende; '-1', wenn Lebenszeit nicht ermittelt werden konnte
ZHKNRn: sortierte Liste der ZHKNRn des Alarms; eine davon ist ZHKNR; typischerweise die 1. der Liste
LDSResBaseType: SEG oder Druck
OrteIDs: OrteIDs des Alarms
Orte: Kurzform von OrteIDs des Alarms
Ort: der 1. Ort von Orte
SEGName: Segment zu dem der 1. Ort des Alarms gehört
DIVPipelineName:
Voralarm: ermittelter Vorlalarm des Alarms; -1, wenn kein Voralarm in Res12 gefunden werden konnte
Type: Typ des Kontrollraumns; z.B. p-p für vollständige Flussbilanzen; '', wenn kein Typ gefunden werden konnte
Name: Name des Bilanzraumes
NrSD: lfd. Nr Alarm BaseType
NrName: lfd. Nr Alarm Name
NrSEGName: lfd. Nr Alarm SEGName
AlarmEvent: AlarmEvent-Objekt
###BZKat: Betriebszustandskategorie des Alarms
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
dfAlarmEreignisse=pd.DataFrame()
try:
AlarmEvents=[] # Liste von AlarmEvent
AlarmEventsOrte={} # dct der Orte, die diesen (key) AlarmEvent melden
AlarmEventsZHKNRn={} # dct der ZHKNRn, die zu diesem (key) gehoeren
# über SEG- und Druck-Ergebnisvektoren
for ResDct, ResSrc, LDSResBaseType in zip([SEGResDct, DruckResDct],[TCsLDSRes1,TCsLDSRes2],['SEG','Druck']):
for ResIDBase,dct in ResDct.items():
AL_S=dct['Alarm']
if len(AL_S) > 0:
# eine Erg-ID weist Alarme [(tA,tE),...] auf
# korrespondiernede Liste der ZHKs: [(999,1111),...]
ZHKNRnListen=dct['AL_S_ZHKNR_S']
ID=ResIDBase+'ZHKNR_S' # fuer nachfolgende Ausgabe
# ueber alle Alarme der Erg-ID
for idx,AL_S_Timepair in enumerate(AL_S):
(t1,t2)=AL_S_Timepair # tA, tE
ZHKNR_S_Lst=ZHKNRnListen[idx] # Liste der ZHKs in dieser Zeit
if len(ZHKNR_S_Lst) != 1:
logger.warning(("{:s}ID:\n\t {:s}: Alarm {:d} der ID\n\t Zeit von {!s:s} bis {!s:s}:\n\t Anzahl verschiedener ZHKNRn !=1: {:d} {:s}:\n\t ZHKNR eines Alarms wechselt waehrend eines Alarms. Alarm wird identifiziert mit 1. ZHKNR.".format(logStr,ID
,idx
,t1
,t2
,len(ZHKNR_S_Lst)
,str(ZHKNR_S_Lst)
)))
# die erste wird verwendet
ZHKNR=int(ZHKNR_S_Lst[0])
# AlarmEvent erzeugen
alarmEvent=AlarmEvent(t1,t2,ZHKNR,LDSResBaseType)
if alarmEvent not in AlarmEvents:
# diesen Alarm gibt es noch nicht in der Ereignisliste ...
AlarmEvents.append(alarmEvent)
AlarmEventsOrte[alarmEvent]=[]
AlarmEventsZHKNRn[alarmEvent]=[]
else:
pass
# Ort ergaenzen (derselbe Alarm wird erst ab V83.5.3 nur an einem Ort - dem lexikalisch kleinsten des Bilanzraumes - ausgegeben; zuvor konnte derselbe Alarm an mehreren Orten auftreten)
AlarmEventsOrte[alarmEvent].append(ResIDBase)
# ZHKNR(n) ergaenzen (ein Alarm wird unter 1 ZHKNR geführt)
AlarmEventsZHKNRn[alarmEvent].append(ZHKNR_S_Lst)
# df erzeugen
dfAlarmEreignisse=pd.DataFrame.from_records(
[alarmEvent for alarmEvent in AlarmEvents],
columns=AlarmEvent._fields
)
# Liste der EventOrte erstellen, zuweisen
l=[]
for idx,alarmEvent in enumerate(AlarmEvents):
l.append(AlarmEventsOrte[alarmEvent])
dfAlarmEreignisse['OrteIDs']=l
# abgekuerzte Orte
dfAlarmEreignisse['Orte']=dfAlarmEreignisse.apply(lambda row: fOrteStripped(row.LDSResBaseType,row.OrteIDs),axis=1)
dfAlarmEreignisse['Ort']=dfAlarmEreignisse['Orte'].apply(lambda x: x[0])
# Liste der ZHKNRn erstellen, zuweisen
l=[]
for idx,alarmEvent in enumerate(AlarmEvents):
lOfZl=AlarmEventsZHKNRn[alarmEvent]
lOfZ=[*{*chain.from_iterable(lOfZl)}]
lOfZ=sorted(pd.unique(lOfZ))
l.append(lOfZ)
dfAlarmEreignisse['ZHKNRn']=l
# Segmentname eines Ereignisses
dfAlarmEreignisse['SEGName']=dfAlarmEreignisse.apply(lambda row:
dfSegsNodesNDataDpkt[dfSegsNodesNDataDpkt['SEGResIDBase']==row['OrteIDs'][0]]['SEGName'].iloc[0] if row['LDSResBaseType']=='SEG'
else [tuple for tuple in getNamesFromDruckResIDBase(dfSegsNodesNDataDpkt,row['OrteIDs'][0]) if not tuple[-1]][0][1],axis=1)
# DIVPipelineName eines Ereignisses
dfAlarmEreignisse['DIVPipelineName']=dfAlarmEreignisse.apply(lambda row:
dfSegsNodesNDataDpkt[dfSegsNodesNDataDpkt['SEGName']==row['SEGName']]['DIVPipelineName'].iloc[0]
,axis=1)
# Alarm: ---
#tA: Anfangszeit
#tE: Endezeit
#ZHKNR: ZHKNR (1. bei mehreren Alarmen)
#LDSResBaseType: SEG oder Druck
# Orte: ---
#OrteIDs: OrteIDs des Alarms
#Orte: Kurzform von OrteIDs des Alarms
#ZHKNRn:
#SEGName: Segmentname
#DIVPipelineName
## Nr.
dfAlarmEreignisse.sort_values(by=NrBy,ascending=NrAsc,inplace=True)
dfAlarmEreignisse['Nr']=dfAlarmEreignisse.index+1
#dfAlarmEreignisse['Nr']=dfAlarmEreignisse['Nr']+1
logger.debug("{0:s}{1:s}: {2:s}".format(logStr,'dfAlarmEreignisse',dfAlarmEreignisse.to_string()))
# Voralarm
VoralarmTypen=[]
for index, row in dfAlarmEreignisse.iterrows():
# zur Information bei Ausgaben
OrteIDs=row['OrteIDs']
OrtID=OrteIDs[0]
VoralarmTyp=None
try:
if row['LDSResBaseType']=='SEG':
VoralarmTyp=TCsLDSRes1.loc[:row['tA']-pd.Timedelta('1 second'),OrtID+'AL_S'].iloc[-1]
elif row['LDSResBaseType']=='Druck':
VoralarmTyp=TCsLDSRes2.loc[:row['tA']-pd.Timedelta('1 second'),OrtID+'AL_S'].iloc[-1]
except:
pass
if pd.isnull(VoralarmTyp): # == None: #?! - ggf. Nachfolger eines neutralen Bilanzraumwechsels
VoralarmTyp=-1
logger.warning("{:s}PV: {:40s} Alarm Nr. {:d} ZHKNR {:d}\n\t tA {!s:s}: kein (isnull) Vorlalarm gefunden?! (ggf. neutraler BRWechsel) - Voralarm gesetzt auf: {:d}".format(logStr
,row['OrteIDs'][0]
,int(row['Nr'])
,row['ZHKNR']
,row['tA'],int(VoralarmTyp)))
if int(VoralarmTyp)==0: # == 0: #?! - ggf. Nachfolger eines neutralen Bilanzraumwechsels
VoralarmTyp=0
logger.warning("{:s}PV: {:40s} Alarm Nr. {:d} ZHKNR {:d}\n\t tA {!s:s}: Vorlalarm 0?! (ggf. war Bilanz in Stoerung)".format(logStr
,row['OrteIDs'][0]
,int(row['Nr'])
,row['ZHKNR']
,row['tA']))
if int(VoralarmTyp) not in [-1,0,3,4,10]:
logger.warning("{:s}PV: {:s} Alarm Nr. {:d} {:d} tA {!s:s}: unbekannter Vorlalarm gefunden: {:d}".format(logStr,row['OrteIDs'][0],int(row['Nr']),row['ZHKNR'],row['tA'],int(VoralarmTyp)))
logger.debug("{:s}{:d} {!s:s} VoralarmTyp:{:d}".format(logStr,int(row['Nr']),row['tA'],int(VoralarmTyp)))
VoralarmTypen.append(VoralarmTyp)
dfAlarmEreignisse['Voralarm']=[int(x) for x in VoralarmTypen]
# Type (aus dfCVDataOnly) und Erzeugungszeit (aus dfCVDataOnly) und Name (aus dfCVDataOnly)
dfAlarmEreignisse['ZHKNR']=dfAlarmEreignisse['ZHKNR'].astype('int64')
dfAlarmEreignisse['ZHKNRStr']=dfAlarmEreignisse['ZHKNR'].astype('string')
dfCVDataOnly['ZHKNRStr']=dfCVDataOnly['ZHKNR'].astype('string')
# wg. aelteren App-Log Versionen in denen ZHKNR in dfCVDataOnly nicht ermittelt werden konnte
# Type,ScenTime,Name sind dann undefiniert
dfAlarmEreignisse=pd.merge(dfAlarmEreignisse,dfCVDataOnly,on='ZHKNRStr',suffixes=('','_CVD'),how='left').filter(items=dfAlarmEreignisse.columns.to_list()+['Type'
#,'ScenTime'
,'Name'])
dfAlarmEreignisse=dfAlarmEreignisse.drop(['ZHKNRStr'],axis=1)
dfAlarmEreignisse=dfAlarmEreignisse.fillna(value='')
# lfd. Nummern
dfAlarmEreignisse['NrSD']=dfAlarmEreignisse.groupby(['LDSResBaseType']).cumcount() + 1
dfAlarmEreignisse['NrName']=dfAlarmEreignisse.groupby(['Name']).cumcount() + 1
dfAlarmEreignisse['NrSEGName']=dfAlarmEreignisse.groupby(['SEGName']).cumcount() + 1
# Lebenszeit der ZHKNR
try:
dfAlarmEreignisse['tD_ZHKNR']=dfAlarmEreignisse.apply(lambda row: fCVDTime(row,TCsLDSRes1,TCsLDSRes2,replaceTup),axis=1)
except:
logger.debug("{:s}Spalte tD_ZHKNR (Lebenszeit einer ZHKNR) konnte nicht ermittelt werden. Vmtl. aeltere App-Log Version.".format(logStr))
dfAlarmEreignisse['tD_ZHKNR']='-1'
# Dauer des Alarms
dfAlarmEreignisse['tD']=dfAlarmEreignisse.apply(lambda row: row['tE']-row['tA'],axis=1)
dfAlarmEreignisse['tD']= dfAlarmEreignisse['tD'].apply(lambda x: "{!s:s}".format(x).replace('days','Tage').replace('0 Tage','').replace('Tage','T'))
# AlarmEvent = namedtuple('alarmEvent','tA,tE,ZHKNR,LDSResBaseType')
dfAlarmEreignisse=dfAlarmEreignisse[['Nr','tA', 'tE','tD','ZHKNR','tD_ZHKNR','ZHKNRn','LDSResBaseType'
,'OrteIDs', 'Orte', 'Ort', 'SEGName','DIVPipelineName'
,'Voralarm', 'Type', 'Name'
,'NrSD', 'NrName', 'NrSEGName'
]]
dfAlarmEreignisse['AlarmEvent']=dfAlarmEreignisse.apply(lambda row: AlarmEvent(row['tA'],row['tE'],row['ZHKNR'],row['LDSResBaseType']),axis=1)
# unklar, warum erforderlich
dfAlarmEreignisse['Nr']=dfAlarmEreignisse.index+1
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return dfAlarmEreignisse
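# Usage sketch (comment only, not executed): the alarm events are numbered according to NrBy/NrAsc;
# with the defaults ['LDSResBaseType','SEGName','Ort','tA','ZHKNR'] and [False]+4*[True], SEG alarms
# come first (LDSResBaseType descending), then alarms are ordered by segment, location and time:
#   dfAlarmEreignisse=buildDfAlarmEreignisse(SEGResDct=SEGResDct,DruckResDct=DruckResDct
#                                           ,TCsLDSRes1=TCsLDSRes1,TCsLDSRes2=TCsLDSRes2
#                                           ,dfCVDataOnly=dfCVDataOnly,dfSegsNodesNDataDpkt=dfSND)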
def fCVDName(Name
):
"""
"""
lName=len(Name)
if len(Name)==0:
Name='ZHKName vmtl. nicht in Log'
lNameMaxH=20
if lName > 2*lNameMaxH:
Name=Name[:lNameMaxH-2]+'....'+Name[lName-lNameMaxH+2:]
Name=Name.replace('°','|')
return Name
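# Usage sketch (comment only, not executed):
#   fCVDName('')         # -> 'ZHKName vmtl. nicht in Log'
#   fCVDName('A'*50)     # -> first 18 + '....' + last 18 characters (40 characters total)
#   fCVDName('90° Leck') # -> '90| Leck' ('°' is replaced for table rendering)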
def plotDfAlarmEreignisse(
dfAlarmEreignisse=pd.DataFrame()
,sortBy=[]
,replaceTup=('2021-','')
,replaceTuptD=('0 days','')
):
"""
Returns the plt.table
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
df=dfAlarmEreignisse[['Nr','LDSResBaseType','Voralarm','Type','NrSD','tA','tE','tD','ZHKNR','Name','Orte','tD_ZHKNR','NrName','NrSEGName','SEGName','BZKat']].copy()
df['tA']=df['tA'].apply(lambda x: str(x).replace(replaceTup[0],replaceTup[1]))
df['tE']=df['tE'].apply(lambda x: str(x).replace(replaceTup[0],replaceTup[1]))
###df['Anz']=df['Orte'].apply(lambda x: len(x))
##df['Orte']=df['Orte'].apply(lambda x: str(x).replace('[','').replace(']','').replace("'",""))
df['Orte']=df['Orte'].apply(lambda x: str(x[0]))
df['LDSResBaseType']=df.apply(lambda row: "{:s} {:s} - {:d}".format(row['LDSResBaseType'],row['Type'],row['Voralarm']),axis=1)
df=df[['Nr','LDSResBaseType','NrSD','tA','tE','tD','ZHKNR','Name','NrName','NrSEGName','SEGName','tD_ZHKNR','Orte','BZKat']]
df.rename(columns={'LDSResBaseType':'ResTyp - Voralarm'},inplace=True)
df.rename(columns={'tD_ZHKNR':'ZHKZeit','Name':'ZHKName'},inplace=True)
###df['ZHKName']=df['ZHKName'].apply(lambda x: fCVDName(x))
####df['ZHKName']=df['Orte'].apply(lambda x: x[0])
df['NrSEGName (SEGName)']=df.apply(lambda row: "{!s:2s} ({!s:s})".format(row['NrSEGName'],row['SEGName']),axis=1)
df=df[['Nr','ResTyp - Voralarm','NrSD','tA','tD','ZHKNR'
,'Orte' #'ZHKName'
,'BZKat'
,'NrName','NrSEGName (SEGName)','ZHKZeit']]
df.rename(columns={'Orte':'ID'},inplace=True)
df['tD']=df['tD'].apply(lambda x: str(x).replace(replaceTuptD[0],replaceTuptD[1]))
def fGetZHKNRStr(row,dfOrig):
"""
returns:
                ZHKNR string depending on the current row and dfOrig (annotates further ZHKNRn if the alarm has more than one)
"""
s=dfOrig[dfOrig['Nr']==row['Nr']].iloc[0]
if len(s.ZHKNRn)>1:
if len(s.ZHKNRn)==2:
return "{:d} ({!s:s})".format(row['ZHKNR'],s.ZHKNRn[1:])
else:
return "{:d} (+{:d})".format(row['ZHKNR'],len(s.ZHKNRn)-1)
else:
return "{:d}".format(row['ZHKNR'])
df['ZHKNR']=df.apply(lambda row: fGetZHKNRStr(row,dfAlarmEreignisse),axis=1)
if sortBy!=[]:
df=df.sort_values(by=sortBy)
t=plt.table(cellText=df.values, colLabels=df.columns
,colWidths=[.03,.1 # Nr ResTyp-Voralarm
,.04 # NrSD
,.08,.08 # tA tD
,.085 # ZHKNR
,.1125,.07 #.1125 # ID BZKat
,.04 # NrName
,.14 # NrSEGName (SEGName)
,.2125] # ZHKZeit
, cellLoc='left'
, loc='center')
t.auto_set_font_size(False)
t.set_fontsize(10)
cols=df.columns.to_list()
#colIdxOrte=cols.index('Orte')
#colIdxName=cols.index('ZHKName')
colIdxNrSD=cols.index('NrSD')
colIdxNrSEG=cols.index('NrSEGName (SEGName)')
# ResTyp - Voralarm
colIdxResTypVA=cols.index('ResTyp - Voralarm')
cells = t.properties()["celld"]
for cellTup,cellObj in cells.items():
cellObj.set_text_props(ha='left')
row,col=cellTup # row: 0 fuer Ueberschrift bei Ueberschrift; col mit 0
#if col == colIdxName:
# cellObj.set_text_props(ha='left')
if col == colIdxNrSD:
if row > 0:
if dfAlarmEreignisse.loc[row-1,'LDSResBaseType']=='SEG':
cellObj.set_text_props(backgroundcolor='lightsteelblue')
else:
cellObj.set_text_props(backgroundcolor='plum')
elif col == colIdxNrSEG:
if row==0:
continue
if 'color' in dfAlarmEreignisse.columns.to_list():
color=dfAlarmEreignisse['color'].iloc[row-1]
cellObj.set_text_props(backgroundcolor=color)
elif col == colIdxResTypVA and row > 0:
pass
if dfAlarmEreignisse.loc[row-1,'Voralarm'] in [10]:
cellObj.set_text_props(backgroundcolor='sandybrown')
elif dfAlarmEreignisse.loc[row-1,'Voralarm'] in [4]:
cellObj.set_text_props(backgroundcolor='pink')
elif dfAlarmEreignisse.loc[row-1,'Voralarm'] in [3]:
cellObj.set_text_props(backgroundcolor='lightcoral')
else:
pass
#cellObj.set_text_props(fontsize=16)
plt.axis('off')
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return t
def plotDfAlarmStatistikReportsSEGErgs(
h5File='a.h5'
,dfAlarmStatistik=pd.DataFrame()
,SEGResDct={}
,timeStart=None,timeEnd=None
,SEGErgsFile='SEGErgs.pdf'
,stopAtSEGNr=None
,dateFormat='%y.%m.%d: %H:%M:%S'
,byhour=[0,3,6,9,12,15,18,21]
,byminute=None
,bysecond=None
,timeFloorCeilStr=None #'1H' # Runden (1 Stunde)
,timeFloorCeilStrDetailPre='6T' # Runden (3 Minuten)
,timeFloorCeilStrDetailPost='3T'
,timeShiftPair=None
):
"""
Creates PDF for all SEGs with FörderZeitenAlAnz>0
1 Base Plot and Detail Plots for the Alarms
Creates corresponding Single-PNGs
Returns xlimsDct:
key: BaseID
value: list of Timepairs of the Detail Plots for the Alarms
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
lx=Lx.AppLog(h5File=h5File)
firstTime,lastTime,tdTotalGross,tdTotal,tdBetweenFilesTotal=lx.getTotalLogTime()
if timeStart==None:
if timeFloorCeilStr != None:
timeStart = firstTime.floor(freq=timeFloorCeilStr)
else:
timeStart = firstTime
if timeEnd==None:
if timeFloorCeilStr != None:
timeEnd = lastTime.ceil(freq=timeFloorCeilStr)
else:
timeEnd = lastTime
logger.debug("{0:s}timeStart (ohne timeShift): {1:s} timeEnd (ohne timeShift): {2:s}".format(logStr,str(timeStart),str(timeEnd)))
xlimsDct={}
pdf=PdfPages(SEGErgsFile)
(fileNameBase,ext)= os.path.splitext(SEGErgsFile)
if timeShiftPair != None:
            (period,freq)=timeShiftPair
            timeDeltaStr="{:d} {:s}".format(period,freq)
timeDelta=pd.Timedelta(timeDeltaStr)
else:
timeDelta=pd.Timedelta('0 Seconds')
idxSEGPlotted=0
for idx,(index,row) in enumerate(dfAlarmStatistik.iterrows()):
if stopAtSEGNr != None:
if idxSEGPlotted>=stopAtSEGNr:
break
titleStr="LfdNr {:2d} - {:s}: {:s}: {:s}".format(
int(row.Nr)+1
,str(row.DIVPipelineName)
# ,row['SEGNodes']
,row['SEGName']
,row['SEGResIDBase'])
if row['FörderZeitenAlAnz']==0: # and row['RuheZeitenAlAnz']==0:
logger.info("{:s}: FörderZeitenAlAnz: 0".format(titleStr))
continue # keine SEGs ohne Alarme drucken
# Erg lesen
ResIDBase=row['SEGResIDBase']
dfSegReprVec=getLDSResVecDf(ResIDBase=ResIDBase,LDSResBaseType='SEG',lx=lx,timeStart=timeStart,timeEnd=timeEnd,timeShiftPair=timeShiftPair)
ID='AL_S'
if ID not in dfSegReprVec.keys():
continue
idxSEGPlotted=idxSEGPlotted+1
xlimsDct[ResIDBase]=[]
logger.debug("{:s}ResIDBase: {:s} dfSegReprVec: Spalten: {!s:s}".format(logStr,ResIDBase,dfSegReprVec.columns.to_list()))
# Plot Basis ###########################################################
fig=plt.figure(figsize=DINA4q,dpi=dpiSize)
ax=fig.gca()
pltLDSErgVec(
ax
,dfSegReprVec=dfSegReprVec # Ergebnisvektor SEG; pass empty Df if Druck only
,dfDruckReprVec=pd.DataFrame() # Ergebnisvektor DRUCK; pass empty Df if Seg only
,xlim=(timeStart+timeDelta,timeEnd+timeDelta)
,dateFormat=dateFormat
,byhour=byhour
,byminute=byminute
,bysecond=bysecond
,plotLegend=True
)
backgroundcolor='white'
if row['FörderZeit']==0:
backgroundcolor='lightgrey'
else: # hat Förderzeit
if row['FörderZeitenAlAnz']==0:
backgroundcolor='springgreen'
else:
backgroundcolor='navajowhite'
if row['FörderZeitAl']/row['FörderZeit']*100>1:
backgroundcolor='tomato'
txt="SEG: {:s}: FörderZeit: {:8.2f} FörderZeitenAlAnz: {:d}".format(row['SEGNodes'],row['FörderZeit'],row['FörderZeitenAlAnz'])
if row['FörderZeitenAlAnz'] > 0:
if row['FörderZeitenAlAnz'] <= 3:
txtNr=" Nrn.: {!s:s}".format(row['FörderZeitenAlNrn'])
else:
txtNr=" Nrn.: {!s:s} u.w.".format(row['FörderZeitenAlNrn'][0])
txt=txt+txtNr
else:
txtNr=''
ax.text(.98, .1,txt,
horizontalalignment='right',
verticalalignment='center',
backgroundcolor=backgroundcolor,
transform=ax.transAxes)
if row['FörderZeitSt']>0:
backgroundcolor='white'
if row['FörderZeitSt']/row['FörderZeit']*100>1:
backgroundcolor='goldenrod'
txt="(SEG: FörderZeitSt: {:8.2f})".format(row['FörderZeitSt'])
ax.text(.98, .05,txt,
horizontalalignment='right',
verticalalignment='center',
backgroundcolor=backgroundcolor,
transform=ax.transAxes)
ax.set_title( titleStr,loc='left')
fig.tight_layout(pad=2.)
# PDF
pdf.savefig(fig)
# png
fileName="{:s} {:2d} - {:s} {:s} {:s}.png".format(fileNameBase
,int(row.Nr)+1
,str(row.DIVPipelineName)
,row['SEGName']
,txtNr.replace('Nrn.: ','Nrn ').replace(',','').replace('[','').replace(']','').replace('u.w.','u w'))
plt.savefig(fileName)
plt.show()
###plt.clf()
plt.close()
# Plot Alarme ###########################################################
dct=SEGResDct[row['SEGResIDBase']]
timeFirstAlarmStarts,dummy=dct['Alarm'][0]
dummy,timeLastAlarmEnds=dct['Alarm'][-1]
for idxAl,AlNr in enumerate(row['FörderZeitenAlNrn']):
timeAlarmStarts,timeAlarmEnds=dct['Alarm'][idxAl]
timeStartDetail = timeAlarmStarts.floor(freq=timeFloorCeilStrDetailPre)
timeEndDetail = timeAlarmEnds.ceil(freq=timeFloorCeilStrDetailPost)
# wenn AlarmRand - PlotRand < 3 Minuten: um 3 Minuten erweitern
if timeAlarmStarts-timeStartDetail<pd.Timedelta('3 Minutes'):
timeStartDetail=timeStartDetail-pd.Timedelta('3 Minutes')
if timeEndDetail-timeAlarmEnds<pd.Timedelta('3 Minutes'):
timeEndDetail=timeEndDetail+pd.Timedelta('3 Minutes')
xlimsDct[ResIDBase].append((timeStartDetail,timeEndDetail))
fig=plt.figure(figsize=DINA4q,dpi=dpiSize)
ax=fig.gca()
pltLDSErgVec(
ax
,dfSegReprVec=dfSegReprVec # Ergebnisvektor SEG; pass empty Df if Druck only
,dfDruckReprVec=pd.DataFrame() # Ergebnisvektor DRUCK; pass empty Df if Seg only
,xlim=(timeStartDetail,timeEndDetail) # wenn die dct-Zeiten time-geshifted sind ist das korrekt
,dateFormat=dateFormat
,byhour=None#byhour
,byminute=list(np.arange(0,60))#byminute
,bysecond=None#bysecond
)
backgroundcolor='white'
if row['FörderZeit']==0:
backgroundcolor='lightgrey'
else: # hat Förderzeit
if row['FörderZeitenAlAnz']==0:
backgroundcolor='springgreen'
else:
backgroundcolor='navajowhite'
if row['FörderZeitAl']/row['FörderZeit']*100>1:
backgroundcolor='tomato'
txt="SEG: {:s}: FörderZeit: {:8.2f} FörderZeitenAlAnz: {:d}".format(row['SEGNodes'],row['FörderZeit'],row['FörderZeitenAlAnz'])
txtNr=" Nr.: {!s:s}".format(AlNr)
txt=txt+txtNr
ax.text(.98, .1,txt,
horizontalalignment='right',
verticalalignment='center',
backgroundcolor=backgroundcolor,
transform=ax.transAxes)
if row['FörderZeitSt']>0:
backgroundcolor='white'
if row['FörderZeitSt']/row['FörderZeit']*100>1:
backgroundcolor='goldenrod'
txt="(SEG: FörderZeitSt: {:8.2f})".format(row['FörderZeitSt'])
ax.text(.98, .05,txt,
horizontalalignment='right',
verticalalignment='center',
backgroundcolor=backgroundcolor,
transform=ax.transAxes)
ax.set_title( titleStr,loc='left')
#logger.info("{:s}".format(titleStr))
fig.tight_layout(pad=2.)
# PDF
pdf.savefig(fig)
# png
#(fileName,ext)= os.path.splitext(SEGErgsFile)
fileNameAlarm="{:s} {:s}.png".format(fileName.replace('.png','')
,txtNr.replace('Nr.: ','Nr ').replace(',','').replace('[','').replace(']',''))
plt.savefig(fileNameAlarm)
plt.show()
###plt.clf()
plt.close()
###plt.close()
pdf.close()
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return xlimsDct
def plotDfAlarmStatistikReportsDruckErgs(
h5File='a.h5'
,dfAlarmStatistik=pd.DataFrame()
,dfSegsNodesNDataDpkt=pd.DataFrame()
,DruckResDct={}
,timeStart=None,timeEnd=None
,DruckErgsFile='DruckErgs.pdf'
,stopAtSEGNr=None
,dateFormat='%y.%m.%d: %H:%M:%S'
,byhour=[0,3,6,9,12,15,18,21]
,byminute=None
,bysecond=None
,timeFloorCeilStr=None #'1H'
,timeFloorCeilStrDetailPre='6T' # rounding (6 minutes)
,timeFloorCeilStrDetailPost='3T'
,timeShiftPair=None
):
"""
Creates a PDF for all SEGs with RuheZeitenAlAnz > 0:
one base plot per Druck (pressure) result with alarms plus detail plots for those alarms.
Also creates the corresponding single PNGs.
Returns xlimsDct:
key: DruckResIDBase
value: list of time pairs (the xlims) of the detail plots for the alarms
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
lx=Lx.AppLog(h5File=h5File)
firstTime,lastTime,tdTotalGross,tdTotal,tdBetweenFilesTotal=lx.getTotalLogTime()
logger.debug("{0:s}firstTime (ohne TimeShift): {1:s} lastTime (ohne TimeShift): {2:s}".format(logStr,str(firstTime),str(lastTime)))
if timeStart==None:
if timeFloorCeilStr != None:
timeStart = firstTime.floor(freq=timeFloorCeilStr) # https://stackoverflow.com/questions/35339139/where-is-the-documentation-on-pandas-freq-tags
else:
timeStart = firstTime
if timeEnd==None:
if timeFloorCeilStr != None:
timeEnd = lastTime.ceil(freq=timeFloorCeilStr)
else:
timeEnd = lastTime
if timeShiftPair != None:
(period,freq)=timeShiftPair
timeDeltaStr="{:d} {:s}".format(period,freq)
timeDelta=pd.Timedelta(timeDeltaStr)
else:
timeDelta=pd.Timedelta('0 Seconds')
logger.debug("{0:s}timeStart abgerundet (ohne TimeShift): {1:s} timeEnd aufgerundet (ohne TimeShift): {2:s} TimeShift: {3:s}".format(logStr
,str(timeStart)
,str(timeEnd)
,str(timeDelta)))
xlimsDct={}
pdf=PdfPages(DruckErgsFile)
(fileNameBase,ext)= os.path.splitext(DruckErgsFile)
# über alle Segmente der Alarmstatistik (die DruckIDs sollen in der Reihenfolge der Alarmstatistik abgearbeitet werden)
idxSEGPlotted=0
for idx,(index,row) in enumerate(dfAlarmStatistik.iterrows()):
if row['RuheZeitenAlAnz']==0: # and row['RuheZeitenAlAnz']==0:
logger.info("LfdNr {:2d} - {:s}: {:s}: RuheZeitenAlAnz: 0".format(
int(row.Nr)+1
,str(row.DIVPipelineName)
# ,row['SEGNodes']
,row['SEGName']))
continue # keine SEGs ohne Alarme drucken
if stopAtSEGNr != None:
if idxSEGPlotted>=stopAtSEGNr:
break
idxSEGPlotted=idxSEGPlotted+1
# DruckIDs eines Segmentes
DruckIDs=sorted([ID for ID in dfSegsNodesNDataDpkt[dfSegsNodesNDataDpkt['SEGName']==row['SEGName']]['DruckResIDBase'].unique() if not pd.isnull(ID)])
for idxDruckID,DruckResIDBase in enumerate(DruckIDs):
dct=DruckResDct[DruckResIDBase]
if len(dct['Alarm'])==0:
# nur DruckIDs mit Alarmen plotten
continue
fig=plt.figure(figsize=DINA4q,dpi=dpiSize)
# Erg lesen
ResIDBase=DruckResIDBase
dfDruckReprVec=getLDSResVecDf(ResIDBase=ResIDBase,LDSResBaseType='Druck',lx=lx,timeStart=timeStart,timeEnd=timeEnd,timeShiftPair=timeShiftPair)
logger.debug("{:s}ResIDBase: {:s} dfDruckReprVec: Spalten: {!s:s}".format(logStr,ResIDBase,dfDruckReprVec.columns.to_list()))
logger.debug("{:s}ID: {:s}: timeStart (mit TimeShift): {:s} timeEnd (mit TimeShift): {:s}".format(logStr
,DruckResIDBase
,str(dfDruckReprVec.index[0])
,str(dfDruckReprVec.index[-1])
))
ID='AL_S'
if ID not in dfDruckReprVec.keys():
continue
xlimsDct[ResIDBase]=[]
ax=fig.gca()
pltLDSErgVec(
ax
,dfSegReprVec=pd.DataFrame() # Ergebnisvektor SEG; pass empty Df if Druck only
,dfDruckReprVec=dfDruckReprVec # Ergebnisvektor DRUCK; pass empty Df if Seg only
,xlim=(timeStart+timeDelta,timeEnd+timeDelta)
,dateFormat=dateFormat
,byhour=byhour
,byminute=byminute
,bysecond=bysecond
)
backgroundcolor='white'
if row['RuheZeit']==0:
backgroundcolor='lightgrey'
else: # hat Ruhezeit
if row['RuheZeitenAlAnz']==0:
backgroundcolor='springgreen'
else:
backgroundcolor='navajowhite'
if row['RuheZeitAl']/row['RuheZeit']*100>1:
backgroundcolor='tomato'
txt="SEG: {:s}: LfdNr {:2d}: RuheZeit: {:8.2f} RuheZeitenAlAnz: {:d}".format(
row['SEGNodes']
,int(row.Nr)+1
,row['RuheZeit']
,row['RuheZeitenAlAnz'])
ax.text(.98, .1,txt,
horizontalalignment='right',
verticalalignment='center',
backgroundcolor=backgroundcolor,
transform=ax.transAxes)
if row['RuheZeitSt']>0:
backgroundcolor='white'
if row['RuheZeitSt']/row['RuheZeit']*100>1:
backgroundcolor='goldenrod'
txt="(SEG: RuheZeitSt: {:8.2f})".format(row['RuheZeitSt'])
ax.text(.98, .05,txt,
horizontalalignment='right',
verticalalignment='center',
backgroundcolor=backgroundcolor,
transform=ax.transAxes)
RuheZeiten=DruckResDct[DruckResIDBase]['Zustaendig']
RuheZeit=fTotalTimeFromPairs(RuheZeiten,pd.Timedelta('1 minute'),False)
AlarmZeiten=DruckResDct[DruckResIDBase]['Alarm']
AlarmZeit=fTotalTimeFromPairs(AlarmZeiten,pd.Timedelta('1 minute'),False)
RuheZeitenSt=DruckResDct[DruckResIDBase]['Stoerung']
RuheZeitSt=fTotalTimeFromPairs(RuheZeitenSt,pd.Timedelta('1 minute'),False)
txt="Druck: RuheZeit: {:8.2f} (davon St: {:8.2f}) RuheZeitenAlAnz: {:3d}".format(
RuheZeit
,RuheZeitSt
,len(AlarmZeiten))
ax.text(.98, .15,txt,
horizontalalignment='right',
verticalalignment='center',
backgroundcolor='white',
transform=ax.transAxes)
titleStr="LfdNr {:2d} - {:s}: {:s}: {:s}".format(
int(row.Nr)+1
,str(row.DIVPipelineName)
# ,row['SEGNodes']
,row['SEGName']
,DruckResIDBase)
ax.set_title( titleStr,loc='left')
fig.tight_layout(pad=2.)
# png
fileName="{:s} {:2d} - {:s} {:s} {:s}.png".format(fileNameBase
,int(row.Nr)+1
,str(row.DIVPipelineName)
,row['SEGName']
,fOrteStripped('Druck',[DruckResIDBase])[0]
)
plt.savefig(fileName)
plt.show()
pdf.savefig(fig)
plt.close()
# Plot Alarme ###########################################################
dct=DruckResDct[DruckResIDBase]
timeFirstAlarmStarts,dummy=dct['Alarm'][0]
dummy,timeLastAlarmEnds=dct['Alarm'][-1]
for idxAl,AlNr in enumerate(row['RuheZeitenAlNrn']):
timeAlarmStarts,timeAlarmEnds=dct['Alarm'][idxAl]
timeStartDetail = timeAlarmStarts.floor(freq=timeFloorCeilStrDetailPre)
timeEndDetail = timeAlarmEnds.ceil(freq=timeFloorCeilStrDetailPost)
if timeAlarmStarts-timeStartDetail<pd.Timedelta('3 Minutes'):
timeStartDetail=timeStartDetail-pd.Timedelta('3 Minutes')
if timeEndDetail-timeAlarmEnds<pd.Timedelta('3 Minutes'):
timeEndDetail=timeEndDetail+pd.Timedelta('3 Minutes')
xlimsDct[ResIDBase].append((timeStartDetail,timeEndDetail))
fig=plt.figure(figsize=DINA4q,dpi=dpiSize)
ax=fig.gca()
pltLDSErgVec(
ax
,dfSegReprVec=pd.DataFrame() # Ergebnisvektor SEG; pass empty Df if Druck only
,dfDruckReprVec=dfDruckReprVec # Ergebnisvektor DRUCK; pass empty Df if Seg only
,xlim=(timeStartDetail,timeEndDetail) # wenn die dct-Zeiten time-geshifted sind ist das korrekt
,dateFormat=dateFormat
,byhour=None#byhour
,byminute=list(np.arange(0,60))#byminute
,bysecond=None#bysecond
)
backgroundcolor='white'
if row['RuheZeit']==0:
backgroundcolor='lightgrey'
else: # hat Ruhezeit
if row['RuheZeitenAlAnz']==0:
backgroundcolor='springgreen'
else:
backgroundcolor='navajowhite'
if row['RuheZeitAl']/row['RuheZeit']*100>1:
backgroundcolor='tomato'
txt="SEG: {:s}: LfdNr {:2d}: RuheZeit: {:8.2f} RuheZeitenAlAnz: {:d}".format(
row['SEGNodes']
,int(row.Nr)+1
,row['RuheZeit']
,row['RuheZeitenAlAnz'])
ax.text(.98, .1,txt,
horizontalalignment='right',
verticalalignment='center',
backgroundcolor=backgroundcolor,
transform=ax.transAxes)
if row['RuheZeitSt']>0:
backgroundcolor='white'
if row['RuheZeitSt']/row['RuheZeit']*100>1:
backgroundcolor='goldenrod'
txt="(SEG: RuheZeitSt: {:8.2f})".format(row['RuheZeitSt'])
ax.text(.98, .05,txt,
horizontalalignment='right',
verticalalignment='center',
backgroundcolor=backgroundcolor,
transform=ax.transAxes)
RuheZeiten=DruckResDct[DruckResIDBase]['Zustaendig']
RuheZeit=fTotalTimeFromPairs(RuheZeiten,pd.Timedelta('1 minute'),False)
AlarmZeiten=DruckResDct[DruckResIDBase]['Alarm']
AlarmZeit=fTotalTimeFromPairs(AlarmZeiten,pd.Timedelta('1 minute'),False)
RuheZeitenSt=DruckResDct[DruckResIDBase]['Stoerung']
RuheZeitSt=fTotalTimeFromPairs(RuheZeitenSt,pd.Timedelta('1 minute'),False)
txt="Druck: RuheZeit: {:8.2f} (davon St: {:8.2f}) RuheZeitenAlAnz: {:3d} Nr. {:4d}".format(
RuheZeit
,RuheZeitSt
,len(AlarmZeiten)
,AlNr)
ax.text(.98, .15,txt,
horizontalalignment='right',
verticalalignment='center',
backgroundcolor='white',
transform=ax.transAxes)
titleStr="LfdNr {:2d} - {:s}: {:s}: {:s}".format(
int(row.Nr)+1
,str(row.DIVPipelineName)
# ,row['SEGNodes']
,row['SEGName']
,DruckResIDBase)
ax.set_title( titleStr,loc='left')
fig.tight_layout(pad=2.)
# PDF
pdf.savefig(fig)
# png
fileNameAlarm="{:s} Nr {:d}.png".format(fileName.replace('.png',''),AlNr)
plt.savefig(fileNameAlarm)
plt.show()
plt.close()
#plt.close()
pdf.close()
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return xlimsDct
def plotTimespans(
xlims # list of sections
,orientation='landscape' # oben HYD unten LDS; 'portrait': # links HYD rechts LDS
,pad=3.5 # tight_layout() can take keyword arguments of pad, w_pad and h_pad. These control the extra padding around the figure border and between subplots. The pads are specified in fraction of fontsize.
,w_pad=0.5
,h_pad=0.5
# 'portrait' # links HYD rechts LDS
,rectSpalteLinks=[0, 0, 0.5, 1]
,rectSpalteRechts=[0.325, 0, 1, 1]
# 'landscape': # oben HYD unten LDS
,rectZeileOben=[0, .5, 1, 1]
,rectZeileUnten=[0, 0, 1, .5]
,dateFormat='%y.%m.%d: %H:%M:%S' # can be a list
,bysecond=None #[0,15,30,45] # can be a list
,byminute=None # can be a list
,byhour=None
,figTitle='' #!
,figSave=False #!
,sectionTitles=[] # list of section titles to be used
,sectionTexts=[] # list of section texts to be used
,sectionTitlesLDS=None # list of section titles to be used
,sectionTextsLDS=None # list of section texts to be used
,vLinesX=[] # plotted in each HYD section if X-time fits
,hLinesY=[] # plotted in each HYD section
,vAreasX=[] # for each HYD section a list of areas to highlight i.e. [[(timeStartAusschnittDruck,timeEndAusschnittDruck),...],...]
,vLinesXLDS=None # plotted in each LDS section if X-time fits
,vAreasXLDS=None # for each LDS section a list of areas to highlight i.e. [[(timeStartAusschnittDruck,timeEndAusschnittDruck),...],...]
,vLinesXColor='gray'
,vAreasXColor='whitesmoke'
,vLinesXColorLDS=None
,vAreasXColorLDS=None
,yTwinedAxesPosDeltaHPStart=-0.0125 #: (i.d.R. negativer) Abstand der 1. y-Achse von der Zeichenfläche
,yTwinedAxesPosDeltaHP=-0.0875 #: (i.d.R. negativer) zus. Abstand jeder weiteren y-Achse von der Zeichenfläche
,ySpanMin=0.9
,plotLegend=True # interpretiert fuer diese Funktion; Inverse gilt fuer pltLDSErgVec selbst
,plotLegend1stOnly=True
,legendLoc='best'
,legendFramealpha=.2
,legendFacecolor='white'
# --- Args Fct. HYD ---:
,TCsLDSIn=pd.DataFrame() # es werden nur die aDct-definierten geplottet
,TCsOPC=pd.DataFrame() # es werden nur die aDctOPC-definierten geplottet
# der Schluessel in den vorstehenden Dcts ist die ID (der Spaltenname) in den TCs
,TCsOPCScenTimeShift=pd.Timedelta('1 hour')
,TCsSIDEvents=pd.DataFrame() # es werden alle Schieberevents geplottet
,TCsSIDEventsTimeShift=pd.Timedelta('1 hour')
,TCsSIDEventsInXlimOnly=True # es werden nur die Spalten geplottet, die in xlim vorkommen und dort mindestens 1x nicht Null sind (sonst sind alle (zumindest in der Legende) dargestellt)
,TCsSIDEventsyOffset=.05 # die y-Werte werden ab dem 1. Schieber um je dfTCsSIDEventsyOffset erhöht (damit zeitgleiche Events besser sichtbar werden)
,QDct={}
,pDct={}
,QDctOPC={}
,pDctOPC={}
,IDPltKey='IDPlt' # Schluesselbezeichner in den vorstehenden 4 Dcts; Wert ist Referenz auf das folgende Layout-Dct und das folgende Fcts-Dct; Werte muessen eindeutig sein
,attrsDct=attrsDct
,fctsDct={}
,plotRTTM=True
# p y-Achse
,ylimp=ylimpD #wenn undef., dann min/max
,ylimpxlim=False #wenn Wahr und ylim undef., dann wird xlim beruecksichtigt bei min/max
,yticksp=None #[0,50,100] #wenn undef., dann aus ylimp
,ylabelp='[bar]'
# Q y-Achse
,ylimQ=ylimQD
,ylimQxlim=False
,yticksQ=None
,ylabelQ='[Nm³/h]'
# 3. Achse
,ylim3rd=ylim3rdD
,yticks3rd=yticks3rdD
,yGridSteps=yGridStepsD
# SchieberEvents
,pSIDEvents=pSIDEvents
# ausgewertet werden: colRegExSchieberID (um welchen Schieber geht es), colRegExMiddle (Befehl oder Zustand) und colRegExEventID (welcher Befehl bzw. Zustand)
# die Befehle bzw. Zustaende (die Auspraegungen von colRegExEventID) muessen nachf. def. sein um den Marker (des Befehls bzw. des Zustandes) zu definieren
,eventCCmds=eventCCmds
,eventCStats=eventCStats
,valRegExMiddleCmds=valRegExMiddleCmds
# es muessen soviele Farben definiert sein wie Schieber
,baseColorsDef=baseColorsSchieber
,markerDef=markerDefSchieber
# --- Args Fct. LDS ---:
,dfSegReprVec=pd.DataFrame()
,dfDruckReprVec=pd.DataFrame()
,ylimAL=ylimALD
,yticksAL=yticksALD
,ylimR=ylimRD #can be a list #None #(-10,10) #wenn undef., dann min/max dfSegReprVec
,ylimRxlim=False # can be a list #wenn Wahr und ylimR undef. (None), dann wird xlim beruecksichtigt bei min/max dfSegReprVec
,yticksR=yticksRD # can be a list of lists #[0,2,4,10,15,30,40] #wenn undef. (None), dann aus ylimR; matplotlib "vergrößert" mit dem Setzen von yTicks ein ebenfalls gesetztes ylim wenn die Ticks außerhalb des ylims liegen
# ditto for acceleration (AC)
,ylimAC=ylimACD
,ylimACxlim=False
,yticksAC=yticksACD
,attrsDctLDS=attrsDctLDS
,plotLPRate=True
,plotR2FillSeg=True
,plotR2FillDruck=True
,plotAC=True
,plotACCLimits=True
,highlightAreas=True
,Seg_Highlight_Color='cyan'
,Seg_Highlight_Alpha=.1
,Seg_Highlight_Fct=lambda row: True if row['STAT_S']==101 else False
,Seg_HighlightError_Color='peru'
,Seg_Highlight_Alpha_Error=.3
,Seg_HighlightError_Fct=lambda row: True if row['STAT_S']==601 else False
,Druck_Highlight_Color='cyan'
,Druck_Highlight_Alpha=.1
,Druck_Highlight_Fct=lambda row: True if row['STAT_S']==101 else False
,Druck_HighlightError_Color='peru'
,Druck_Highlight_Alpha_Error=.3
,Druck_HighlightError_Fct=lambda row: True if row['STAT_S']==601 else False
,plotTV=True
,plotTVTimerFct=None
,plotTVAmFct=lambda x: x*100
,plotTVAmLabel=plotTVAmLabelD
,ylimTV=ylimTVD
,yticksTV=yticksTVD
,plotDPDT=True
,plotSB_S=True
):
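# plotTimespans draws the HYD sections (pltLDSpQAndEvents) and the LDS sections
# (pltLDSErgVec) side by side on the current figure and returns both GridSpecs plus
# both result lists. A hypothetical call sketch (the time pairs and DataFrames are
# assumptions; DINA4q/dpiSize are the figure defaults used elsewhere in this module):
#
#   fig = plt.figure(figsize=DINA4q, dpi=dpiSize)
#   gsHYD, gsLDS, resHYD, resLDS = plotTimespans(
#       xlims=[(t1, t2), (t3, t4)]
#       ,orientation='landscape'
#       ,TCsLDSIn=dfTCsLDSIn
#       ,QDct=QDct, pDct=pDct
#       ,dfSegReprVec=dfSegReprVec)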
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
fig=plt.gcf()
if orientation=='landscape':
# oben HYD unten LDS
gsHYD = gridspec.GridSpec(1,len(xlims),figure=fig)
axLstHYD=[fig.add_subplot(gsHYD[idx]) for idx in np.arange(gsHYD.ncols)]
gsLDS = gridspec.GridSpec(1,len(xlims),figure=fig)
axLstLDS=[fig.add_subplot(gsLDS[idx]) for idx in np.arange(gsLDS.ncols)]
else:
# links HYD rechts LDS
gsHYD = gridspec.GridSpec(len(xlims),1,figure=fig)
axLstHYD=[fig.add_subplot(gsHYD[idx]) for idx in np.arange(gsHYD.nrows)]
gsLDS = gridspec.GridSpec(len(xlims),1,figure=fig)
axLstLDS=[fig.add_subplot(gsLDS[idx]) for idx in np.arange(gsLDS.nrows)]
pltLDSpQAndEventsResults=plotTimespansHYD(
axLst=axLstHYD
,xlims=xlims
,figTitle=figTitle # ''
,figSave=figSave # False
,sectionTitles=sectionTitles
,sectionTexts=sectionTexts
,vLinesX=vLinesX
,hLinesY=hLinesY
,vAreasX=vAreasX
,vLinesXColor=vLinesXColor
,vAreasXColor=vAreasXColor
,plotLegend=plotLegend
,plotLegend1stOnly=plotLegend1stOnly
# --- Args Fct. ---:
,dfTCsLDSIn=TCsLDSIn
,dfTCsOPC=TCsOPC
,dfTCsOPCScenTimeShift=TCsOPCScenTimeShift
,dfTCsSIDEvents=TCsSIDEvents
,dfTCsSIDEventsTimeShift=TCsSIDEventsTimeShift
,dfTCsSIDEventsInXlimOnly=TCsSIDEventsInXlimOnly
,QDct=QDct
,pDct=pDct
,QDctOPC=QDctOPC
,pDctOPC=pDctOPC
,attrsDct=attrsDct
,fctsDct=fctsDct
,dateFormat=dateFormat
,bysecond=bysecond
,byminute=byminute
,byhour=byhour
,plotRTTM=plotRTTM
,ylimp=ylimp
,ylabelp=ylabelp
,yticksp=yticksp
,ylimQ=ylimQ
,yticksQ=yticksQ
,yGridSteps=yGridSteps
,ylim3rd=ylim3rd
,yticks3rd=yticks3rd
)
if orientation=='landscape':
# oben HYD unten LDS
gsHYD.tight_layout(fig, pad=pad,h_pad=h_pad,w_pad=w_pad, rect=rectZeileOben)
else:
# links HYD rechts LDS
gsHYD.tight_layout(fig, pad=pad,h_pad=h_pad,w_pad=w_pad, rect=rectSpalteLinks)
if sectionTitlesLDS==None:
sectionTitlesLDS=sectionTitles
if sectionTextsLDS==None:
sectionTextsLDS=sectionTexts
if vLinesXLDS==None:
vLinesXLDS=vLinesX
if vAreasXLDS==None:
vAreasXLDS=vAreasX
if vLinesXColorLDS==None:
vLinesXColorLDS=vLinesXColor
if vAreasXColorLDS==None:
vAreasXColorLDS=vAreasXColor
pltLDSErgVecResults=plotTimespansLDS(
axLst=axLstLDS
,xlims=xlims
,figTitle=figTitle # ''
,figSave=figSave # False
,sectionTitles=sectionTitlesLDS
,sectionTexts=sectionTextsLDS
,vLinesX=vLinesXLDS
,vAreasX=vAreasXLDS
,vLinesXColor=vLinesXColorLDS
,vAreasXColor=vAreasXColorLDS
,plotLegend=plotLegend
,plotLegend1stOnly=plotLegend1stOnly
# --- Args Fct. ---:
,dfSegReprVec=dfSegReprVec
,dfDruckReprVec=dfDruckReprVec
,dateFormat=dateFormat
,bysecond=bysecond
,byminute=byminute
,byhour=byhour
,ylimR=ylimR
,ylimRxlim=ylimRxlim
,yticksR=yticksR
,plotLPRate=plotLPRate
,plotR2FillSeg=plotR2FillSeg
,plotR2FillDruck=plotR2FillDruck
,plotAC=plotAC
,ylimAC=ylimAC
,ylimACxlim=ylimACxlim
,yticksAC=yticksAC
,plotTV=plotTV
,plotTVTimerFct=plotTVTimerFct
,plotTVAmFct=plotTVAmFct
,plotTVAmLabel=plotTVAmLabel
,ylimTV=ylimTV
,yticksTV=yticksTV
,plotDPDT=plotDPDT
,plotSB_S=plotSB_S
)
# wenn weniger als 5 Achsen geplottet werden stimmt der erste Wert von rectSpalteRechts nicht
#(axes,lines)=pltLDSErgVecResults[0]
#
# numOfYAxes=len(axes)
#corFac=5-numOfYAxes
#rectSpalteRechtsCor=rectSpalteRechts #[0.325, 0, 1, 1]
#rectSpalteRechtsCor[0]=rectSpalteRechtsCor[0]+0.06*corFac
if orientation=='landscape':
# oben HYD unten LDS
gsLDS.tight_layout(fig, pad=pad,h_pad=h_pad,w_pad=w_pad, rect=rectZeileUnten)
else:
# links HYD rechts LDS
gsLDS.tight_layout(fig, pad=pad,h_pad=h_pad,w_pad=w_pad, rect=rectSpalteRechts)
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return gsHYD,gsLDS,pltLDSpQAndEventsResults,pltLDSErgVecResults
def plotTimespansHYD(
axLst # list of axes to be used
,xlims # list of sections
,figTitle='' # the title of the plot; will be extended by min. and max. time calculated over all sections; will be also the pdf and png fileName
,figSave=False #True # creates pdf and png
,sectionTitles=[] # list of section titles to be used
,sectionTexts=[] # list of section texts to be used
,vLinesX=[] # plotted in each section if X-time fits
,hLinesY=[] # plotted in each section
,vAreasX=[] # for each section a list of areas to highlight i.e. [[(timeStartAusschnittDruck,timeEndAusschnittDruck),...],...]
,vLinesXColor='gray'
,vAreasXColor='whitesmoke'
# --- Args Fct. ---:
,dfTCsLDSIn=pd.DataFrame() # es werden nur die aDct-definierten geplottet
,dfTCsOPC=pd.DataFrame() # es werden nur die aDctOPC-definierten geplottet
# der Schluessel in den vorstehenden Dcts ist die ID (der Spaltenname) in den TCs
,dfTCsOPCScenTimeShift=pd.Timedelta('1 hour')
,dfTCsSIDEvents=pd.DataFrame() # es werden alle Schieberevents geplottet
,dfTCsSIDEventsTimeShift=pd.Timedelta('1 hour')
,dfTCsSIDEventsInXlimOnly=True # es werden nur die Spalten geplottet, die in xlim vorkommen und dort mindestens 1x nicht Null sind (sonst sind alle (zumindest in der Legende) dargestellt)
,dfTCsSIDEventsyOffset=.05 # die y-Werte werden ab dem 1. Schieber um je dfTCsSIDEventsyOffset erhöht (damit zeitgleiche Events besser sichtbar werden)
,QDct={ # Example
'Objects.FBG_MESSW.6_KED_39_FT_01.In.MW.value':{'IDPlt':'Q Src','RTTM':'IMDI.Objects.FBG_MESSW.6_KED_39_FT_01.In.MW.value'}
,'Objects.FBG_MESSW.6_TUD_39_FT_01.In.MW.value':{'IDPlt':'Q Snk','RTTM':'IMDI.Objects.FBG_MESSW.6_TUD_39_FT_01.In.MW.value'}
}
,pDct={ # Example
'Objects.FBG_HPS_M.6_KED_39_PTI_01_E.In.MW.value':{'IDPlt':'p Src'}
,'Objects.FBG_HPS_M.6_TUD_39_PTI_01_E.In.MW.value':{'IDPlt':'p Snk'}
,'Objects.FBG_HPS_M.6_EL1_39_PTI_01_E.In.MW.value':{'IDPlt':'p ISrc 1'}
,'Objects.FBG_HPS_M.6_EL1_39_PTI_02_E.In.MW.value':{'IDPlt':'p ISnk 2'}
}
,QDctOPC={ # Example
'Objects.FBG_MESSW.6_EL1_39_FT_01.In.MW.value':{'IDPlt':'Q xSrc 1'}
}
,pDctOPC={}
,IDPltKey='IDPlt' # Schluesselbezeichner in den vorstehenden 4 Dcts; Wert ist Referenz auf das folgende Layout-Dct und das folgende Fcts-Dct; Werte muessen eindeutig sein
,attrsDct=attrsDct
,fctsDct={} # a Dct with Fcts
,dateFormat='%y.%m.%d: %H:%M:%S' # can be a list
,bysecond=None#[0,15,30,45] # can be a list
,byminute=None # can be a list
,byhour=None
,yTwinedAxesPosDeltaHPStart=-0.0125 #: (i.d.R. negativer) Abstand der 1. y-Achse von der Zeichenfläche
,yTwinedAxesPosDeltaHP=-0.0875 #: (i.d.R. negativer) zus. Abstand jeder weiteren y-Achse von der Zeichenfläche
,plotRTTM=True
# p y-Achse
,ylimp=ylimpD #wenn undef., dann min/max
,ylimpxlim=False #wenn Wahr und ylim undef., dann wird xlim beruecksichtigt bei min/max
,yticksp=None #[0,50,100] #wenn undef., dann aus ylimp
,ylabelp='[bar]'
# Q y-Achse
,ylimQ=ylimQD
,ylimQxlim=False
,yticksQ=None
,ylabelQ='[Nm³/h]'
# 3. Achse
,ylim3rd=ylim3rdD
,yticks3rd=yticks3rdD
,yGridSteps=yGridStepsD
,ySpanMin=0.9 # wenn ylim undef. vermeidet dieses Maß eine y-Achse mit einer zu kleinen Differenz zwischen min/max
,plotLegend=True # interpretiert fuer diese Funktion; Inverse gilt fuer pltLDSpQAndEvents selbst
,plotLegend1stOnly=True # diese Funktion plottet wenn plotLegend=True die Legende nur im ersten Plot
,legendLoc='best'
,legendFramealpha=.2
,legendFacecolor='white'
# SchieberEvents
,pSIDEvents=pSIDEvents
# ausgewertet werden: colRegExSchieberID (um welchen Schieber geht es), colRegExMiddle (Befehl oder Zustand) und colRegExEventID (welcher Befehl bzw. Zustand)
# die Befehle bzw. Zustaende (die Auspraegungen von colRegExEventID) muessen nachf. def. sein um den Marker (des Befehls bzw. des Zustandes) zu definieren
,eventCCmds=eventCCmds
,eventCStats=eventCStats
,valRegExMiddleCmds=valRegExMiddleCmds # colRegExMiddle-Auspraegung fuer Befehle (==> eventCCmds)
# es muessen soviele Farben definiert sein wie Schieber
,baseColorsDef=baseColorsSchieber
,markerDef=markerDefSchieber
):
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
# plots pltLDSpQAndEvents-Sections
# returns a Lst of pltLDSpQAndEvents-Results, a Lst of (axes,lines,scatters)
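# A sketch of how the returned list can be consumed (variable names are illustrative only):
#
#   resultsHYD = plotTimespansHYD(axLst=axLstHYD, xlims=xlims, dfTCsLDSIn=dfTCsLDSIn, QDct=QDct, pDct=pDct)
#   axesFirst, linesFirst, scattersFirst = resultsHYD[0]   # first section
#   axesFirst['p'].set_ylabel('[barg]')                    # e.g. relabel the p-axis afterwards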
try:
if sectionTitles==[] or sectionTitles==None:
sectionTitles=len(xlims)*['a plotTimespansHYD sectionTitle Praefix']
if not isinstance(sectionTitles, list):
logger.warning("{0:s}sectionTitles muss eine Liste von strings sein.".format(logStr))
sectionTitles=len(xlims)*['a plotTimespansHYD sectionTitle Praefix']
if len(sectionTitles)!=len(xlims):
logger.debug("{0:s}sectionTitles muss dieselbe Laenge haben wie xlims.".format(logStr))
if len(sectionTitles) == 1:
sectionTitles=len(xlims)*[sectionTitles[0]]
else:
sectionTitles=len(xlims)*['a plotTimespansHYD sectionTitle Praefix']
if sectionTexts==[] or sectionTexts==None:
sectionTexts=len(xlims)*['']
if not isinstance(sectionTexts, list):
logger.warning("{0:s}sectionTexts muss eine Liste von strings sein.".format(logStr))
sectionTexts=len(xlims)*['']
if len(sectionTexts)!=len(xlims):
logger.warning("{0:s}sectionTexts muss dieselbe Laenge haben wie xlims.".format(logStr))
sectionTexts=len(xlims)*['']
if plotLegend:
plotLegendFct=False
else:
plotLegendFct=True
pltLDSpQAndEventsResults=[]
for idx,xlim in enumerate(xlims):
ax = axLst[idx]
if isinstance(dateFormat, list):
dateFormatIdx=dateFormat[idx]
else:
dateFormatIdx=dateFormat
bysecondIdx=bysecond
if isinstance(bysecond, list):
if any(isinstance(el, list) for el in bysecond):
bysecondIdx=bysecond[idx]
byminuteIdx=byminute
if isinstance(byminute, list):
if any(isinstance(el, list) for el in byminute):
byminuteIdx=byminute[idx]
byhourIdx=byhour
if isinstance(byhour, list):
if any(isinstance(el, list) for el in byhour):
byhourIdx=byhour[idx]
(axes,lines,scatters)=pltLDSpQAndEvents(
ax
,dfTCsLDSIn=dfTCsLDSIn
,dfTCsOPC=dfTCsOPC
,dfTCsOPCScenTimeShift=dfTCsOPCScenTimeShift
,dfTCsSIDEvents=dfTCsSIDEvents
,dfTCsSIDEventsTimeShift=dfTCsSIDEventsTimeShift
,dfTCsSIDEventsInXlimOnly=dfTCsSIDEventsInXlimOnly
,dfTCsSIDEventsyOffset=dfTCsSIDEventsyOffset
,QDct=QDct
,pDct=pDct
,QDctOPC=QDctOPC
,pDctOPC=pDctOPC
,attrsDct=attrsDct
,fctsDct=fctsDct
,xlim=xlim
,dateFormat=dateFormatIdx
,bysecond=bysecondIdx
,byminute=byminuteIdx
,byhour=byhourIdx
,plotRTTM=plotRTTM
,ylimp=ylimp
,ylabelp=ylabelp
,yticksp=yticksp
,ylimQ=ylimQ
,yticksQ=yticksQ
# 3. Achse
,ylim3rd=ylim3rd
,yticks3rd=yticks3rd
,yGridSteps=yGridSteps
,plotLegend=plotLegendFct
,baseColorsDef=baseColorsDef
)
pltLDSpQAndEventsResults.append((axes,lines,scatters))
sectionText=sectionTexts[idx]
ax.text(
0.5, 0.5,
sectionText,
ha='center', va='top',
transform=ax.transAxes
)
(timeStart,timeEnd)=xlim
sectionTitleSingle="{:s}: Plot Nr. {:d} - Zeitspanne: {:s}".format(sectionTitles[idx],idx+1,str(timeEnd-timeStart)).replace('days','Tage')
ax.set_title(sectionTitleSingle)
for vLineX in vLinesX:
if vLineX >= timeStart and vLineX <= timeEnd:
ax.axvline(x=vLineX,ymin=0, ymax=1, color=vLinesXColor,ls=linestyle_tuple[11][1])
for hLineY in hLinesY:
ax.axhline(y=hLineY,xmin=0, xmax=1,color='gray',ls=linestyle_tuple[11][1])
if len(vAreasX) == len(xlims):
vAreasXSection=vAreasX[idx]
if vAreasXSection==[] or vAreasXSection==None:
pass
else:
for vArea in vAreasXSection:
ax.axvspan(vArea[0], vArea[1], alpha=0.6, color=vAreasXColor)
else:
if len(vAreasX)>0:
logger.warning("{0:s}vAreasX muss dieselbe Laenge haben wie xlims.".format(logStr))
# Legend
if plotLegend:
legendHorizontalPos='center'
if len(xlims)>1:
if idx in [0,2,4]: # Anfahren ...
legendHorizontalPos='right'
elif idx in [1,3,5]: # Abfahren ...
legendHorizontalPos='left'
if plotLegend1stOnly:
legendHorizontalPos='center' # wenn nur 1x Legende dann Mitte
if plotLegend1stOnly and idx>0:
pass
else:
patterBCp='^p S[rc|nk]'
patterBCQ='^Q S[rc|nk]'
patterBCpQ='^[p|Q] S[rc|nk]'
linesp=[line for line in lines if re.search(patterBCp,line) != None]
linesQ=[line for line in lines if re.search(patterBCQ,line) != None]
linespTxt=tuple([lines[line] for line in linesp])
linesQTxt=tuple([lines[line] for line in linesQ])
moreLines=[line for line in lines if re.search(patterBCpQ,line) == None]
moreLinesp=[line for line in moreLines if re.search('^p',line) != None]
moreLinesQ=[line for line in moreLines if re.search('^Q',line) != None]
moreLinespTxt=tuple([lines[line] for line in moreLinesp])
moreLinesQTxt=tuple([lines[line] for line in moreLinesQ])
axes['p'].add_artist(axes['p'].legend(
linespTxt+moreLinespTxt
,linesp+moreLinesp
,loc='upper '+legendHorizontalPos
,framealpha=legendFramealpha
,facecolor=legendFacecolor
))
axes['Q'].add_artist(axes['Q'].legend(
linesQTxt+moreLinesQTxt
,linesQ+moreLinesQ
,loc='lower '+legendHorizontalPos
,framealpha=legendFramealpha
,facecolor=legendFacecolor
))
if 'SID' in axes.keys() and len(scatters)>0:
if legendHorizontalPos == 'center':
legendHorizontalPosAct=''
else:
legendHorizontalPosAct=' '+legendHorizontalPos
axes['SID'].legend(loc='center'+legendHorizontalPosAct
,framealpha=legendFramealpha
,facecolor=legendFacecolor)
# Titel
tMin=xlims[0][0]
tMax=xlims[-1][1]
for tPair in xlims:
(t1,t2)=tPair
if t1 < tMin:
tMin=t1
if t2>tMax:
tMax=t2
if figTitle not in ['',None]:
figTitle="{:s} - {:s} - {:s}".format(figTitle,str(tMin),str(tMax)).replace(':',' ')
fig=plt.gcf()
fig.suptitle(figTitle)
# speichern?!
if figSave:
fig.tight_layout(pad=2.) # gs.tight_layout(fig,pad=2.)
plt.savefig(figTitle+'.png')
plt.savefig(figTitle+'.pdf')
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return pltLDSpQAndEventsResults
def plotTimespansLDS(
axLst # list of axes to be used
,xlims # list of sections
,figTitle='' # the title of the plot; will be extended by min. and max. time calculated over all sections; will be also the pdf and png fileName
,figSave=False #True # creates pdf and png
,sectionTitles=[] # list of section titles to be used
,sectionTexts=[] # list of section texts to be used
,vLinesX=[] # plotted in each section if X-time fits
,vAreasX=[] # for each section a list of areas to highlight i.e. [[(timeStartAusschnittDruck,timeEndAusschnittDruck),...],...]
,vLinesXColor='gray'
,vAreasXColor='whitesmoke'
# --- Args Fct. ---:
,dfSegReprVec=pd.DataFrame()
,dfDruckReprVec=pd.DataFrame()
#,xlim=None
,dateFormat='%y.%m.%d: %H:%M:%S' # can be a list
,bysecond=None #[0,15,30,45] # can be a list
,byminute=None # can be a list
,byhour=None
,ylimAL=ylimALD
,yticksAL=yticksALD
,yTwinedAxesPosDeltaHPStart=-0.0125
,yTwinedAxesPosDeltaHP=-0.0875
,ylimR=ylimRD # can be a list
,ylimRxlim=False # can be a list
,yticksR=yticksRD # can be a list
# ditto for acceleration (AC)
,ylimAC=ylimACD
,ylimACxlim=False
,yticksAC=yticksACD
,ySpanMin=0.9
,plotLegend=True # interpretiert fuer diese Funktion; Inverse gilt fuer pltLDSErgVec selbst
,plotLegend1stOnly=True # diese Funktion plottet wenn plotLegend=True die Legende nur im ersten Plot
,legendLoc='best'
,legendFramealpha=.2
,legendFacecolor='white'
,attrsDctLDS=attrsDctLDS
,plotLPRate=True
,plotR2FillSeg=True
,plotR2FillDruck=True
,plotAC=True
,plotACCLimits=True
,highlightAreas=True
,Seg_Highlight_Color='cyan'
,Seg_Highlight_Alpha=.1
,Seg_Highlight_Fct=lambda row: True if row['STAT_S']==101 else False
,Seg_HighlightError_Color='peru'
,Seg_Highlight_Alpha_Error=.3
,Seg_HighlightError_Fct=lambda row: True if row['STAT_S']==601 else False
,Druck_Highlight_Color='cyan'
,Druck_Highlight_Alpha=.1
,Druck_Highlight_Fct=lambda row: True if row['STAT_S']==101 else False
,Druck_HighlightError_Color='peru'
,Druck_Highlight_Alpha_Error=.3
,Druck_HighlightError_Fct=lambda row: True if row['STAT_S']==601 else False
,plotTV=True
,plotTVTimerFct=None
,plotTVAmFct=lambda x: x*100
,plotTVAmLabel=plotTVAmLabelD
,ylimTV=ylimTVD
,yticksTV=yticksTVD
,plotDPDT=True
,plotSB_S=True
):
# plots pltLDSErgVec-Sections
# returns a Lst of pltLDSErgVec-Results, a Lst of (axes,lines)
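# A sketch of how the returned list can be consumed (variable names are illustrative only):
#
#   resultsLDS = plotTimespansLDS(axLst=axLstLDS, xlims=xlims, dfSegReprVec=dfSegReprVec)
#   axesFirst, linesFirst = resultsLDS[0]   # first section
#   axesFirst['A'].set_ylim(ylimALD)        # e.g. re-adjust the alarm axis afterwards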
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
if sectionTitles==[] or sectionTitles ==None:
sectionTitles=len(xlims)*['a plotTimespansLDS sectionTitle Praefix']
if not isinstance(sectionTitles, list):
logger.warning("{0:s}sectionTitles muss eine Liste von strings sein.".format(logStr))
sectionTitles=len(xlims)*['a plotTimespansLDS sectionTitle Praefix']
if len(sectionTitles)!=len(xlims):
logger.debug("{0:s}sectionTitles muss dieselbe Laenge haben wie xlims.".format(logStr))
if len(sectionTitles) == 1:
sectionTitles=len(xlims)*[sectionTitles[0]]
else:
sectionTitles=len(xlims)*['a plotTimespansLDS sectionTitle Praefix']
if sectionTexts==[] or sectionTexts==None:
sectionTexts=len(xlims)*['']
if not isinstance(sectionTexts, list):
logger.warning("{0:s}sectionTexts muss eine Liste von strings sein.".format(logStr))
sectionTexts=len(xlims)*['']
if len(sectionTexts)!=len(xlims):
logger.warning("{0:s}sectionTexts muss dieselbe Laenge haben wie xlims.".format(logStr))
sectionTexts=len(xlims)*['']
if plotLegend:
plotLegendFct=False
else:
plotLegendFct=True
pltLDSErgVecResults=[]
for idx,xlim in enumerate(xlims):
ax = axLst[idx]
if isinstance(dateFormat, list):
dateFormatIdx=dateFormat[idx]
else:
dateFormatIdx=dateFormat
bysecondIdx=bysecond
if isinstance(bysecond, list):
if any(isinstance(el, list) for el in bysecond):
bysecondIdx=bysecond[idx]
byminuteIdx=byminute
if isinstance(byminute, list):
if any(isinstance(el, list) for el in byminute):
byminuteIdx=byminute[idx]
byhourIdx=byhour
if isinstance(byhour, list):
if any(isinstance(el, list) for el in byhour):
byhourIdx=byhour[idx]
ylimRIdx=ylimR
if isinstance(ylimR, list):
ylimRIdx=ylimR[idx]
ylimRxlimIdx=ylimRxlim
if isinstance(ylimRxlim, list):
ylimRxlimIdx=ylimRxlim[idx]
yticksRIdx=yticksR
if isinstance(yticksR, list):
if any(isinstance(el, list) for el in yticksR):
yticksRIdx=yticksR[idx]
(axes,lines)=pltLDSErgVec(
ax
,dfSegReprVec=dfSegReprVec
,dfDruckReprVec=dfDruckReprVec
,xlim=xlims[idx]
,dateFormat=dateFormatIdx
,bysecond=bysecondIdx
,byminute=byminuteIdx
,byhour=byhourIdx
,ylimAL=ylimAL
,yticksAL=yticksAL
,yTwinedAxesPosDeltaHPStart=yTwinedAxesPosDeltaHPStart
,yTwinedAxesPosDeltaHP=yTwinedAxesPosDeltaHP
,ylimR=ylimRIdx
,ylimRxlim=ylimRxlimIdx
,yticksR=yticksRIdx
,ylimAC=ylimAC
,ylimACxlim=ylimACxlim
,yticksAC=yticksAC
,ySpanMin=ySpanMin
,plotLegend=plotLegendFct
,legendLoc=legendLoc
,legendFramealpha=legendFramealpha
,legendFacecolor=legendFacecolor
,attrsDctLDS=attrsDctLDS
,plotLPRate=plotLPRate
,plotR2FillSeg=plotR2FillSeg
,plotR2FillDruck=plotR2FillDruck
,plotAC=plotAC
,plotACCLimits=plotACCLimits
,highlightAreas=highlightAreas
,Seg_Highlight_Color=Seg_Highlight_Color
,Seg_Highlight_Alpha=Seg_Highlight_Alpha
,Seg_Highlight_Fct=Seg_Highlight_Fct
,Seg_HighlightError_Color=Seg_HighlightError_Color
,Seg_Highlight_Alpha_Error=Seg_Highlight_Alpha_Error #
,Seg_HighlightError_Fct=Seg_HighlightError_Fct
,Druck_Highlight_Color=Druck_Highlight_Color
,Druck_Highlight_Alpha=Druck_Highlight_Alpha
,Druck_Highlight_Fct=Druck_Highlight_Fct
,Druck_HighlightError_Color=Druck_HighlightError_Color
,Druck_Highlight_Alpha_Error=Druck_Highlight_Alpha_Error #
,Druck_HighlightError_Fct=Druck_HighlightError_Fct
,plotTV=plotTV
,plotTVTimerFct=plotTVTimerFct
,plotTVAmFct=plotTVAmFct
,plotTVAmLabel=plotTVAmLabel
,ylimTV=ylimTV
,yticksTV=yticksTV
,plotDPDT=plotDPDT
,plotSB_S=plotSB_S
)
pltLDSErgVecResults.append((axes,lines))
sectionText=sectionTexts[idx]
ax.text(
0.5, 0.5,
sectionText,
ha='center', va='top',
transform=ax.transAxes
)
(timeStart,timeEnd)=xlim
sectionTitleSingle="{:s}: Plot Nr. {:d} - Zeitspanne: {:s}".format(sectionTitles[idx],idx+1,str(timeEnd-timeStart)).replace('days','Tage')
ax.set_title(sectionTitleSingle)
for vLineX in vLinesX:
if vLineX >= timeStart and vLineX <= timeEnd:
ax.axvline(x=vLineX,ymin=0, ymax=1, color=vLinesXColor,ls=linestyle_tuple[11][1])
if len(vAreasX) == len(xlims):
vAreasXSection=vAreasX[idx]
if vAreasXSection==[] or vAreasXSection==None:
pass
else:
for vArea in vAreasXSection:
ax.axvspan(vArea[0], vArea[1], alpha=0.6, color=vAreasXColor)
else:
if len(vAreasX)>0:
logger.warning("{0:s}vAreasX muss dieselbe Laenge haben wie xlims.".format(logStr))
# Legend
if plotLegend:
legendHorizontalPos='center'
if len(xlims)>1:
if idx in [0,2,4]: # Anfahren ...
legendHorizontalPos='right'
elif idx in [1,3,5]: # Abfahren ...
legendHorizontalPos='left'
if plotLegend1stOnly and idx>0:
pass
else:
if not dfSegReprVec.empty:
patternSeg='Seg$'
axes['A'].add_artist(axes['A'].legend(
tuple([lines[line] for line in lines if re.search(patternSeg,line) != None])
,tuple([line for line in lines if re.search(patternSeg,line) != None])
,loc='upper '+legendHorizontalPos
,framealpha=legendFramealpha
,facecolor=legendFacecolor
))
if not dfDruckReprVec.empty:
patternDruck='Drk$'
axes['A'].add_artist(axes['A'].legend(
tuple([lines[line] for line in lines if re.search(patternDruck,line) != None])
,tuple([line for line in lines if re.search(patternDruck,line) != None])
,loc='lower '+legendHorizontalPos
,framealpha=legendFramealpha
,facecolor=legendFacecolor
))
# Titel
tMin=xlims[0][0]
tMax=xlims[-1][1]
for tPair in xlims:
(t1,t2)=tPair
if t1 < tMin:
tMin=t1
if t2>tMax:
tMax=t2
if figTitle not in ['',None]:
figTitle="{:s} - {:s} - {:s}".format(figTitle,str(tMin),str(tMax)).replace(':',' ')
fig=plt.gcf()
fig.suptitle(figTitle)
# speichern?!
if figSave:
fig.tight_layout(pad=2.) # gs.tight_layout(fig,pad=2.)
plt.savefig(figTitle+'.png')
plt.savefig(figTitle+'.pdf')
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return pltLDSErgVecResults
def pltLDSpQAndEvents(
ax
,dfTCsLDSIn # es werden nur die aDct-definierten geplottet
,dfTCsOPC=pd.DataFrame() # es werden nur die aDctOPC-definierten geplottet
# der Schluessel in den vorgenannten Dcts ist die ID (der Spaltenname) in den TCs
,dfTCsOPCScenTimeShift=pd.Timedelta('1 hour')
,dfTCsSIDEvents=pd.DataFrame()
,dfTCsSIDEventsTimeShift=pd.Timedelta('1 hour')
,dfTCsSIDEventsInXlimOnly=True # es werden nur die Spalten geplottet, die in xlim vorkommen und dort mindestens 1x nicht Null sind (sonst sind alle (zumindest in der Legende) dargestellt)
,dfTCsSIDEventsyOffset=.05 # die y-Werte werden ab dem 1. Schieber um je dfTCsSIDEventsyOffset erhöht (damit zeitgleiche Events besser sichtbar werden)
,QDct={ # Example
'Objects.FBG_MESSW.6_KED_39_FT_01.In.MW.value':{'IDPlt':'Q Src','RTTM':'IMDI.Objects.FBG_MESSW.6_KED_39_FT_01.In.MW.value'}
,'Objects.FBG_MESSW.6_TUD_39_FT_01.In.MW.value':{'IDPlt':'Q Snk','RTTM':'IMDI.Objects.FBG_MESSW.6_TUD_39_FT_01.In.MW.value'}
}
,pDct={# Example
'Objects.FBG_HPS_M.6_KED_39_PTI_01_E.In.MW.value':{'IDPlt':'p Src'}
,'Objects.FBG_HPS_M.6_TUD_39_PTI_01_E.In.MW.value':{'IDPlt':'p Snk'}
,'Objects.FBG_HPS_M.6_EL1_39_PTI_01_E.In.MW.value':{'IDPlt':'p ISrc 1'}
,'Objects.FBG_HPS_M.6_EL1_39_PTI_02_E.In.MW.value':{'IDPlt':'p ISnk 2'}
}
,QDctOPC={ # Example
'Objects.FBG_MESSW.6_EL1_39_FT_01.In.MW.value':{'IDPlt':'Q xSnk 1'}
}
,pDctOPC={}
,IDPltKey='IDPlt' # Schluesselbezeichner in den vorstehenden 4 Dcts; Wert ist Referenz auf das folgende Layout-Dct und das folgende Fcts-Dct; Werte muessen eindeutig sein
,attrsDct=attrsDct
,fctsDct={} # a Dct with Fcts
,xlim=None
,dateFormat='%y.%m.%d: %H:%M:%S'
,bysecond=None #[0,15,30,45]
,byminute=None
,byhour=None
,yTwinedAxesPosDeltaHPStart=-0.0125 #: (i.d.R. negativer) Abstand der 1. y-Achse von der Zeichenfläche
,yTwinedAxesPosDeltaHP=-0.0875 #: (i.d.R. negativer) zus. Abstand jeder weiteren y-Achse von der Zeichenfläche
,plotRTTM=True # plot RTTM-Echoes
# p y-Achse
,ylimp=ylimpD #wenn undef., dann min/max
,ylimpxlim=False #wenn Wahr und ylim undef., dann wird xlim beruecksichtigt bei min/max
,yticksp=None #wenn undef., dann aus ylimp
,ylabelp='[bar]'
# Q y-Achse
,ylimQ=ylimQD
,ylimQxlim=False
,yticksQ=None #wenn undef., dann aus ylimQ
,ylabelQ='[Nm³/h]'
# 3. Achse
,ylim3rd=ylim3rdD
,yticks3rd=yticks3rdD
,ylabel3rd='Schieber (ZUSTände 0,1,2 jew. + x; Befehle)'
,yGridSteps=30 # 0: das y-Gitter besteht dann bei ylimp=ylimQ=yticksp=yticksQ None nur aus min/max (also 1 Gitterabschnitt)
,ySpanMin=0.9 # wenn ylim undef. vermeidet dieses Maß eine y-Achse mit einer zu kleinen Differenz zwischen min/max
,plotLegend=True
,legendLoc='best'
,legendFramealpha=.2
,legendFacecolor='white'
# SchieberEvents
,pSIDEvents=pSIDEvents
# ausgewertet werden: colRegExSchieberID (um welchen Schieber geht es), colRegExMiddle (Befehl oder Zustand) und colRegExEventID (welcher Befehl bzw. Zustand)
# die Befehle bzw. Zustaende (die Auspraegungen von colRegExEventID) muessen nachf. def. sein um den Marker (des Befehls bzw. des Zustandes) zu definieren
,eventCCmds=eventCCmds
,eventCStats=eventCStats
,valRegExMiddleCmds=valRegExMiddleCmds # colRegExMiddle-Auspraegung fuer Befehle (==> eventCCmds)
# es muessen soviele Farben definiert sein wie Schieber
,baseColorsDef=baseColorsSchieber
,markerDef=markerDefSchieber
):
"""
Plots p/Q time curves, optionally supplemented by valve (Schieber) events.
Returns:
* axes (Dct of axes)
* lines (Dct of lines)
* scatters (List of ax.scatter-Results)
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
axes={}
lines={}
scatters=[]
try:
axes['p']=ax
# x-Achse ----------------
if xlim == None:
xlimMin=dfTCsLDSIn.index[0]
xlimMax=dfTCsLDSIn.index[-1]
xlim=(xlimMin,xlimMax)
(xlimMin,xlimMax)=xlim
ax.set_xlim(xlim)
logger.debug("{0:s}dfTCsOPCScenTimeShift: {1:s}".format(logStr,str(dfTCsOPCScenTimeShift)))
logger.debug("{0:s}bysecond: {1:s}".format(logStr,str(bysecond)))
logger.debug("{0:s}byminute: {1:s}".format(logStr,str(byminute)))
logger.debug("{0:s}byhour: {1:s}".format(logStr,str(byhour)))
pltHelperX(
ax
,dateFormat=dateFormat
,bysecond=bysecond
,byminute=byminute
,byhour=byhour
,yPos=yTwinedAxesPosDeltaHPStart
)
# Eindeutigkeit der IDPlts pruefen
keys=[]
keysUneindeutig=[]
for dct in [QDct,pDct,QDctOPC,pDctOPC]:
for key, value in dct.items():
if IDPltKey in value.keys():
IDPltValue=value[IDPltKey]
if IDPltValue in keys:
print("IDPlt {:s} bereits vergeben".format(IDPltValue))
keysUneindeutig.append(IDPltValue)
else:
keys.append(IDPltValue)
# 1. Achse p -----------------------
logger.debug("{0:s}{1:s}".format(logStr,'# 1. Achse p'))
for key, value in pDct.items(): # nur die konfigurierten IDs plotten
if key in dfTCsLDSIn.columns: # nur dann, wenn ID als Spalte enthalten
label, linesAct = pltLDSpQHelper(
ax
,TCdf=dfTCsLDSIn
,ID=key # Spaltenname
,xDctValue=value # a Dct - i.e. {'IDPlt':'Q Src','RTTM':'IMDI.Objects.FBG_MESSW.6_KED_39_FT_01.In.MW.value'}
,xDctAttrs=attrsDct # a Dct with - i.e. {'Q Src':{'color':'red'},...}
,IDPltKey=IDPltKey # Schluesselbezeichner in value
,IDPltValuePostfix=None
,xDctFcts=fctsDct
#,timeShift=pd.Timedelta('1 hour') # pd.Timedelta('0 seconds')
)
lines[label]=linesAct[0]
else:
logger.debug("{0:s}Spalte {1:s} gibt es nicht. Weiter.".format(logStr,key))
if 'RTTM' in value.keys():
if value['RTTM'] in dfTCsLDSIn.columns:
label, linesAct = pltLDSpQHelper(
ax
,TCdf=dfTCsLDSIn
,ID=value['RTTM']
,xDctValue=value # a Dct - i.e. {'IDPlt':'Q Src','RTTM':'IMDI.Objects.FBG_MESSW.6_KED_39_FT_01.In.MW.value'}
,xDctAttrs=attrsDct
,IDPltKey=IDPltKey
,IDPltValuePostfix=' RTTM'
,xDctFcts=fctsDct
#,timeShift=pd.Timedelta('0 hour') #pd.Timedelta('0 seconds')
)
lines[label]=linesAct[0]
else:
logger.debug("{0:s}Spalte {1:s} gibt es nicht. Weiter.".format(logStr,value['RTTM']))
if not dfTCsOPC.empty:
logger.debug("{0:s}{1:s}".format(logStr,'# 1. Achse p OPC'))
for key, value in pDctOPC.items():
if key in dfTCsOPC.columns:
label, linesAct = pltLDSpQHelper(
ax
,TCdf=dfTCsOPC
,ID=key
,xDctValue=value
,xDctAttrs=attrsDct
,IDPltKey=IDPltKey
,IDPltValuePostfix=None
,xDctFcts=fctsDct
,timeShift=dfTCsOPCScenTimeShift
)
lines[label]=linesAct[0]
else:
logger.debug("{0:s}Spalte {1:s} gibt es nicht. Weiter.".format(logStr,key))
ylimp,yticksp=pltLDSpQHelperYLimAndTicks(
dfTCsLDSIn
,pDct.keys()
,ylim=ylimp
,yticks=yticksp
,ylimxlim=ylimpxlim
,xlim=xlim
,ySpanMin=ySpanMin
,yGridSteps=yGridSteps
)
ax.set_ylim(ylimp)
ax.set_yticks(yticksp)
ax.grid()
ax.set_zorder(10)
ax.patch.set_visible(False)
ax.set_ylabel(ylabelp)
# 2. y-Achse Q ----------------------------------------
logger.debug("{0:s}{1:s}".format(logStr,'# 2. Achse Q'))
ax2 = ax.twinx()
axes['Q']=ax2
pltHelperX(
ax2
,dateFormat=dateFormat
,bysecond=bysecond
,byminute=byminute
,byhour=byhour
,yPos=yTwinedAxesPosDeltaHPStart+yTwinedAxesPosDeltaHP
)
for key, value in QDct.items():
if key in dfTCsLDSIn.columns:
label, linesAct = pltLDSpQHelper(
ax2
,TCdf=dfTCsLDSIn
,ID=key
,xDctValue=value # a Dct - i.e. {'IDPlt':'Q Src','RTTM':'IMDI.Objects.FBG_MESSW.6_KED_39_FT_01.In.MW.value'}
,xDctAttrs=attrsDct
,IDPltKey=IDPltKey
,IDPltValuePostfix=None
,xDctFcts=fctsDct
# ,timeShift=pd.Timedelta('0 hour') # pd.Timedelta('0 seconds')
)
lines[label]=linesAct[0]
else:
logger.debug("{0:s}Spalte {1:s} gibt es nicht. Weiter.".format(logStr,key))
if 'RTTM' in value.keys() and plotRTTM:
if value['RTTM'] in dfTCsLDSIn.columns:
label, linesAct = pltLDSpQHelper(
ax2
,TCdf=dfTCsLDSIn
,ID=value['RTTM']
,xDctValue=value # a Dct - i.e. {'IDPlt':'Q Src','RTTM':'IMDI.Objects.FBG_MESSW.6_KED_39_FT_01.In.MW.value'}
,xDctAttrs=attrsDct
,IDPltKey=IDPltKey
,IDPltValuePostfix=' RTTM'
,xDctFcts=fctsDct
#,timeShift=pd.Timedelta('0 hour') #pd.Timedelta('0 seconds')
)
lines[label]=linesAct[0]
else:
logger.debug("{0:s}Spalte {1:s} gibt es nicht. Weiter.".format(logStr,value['RTTM']))
if not dfTCsOPC.empty:
logger.debug("{0:s}{1:s}".format(logStr,'# 2. Achse Q OPC'))
for key, value in QDctOPC.items():
if key in dfTCsOPC.columns:
label, linesAct = pltLDSpQHelper(
ax2
,TCdf=dfTCsOPC
,ID=key
,xDctValue=value
,xDctAttrs=attrsDct
,IDPltKey=IDPltKey
,IDPltValuePostfix=None
,xDctFcts=fctsDct
,timeShift=dfTCsOPCScenTimeShift
)
lines[label]=linesAct[0]
else:
logger.debug("{0:s}Spalte {1:s} gibt es nicht. Weiter.".format(logStr,key))
pltLDSHelperY(ax2)
ylimQ,yticksQ=pltLDSpQHelperYLimAndTicks(
dfTCsLDSIn
,QDct.keys()
,ylim=ylimQ
,yticks=yticksQ
,ylimxlim=ylimQxlim
,xlim=xlim
,ySpanMin=ySpanMin
,yGridSteps=yGridSteps
)
ax2.set_ylim(ylimQ)
ax2.set_yticks(yticksQ)
ax2.grid()
ax2.set_ylabel(ylabelQ)
# ggf. 3. Achse
if not dfTCsSIDEvents.empty:
logger.debug("{0:s}{1:s}".format(logStr,'# 3. Achse SID'))
ax3 = ax.twinx()
axes['SID']=ax3
pltHelperX(
ax3
,dateFormat=dateFormat
,bysecond=bysecond
,byminute=byminute
,byhour=byhour
,yPos=yTwinedAxesPosDeltaHPStart+2*yTwinedAxesPosDeltaHP
)
if dfTCsSIDEventsInXlimOnly:
# auf xlim beschränken
dfTCsSIDEventsPlot=dfTCsSIDEvents[
(dfTCsSIDEvents.index-dfTCsSIDEventsTimeShift>=xlim[0])
&
(dfTCsSIDEvents.index-dfTCsSIDEventsTimeShift<=xlim[1])
]
# weiter beschränken auf die, die in xlim mind. 1 Eintrag haben
dfTCsSIDEventsPlot=dfTCsSIDEventsPlot.dropna(axis=1,how='all')
else:
dfTCsSIDEventsPlot=dfTCsSIDEvents
# doppelte bzw. mehrfache Spaltennamen eliminieren (das waere ein Aufruf-Fehler)
dfTCsSIDEventsPlot = dfTCsSIDEventsPlot.loc[:,~dfTCsSIDEventsPlot.columns.duplicated()]
logger.debug("{:s}dfTCsSIDEventsPlot.dropna(how='all'): {:s}".format(logStr,dfTCsSIDEventsPlot.dropna(how='all').to_string()))
if not dfTCsSIDEventsPlot.dropna(how='all').empty: # mind. 1 Ereignis in irgendeiner Spalte muss ueberbleiben
# aus Performanzgruenden wird nur zum Plot gegeben, was in xlim auch zu sehen sein wird
dfTCsSIDEventsPlot2=dfTCsSIDEventsPlot[
( dfTCsSIDEventsPlot.index-dfTCsSIDEventsTimeShift>=xlim[0])
&
( dfTCsSIDEventsPlot.index-dfTCsSIDEventsTimeShift<=xlim[1])
]
labelsOneCall,scattersOneCall=pltLDSSIDHelper(
ax3
,dfTCsSIDEventsPlot2
,dfTCsSIDEventsTimeShift
,dfTCsSIDEventsyOffset
,pSIDEvents
,valRegExMiddleCmds
,eventCCmds
,eventCStats
,markerDef
,baseColorsDef
)
scatters=scatters+scattersOneCall
pltLDSHelperY(ax3)
ax3.set_ylim(ylim3rd)
ax3.set_yticks(yticks3rd)
ax3.set_ylabel(ylabel3rd)
if plotLegend:
legendHorizontalPos='center'
patterBCp='^p S[rc|nk]'
patterBCQ='^Q S[rc|nk]'
patterBCpQ='^[p|Q] S[rc|nk]'
linesp=[line for line in lines if re.search(patterBCp,line) != None]
linesQ=[line for line in lines if re.search(patterBCQ,line) != None]
linespTxt=tuple([lines[line] for line in linesp])
linesQTxt=tuple([lines[line] for line in linesQ])
moreLines=[line for line in lines if re.search(patterBCpQ,line) == None]
moreLinesp=[line for line in moreLines if re.search('^p',line) != None]
moreLinesQ=[line for line in moreLines if re.search('^Q',line) != None]
moreLinespTxt=tuple([lines[line] for line in moreLinesp])
moreLinesQTxt=tuple([lines[line] for line in moreLinesQ])
axes['p'].add_artist(axes['p'].legend(
linespTxt+moreLinespTxt
,linesp+moreLinesp
,loc='upper '+legendHorizontalPos
,framealpha=legendFramealpha
,facecolor=legendFacecolor
))
axes['Q'].add_artist(axes['Q'].legend(
linesQTxt+moreLinesQTxt
,linesQ+moreLinesQ
,loc='lower '+legendHorizontalPos
,framealpha=legendFramealpha
,facecolor=legendFacecolor
))
if 'SID' in axes.keys() and len(scatters)>0:
if legendHorizontalPos == 'center':
legendHorizontalPosAct=''
else:
legendHorizontalPosAct=' '+legendHorizontalPos
axes['SID'].legend(loc='center'+legendHorizontalPosAct
,framealpha=legendFramealpha
,facecolor=legendFacecolor)
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return axes,lines,scatters
def pltLDSErgVec(
ax=None # Axes auf die geplottet werden soll (und aus der neue axes ge-twinx-ed werden; plt.gcf().gca() wenn undef.
,dfSegReprVec=pd.DataFrame() # Ergebnisvektor SEG; pass empty Df if Druck only
,dfDruckReprVec=pd.DataFrame() # Ergebnisvektor DRUCK; pass empty Df if Seg only
,xlim=None # tuple (xmin,xmax); wenn undef. gelten min/max aus vorgenannten Daten als xlim; wenn Seg angegeben, gilt Seg
,dateFormat='%y.%m.%d: %H:%M:%S'
,bysecond=None #[0,15,30,45]
,byminute=None
,byhour=None
,ylimAL=ylimALD
,yticksAL=yticksALD #[0,10,20,30,40]
,yTwinedAxesPosDeltaHPStart=-0.0125 #: (i.d.R. negativer) Abstand der 1. y-Achse von der Zeichenfläche
,yTwinedAxesPosDeltaHP=-0.0875 #: (i.d.R. negativer) zus. Abstand jeder weiteren y-Achse von der Zeichenfläche
,ylimR=ylimRD #None #(-10,10) #wenn undef., dann min/max dfSegReprVec
,ylimRxlim=False #wenn Wahr und ylimR undef. (None), dann wird xlim beruecksichtigt bei min/max dfSegReprVec
,yticksR=yticksRD #[0,2,4,10,15,30,40] #wenn undef. (None), dann aus ylimR; matplotlib "vergrößert" mit dem Setzen von yTicks ein ebenfalls gesetztes ylim wenn die Ticks außerhalb des ylims liegen
# ditto for acceleration (AC)
,ylimAC=ylimACD
,ylimACxlim=False
,yticksAC=yticksACD
,ySpanMin=0.9 # wenn ylim R/AC undef. vermeidet dieses Maß eine y-Achse mit einer zu kleinen Differenz zwischen min/max
,plotLegend=True
,legendLoc='best'
,legendFramealpha=.2
,legendFacecolor='white'
,attrsDctLDS=attrsDctLDS
,plotLPRate=True
,plotR2FillSeg=True
,plotR2FillDruck=True
,plotAC=True
,plotACCLimits=True
,highlightAreas=True
,Seg_Highlight_Color='cyan'
,Seg_Highlight_Alpha=.1
,Seg_Highlight_Fct=lambda row: True if row['STAT_S']==101 else False
,Seg_HighlightError_Color='peru'
,Seg_Highlight_Alpha_Error=.3
,Seg_HighlightError_Fct=lambda row: True if row['STAT_S']==601 else False
,Druck_Highlight_Color='cyan'
,Druck_Highlight_Alpha=.1
,Druck_Highlight_Fct=lambda row: True if row['STAT_S']==101 else False
,Druck_HighlightError_Color='peru'
,Druck_Highlight_Alpha_Error=.3
,Druck_HighlightError_Fct=lambda row: True if row['STAT_S']==601 else False
,plotTV=True
,plotTVTimerFct=None
,plotTVAmFct=lambda x: x*100
,plotTVAmLabel=plotTVAmLabelD
,ylimTV=ylimTVD
,yticksTV=yticksTVD
,plotDPDT=True
,plotSB_S=True
):
"""
Plots time curves of App LDS result vectors on ax.
Returns: axes (Dct of the axes), yLines (Dct of the lines)
Dct of the axes: 'A': alarm etc.; 'R': m3/h; 'a': ACC; 'TV': timer and leak volume
#! gaps (not plotted) where no times are available
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
axes={}
yLines={}
try:
if dfSegReprVec.empty and dfDruckReprVec.empty:
logger.error("{0:s}{1:s}".format(logStr,'dfSegReprVec UND dfDruckReprVec leer?! Return.'))
return
if not dfSegReprVec.empty:
# drop rows that are entirely NaN
dfSegReprVec=dfSegReprVec[~dfSegReprVec.isnull().all(axis=1)]
# drop duplicate indices (keep the last occurrence)
dfSegReprVec=dfSegReprVec[~dfSegReprVec.index.duplicated(keep='last')] # dfSegReprVec.groupby(dfSegReprVec.index).last() # df[~df.index.duplicated(keep='last')]
if not dfDruckReprVec.empty:
# drop rows that are entirely NaN
dfDruckReprVec=dfDruckReprVec[~dfDruckReprVec.isnull().all(axis=1)]
# drop duplicate indices (keep the last occurrence)
dfDruckReprVec=dfDruckReprVec[~dfDruckReprVec.index.duplicated(keep='last')] # dfDruckReprVec.groupby(dfDruckReprVec.index).last() # df[~df.index.duplicated(keep='last')]
if ax==None:
ax=plt.gcf().gca()
axes['A']=ax
# x-Achse ----------------
if xlim == None:
if not dfSegReprVec.empty:
xlimMin=dfSegReprVec.index[0]
xlimMax=dfSegReprVec.index[-1]
elif not dfDruckReprVec.empty:
xlimMin=dfDruckReprVec.index[0]
xlimMax=dfDruckReprVec.index[-1]
xlim=(xlimMin,xlimMax)
(xlimMin,xlimMax)=xlim
ax.set_xlim(xlim)
logger.debug("{0:s}bysecond: {1:s}".format(logStr,str(bysecond)))
logger.debug("{0:s}byminute: {1:s}".format(logStr,str(byminute)))
logger.debug("{0:s}byhour: {1:s}".format(logStr,str(byhour)))
logger.debug("{0:s}dateFormat: {1:s}".format(logStr,dateFormat))
pltHelperX(
ax
,dateFormat=dateFormat
,bysecond=bysecond
,byminute=byminute
,byhour=byhour
,yPos=yTwinedAxesPosDeltaHPStart
)
# 1. Achse Alarm -----------------------
if not dfSegReprVec.empty and highlightAreas:
tPairs=findAllTimeIntervalls(dfSegReprVec,Seg_Highlight_Fct)
for t1,t2 in tPairs:
ax.axvspan(t1, t2, alpha=Seg_Highlight_Alpha, color=Seg_Highlight_Color)
tPairs=findAllTimeIntervalls(dfSegReprVec,Seg_HighlightError_Fct)
for t1,t2 in tPairs:
ax.axvspan(t1, t2, alpha=Seg_Highlight_Alpha_Error, color=Seg_HighlightError_Color)
if not dfDruckReprVec.empty and highlightAreas:
tPairs=findAllTimeIntervalls(dfDruckReprVec,Druck_Highlight_Fct)
for t1,t2 in tPairs:
ax.axvspan(t1, t2, alpha=Druck_Highlight_Alpha, color=Druck_Highlight_Color)
tPairs=findAllTimeIntervalls(dfDruckReprVec,Druck_HighlightError_Fct)
for t1,t2 in tPairs:
ax.axvspan(t1, t2, alpha=Druck_Highlight_Alpha_Error, color=Druck_HighlightError_Color)
if not dfSegReprVec.empty:
lines = pltLDSErgVecHelper(ax,dfSegReprVec,'AL_S',attrsDctLDS['Seg_AL_S_Attrs'])
yLines['AL_S Seg']=lines[0]
if not dfDruckReprVec.empty:
lines = pltLDSErgVecHelper(ax,dfDruckReprVec,'AL_S',attrsDctLDS['Druck_AL_S_Attrs'])
yLines['AL_S Drk']=lines[0]
if not dfSegReprVec.empty and plotSB_S:
lines = pltLDSErgVecHelper(ax,dfSegReprVec,'SB_S',attrsDctLDS['Seg_SB_S_Attrs'],fct=lambda x: x*10)
yLines['SB_S Seg']=lines[0]
if not dfDruckReprVec.empty and plotSB_S:
lines = pltLDSErgVecHelper(ax,dfDruckReprVec,'SB_S',attrsDctLDS['Druck_SB_S_Attrs'],fct=lambda x: x*10)
yLines['SB_S Drk']=lines[0]
ax.set_ylim(ylimAL)
ax.set_yticks(yticksAL)
ax.grid()
ax.set_zorder(10)
ax.patch.set_visible(False)
ax.set_ylabel('A [0/10/20] u. 10x B [0/1/2/3/4]')
        # 2nd y-axis (m3/h) ----------------------------------------
ax2 = ax.twinx()
axes['R']=ax2
pltHelperX(
ax2
,dateFormat=dateFormat
,bysecond=bysecond
,byminute=byminute
,byhour=byhour
,yPos=yTwinedAxesPosDeltaHPStart+yTwinedAxesPosDeltaHP
)
pltLDSHelperY(ax2)
if not dfSegReprVec.empty:
lines = pltLDSErgVecHelper(ax2,dfSegReprVec,'MZ_AV',attrsDctLDS['Seg_MZ_AV_Attrs'])
yLines['MZ_AV (R1) Seg']=lines[0]
lines = pltLDSErgVecHelper(ax2,dfSegReprVec,'LR_AV',attrsDctLDS['Seg_LR_AV_Attrs'])
yLines['LR_AV (R2) Seg']=lines[0]
lines = pltLDSErgVecHelper(ax2,dfSegReprVec,'NG_AV',attrsDctLDS['Seg_NG_AV_Attrs'])
yLines['NG_AV Seg']=lines[0]
lines = pltLDSErgVecHelper(ax2,dfSegReprVec,'QM_AV',attrsDctLDS['Seg_QM16_AV_Attrs'],fct=lambda x: x*1.6/100.)
yLines['QM16_AV Seg']=lines[0]
if plotLPRate:
# R2 = R1 - LP
# R2 - R1 = -LP
# LP = R1 - R2
lines = pltLDSErgVecHelper(ax2,dfSegReprVec,'LP_AV',attrsDctLDS['Seg_LP_AV_Attrs'])
yLines['LP_AV Seg']=lines[0]
if plotR2FillSeg:
df=dfSegReprVec
df=df.reindex(pd.date_range(start=df.index[0], end=df.index[-1], freq='1s'))
df=df.fillna(method='ffill').fillna(method='bfill')
                    # R2 below 0
dummy=ax2.fill_between(df.index, df['LR_AV'],0
,where=df['LR_AV']<0,color='grey',alpha=.2)
                    # between R2 and 0
dummy=ax2.fill_between(df.index, 0, df['LR_AV']
,where=df['LR_AV']>0
#,color='yellow',alpha=.1
,color='red',alpha=.1
)
                    # R2 above 0 but below NG
dummy=ax2.fill_between(df.index, df['LR_AV'], df['NG_AV']
,where=(df['LR_AV']>0) & (df['LR_AV']<df['NG_AV'])
#,color='red',alpha=.1
,color='yellow',alpha=.1
)
                    # R2 above NG
dummy=ax2.fill_between(df.index, df['LR_AV'], df['NG_AV']
,where=df['LR_AV']>df['NG_AV']
,color='red',alpha=.2)
if not dfDruckReprVec.empty:
lines = pltLDSErgVecHelper(ax2,dfDruckReprVec,'MZ_AV',attrsDctLDS['Druck_MZ_AV_Attrs'])
yLines['MZ_AV (R1) Drk']=lines[0]
lines = pltLDSErgVecHelper(ax2,dfDruckReprVec,'LR_AV',attrsDctLDS['Druck_LR_AV_Attrs'])
yLines['LR_AV (R2) Drk']=lines[0]
lines = pltLDSErgVecHelper(ax2,dfDruckReprVec,'NG_AV',attrsDctLDS['Druck_NG_AV_Attrs'])
yLines['NG_AV Drk']=lines[0]
if plotLPRate:
lines = pltLDSErgVecHelper(ax2,dfDruckReprVec,'LP_AV',attrsDctLDS['Druck_LP_AV_Attrs'])
yLines['LP_AV Drk']=lines[0]
lines = pltLDSErgVecHelper(ax2,dfDruckReprVec,'QM_AV',attrsDctLDS['Druck_QM16_AV_Attrs'],fct=lambda x: x*1.6/100.)
yLines['QM16_AV Drk']=lines[0]
if plotR2FillDruck:
df=dfDruckReprVec
df=df.reindex(
|
pd.date_range(start=df.index[0], end=df.index[-1], freq='1s')
|
pandas.date_range
|
from collections import Counter
from datetime import datetime
import re
from typing import Iterator, List, Optional, Tuple, Union
from dateutil.relativedelta import relativedelta
import pandas as pd
from .utils import get_config
INCREMENT_TO_DAY_MAP = {
"month": 30.5,
"day": 7,
}
def now() -> str:
"""String ISO Timestamp of current date"""
return datetime.now().strftime("%Y-%m-%d")
class BudgetPeriod:
"""A helper class for dealing with dates and stuff for budgeting periods"""
def __init__(self, period: str = "month", relative_to: str = f"{now()[:4]}-01-01"):
"""
Parameters
----------
period
The string representation of this period. Can take the form:
[n_](week|month), or all_time
relative_to
The datetime (as a string) to start counting from when determining periods.
Defaults to the beginning of the current year
"""
if relative_to > now():
raise ValueError("relative_to must not be in the future")
self.period = period
self.relative_to = relative_to
if self.period in ["month", "week", "quarter"]:
self.increment = self.period
self.unit = 1
elif re.search(r"\d_.*", self.period):
self.increment = self.period[2:]
self.unit = int(self.period[0])
else:
raise ValueError(f"Unrecognized Period Format {self.period}")
if self.increment == "quarter":
self.increment = "month"
self.unit *= 3
def latest(self) -> str:
"""The last occurrence of this period (i.e. May 1st if period = month and today is May 15th)"""
for start_date, _ in self.bounds_iter():
pass
return start_date
def next(self) -> str:
"""The next occurrence of this period (i.e. June 1st if period = month and today is May 15th)"""
for _, end_date in self.bounds_iter():
pass
return end_date
def perc_complete(self) -> float:
"""The proportion of time through the current period"""
latest_date = datetime.strptime(self.latest(), "%Y-%m-%d")
next_date = datetime.strptime(self.next(), "%Y-%m-%d")
return (datetime.now() - latest_date).days * 1.0 / (next_date - latest_date).days
def bounds_iter(self) -> Iterator[Tuple[str, str]]:
"""An iterator that yields start & end date tuples since self.relative_to"""
start_date = datetime.strptime(self.relative_to, "%Y-%m-%d")
now_date = datetime.strptime(now(), "%Y-%m-%d")
assert start_date < now_date
while start_date < now_date:
end_date = start_date + relativedelta(**{
# Should only be "month" or "week" at this point
f"{self.increment}s": self.unit
})
yield start_date.strftime("%Y-%m-%d"), end_date.strftime("%Y-%m-%d")
start_date = end_date
def translation_multiplier(self, other_period: "BudgetPeriod") -> float:
"""Get a multiplier to indicate how to transform this period to reflect the length of another"""
# Simple case
if other_period.increment == self.increment:
return other_period.unit * 1.0 / self.unit
return (
(
other_period.unit * INCREMENT_TO_DAY_MAP[other_period.increment]
) * 1.0 / (
self.unit * INCREMENT_TO_DAY_MAP[self.increment]
)
)
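# Illustrative note (added, not part of the original module): period strings are parsed
# from their first character, so only single-digit multiples such as "2_week" or "3_month"
# are supported. With the day map above ("month" ~ 30.5 days, "week" = 7 days):
#   BudgetPeriod("2_week")  -> increment "week",  unit 2
#   BudgetPeriod("quarter") -> increment "month", unit 3
#   BudgetPeriod("2_week").translation_multiplier(BudgetPeriod("month"))
#       == (1 * 30.5) / (2 * 7) ~= 2.18, i.e. a two-week limit scaled to a month is multiplied by ~2.18.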
class BudgetItem:
"""A single budget item (category, period, TBD)"""
def __init__(self, category: str, limit: float, period: Optional[Union[BudgetPeriod, str]]):
"""
Parameters
----------
category
Name of the category for this item
limit
Budget amount for this category (over the specified period)
period
Period that this budget item applies to
"""
self.category = category
self.limit = limit
if type(period) is str:
period = BudgetPeriod(period)
self.period = period
def period_limit(self, period: BudgetPeriod) -> float:
"""Gets the translated budget amount for the period"""
if self.period is None:
return self.limit
return self.limit * self.period.translation_multiplier(period)
def on_track(self, amount_spent: float) -> bool:
"""Whether we are on track for the current period given the spending amount"""
return self.under_projected_budget(amount_spent) >= 0
def under_projected_budget(self, amount_spent: float) -> float:
"""The amount under budget we are currently"""
if self.period is None:
return self.limit - amount_spent
return self.period.perc_complete() * self.limit - amount_spent
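# Worked example (added for clarity, not in the original): for a BudgetItem with a monthly
# limit of 300 and 40% of the month elapsed, the projected budget so far is 0.4 * 300 = 120;
# after spending 100 the item is 20 under projection and on_track() returns True, while
# after spending 150 it is 30 over (-30), i.e. spending is running ahead of the budget.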
class TotalBudgetItem(BudgetItem):
def __init__(self, limit: float, period: BudgetPeriod):
super().__init__("total", limit, period)
if self.period is None:
raise ValueError("period cannot be None for TotalBudgetItem")
class BudgetPlan:
"""A plan with overall and categorical spending limits"""
def __init__(
self,
total_budget_item: Optional[TotalBudgetItem] = None,
category_budget_items: Optional[List[BudgetItem]] = None,
):
"""
Parameters
----------
total_budget_item
The budget item for the total budget (if left empty, will use the sum of category budgets)
category_budget_items
The budgets for each category
"""
if total_budget_item is None and category_budget_items is None:
raise ValueError("Must specify one of total_budget_item or category_budget_items")
if category_budget_items is None:
self.category_budgets = {}
else:
self.category_budgets = {
budget_item.category: budget_item
for budget_item in category_budget_items
}
if total_budget_item is None:
# Using most common period from categories for default period
counter = Counter()
counter.update([
budget_item.period.period for budget_item in self.category_budgets.values()
])
default_period = BudgetPeriod(counter.most_common(1)[0][0])
total_budget_item = TotalBudgetItem(
sum([
budget_item.period_limit(default_period)
for budget_item in self.category_budgets.values()
]),
default_period,
)
self.total_budget_item = total_budget_item
@classmethod
def from_config(cls, config: dict) -> "BudgetPlan":
"""Helper to build a plan from the config.json subsection
Parameters
----------
config
A dict of the form:
{
total: float,
period: {
default: period_str,
category_1: period_str,
...
},
categories: {
category_1: float,
category_2: float,
...
}
}
where period_str take the form of:
[n_](week|month), or all_time
"""
default_period = config.get("period", {}).get("default", "month")
if config.get("total"):
total_budget_item = TotalBudgetItem(config["total"], default_period)
else:
total_budget_item = None
if config.get("categories"):
category_budget_items = [
BudgetItem(
category,
limit,
config.get("period", {}).get(category, default_period)
)
for category, limit in config["categories"].items()
]
else:
category_budget_items = None
return cls(
total_budget_item=total_budget_item,
category_budget_items=category_budget_items
)
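# Example of the expected config shape (hypothetical values, added for illustration):
#   BudgetPlan.from_config({
#       "total": 1500,
#       "period": {"default": "month", "groceries": "2_week"},
#       "categories": {"groceries": 400, "dining": 150},
#   })
# builds a monthly TotalBudgetItem of 1500 plus one BudgetItem per category, with the
# "groceries" item tracked over two-week periods.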
# TODO: Use budgets for "you met X% of your goals over the last year" or something like that
class Budget:
"""A budget set by via config.json with overall and category spending limits"""
def __init__(self, transactions_df: pd.DataFrame):
self.transactions_df = transactions_df.copy()
config = get_config()["settings"].get("budget")
if config is not None:
self.budget_plan = BudgetPlan.from_config(config)
else:
self.budget_plan = None
def current_summary(self):
"""The breakdown of spending for the current period"""
summary = {
category: self._current_budget_item_summary(budget_item)
for category, budget_item in self.budget_plan.category_budgets.items()
}
summary["overall"] = self._current_budget_item_summary(self.budget_plan.total_budget_item)
return summary
def _current_budget_item_summary(self, budget_item: BudgetItem):
"""TODO"""
if budget_item.category == "total":
df = self.transactions_df
else:
df = self.transactions_df[self.transactions_df["category_1"] == budget_item.category]
if budget_item.period is not None:
df = df[df["date"] >= budget_item.period.latest()]
spending = df["amount"].sum()
return {
"category": budget_item.category,
"spending": spending,
"budget": budget_item.limit,
"over_budget": spending > budget_item.limit,
"under_projection_amount": budget_item.under_projected_budget(spending),
}
def simple_summary(self, date_inc: str, period: str) -> pd.DataFrame:
"""Returns a simple summary for a single period
Mainly used to generate a bar chart with ticks for the budget and projected spending amounts
Parameters
----------
date_inc
One of month, week, year
period
A single year, month, or week to drill into
"""
budget_period = BudgetPeriod(date_inc)
curr_df = self.transactions_df[self.transactions_df[date_inc] == period]
summary = {
"category": [],
"spent": [],
"total_budget": [],
"projected_budget": [],
}
for category, cat_budget in self.budget_plan.category_budgets.items():
limit = cat_budget.period_limit(budget_period)
projected_limit = budget_period.perc_complete() * limit
cat_spending = curr_df[curr_df["category_1"] == category]["amount"].sum()
summary["category"].append(category)
summary["spent"].append(cat_spending)
summary["total_budget"].append(limit)
summary["projected_budget"].append(projected_limit)
# Including "Other" Category
other_limit = self.total_limit(date_inc) - sum(summary["total_budget"])
summary["category"].append("Other")
summary["spent"].append(curr_df["amount"].sum() - sum(summary["spent"]))
summary["total_budget"].append(other_limit)
summary["projected_budget"].append(budget_period.perc_complete() * other_limit)
return
|
pd.DataFrame(summary)
|
pandas.DataFrame
|
"""
MicroGridsPy - Multi-year capacity-expansion (MYCE)
Linear Programming framework for microgrids least-cost sizing,
able to account for time-variable load demand evolution and capacity expansion.
Authors:
<NAME> - Department of Energy, Politecnico di Milano
<NAME> - Department of Energy, Politecnico di Milano
<NAME> - Department of Energy, Politecnico di Milano / Fondazione Eni Enrico Mattei
<NAME> - Department of Energy, Politecnico di Milano
<NAME> - Department of Energy, Politecnico di Milano
Based on the original model by:
<NAME> - Department of Mechanical and Aerospace Engineering, University of Liège / San Simon University, Centro Universitario de Investigacion en Energia
<NAME> - Department of Mechanical Engineering Technology, KU Leuven
"""
import pandas as pd
import re
#%% This section extracts the values of Scenarios, Periods, Years from data.dat and creates ranges for them
Data_file = "Inputs/data.dat"
Data_import = open(Data_file).readlines()
for i in range(len(Data_import)):
if "param: Scenarios" in Data_import[i]:
n_scenarios = int((re.findall('\d+',Data_import[i])[0]))
if "param: Years" in Data_import[i]:
n_years = int((re.findall('\d+',Data_import[i])[0]))
if "param: Periods" in Data_import[i]:
n_periods = int((re.findall('\d+',Data_import[i])[0]))
if "param: Generator_Types" in Data_import[i]:
n_generators = int((re.findall('\d+',Data_import[i])[0]))
scenario = [i for i in range(1,n_scenarios+1)]
year = [i for i in range(1,n_years+1)]
period = [i for i in range(1,n_periods+1)]
generator = [i for i in range(1,n_generators+1)]
#%% This section is useful to define the number of investment steps as well as to assign each year to its corresponding step
def Initialize_Upgrades_Number(model):
Data_file = "Inputs/data.dat"
Data_import = open(Data_file).readlines()
for i in range(len(Data_import)):
if "param: Years" in Data_import[i]:
n_years = int((re.findall('\d+',Data_import[i])[0]))
if "param: Step_Duration" in Data_import[i]:
step_duration = int((re.findall('\d+',Data_import[i])[0]))
if "param: Min_Last_Step_Duration" in Data_import[i]:
min_last_step_duration = int((re.findall('\d+',Data_import[i])[0]))
if n_years % step_duration == 0:
        n_upgrades = n_years//step_duration
return n_upgrades
else:
n_upgrades = 1
for y in range(1, n_years + 1):
if y % step_duration == 0 and n_years - y > min_last_step_duration:
n_upgrades += 1
return int(n_upgrades)
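# Worked example (added for clarity, not part of the original script): with Years = 20 and
# Step_Duration = 5, 20 % 5 == 0 and the function returns 20 / 5 = 4 investment steps.
# With Years = 20, Step_Duration = 7 and Min_Last_Step_Duration = 3, only the multiples of 7
# leaving more than 3 remaining years add a step (y = 7 and y = 14), giving 1 + 2 = 3 steps,
# the last of which covers the final 6 years.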
def Initialize_YearUpgrade_Tuples(model):
upgrade_years_list = [1 for i in range(len(model.steps))]
s_dur = model.Step_Duration
for i in range(1, len(model.steps)):
upgrade_years_list[i] = upgrade_years_list[i-1] + s_dur
yu_tuples_list = [0 for i in model.years]
if model.Steps_Number == 1:
for y in model.years:
yu_tuples_list[y-1] = (y, 1)
else:
for y in model.years:
for i in range(len(upgrade_years_list)-1):
if y >= upgrade_years_list[i] and y < upgrade_years_list[i+1]:
yu_tuples_list[y-1] = (y, model.steps[i+1])
elif y >= upgrade_years_list[-1]:
yu_tuples_list[y-1] = (y, len(model.steps))
print('\nTime horizon (year,investment-step): ' + str(yu_tuples_list))
return yu_tuples_list
#%% This section imports the multi-year Demand and Renewable-Energy output and creates a Multi-indexed DataFrame for it
Demand = pd.read_excel('Inputs/Demand.xls')
Energy_Demand_Series = pd.Series()
for i in range(1,n_years*n_scenarios+1):
dum = Demand[i][:]
Energy_Demand_Series = pd.concat([Energy_Demand_Series,dum])
Energy_Demand = pd.DataFrame(Energy_Demand_Series)
frame = [scenario,year,period]
index = pd.MultiIndex.from_product(frame, names=['scenario','year','period'])
Energy_Demand.index = index
Energy_Demand_2 = pd.DataFrame()
for s in scenario:
Energy_Demand_Series_2 = pd.Series()
for y in year:
dum_2 = Demand[(s-1)*n_years + y][:]
Energy_Demand_Series_2 = pd.concat([Energy_Demand_Series_2,dum_2])
Energy_Demand_2.loc[:,s] = Energy_Demand_Series_2
index_2 = pd.RangeIndex(1,n_years*n_periods+1)
Energy_Demand_2.index = index_2
def Initialize_Demand(model, s, y, t):
return float(Energy_Demand[0][(s,y,t)])
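# Note (added): Energy_Demand has a single unnamed column (label 0) indexed by the
# MultiIndex (scenario, year, period), so Energy_Demand[0][(s, y, t)] looks up the demand
# of scenario s, year y, period t.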
Renewable_Energy =
|
pd.read_excel('Inputs/Renewable_Energy.xls')
|
pandas.read_excel
|
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 6 22:03:26 2018
"""
#### 0. Loading libraries, setting working directory
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os
import gc
import timeit
from functools import reduce
from itertools import chain
from datetime import datetime
from statsmodels.tsa.stattools import adfuller, grangercausalitytests
from sklearn import linear_model
from scipy import stats
# Loading custom functions
os.chdir(r'F:\Damian\github\HN_SO_analysis\HN_SO_analysis\codes')
from hn_plots import hn_plots, todays_date
from diff_nonstationary import diff_nonstationary
from useful import repeated # Useful function from the Web
from grangercausalitytests_mod import grangercausalitytests_mod
from calc_granger_causality import calc_granger_causality
from sel_data_min_date import sel_data_min_date
os.chdir(r'F:\Damian\github\HN_SO_analysis\HN_SO_analysis')
### 1. Stack Overflow data
stack_data1 = pd.read_csv('.\\stack_data\\tags_per_day_1_20180325.csv')
stack_data2 = pd.read_csv('.\\stack_data\\tags_per_day_2_20180306.csv')
stack_data3 = pd.read_csv('.\\stack_data\\tags_per_day_3_20180306.csv')
stack_data4 = pd.read_csv(
'.\\stack_data\\tags_per_day_4_d3js_tensorflow_20180403.csv')
stack_data = pd.concat([stack_data1, stack_data2, stack_data3, stack_data4])
stack_data['tags'] = stack_data['tags'].str.replace('<', '').str.replace('>', '')
stack_data['post_date'] = pd.to_datetime(stack_data['post_date'])
stack_data.loc[stack_data['tags'] == 'd3js'].describe()
del stack_data1, stack_data2, stack_data3, stack_data4
stack_data.tags.replace('apache-spark', 'spark', inplace = True)
stack_data.tags.replace('d3.js', 'd3js', inplace = True)
stack_data = stack_data.rename(columns = {'score_sum': 'so_score_sum',
'views': 'so_views',
'answers': 'so_answers',
'favorites': 'so_favorites',
'comments': 'so_comments',
'usage_cnt': 'so_usage_cnt'})
### 2. Kaggle data
kaggle_data_raw =
|
pd.read_csv('.\\kaggle_data\\kaggle_data_20180414_1358.csv')
|
pandas.read_csv
|
# -*- coding: utf-8 -*-
"""
Created on Mon May 30 06:44:09 2016
@author: subhajit
"""
import pandas as pd
import datetime
from sklearn import cross_validation
import xgboost as xgb
import numpy as np
import h5py
import os
os.chdir(r'D:\Data Science Competitions\Kaggle\Expedia Hotel Recommendations\codes')
def map5eval(preds, dtrain):
actual = dtrain.get_label()
    predicted = preds.argsort(axis=1)[:, -np.arange(1, 6)]
metric = 0.
for i in range(5):
metric += np.sum(actual==predicted[:,i])/(i+1)
metric /= actual.shape[0]
return 'MAP@5', -metric
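# Note (added): preds holds one score column per hotel_cluster; argsort(axis=1)[:, -np.arange(1, 6)]
# keeps the five highest-scoring clusters in descending order, and each hit at rank i is
# weighted by 1/(i+1) to approximate MAP@5. The value is returned negated so that a smaller
# score means a better MAP@5, matching xgboost's default "lower is better" convention.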
def pre_process(data):
try:
data.loc[data.srch_ci.str.endswith('00'),'srch_ci'] = '2015-12-31'
data['srch_ci'] = data.srch_ci.astype(np.datetime64)
data.loc[data.date_time.str.endswith('00'),'date_time'] = '2015-12-31'
data['date_time'] = data.date_time.astype(np.datetime64)
except:
pass
data.fillna(0, inplace=True)
data['srch_duration'] = data.srch_co-data.srch_ci
data['srch_duration'] = data['srch_duration'].apply(lambda td: td/np.timedelta64(1, 'D'))
data['time_to_ci'] = data.srch_ci-data.date_time
data['time_to_ci'] = data['time_to_ci'].apply(lambda td: td/np.timedelta64(1, 'D'))
data['ci_month'] = data['srch_ci'].apply(lambda dt: dt.month)
data['ci_day'] = data['srch_ci'].apply(lambda dt: dt.day)
#data['ci_year'] = data['srch_ci'].apply(lambda dt: dt.year)
data['bk_month'] = data['date_time'].apply(lambda dt: dt.month)
data['bk_day'] = data['date_time'].apply(lambda dt: dt.day)
#data['bk_year'] = data['date_time'].apply(lambda dt: dt.year)
data['bk_hour'] = data['date_time'].apply(lambda dt: dt.hour)
data.drop(['date_time', 'user_id', 'srch_ci', 'srch_co'], axis=1, inplace=True)
if os.path.exists('../output/srch_dest_hc_hm_agg.csv'):
agg1 = pd.read_csv('../output/srch_dest_hc_hm_agg.csv')
else:
reader = pd.read_csv('../input/train.csv', parse_dates=['date_time', 'srch_ci', 'srch_co'], chunksize=200000)
pieces = [chunk.groupby(['srch_destination_id','hotel_country','hotel_market','hotel_cluster'])['is_booking'].agg(['sum','count']) for chunk in reader]
agg = pd.concat(pieces).groupby(level=[0,1,2,3]).sum()
del pieces
agg.dropna(inplace=True)
agg['sum_and_cnt'] = 0.85*agg['sum'] + 0.15*agg['count']
agg = agg.groupby(level=[0,1,2]).apply(lambda x: x.astype(float)/x.sum())
agg.reset_index(inplace=True)
agg1 = agg.pivot_table(index=['srch_destination_id','hotel_country','hotel_market'], columns='hotel_cluster', values='sum_and_cnt').reset_index()
agg1.to_csv('../output/srch_dest_hc_hm_agg.csv', index=False)
del agg
destinations = pd.read_csv('../input/destinations.csv')
submission = pd.read_csv('../input/sample_submission.csv')
clf = xgb.XGBClassifier(#missing=9999999999,
objective = 'multi:softmax',
max_depth = 5,
n_estimators=300,
learning_rate=0.01,
nthread=4,
subsample=0.7,
colsample_bytree=0.7,
min_child_weight = 3,
#scale_pos_weight = ratio,
#reg_alpha=0.03,
#seed=1301,
silent=False)
if os.path.exists('rows_complete.txt'):
with open('rows_complete.txt', 'r') as f:
skipsize = int(f.readline())
else:
skipsize = 0
skip = 0 if skipsize==0 else range(1, skipsize)
tchunksize = 1000000
print('%d rows will be skipped and next %d rows will be used for training' % (skipsize, tchunksize))
train = pd.read_csv('../input/train.csv', parse_dates=['date_time', 'srch_ci', 'srch_co'], skiprows=skip, nrows=tchunksize)
train = train[train.is_booking==1]
train = pd.merge(train, destinations, how='left', on='srch_destination_id')
train =
|
pd.merge(train, agg1, how='left', on=['srch_destination_id','hotel_country','hotel_market'])
|
pandas.merge
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
##-------- [PPC] Jobshop Scheduling ---------
# * Author: <NAME>
# * Date: Apr 30th, 2020
# * Description:
# Using the event-driven scheduling method
# to solve the JSS prob. Here is a sample
# code with the style of OOP. Feel free to
# modify it as you like.
##--------------------------------------------
#
import os
import numpy as np
import pandas as pd
from gantt_plot import Gantt
infinity = float('inf')
#entity
class Order:
def __init__(self, ID, AT, DD, routing, PT):
self.ID = ID
self.AT = AT #AT: arrival time
self.DD = DD #DD: due date
self.PT = PT #PT: processing time
self.routing = routing
self.progress = 0
#resource in factory
class Source:
def __init__(self, order_info):
self.order_info = order_info
self.output = 0
def arrival_event(self, fac):
order_num = self.order_info.shape[0] #num of total orders
#generate and release the order
ID = self.order_info.loc[self.output, "ID"]
routing = self.order_info.loc[self.output, "routing"].split(',')
PT = [int(i) for i in self.order_info.loc[self.output, "process_time"].split(',')]
DD = self.order_info.loc[self.output, "due_date"]
AT = T_NOW
order = Order(ID, AT, DD, routing, PT)
if LOG == True:
print("{} : order {} release.".format(T_NOW, order.ID))
self.output += 1
#update the future event list - next order arrival event
if self.output < order_num:
fac.event_lst.loc["Arrival"]["time"] = self.order_info.loc[self.output, "arrival_time"]
else:
fac.event_lst.loc['Arrival']['time'] = infinity
#send order to correlated station
target = order.routing[order.progress]
machine = fac.machines[target]
machine.buffer.append(order)
#update the future event list - dispatch machines to process the jobs
if machine.state == 'idle':
fac.event_lst.loc["dispatching"]['time'] = T_NOW
class Machine:
def __init__(self, ID, DP_rule):
self.ID = ID
self.state = 'idle'
self.buffer = []
self.wspace = [] #wspace: working space
self.DP_rule = DP_rule
def start_processing(self, fac):
#check state
if self.state == 'idle':
#get a new order from buffer by DP_rule
if len(self.buffer) > 0:
if self.DP_rule == "FIFO":
order = self.buffer[0]
elif self.DP_rule == "EDD":
idx = np.argmin([j.DD for j in self.buffer])
order = self.buffer[idx]
elif self.DP_rule == "SPT":
idx = np.argmin([j.PT[j.progress] for j in self.buffer])
order = self.buffer[idx]
#remove order from buffer
self.buffer.remove(order)
#start processing the order
self.wspace.append(order)
self.state = 'busy'
processing_time = order.PT[order.progress]
                #[Gantt plot preparing] update the start/finish processing time of machine
fac.gantt_plot.update_gantt(self.ID, T_NOW, processing_time, order.ID)
if LOG == True:
print("{} : machine {} start processing order {} - {} progress".format(T_NOW, self.ID, order.ID, order.progress))
#update the future event list - job complete event
fac.event_lst.loc["{}_complete".format(self.ID)]['time'] = T_NOW + processing_time
order.progress += 1
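    # Dispatching rules used above (comment added for clarity): FIFO takes the oldest order
    # in the buffer, EDD the one with the earliest due date (min DD), and SPT the one whose
    # next operation (PT[progress]) has the shortest processing time.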
def end_process_event(self, fac):
order = self.wspace[0]
if LOG == True:
print("{} : machine {} complete order {} - {} progress".format(T_NOW, self.ID, order.ID, order.progress))
self.wspace.remove(order)
self.state = 'idle'
#send the processed order to next place
if order.progress >= len(order.routing):
#update factory statistic
fac.throughput += 1
#update order statistic
fac.update_order_statistic(order)
else:
#send the order to next station
target = order.routing[order.progress]
next_machine = fac.machines[target]
next_machine.buffer.append(order)
#update the future event list - wait for the dispatching to get a new job
fac.event_lst.loc["dispatching"]['time'] = T_NOW
fac.event_lst.loc["{}_complete".format(self.ID)]["time"] = infinity
class Factory:
def __init__(self, order_info, DP_rule):
self.order_info = order_info
self.DP_rule = DP_rule
self.event_lst = pd.DataFrame(columns=["event_type", "time"])
#[Plug in] tool of gantt plotting
self.gantt_plot = Gantt()
#statistics
self.throughput = 0
self.order_statistic =
|
pd.DataFrame(columns = ["ID", "release_time", "complete_time", "due_date", "flow_time", "tardiness", "lateness"])
|
pandas.DataFrame
|
'''
results = StrategyTestLowHigh.objects.filter(
    strategy_code=strategy, ts_code=ts_code, test_period=test_period).order_by('trade_date')
1. Get the trade_date of the backtest key points (make it index?)
2. Depending on the filter type, market index or individual stock (create the index history table - done)
3. Build the filter from the given filter conditions
4. Filter the dataframe (add the fields vol, amount, ma25, ma60, ma200 to low high / expected pct - )
   e.g. df[df['ma25_slope']>0], df[df['ma60_slope']>0], df[df['ma200_slope']>0], df[df['vol']>?]
5. Get the filtered trade_date
6. Use the trade_date from step 5 to filter low high or expected pct
'''
import pandas as pd
from analysis.models import StockIndexHistory,StrategyTestLowHigh, BStrategyOnFixedPctTest
from .utils import get_market_code
def build_condition(filter_params=[]):
'''
return string filter connected by &
'''
# column = cond_list[0] #['vol', 'ma25_slope', 'ma60_slope']
# ops = cond_list[1] #['>', '>', '<']
# condition = cond_list[2] # [4, 0, '0']
return ' & '.join(filter_params)
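# Example (added for illustration):
#   build_condition(['ma25_slope > 0', 'vol > 25670'])  ->  'ma25_slope > 0 & vol > 25670'
# The resulting string is passed to DataFrame.query() in the filters below.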
def pct_on_period_filter(ts_code, trade_date_list, filter_params=[]):
'''
filter_param_list = {'ma25':'1','ma60':'0','ma200':'1','vol':'25670'}
    Index (market)
index trade_date ma25_slope ma60_slope ma200_slope vol
0 20200101 0.23 0.1 -0.123 234244
1 20200219 0.23 0.1 -0.123 234244
    Individual stock
index trade_date ma25_slope ma60_slope ma200_slope vol
0 20200101 0.23 0.1 -0.123 234244
1 20200219 0.23 0.1 -0.123 234244
e.g.
limits_dic = {"A" : 0, "B" : 2, "C" : 0}
query = ' & '.join(['{}>{}'.format(k, v) for k, v in limits_dic.items()])
temp = df.query(query)
temp
e.g 2
df = pd.DataFrame({'gender':list('MMMFFF'),
'height':[4,5,4,5,5,4],
'age':[70,80,90,40,2,3]})
df
column = ['height', 'age', 'gender']
equal = ['>', '>', '==']
condition = [4, 20, 'M']
query = ' & '.join(f'{i} {j} {repr(k)}' for i, j, k in zip(column, equal, condition))
df.query(query)
idx1 = pd.Index([1, 2, 3, 4])
idx2 = pd.Index([3, 4, 5, 6])
idx1.intersection(idx2)
{'I': }
'''
df_index = None
df_stock = None
for key in filter_params:
if key == 'I':
if len(filter_params[key]) > 0:
results = StockIndexHistory.objects.filter(ts_code=get_market_code(ts_code), trade_date__in=trade_date_list).order_by('trade_date')
df = pd.DataFrame(results.values('trade_date','ma25_slope','ma60_slope','ma200_slope','vol','amount'))
if df is not None and len(df) > 0:
query = build_condition(filter_params[key])
df_index = df.query(query)
if key == 'E':
if len(filter_params[key]) > 0:
results = StrategyTestLowHigh.objects.filter(ts_code=ts_code, trade_date__in=trade_date_list).order_by('trade_date')
df = pd.DataFrame(results.values(
'trade_date', 'ma25_slope', 'ma60_slope', 'ma200_slope', 'vol', 'amount'))
if df is not None and len(df) > 0:
query = build_condition(filter_params[key])
df_stock = df.query(query)
if df_index is not None and df_stock is not None:
return list(set(df_index['trade_date']).intersection(set(df_stock['trade_date'])))
else:
return df_index['trade_date'].tolist() if df_index is not None else df_stock['trade_date'].tolist()
def period_on_pct_filter(trade_date_list, filter_params=[]):
df_index = None
df_stock = None
for key in filter_params:
if key == 'I':
results = StockIndexHistory.objects.filter(trade_date__in=trade_date_list).order_by('trade_date')
df = pd.DataFrame(results)
if df is not None and len(df) > 0:
query = build_condition(filter_params[key])
df_index = df.query(query)
if key == 'E':
results = BStrategyOnFixedPctTest.objects.filter(trade_date__in=trade_date_list).order_by('trade_date')
df =
|
pd.DataFrame(results)
|
pandas.DataFrame
|
import pytz
import pytest
import dateutil
import warnings
import numpy as np
from datetime import timedelta
from itertools import product
import pandas as pd
import pandas._libs.tslib as tslib
import pandas.util.testing as tm
from pandas.errors import PerformanceWarning
from pandas.core.indexes.datetimes import cdate_range
from pandas import (DatetimeIndex, PeriodIndex, Series, Timestamp, Timedelta,
date_range, TimedeltaIndex, _np_version_under1p10, Index,
datetime, Float64Index, offsets, bdate_range)
from pandas.tseries.offsets import BMonthEnd, CDay, BDay
from pandas.tests.test_base import Ops
START, END = datetime(2009, 1, 1), datetime(2010, 1, 1)
class TestDatetimeIndexOps(Ops):
tz = [None, 'UTC', 'Asia/Tokyo', 'US/Eastern', 'dateutil/Asia/Singapore',
'dateutil/US/Pacific']
def setup_method(self, method):
super(TestDatetimeIndexOps, self).setup_method(method)
mask = lambda x: (isinstance(x, DatetimeIndex) or
isinstance(x, PeriodIndex))
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = [o for o in self.objs if not mask(o)]
def test_ops_properties(self):
f = lambda x: isinstance(x, DatetimeIndex)
self.check_ops_properties(DatetimeIndex._field_ops, f)
self.check_ops_properties(DatetimeIndex._object_ops, f)
self.check_ops_properties(DatetimeIndex._bool_ops, f)
def test_ops_properties_basic(self):
# sanity check that the behavior didn't change
# GH7206
for op in ['year', 'day', 'second', 'weekday']:
pytest.raises(TypeError, lambda x: getattr(self.dt_series, op))
# attribute access should still work!
s = Series(dict(year=2000, month=1, day=10))
assert s.year == 2000
assert s.month == 1
assert s.day == 10
pytest.raises(AttributeError, lambda: s.weekday)
def test_asobject_tolist(self):
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx')
expected_list = [Timestamp('2013-01-31'),
Timestamp('2013-02-28'),
Timestamp('2013-03-31'),
Timestamp('2013-04-30')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
assert isinstance(result, Index)
assert result.dtype == object
tm.assert_index_equal(result, expected)
assert result.name == expected.name
assert idx.tolist() == expected_list
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx', tz='Asia/Tokyo')
expected_list = [Timestamp('2013-01-31', tz='Asia/Tokyo'),
Timestamp('2013-02-28', tz='Asia/Tokyo'),
Timestamp('2013-03-31', tz='Asia/Tokyo'),
Timestamp('2013-04-30', tz='Asia/Tokyo')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
assert isinstance(result, Index)
assert result.dtype == object
tm.assert_index_equal(result, expected)
assert result.name == expected.name
assert idx.tolist() == expected_list
idx = DatetimeIndex([datetime(2013, 1, 1), datetime(2013, 1, 2),
pd.NaT, datetime(2013, 1, 4)], name='idx')
expected_list = [Timestamp('2013-01-01'),
Timestamp('2013-01-02'), pd.NaT,
Timestamp('2013-01-04')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
assert isinstance(result, Index)
assert result.dtype == object
tm.assert_index_equal(result, expected)
assert result.name == expected.name
assert idx.tolist() == expected_list
def test_minmax(self):
for tz in self.tz:
# monotonic
idx1 = pd.DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], tz=tz)
assert idx1.is_monotonic
# non-monotonic
idx2 = pd.DatetimeIndex(['2011-01-01', pd.NaT, '2011-01-03',
'2011-01-02', pd.NaT], tz=tz)
assert not idx2.is_monotonic
for idx in [idx1, idx2]:
assert idx.min() == Timestamp('2011-01-01', tz=tz)
assert idx.max() == Timestamp('2011-01-03', tz=tz)
assert idx.argmin() == 0
assert idx.argmax() == 2
for op in ['min', 'max']:
# Return NaT
obj = DatetimeIndex([])
assert pd.isna(getattr(obj, op)())
obj = DatetimeIndex([pd.NaT])
assert pd.isna(getattr(obj, op)())
obj = DatetimeIndex([pd.NaT, pd.NaT, pd.NaT])
assert pd.isna(getattr(obj, op)())
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
assert np.min(dr) == Timestamp('2016-01-15 00:00:00', freq='D')
assert np.max(dr) == Timestamp('2016-01-20 00:00:00', freq='D')
errmsg = "the 'out' parameter is not supported"
tm.assert_raises_regex(ValueError, errmsg, np.min, dr, out=0)
tm.assert_raises_regex(ValueError, errmsg, np.max, dr, out=0)
assert np.argmin(dr) == 0
assert np.argmax(dr) == 5
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assert_raises_regex(
ValueError, errmsg, np.argmin, dr, out=0)
tm.assert_raises_regex(
ValueError, errmsg, np.argmax, dr, out=0)
def test_round(self):
for tz in self.tz:
rng = pd.date_range(start='2016-01-01', periods=5,
freq='30Min', tz=tz)
elt = rng[1]
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 01:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(rng.round(freq='H'), expected_rng)
assert elt.round(freq='H') == expected_elt
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with tm.assert_raises_regex(ValueError, msg):
rng.round(freq='foo')
with tm.assert_raises_regex(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assert_raises_regex(ValueError, msg, rng.round, freq='M')
tm.assert_raises_regex(ValueError, msg, elt.round, freq='M')
# GH 14440 & 15578
index = pd.DatetimeIndex(['2016-10-17 12:00:00.0015'], tz=tz)
result = index.round('ms')
expected = pd.DatetimeIndex(['2016-10-17 12:00:00.002000'], tz=tz)
tm.assert_index_equal(result, expected)
for freq in ['us', 'ns']:
tm.assert_index_equal(index, index.round(freq))
index = pd.DatetimeIndex(['2016-10-17 12:00:00.00149'], tz=tz)
result = index.round('ms')
expected = pd.DatetimeIndex(['2016-10-17 12:00:00.001000'], tz=tz)
tm.assert_index_equal(result, expected)
index = pd.DatetimeIndex(['2016-10-17 12:00:00.001501031'])
result = index.round('10ns')
expected = pd.DatetimeIndex(['2016-10-17 12:00:00.001501030'])
tm.assert_index_equal(result, expected)
with tm.assert_produces_warning():
ts = '2016-10-17 12:00:00.001501031'
pd.DatetimeIndex([ts]).round('1010ns')
def test_repeat_range(self):
rng = date_range('1/1/2000', '1/1/2001')
result = rng.repeat(5)
assert result.freq is None
assert len(result) == 5 * len(rng)
for tz in self.tz:
index = pd.date_range('2001-01-01', periods=2, freq='D', tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-02', '2001-01-02'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
assert res.freq is None
index = pd.date_range('2001-01-01', periods=2, freq='2D', tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-03', '2001-01-03'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
assert res.freq is None
index = pd.DatetimeIndex(['2001-01-01', 'NaT', '2003-01-01'],
tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01', '2001-01-01',
'NaT', 'NaT', 'NaT',
'2003-01-01', '2003-01-01', '2003-01-01'],
tz=tz)
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
assert res.freq is None
def test_repeat(self):
reps = 2
msg = "the 'axis' parameter is not supported"
for tz in self.tz:
rng = pd.date_range(start='2016-01-01', periods=2,
freq='30Min', tz=tz)
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
])
res = rng.repeat(reps)
tm.assert_index_equal(res, expected_rng)
assert res.freq is None
tm.assert_index_equal(np.repeat(rng, reps), expected_rng)
tm.assert_raises_regex(ValueError, msg, np.repeat,
rng, reps, axis=1)
def test_representation(self):
idx = []
idx.append(DatetimeIndex([], freq='D'))
idx.append(DatetimeIndex(['2011-01-01'], freq='D'))
idx.append(DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D'))
idx.append(DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'
], freq='H', tz='Asia/Tokyo'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='US/Eastern'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='UTC'))
exp = []
exp.append("""DatetimeIndex([], dtype='datetime64[ns]', freq='D')""")
exp.append("DatetimeIndex(['2011-01-01'], dtype='datetime64[ns]', "
"freq='D')")
exp.append("DatetimeIndex(['2011-01-01', '2011-01-02'], "
"dtype='datetime64[ns]', freq='D')")
exp.append("DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'], "
"dtype='datetime64[ns]', freq='D')")
exp.append("DatetimeIndex(['2011-01-01 09:00:00+09:00', "
"'2011-01-01 10:00:00+09:00', '2011-01-01 11:00:00+09:00']"
", dtype='datetime64[ns, Asia/Tokyo]', freq='H')")
exp.append("DatetimeIndex(['2011-01-01 09:00:00-05:00', "
"'2011-01-01 10:00:00-05:00', 'NaT'], "
"dtype='datetime64[ns, US/Eastern]', freq=None)")
exp.append("DatetimeIndex(['2011-01-01 09:00:00+00:00', "
"'2011-01-01 10:00:00+00:00', 'NaT'], "
"dtype='datetime64[ns, UTC]', freq=None)""")
with pd.option_context('display.width', 300):
for indx, expected in zip(idx, exp):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(indx, func)()
assert result == expected
def test_representation_to_series(self):
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'], freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
idx7 = DatetimeIndex(['2011-01-01 09:00', '2011-01-02 10:15'])
exp1 = """Series([], dtype: datetime64[ns])"""
exp2 = """0 2011-01-01
dtype: datetime64[ns]"""
exp3 = """0 2011-01-01
1 2011-01-02
dtype: datetime64[ns]"""
exp4 = """0 2011-01-01
1 2011-01-02
2 2011-01-03
dtype: datetime64[ns]"""
exp5 = """0 2011-01-01 09:00:00+09:00
1 2011-01-01 10:00:00+09:00
2 2011-01-01 11:00:00+09:00
dtype: datetime64[ns, Asia/Tokyo]"""
exp6 = """0 2011-01-01 09:00:00-05:00
1 2011-01-01 10:00:00-05:00
2 NaT
dtype: datetime64[ns, US/Eastern]"""
exp7 = """0 2011-01-01 09:00:00
1 2011-01-02 10:15:00
dtype: datetime64[ns]"""
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4,
idx5, idx6, idx7],
[exp1, exp2, exp3, exp4,
exp5, exp6, exp7]):
result = repr(Series(idx))
assert result == expected
def test_summary(self):
# GH9116
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'],
freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
exp1 = """DatetimeIndex: 0 entries
Freq: D"""
exp2 = """DatetimeIndex: 1 entries, 2011-01-01 to 2011-01-01
Freq: D"""
exp3 = """DatetimeIndex: 2 entries, 2011-01-01 to 2011-01-02
Freq: D"""
exp4 = """DatetimeIndex: 3 entries, 2011-01-01 to 2011-01-03
Freq: D"""
exp5 = ("DatetimeIndex: 3 entries, 2011-01-01 09:00:00+09:00 "
"to 2011-01-01 11:00:00+09:00\n"
"Freq: H")
exp6 = """DatetimeIndex: 3 entries, 2011-01-01 09:00:00-05:00 to NaT"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5, idx6],
[exp1, exp2, exp3, exp4, exp5, exp6]):
result = idx.summary()
assert result == expected
def test_resolution(self):
for freq, expected in zip(['A', 'Q', 'M', 'D', 'H', 'T',
'S', 'L', 'U'],
['day', 'day', 'day', 'day', 'hour',
'minute', 'second', 'millisecond',
'microsecond']):
for tz in self.tz:
idx = pd.date_range(start='2013-04-01', periods=30, freq=freq,
tz=tz)
assert idx.resolution == expected
def test_union(self):
for tz in self.tz:
# union
rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz)
expected1 = pd.date_range('1/1/2000', freq='D', periods=10, tz=tz)
rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz)
expected2 = pd.date_range('1/1/2000', freq='D', periods=8, tz=tz)
rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other3 = pd.DatetimeIndex([], tz=tz)
expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3)]:
result_union = rng.union(other)
tm.assert_index_equal(result_union, expected)
def test_add_iadd(self):
for tz in self.tz:
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
result = rng + delta
expected = pd.date_range('2000-01-01 02:00',
'2000-02-01 02:00', tz=tz)
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
# int
rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10,
tz=tz)
result = rng + 1
expected = pd.date_range('2000-01-01 10:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
idx = DatetimeIndex(['2011-01-01', '2011-01-02'])
msg = "cannot add a datelike to a DatetimeIndex"
with tm.assert_raises_regex(TypeError, msg):
idx + Timestamp('2011-01-01')
with tm.assert_raises_regex(TypeError, msg):
Timestamp('2011-01-01') + idx
def test_add_dti_dti(self):
# previously performed setop (deprecated in 0.16.0), now raises
# TypeError (GH14164)
dti = date_range('20130101', periods=3)
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
with pytest.raises(TypeError):
dti + dti
with pytest.raises(TypeError):
dti_tz + dti_tz
with pytest.raises(TypeError):
dti_tz + dti
with pytest.raises(TypeError):
dti + dti_tz
def test_difference(self):
for tz in self.tz:
# diff
rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz)
expected1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz)
expected2 = pd.date_range('1/1/2000', freq='D', periods=3, tz=tz)
rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other3 = pd.DatetimeIndex([], tz=tz)
expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3)]:
result_diff = rng.difference(other)
tm.assert_index_equal(result_diff, expected)
def test_sub_isub(self):
for tz in self.tz:
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
expected = pd.date_range('1999-12-31 22:00',
'2000-01-31 22:00', tz=tz)
result = rng - delta
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
# int
rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10,
tz=tz)
result = rng - 1
expected = pd.date_range('2000-01-01 08:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
def test_sub_dti_dti(self):
# previously performed setop (deprecated in 0.16.0), now changed to
# return subtraction -> TimeDeltaIndex (GH ...)
dti = date_range('20130101', periods=3)
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
dti_tz2 = date_range('20130101', periods=3).tz_localize('UTC')
expected = TimedeltaIndex([0, 0, 0])
result = dti - dti
tm.assert_index_equal(result, expected)
result = dti_tz - dti_tz
tm.assert_index_equal(result, expected)
with pytest.raises(TypeError):
dti_tz - dti
with pytest.raises(TypeError):
dti - dti_tz
with pytest.raises(TypeError):
dti_tz - dti_tz2
# isub
dti -= dti
tm.assert_index_equal(dti, expected)
# different length raises ValueError
dti1 = date_range('20130101', periods=3)
dti2 = date_range('20130101', periods=4)
with pytest.raises(ValueError):
dti1 - dti2
# NaN propagation
dti1 = DatetimeIndex(['2012-01-01', np.nan, '2012-01-03'])
dti2 = DatetimeIndex(['2012-01-02', '2012-01-03', np.nan])
expected = TimedeltaIndex(['1 days', np.nan, np.nan])
result = dti2 - dti1
tm.assert_index_equal(result, expected)
def test_sub_period(self):
# GH 13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
for freq in [None, 'D']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02'], freq=freq)
with pytest.raises(TypeError):
idx - p
with pytest.raises(TypeError):
p - idx
def test_comp_nat(self):
left = pd.DatetimeIndex([pd.Timestamp('2011-01-01'), pd.NaT,
pd.Timestamp('2011-01-03')])
right = pd.DatetimeIndex([pd.NaT, pd.NaT, pd.Timestamp('2011-01-03')])
for l, r in [(left, right), (left.asobject, right.asobject)]:
result = l == r
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = l != r
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == r, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(l != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != l, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > l, expected)
def test_value_counts_unique(self):
# GH 7735
for tz in self.tz:
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=10)
# create repeated values, 'n'th element is repeated by n+1 times
idx = DatetimeIndex(np.repeat(idx.values, range(1, len(idx) + 1)),
tz=tz)
exp_idx = pd.date_range('2011-01-01 18:00', freq='-1H', periods=10,
tz=tz)
expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
expected = pd.date_range('2011-01-01 09:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(idx.unique(), expected)
idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 09:00',
'2013-01-01 09:00', '2013-01-01 08:00',
'2013-01-01 08:00', pd.NaT], tz=tz)
exp_idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 08:00'],
tz=tz)
expected = Series([3, 2], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
exp_idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 08:00',
pd.NaT], tz=tz)
expected = Series([3, 2, 1], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(dropna=False),
expected)
tm.assert_index_equal(idx.unique(), exp_idx)
def test_nonunique_contains(self):
# GH 9512
for idx in map(DatetimeIndex,
([0, 1, 0], [0, 0, -1], [0, -1, -1],
['2015', '2015', '2016'], ['2015', '2015', '2014'])):
assert idx[0] in idx
def test_order(self):
# with freq
idx1 = DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], freq='D', name='idx')
idx2 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'], freq='H',
tz='Asia/Tokyo', name='tzidx')
for idx in [idx1, idx2]:
ordered = idx.sort_values()
tm.assert_index_equal(ordered, idx)
assert ordered.freq == idx.freq
ordered = idx.sort_values(ascending=False)
expected = idx[::-1]
tm.assert_index_equal(ordered, expected)
assert ordered.freq == expected.freq
assert ordered.freq.n == -1
ordered, indexer = idx.sort_values(return_indexer=True)
tm.assert_index_equal(ordered, idx)
tm.assert_numpy_array_equal(indexer, np.array([0, 1, 2]),
check_dtype=False)
assert ordered.freq == idx.freq
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
expected = idx[::-1]
tm.assert_index_equal(ordered, expected)
tm.assert_numpy_array_equal(indexer,
np.array([2, 1, 0]),
check_dtype=False)
assert ordered.freq == expected.freq
assert ordered.freq.n == -1
# without freq
for tz in self.tz:
idx1 = DatetimeIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'],
tz=tz, name='idx1')
exp1 = DatetimeIndex(['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05'],
tz=tz, name='idx1')
idx2 = DatetimeIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'],
tz=tz, name='idx2')
exp2 = DatetimeIndex(['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05'],
tz=tz, name='idx2')
idx3 = DatetimeIndex([pd.NaT, '2011-01-03', '2011-01-05',
'2011-01-02', pd.NaT], tz=tz, name='idx3')
exp3 = DatetimeIndex([pd.NaT, pd.NaT, '2011-01-02', '2011-01-03',
'2011-01-05'], tz=tz, name='idx3')
for idx, expected in [(idx1, exp1), (idx2, exp2), (idx3, exp3)]:
ordered = idx.sort_values()
tm.assert_index_equal(ordered, expected)
assert ordered.freq is None
ordered = idx.sort_values(ascending=False)
tm.assert_index_equal(ordered, expected[::-1])
assert ordered.freq is None
ordered, indexer = idx.sort_values(return_indexer=True)
tm.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
tm.assert_numpy_array_equal(indexer, exp, check_dtype=False)
assert ordered.freq is None
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
tm.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
tm.assert_numpy_array_equal(indexer, exp, check_dtype=False)
assert ordered.freq is None
def test_getitem(self):
idx1 = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx2 = pd.date_range('2011-01-01', '2011-01-31', freq='D',
tz='Asia/Tokyo', name='idx')
for idx in [idx1, idx2]:
result = idx[0]
assert result == Timestamp('2011-01-01', tz=idx.tz)
result = idx[0:5]
expected = pd.date_range('2011-01-01', '2011-01-05', freq='D',
tz=idx.tz, name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
result = idx[0:10:2]
expected = pd.date_range('2011-01-01', '2011-01-09', freq='2D',
tz=idx.tz, name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
result = idx[-20:-5:3]
expected = pd.date_range('2011-01-12', '2011-01-24', freq='3D',
tz=idx.tz, name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
result = idx[4::-1]
expected = DatetimeIndex(['2011-01-05', '2011-01-04', '2011-01-03',
'2011-01-02', '2011-01-01'],
freq='-1D', tz=idx.tz, name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
def test_drop_duplicates_metadata(self):
# GH 10115
idx = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
result = idx.drop_duplicates()
tm.assert_index_equal(idx, result)
assert idx.freq == result.freq
idx_dup = idx.append(idx)
assert idx_dup.freq is None # freq is reset
result = idx_dup.drop_duplicates()
tm.assert_index_equal(idx, result)
assert result.freq is None
def test_drop_duplicates(self):
# to check Index/Series compat
base = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx = base.append(base[:5])
res = idx.drop_duplicates()
tm.assert_index_equal(res, base)
res = Series(idx).drop_duplicates()
tm.assert_series_equal(res, Series(base))
res = idx.drop_duplicates(keep='last')
exp = base[5:].append(base[:5])
tm.assert_index_equal(res, exp)
res = Series(idx).drop_duplicates(keep='last')
tm.assert_series_equal(res, Series(exp, index=np.arange(5, 36)))
res = idx.drop_duplicates(keep=False)
tm.assert_index_equal(res, base[5:])
res = Series(idx).drop_duplicates(keep=False)
tm.assert_series_equal(res, Series(base[5:], index=np.arange(5, 31)))
def test_take(self):
# GH 10295
idx1 = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx2 = pd.date_range('2011-01-01', '2011-01-31', freq='D',
tz='Asia/Tokyo', name='idx')
for idx in [idx1, idx2]:
result = idx.take([0])
assert result == Timestamp('2011-01-01', tz=idx.tz)
result = idx.take([0, 1, 2])
expected = pd.date_range('2011-01-01', '2011-01-03', freq='D',
tz=idx.tz, name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
result = idx.take([0, 2, 4])
expected = pd.date_range('2011-01-01', '2011-01-05', freq='2D',
tz=idx.tz, name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
result = idx.take([7, 4, 1])
expected = pd.date_range('2011-01-08', '2011-01-02', freq='-3D',
tz=idx.tz, name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
result = idx.take([3, 2, 5])
expected = DatetimeIndex(['2011-01-04', '2011-01-03',
'2011-01-06'],
freq=None, tz=idx.tz, name='idx')
tm.assert_index_equal(result, expected)
assert result.freq is None
result = idx.take([-3, 2, 5])
expected = DatetimeIndex(['2011-01-29', '2011-01-03',
'2011-01-06'],
freq=None, tz=idx.tz, name='idx')
tm.assert_index_equal(result, expected)
assert result.freq is None
def test_take_invalid_kwargs(self):
idx = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
indices = [1, 6, 5, 9, 10, 13, 15, 3]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
tm.assert_raises_regex(TypeError, msg, idx.take,
indices, foo=2)
msg = "the 'out' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, idx.take,
indices, out=indices)
msg = "the 'mode' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, idx.take,
indices, mode='clip')
def test_infer_freq(self):
# GH 11018
for freq in ['A', '2A', '-2A', 'Q', '-1Q', 'M', '-1M', 'D', '3D',
'-3D', 'W', '-1W', 'H', '2H', '-2H', 'T', '2T', 'S',
'-3S']:
idx = pd.date_range('2011-01-01 09:00:00', freq=freq, periods=10)
result = pd.DatetimeIndex(idx.asi8, freq='infer')
tm.assert_index_equal(idx, result)
assert result.freq == freq
def test_nat_new(self):
idx = pd.date_range('2011-01-01', freq='D', periods=5, name='x')
result = idx._nat_new()
exp = pd.DatetimeIndex([pd.NaT] * 5, name='x')
tm.assert_index_equal(result, exp)
result = idx._nat_new(box=False)
exp = np.array([tslib.iNaT] * 5, dtype=np.int64)
tm.assert_numpy_array_equal(result, exp)
def test_shift(self):
# GH 9903
for tz in self.tz:
idx = pd.DatetimeIndex([], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
tm.assert_index_equal(idx.shift(3, freq='H'), idx)
            idx = pd.DatetimeIndex(['2011-01-01 10:00', '2011-01-01 11:00',
                                    '2011-01-01 12:00'], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
            exp = pd.DatetimeIndex(['2011-01-01 13:00', '2011-01-01 14:00',
                                    '2011-01-01 15:00'], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(3, freq='H'), exp)
            exp = pd.DatetimeIndex(['2011-01-01 07:00', '2011-01-01 08:00',
                                    '2011-01-01 09:00'], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(-3, freq='H'), exp)
def test_nat(self):
assert pd.DatetimeIndex._na_value is pd.NaT
assert pd.DatetimeIndex([])._na_value is pd.NaT
for tz in [None, 'US/Eastern', 'UTC']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02'], tz=tz)
assert idx._can_hold_na
tm.assert_numpy_array_equal(idx._isnan, np.array([False, False]))
assert not idx.hasnans
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([], dtype=np.intp))
idx = pd.DatetimeIndex(['2011-01-01', 'NaT'], tz=tz)
assert idx._can_hold_na
tm.assert_numpy_array_equal(idx._isnan, np.array([False, True]))
assert idx.hasnans
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([1], dtype=np.intp))
def test_equals(self):
# GH 13107
for tz in [None, 'UTC', 'US/Eastern', 'Asia/Tokyo']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02', 'NaT'])
assert idx.equals(idx)
assert idx.equals(idx.copy())
assert idx.equals(idx.asobject)
assert idx.asobject.equals(idx)
assert idx.asobject.equals(idx.asobject)
assert not idx.equals(list(idx))
assert not idx.equals(pd.Series(idx))
idx2 = pd.DatetimeIndex(['2011-01-01', '2011-01-02', 'NaT'],
tz='US/Pacific')
assert not idx.equals(idx2)
assert not idx.equals(idx2.copy())
assert not idx.equals(idx2.asobject)
assert not idx.asobject.equals(idx2)
assert not idx.equals(list(idx2))
assert not idx.equals(pd.Series(idx2))
# same internal, different tz
idx3 = pd.DatetimeIndex._simple_new(idx.asi8, tz='US/Pacific')
tm.assert_numpy_array_equal(idx.asi8, idx3.asi8)
assert not idx.equals(idx3)
assert not idx.equals(idx3.copy())
assert not idx.equals(idx3.asobject)
assert not idx.asobject.equals(idx3)
assert not idx.equals(list(idx3))
assert not idx.equals(pd.Series(idx3))
class TestDateTimeIndexToJulianDate(object):
def test_1700(self):
r1 = Float64Index([2345897.5, 2345898.5, 2345899.5, 2345900.5,
2345901.5])
r2 = date_range(start=Timestamp('1710-10-01'), periods=5,
freq='D').to_julian_date()
assert isinstance(r2, Float64Index)
tm.assert_index_equal(r1, r2)
def test_2000(self):
r1 = Float64Index([2451601.5, 2451602.5, 2451603.5, 2451604.5,
2451605.5])
r2 = date_range(start=Timestamp('2000-02-27'), periods=5,
freq='D').to_julian_date()
assert isinstance(r2, Float64Index)
tm.assert_index_equal(r1, r2)
def test_hour(self):
r1 = Float64Index(
[2451601.5, 2451601.5416666666666666, 2451601.5833333333333333,
2451601.625, 2451601.6666666666666666])
r2 = date_range(start=Timestamp('2000-02-27'), periods=5,
freq='H').to_julian_date()
assert isinstance(r2, Float64Index)
tm.assert_index_equal(r1, r2)
def test_minute(self):
r1 = Float64Index(
[2451601.5, 2451601.5006944444444444, 2451601.5013888888888888,
2451601.5020833333333333, 2451601.5027777777777777])
r2 = date_range(start=Timestamp('2000-02-27'), periods=5,
freq='T').to_julian_date()
assert isinstance(r2, Float64Index)
tm.assert_index_equal(r1, r2)
def test_second(self):
r1 = Float64Index(
[2451601.5, 2451601.500011574074074, 2451601.5000231481481481,
2451601.5000347222222222, 2451601.5000462962962962])
r2 = date_range(start=Timestamp('2000-02-27'), periods=5,
freq='S').to_julian_date()
assert isinstance(r2, Float64Index)
tm.assert_index_equal(r1, r2)
# GH 10699
@pytest.mark.parametrize('klass,assert_func', zip([Series, DatetimeIndex],
[tm.assert_series_equal,
tm.assert_index_equal]))
def test_datetime64_with_DateOffset(klass, assert_func):
s = klass(date_range('2000-01-01', '2000-01-31'), name='a')
result = s + pd.DateOffset(years=1)
result2 = pd.DateOffset(years=1) + s
exp = klass(date_range('2001-01-01', '2001-01-31'), name='a')
assert_func(result, exp)
assert_func(result2, exp)
result = s - pd.DateOffset(years=1)
exp = klass(date_range('1999-01-01', '1999-01-31'), name='a')
assert_func(result, exp)
s = klass([Timestamp('2000-01-15 00:15:00', tz='US/Central'),
pd.Timestamp('2000-02-15', tz='US/Central')], name='a')
result = s + pd.offsets.Day()
result2 = pd.offsets.Day() + s
exp = klass([Timestamp('2000-01-16 00:15:00', tz='US/Central'),
Timestamp('2000-02-16', tz='US/Central')], name='a')
assert_func(result, exp)
assert_func(result2, exp)
s = klass([Timestamp('2000-01-15 00:15:00', tz='US/Central'),
pd.Timestamp('2000-02-15', tz='US/Central')], name='a')
result = s + pd.offsets.MonthEnd()
result2 = pd.offsets.MonthEnd() + s
exp = klass([Timestamp('2000-01-31 00:15:00', tz='US/Central'),
Timestamp('2000-02-29', tz='US/Central')], name='a')
assert_func(result, exp)
assert_func(result2, exp)
# array of offsets - valid for Series only
if klass is Series:
with tm.assert_produces_warning(PerformanceWarning):
s = klass([Timestamp('2000-1-1'), Timestamp('2000-2-1')])
result = s + Series([pd.offsets.DateOffset(years=1),
pd.offsets.MonthEnd()])
exp = klass([Timestamp('2001-1-1'), Timestamp('2000-2-29')
])
assert_func(result, exp)
# same offset
result = s + Series([pd.offsets.DateOffset(years=1),
pd.offsets.DateOffset(years=1)])
exp = klass([Timestamp('2001-1-1'), Timestamp('2001-2-1')])
assert_func(result, exp)
s = klass([Timestamp('2000-01-05 00:15:00'),
Timestamp('2000-01-31 00:23:00'),
Timestamp('2000-01-01'),
Timestamp('2000-03-31'),
Timestamp('2000-02-29'),
|
Timestamp('2000-12-31')
|
pandas.Timestamp
|
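# --- Illustrative sketch (not part of the test file above) ---
# A minimal, standalone example of the DateOffset arithmetic the tests above
# assert: offsets broadcast over a Series of timestamps and apply element-wise.
# The dates and frequencies here are invented for illustration.
import pandas as pd

dates = pd.Series(pd.date_range('2000-01-01', periods=3, freq='MS'))
print(dates + pd.DateOffset(years=1))   # each timestamp moves forward one year
print(dates + pd.offsets.MonthEnd())    # each timestamp rolls to its month end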
import pandas as pd
import pytest
import woodwork as ww
from pandas.testing import assert_frame_equal
from woodwork.logical_types import (
Boolean,
Categorical,
Datetime,
Double,
Integer,
)
from evalml.pipelines import DelayedFeatureTransformer
@pytest.fixture
def delayed_features_data():
X = pd.DataFrame({"feature": range(1, 32)})
y = pd.Series(range(1, 32))
return X, y
def test_delayed_features_transformer_init():
delayed_features = DelayedFeatureTransformer(
max_delay=4,
delay_features=True,
delay_target=False,
date_index="Date",
random_seed=1,
)
assert delayed_features.parameters == {
"max_delay": 4,
"delay_features": True,
"delay_target": False,
"gap": 0,
"forecast_horizon": 1,
"date_index": "Date",
}
def encode_y_as_string(y):
y = y.astype("category")
y_answer = y.astype(int) - 1
y = y.map(lambda val: str(val).zfill(2))
return y, y_answer
def encode_X_as_string(X):
X_answer = X.astype(int) - 1
# So that the encoder encodes the values in ascending order. This makes it easier to
# specify the answer for each unit test
X.feature = pd.Categorical(X.feature.map(lambda val: str(val).zfill(2)))
return X, X_answer
def encode_X_y_as_strings(X, y, encode_X_as_str, encode_y_as_str):
y_answer = y
if encode_y_as_str:
y, y_answer = encode_y_as_string(y)
X_answer = X
if encode_X_as_str:
X, X_answer = encode_X_as_string(X)
return X, X_answer, y, y_answer
@pytest.mark.parametrize("encode_X_as_str", [True, False])
@pytest.mark.parametrize("encode_y_as_str", [True, False])
def test_delayed_feature_extractor_maxdelay3_forecasthorizon1_gap0(
encode_X_as_str, encode_y_as_str, delayed_features_data
):
X, y = delayed_features_data
X, X_answer, y, y_answer = encode_X_y_as_strings(
X, y, encode_X_as_str, encode_y_as_str
)
answer = pd.DataFrame(
{
"feature_delay_1": X_answer.feature.shift(1),
"feature_delay_2": X_answer.feature.shift(2),
"feature_delay_3": X_answer.feature.shift(3),
"feature_delay_4": X_answer.feature.shift(4),
"target_delay_1": y_answer.shift(1),
"target_delay_2": y_answer.shift(2),
"target_delay_3": y_answer.shift(3),
"target_delay_4": y_answer.shift(4),
}
)
assert_frame_equal(
answer,
DelayedFeatureTransformer(max_delay=3, gap=0, forecast_horizon=1).fit_transform(
X=X, y=y
),
)
answer_only_y = pd.DataFrame(
{
"target_delay_1": y_answer.shift(1),
"target_delay_2": y_answer.shift(2),
"target_delay_3": y_answer.shift(3),
"target_delay_4": y_answer.shift(4),
}
)
assert_frame_equal(
answer_only_y,
DelayedFeatureTransformer(max_delay=3, gap=0, forecast_horizon=1).fit_transform(
X=None, y=y
),
)
@pytest.mark.parametrize("encode_X_as_str", [True, False])
@pytest.mark.parametrize("encode_y_as_str", [True, False])
def test_delayed_feature_extractor_maxdelay5_forecasthorizon1_gap0(
encode_X_as_str, encode_y_as_str, delayed_features_data
):
X, y = delayed_features_data
X, X_answer, y, y_answer = encode_X_y_as_strings(
X, y, encode_X_as_str, encode_y_as_str
)
answer = pd.DataFrame(
{
"feature_delay_1": X_answer.feature.shift(1),
"feature_delay_2": X_answer.feature.shift(2),
"feature_delay_3": X_answer.feature.shift(3),
"feature_delay_4": X_answer.feature.shift(4),
"feature_delay_5": X_answer.feature.shift(5),
"feature_delay_6": X_answer.feature.shift(6),
"target_delay_1": y_answer.shift(1),
"target_delay_2": y_answer.shift(2),
"target_delay_3": y_answer.shift(3),
"target_delay_4": y_answer.shift(4),
"target_delay_5": y_answer.shift(5),
"target_delay_6": y_answer.shift(6),
}
)
assert_frame_equal(
answer,
DelayedFeatureTransformer(max_delay=5, gap=0, forecast_horizon=1).fit_transform(
X, y
),
)
answer_only_y = pd.DataFrame(
{
"target_delay_1": y_answer.shift(1),
"target_delay_2": y_answer.shift(2),
"target_delay_3": y_answer.shift(3),
"target_delay_4": y_answer.shift(4),
"target_delay_5": y_answer.shift(5),
"target_delay_6": y_answer.shift(6),
}
)
assert_frame_equal(
answer_only_y,
DelayedFeatureTransformer(max_delay=5, gap=0, forecast_horizon=1).fit_transform(
X=None, y=y
),
)
@pytest.mark.parametrize("encode_X_as_str", [True, False])
@pytest.mark.parametrize("encode_y_as_str", [True, False])
def test_delayed_feature_extractor_maxdelay3_forecasthorizon7_gap1(
encode_X_as_str, encode_y_as_str, delayed_features_data
):
X, y = delayed_features_data
X, X_answer, y, y_answer = encode_X_y_as_strings(
X, y, encode_X_as_str, encode_y_as_str
)
answer = pd.DataFrame(
{
"feature_delay_8": X_answer.feature.shift(8),
"feature_delay_9": X_answer.feature.shift(9),
"feature_delay_10": X_answer.feature.shift(10),
"feature_delay_11": X_answer.feature.shift(11),
"target_delay_8": y_answer.shift(8),
"target_delay_9": y_answer.shift(9),
"target_delay_10": y_answer.shift(10),
"target_delay_11": y_answer.shift(11),
}
)
assert_frame_equal(
answer,
DelayedFeatureTransformer(max_delay=3, forecast_horizon=7, gap=1).fit_transform(
X, y
),
)
answer_only_y = pd.DataFrame(
{
"target_delay_8": y_answer.shift(8),
"target_delay_9": y_answer.shift(9),
"target_delay_10": y_answer.shift(10),
"target_delay_11": y_answer.shift(11),
}
)
assert_frame_equal(
answer_only_y,
DelayedFeatureTransformer(max_delay=3, forecast_horizon=7, gap=1).fit_transform(
X=None, y=y
),
)
def test_delayed_feature_extractor_numpy(delayed_features_data):
X, y = delayed_features_data
X, X_answer, y, y_answer = encode_X_y_as_strings(X, y, False, False)
X_np = X.values
y_np = y.values
answer = pd.DataFrame(
{
"0_delay_8": X_answer.feature.shift(8),
"0_delay_9": X_answer.feature.shift(9),
"0_delay_10": X_answer.feature.shift(10),
"0_delay_11": X_answer.feature.shift(11),
"target_delay_8": y_answer.shift(8),
"target_delay_9": y_answer.shift(9),
"target_delay_10": y_answer.shift(10),
"target_delay_11": y_answer.shift(11),
}
)
assert_frame_equal(
answer,
DelayedFeatureTransformer(max_delay=3, forecast_horizon=7, gap=1).fit_transform(
X_np, y_np
),
)
answer_only_y = pd.DataFrame(
{
"target_delay_8": y_answer.shift(8),
"target_delay_9": y_answer.shift(9),
"target_delay_10": y_answer.shift(10),
"target_delay_11": y_answer.shift(11),
}
)
assert_frame_equal(
answer_only_y,
DelayedFeatureTransformer(max_delay=3, forecast_horizon=7, gap=1).fit_transform(
X=None, y=y_np
),
)
@pytest.mark.parametrize(
"delay_features,delay_target", [(False, True), (True, False), (False, False)]
)
@pytest.mark.parametrize("encode_X_as_str", [True, False])
@pytest.mark.parametrize("encode_y_as_str", [True, False])
def test_lagged_feature_extractor_delay_features_delay_target(
encode_y_as_str,
encode_X_as_str,
delay_features,
delay_target,
delayed_features_data,
):
X, y = delayed_features_data
X, X_answer, y, y_answer = encode_X_y_as_strings(
X, y, encode_X_as_str, encode_y_as_str
)
all_delays = pd.DataFrame(
{
"feature_delay_1": X_answer.feature.shift(1),
"feature_delay_2": X_answer.feature.shift(2),
"feature_delay_3": X_answer.feature.shift(3),
"feature_delay_4": X_answer.feature.shift(4),
"target_delay_1": y_answer.shift(1),
"target_delay_2": y_answer.shift(2),
"target_delay_3": y_answer.shift(3),
"target_delay_4": y_answer.shift(4),
}
)
if not delay_features:
all_delays = all_delays.drop(
columns=[c for c in all_delays.columns if "feature_" in c]
)
if not delay_target:
all_delays = all_delays.drop(
columns=[c for c in all_delays.columns if "target" in c]
)
transformer = DelayedFeatureTransformer(
max_delay=3,
forecast_horizon=1,
delay_features=delay_features,
delay_target=delay_target,
)
assert_frame_equal(all_delays, transformer.fit_transform(X, y))
@pytest.mark.parametrize(
"delay_features,delay_target", [(False, True), (True, False), (False, False)]
)
@pytest.mark.parametrize("encode_X_as_str", [True, False])
@pytest.mark.parametrize("encode_y_as_str", [True, False])
def test_lagged_feature_extractor_delay_target(
encode_y_as_str,
encode_X_as_str,
delay_features,
delay_target,
delayed_features_data,
):
X, y = delayed_features_data
X, X_answer, y, y_answer = encode_X_y_as_strings(
X, y, encode_X_as_str, encode_y_as_str
)
answer = pd.DataFrame()
if delay_target:
answer = pd.DataFrame(
{
"target_delay_1": y_answer.shift(1),
"target_delay_2": y_answer.shift(2),
"target_delay_3": y_answer.shift(3),
"target_delay_4": y_answer.shift(4),
}
)
transformer = DelayedFeatureTransformer(
max_delay=3,
forecast_horizon=1,
delay_features=delay_features,
delay_target=delay_target,
)
assert_frame_equal(answer, transformer.fit_transform(None, y))
@pytest.mark.parametrize("encode_X_as_str", [True, False])
@pytest.mark.parametrize("encode_y_as_str", [True, False])
@pytest.mark.parametrize("data_type", ["ww", "pd"])
def test_delay_feature_transformer_supports_custom_index(
encode_X_as_str, encode_y_as_str, data_type, make_data_type, delayed_features_data
):
X, y = delayed_features_data
X, X_answer, y, y_answer = encode_X_y_as_strings(
X, y, encode_X_as_str, encode_y_as_str
)
X.index = pd.RangeIndex(50, 81)
X_answer.index = pd.RangeIndex(50, 81)
y.index = pd.RangeIndex(50, 81)
y_answer.index = pd.RangeIndex(50, 81)
answer = pd.DataFrame(
{
"feature_delay_7": X_answer.feature.shift(7),
"feature_delay_8": X_answer.feature.shift(8),
"feature_delay_9": X_answer.feature.shift(9),
"feature_delay_10": X_answer.feature.shift(10),
"target_delay_7": y_answer.shift(7),
"target_delay_8": y_answer.shift(8),
"target_delay_9": y_answer.shift(9),
"target_delay_10": y_answer.shift(10),
},
index=pd.RangeIndex(50, 81),
)
X = make_data_type(data_type, X)
y = make_data_type(data_type, y)
assert_frame_equal(
answer,
DelayedFeatureTransformer(max_delay=3, forecast_horizon=7).fit_transform(X, y),
)
answer_only_y = pd.DataFrame(
{
"target_delay_7": y_answer.shift(7),
"target_delay_8": y_answer.shift(8),
"target_delay_9": y_answer.shift(9),
"target_delay_10": y_answer.shift(10),
},
index=
|
pd.RangeIndex(50, 81)
|
pandas.RangeIndex
|
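# --- Illustrative sketch (not part of the files above/below) ---
# The expected answers in the DelayedFeatureTransformer tests are built purely
# from pandas shifts; this standalone example shows that pattern with a toy
# series (the column names mimic the convention used in the tests).
import pandas as pd

y = pd.Series(range(1, 8))
lags = pd.DataFrame({f"target_delay_{d}": y.shift(d) for d in range(1, 4)})
print(lags)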
# coding: utf-8
"""
Classifiers.
Based on sklearn doc:
"http://scikit-learn.org/dev/developers/contributing.html\
#rolling-your-own-estimator"
"""
from itertools import product
import numpy as np
import pandas as pd
from scipy.optimize import LinearConstraint, minimize
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.utils.validation import check_is_fitted
from .methods import KernelMethod
def multiclass2one_vs_all(labels, first_class=1):
"""Transform multiclas label to 2 class labels
Params:
labels (array-like): list of labels
first_class: label considered as not the rest
Returns:
(list) list of labels containing only 1/-1
"""
if first_class not in labels:
first_class = labels[0]
return [1 if elt == first_class else -1 for elt in labels]
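# Example (invented labels): multiclass2one_vs_all([0, 1, 2, 1], first_class=1)
# returns [-1, 1, -1, 1]; if first_class is absent from labels, labels[0] is
# used as the positive class instead.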
class SVDD(BaseEstimator, ClassifierMixin, KernelMethod):
"""Implement Support Vector DataDescription
.. math::
\\begin{cases}
        min_{r, c} & r^2 + C \\sum_t \\xi_t \\\\
        s.t & y_i \\| \\phi(x_i) - c \\|^2 < r^2 + \\xi_i \\forall i \\\\
& \\xi_i > 0 \\forall i \\\\
\\end{cases}
"""
def __init__(self, kernel_matrix=None, kernel=None, C=1):
"""Initialize some parameters.
Those parameters may be overwritten by the fit() method.
"""
self.kernel_matrix = kernel_matrix # kernel matrix used for training
if kernel is None:
self.kernel = np.dot
else:
self.kernel = kernel
self.C = C
self.string_labels = False # are labels strings or int?
self.hypersphere_nb = 1
self.trained_on_sample = True # use directly kernel matrix or sample?
def fit(self, X, y=None, C=None, kernel=None, is_kernel_matrix=False):
"""Fit the classifier.
Args:
X: training samples.
            y: training labels. If None, all samples are assumed to belong
                to the same class (labeled "1").
            C (numeric): constraint in the soft margin case. If None or zero,
then fall back to hard margin case.
kernel (fun): kernel method to use. (default: linear)
is_kernel_matrix (bool): if True, the input is treated as
a kernel matrix.
"""
# X, y = check_X_y(X, y) # TODO: add check method for X
self._classifier_checks(X, y, C, kernel, is_kernel_matrix)
if len(self.classes_) > 2 or (
len(self.classes_) == 2 and self.string_labels
):
# each class has its own hypersphere (one class vs rest)
self.hypersphere_nb = len(self.classes_)
self.individual_svdd = {}
for cl in self.classes_:
# TODO: multithread/asyncio
cl_svdd = SVDD(
kernel_matrix=self.kernel_matrix,
kernel=self.kernel,
C=self.C,
)
cl_y = [1 if elt == cl else -1 for elt in y]
cl_svdd.fit(X, cl_y, C, kernel, is_kernel_matrix)
self.individual_svdd[cl] = cl_svdd
self.y_ = y
self.alphas_ = np.array([0])
self.radius_ = 0
else:
# one hypersphere
self.y_ = np.sign(y)
self.radius_, self.alphas_ = self._fit_one_hypersphere()
return self
def predict(self, X, decision_radius=1):
"""Predict classes
Args:
X (array like): list of test samples.
decision_radius (numeric): modification of decision radius.
The frontier between classes will be the computed hypersphere
                whose radius is multiplied by this factor.
"""
check_is_fitted(self, ["X_", "alphas_"])
# X = check_array(X)
if self.hypersphere_nb == 1:
return self._predict_one_hypersphere(X, decision_radius)
else:
# check class
dist_classes = self.relative_dist_all_centers(X)
return np.array(dist_classes.idxmin(axis=1))
def fit_predict(self, X, y, C=None, kernel=None, is_kernel_matrix=False):
"""Fit as the fit() methods.
Returns:
(array) : class for each training sample.
"""
self.fit(X, y, C, kernel, is_kernel_matrix)
        return self.predict(X)
def _predict_one_hypersphere(self, X=None, decision_radius=1):
"""Compute results for one hypersphere
Args:
decision_radius (numeric): modification of decision radius.
The frontier between classes will be the computed hypersphere whose
            radius is multiplied by this factor.
Returns:
(np.array)
"""
pred = self._dist_center(X) * decision_radius / self.radius_ - 1
ret = np.sign(pred).reshape(-1)
return list(map(lambda x: 1 if x == 0 else x, ret))
def decision_function(self, X):
"""Generic decision value.
Args:
X (array-like): list of sample
"""
return self._dist_center(X) / self.radius_
def _dist_center(self, X=None):
"""Compute ditance to class center.
Args:
X (array-like): list of input vectors. If None, use the train set.
Distance to center:
.. math::
\\| z - c \\|^2 = \\|z\\|^2 - 2 K(z, c) + \\|c\\|^2
c = \\sum_t \\alpha_t \\phi(X_t)
"""
if not self.hypersphere_nb == 1:
raise RuntimeWarning("Not available for multiclass SVDD")
check_is_fitted(self, ["X_", "alphas_"])
dim = len(self.alphas_)
if X is None:
# return distances for training set
square_dists = [
self.kernel_matrix[i, i]
- 2
* sum(
self.alphas_[t] * self.kernel_matrix[i, t]
for t in range(dim)
)
+ sum(
self.alphas_[t]
* self.alphas_[s]
* self.kernel_matrix[s, t]
for s in range(dim)
for t in range(dim)
)
for i in range(dim)
]
else:
# return distances for vector X
square_dists = [
self.kernel(z, z)
- 2
* sum(
self.alphas_[t] * self.kernel(self.X_[t], z)
for t in range(dim)
)
+ sum(
self.alphas_[s]
* self.alphas_[t]
* self.kernel(self.X_[t], self.X_[s])
for s in range(dim)
for t in range(dim)
)
for z in X
]
return np.sqrt(square_dists)
def _fit_one_hypersphere(self, y=None, class1=1, class2=-1):
"""Perform actual fit process
* compute alphas
* compute support vectors
* recompute minimal kernel matrix
"""
if y is None:
y = self.y_
dim = len(self.X_)
alphas = [1 / dim] * dim
C = self.C
upper = C * np.ones(dim)
one = np.array([1])
# TODO: test other solver
# https://pypi.org/project/quadprog/
# http://cvxopt.org/r
def ell_d(al):
"""Dual function to minimize.
function to maximize:
.. maths::
L_D = \\alpha diag(K)^T - \\alpha K \\alpha^T
L_D = \\sum_s \\alpha_s K<x_s, x_s>
- \\sum_s \\sum_t \\alpha_s \\alpha_t K(x_s, x_t)
"""
ay = al * y
return -(
np.mat(ay).dot(np.diag(self.kernel_matrix))
- np.mat(ay).dot(self.kernel_matrix).dot(np.mat(ay).T)
)
cons = [
# \forall i 0 \leq \alpha[i] \leq C
LinearConstraint(A=np.identity(dim), lb=np.zeros(dim), ub=upper),
# \sum_i \alpha[i] = 1
LinearConstraint(A=np.ones(dim), lb=one, ub=one),
]
# TODO: asyncio
predicted_alphas = minimize(
ell_d, alphas, constraints=cons, options={"maxiter": 10000}
)
if not predicted_alphas.success:
raise RuntimeError(predicted_alphas.message)
alphas = predicted_alphas.x
# nullify almost null alphas:
alphas = list(map(lambda x: 0 if np.isclose(x, 0) else x, alphas))
# support vectors: 0 < alphas <= C
support_vectors = set.intersection(
set(np.where(np.less_equal(alphas, C))[0]),
set(np.nonzero(alphas)[0]),
)
self.support_vectors_ = self.support_vectors_.union(support_vectors)
if len(self.support_vectors_) < 2:
radius = np.min(
self.distance_matrix() + np.diag([C for _ in range(dim)])
)
else:
# mean distance to support vectors
radius = np.mean(
[
self.dist_center_training_sample(r, alphas)
for r in self.support_vectors_
]
)
return radius, np.array(alphas)
def dist_all_centers(self, X=None):
"""Return distance to each class center.
"""
if self.hypersphere_nb > 1:
dist_classes = {
cl: svdd._dist_center(X)
for cl, svdd in self.individual_svdd.items()
}
else:
dist_classes = {1: self._dist_center(X)}
return pd.DataFrame(dist_classes)
def relative_dist_all_centers(self, X=None):
"""Distane to all centers divided by class radius.
"""
if self.hypersphere_nb > 1:
dist_classes = {
cl: svdd._dist_center(X) / svdd.radius_
for cl, svdd in self.individual_svdd.items()
}
else:
dist_classes = {1: self._dist_center(X) / self.radius_}
return
|
pd.DataFrame(dist_classes)
|
pandas.DataFrame
|
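# --- Illustrative sketch (not part of the files above/below) ---
# The multiclass decision in SVDD.predict() reduces to an idxmin over a
# DataFrame holding one distance column per class; shown here with invented
# distances for two samples and two classes.
import pandas as pd

dist_classes = pd.DataFrame({"A": [0.2, 0.9], "B": [0.5, 0.1]})
print(dist_classes.idxmin(axis=1))   # sample 0 -> 'A', sample 1 -> 'B'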
import pandas as pd
import numpy as np
import networkx as nx
import matplotlib.pyplot as plt
import warnings
import itertools
import datetime
import os
from math import sqrt
#import seaborn as sns
class ContagionAnalysis():
def __init__(self, world):
self.world = world
        # time as label to write files
now = datetime.datetime.now()
self.now = now.strftime("%Y-%m-%d_%H:%M")
def run_contaigon_analysis(self, opinion_type, analysis="expo_frac", n_bins = 20, binning = True, save_plots = False, show_plot=True, write_data = True, output_folder = ""):
        ''' Run a full contagion analysis
        Parameters:
            opinion_type: (str) name of trait
            analysis: (str) name of analysis type (expo_frac, expo_nmb)
            n_bins: (int) number of bins
            binning: (bool) whether to bin the data
            save_plots: (bool) whether to save plots to disk
            show_plot: (bool) whether to display the plot
            write_data: (bool) whether to write data to disk
            output_folder: (str) folder to save data + plots
'''
        # name to label files
name = self.world.name + \
"_" + analysis + \
"_" + self.now
self.output_folder = output_folder
print("Write into: " + self.TEMP_DIR + output_folder)
if not os.path.exists(self.TEMP_DIR + output_folder):
os.makedirs(self.TEMP_DIR + output_folder)
# calc exposure
exposure = self.calc_exposure(analysis, opinion_type)
#write data
if write_data:
exposure.to_pickle(self.TEMP_DIR + output_folder + "exposure_" + name + ".pkl")
# calc trait change
data, expo_agg = self.opinion_change_per_exposure(exposure, opinion_type)
#write data
if write_data:
data.to_pickle(self.TEMP_DIR + output_folder + "data_" + name + ".pkl")
# plot
plot_data = self.plot_opinion_change_per_exposure_number(data, analysis, binning, n_bins, \
save_plots, show_plot)
return [data, plot_data]
def _get_neighbors(self,g,i):
''' returns neighbors of node i in graph g '''
try:
return [n for n in g[i]]
except KeyError:
return []
def _calc_expo_frac(self, node_id, opinion_type, t, op_nodes, graph, all_opinions):
        ''' Calculate exposure as the fraction of encounters with people of each opinion '''
neighbors = self._get_neighbors(graph, node_id)
opinions = op_nodes.loc[neighbors]
nmb_1 = opinions.loc[opinions[opinion_type] == True, opinion_type].count()
nmb_2 = opinions.loc[opinions[opinion_type] == False, opinion_type].count()
exposure = pd.DataFrame({ opinion_type: [True, False],\
'n_influencer': [nmb_1, nmb_2],\
'frac_influencer': [nmb_1, nmb_2] })
if (len(neighbors) <= 2) & (self.world.type == "SYN"):
if self.world.cc == True:
exposure *= 0
# normalize exposure
if len(neighbors) > 0:
exposure.frac_influencer /= len(neighbors)
exposure['n_nbs'] = len(neighbors)
exposure['node_id'] = node_id
exposure['time'] = t
return exposure
def calc_exposure(self, analysis, opinion_type, exposure_time = 7):
''' Calculate exposure for opinion type, distinguish between different analysis types '''
print("INFO: Calc exposure...")
        # prepare some variables for later use
all_opinions = pd.DataFrame( self.world.op_nodes[opinion_type].unique(), \
columns=[opinion_type])
nodes = self.world.op_nodes.node_id.unique()
self.world.op_nodes.time =
|
pd.to_datetime(self.world.op_nodes.time)
|
pandas.to_datetime
|
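# --- Illustrative sketch (not part of the files above/below) ---
# The exposure fraction in _calc_expo_frac() is a neighbour count per opinion
# normalised by the neighbourhood size; a toy version with made-up neighbours:
import pandas as pd

neighbour_opinions = pd.Series([True, True, False])
counts = neighbour_opinions.value_counts()      # True: 2, False: 1
print(counts / len(neighbour_opinions))         # fraction of exposure per opinion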
#!/usr/bin/env python
"""Integration testing."""
# System
import os
import logging
import time
import traceback
import sys
from uuid import uuid4 as uuid
from concurrent import futures
from concurrent.futures import ThreadPoolExecutor
# Third Party
import pandas as pd
from sklearn.linear_model import LogisticRegression
from sklearn.neural_network import MLPClassifier
# from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier # , AdaBoostClassifier, GradientBoostingClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from xgboost.sklearn import XGBClassifier
# needed for module level import
sys.path.insert(0, os.path.dirname(os.path.dirname(sys.argv[0])))
from tests.testing_api import NumerAPI
# the methods we're testing
from submission_criteria.concordance import get_sorted_split
from submission_criteria.concordance import has_concordance
from submission_criteria.concordance import get_competition_variables_from_df
DATA_SET_PATH = 'tests/numerai_datasets'
DATA_SET_FILE = 'numerai_dataset'
TRAIN_FILE = 'numerai_training_data.csv'
TOURN_FILE = 'numerai_tournament_data.csv'
test_csv = "tests/test_csv"
clf_n_jobs = 2
logging.basicConfig(
level=logging.DEBUG,
format="%(asctime)s %(levelname)s %(name)s: %(message)s")
logger = logging.getLogger('integration_test')
upload_executor = ThreadPoolExecutor(max_workers=10)
clf_executor = ThreadPoolExecutor(max_workers=clf_n_jobs)
submission_executor = ThreadPoolExecutor(max_workers=2)
class NumeraiApiWrapper(NumerAPI):
"""
Skips uploading to server to check concordance; not an actual integration test
if this wrapper is used, but the relevant parts are tested. Use the normal NumerAPI instead
of NumerApiWrapper and this test will be an integration test behaving in the same way, but
    since there's no dedicated test token that can be used, no zip support on upload, and
there's roughly 500 MB currently needing to be uploaded every time this test is run, it's
not always practical.
Probably we should mock submission-criteria with an in-memory postgresql db, and try to call
the relevant methods from this wrapper to avoid trying to mock http requests (we're not
testing the NumerAPI after all).
"""
def __init__(self, public_id=None, secret_key=None, verbosity="INFO"):
super(NumeraiApiWrapper, self).__init__(public_id, secret_key,
verbosity)
self.checked = set()
self.cluster_ids = dict()
self.clusters = dict()
self.futures = dict()
def set_data(self, tournament_data, training_data):
self.cluster_ids = {
'test':
tournament_data[tournament_data.data_type ==
'test'].id.copy().values.ravel(),
'valid':
tournament_data[tournament_data.data_type ==
'validation'].id.copy().values.ravel(),
'live':
tournament_data[tournament_data.data_type == 'live'].id.copy().
values.ravel(),
}
self.clusters = get_competition_variables_from_df(
'1', training_data, tournament_data, self.cluster_ids['valid'],
self.cluster_ids['test'], self.cluster_ids['live'])
def upload_predictions(self, file_path):
sub_id = str(uuid())
self.futures[sub_id] = submission_executor.submit(
self.check_concordance, file_path)
return sub_id
def check_concordance(self, submission_file_path):
submission = pd.read_csv(submission_file_path)
ids_valid, ids_test, ids_live = self.cluster_ids[
'valid'], self.cluster_ids['test'], self.cluster_ids['live']
p1, p2, p3 = get_sorted_split(submission, ids_valid, ids_test,
ids_live)
c1, c2, c3 = self.clusters['cluster_1'], self.clusters[
'cluster_2'], self.clusters['cluster_3']
has_it = has_concordance(p1, p2, p3, c1, c2, c3)
# logger.info('submission %s has concordance? %s' % (submission_file_path, str(has_it)))
return has_it
def submission_status(self, submission_id=None):
if submission_id not in self.futures:
raise ValueError('unknown submission id %s' % submission_id)
f = self.futures.get(submission_id)
if not f.done():
pending = True
value = False
else:
pending = False
value = f.result()
return {
'concordance': {
'pending': pending,
'value': value
}
}
def main():
# when running on circleci, set the vars in the project settings
public_id = os.environ.get('NUMERAPI_PUBLIC_ID', '')
secret_key = os.environ.get('NUMERAPI_SECRET_KEY', '')
if not os.path.exists(test_csv):
os.makedirs(test_csv)
napi = NumeraiApiWrapper(public_id=public_id, secret_key=secret_key)
if not os.path.exists(DATA_SET_PATH):
logger.info("Downloading the current dataset...")
os.makedirs(DATA_SET_PATH)
napi.download_current_dataset(
dest_path=DATA_SET_PATH,
dest_filename=DATA_SET_FILE + '.zip',
unzip=True)
import shutil
shutil.move(
os.path.join(DATA_SET_PATH, DATA_SET_FILE, TRAIN_FILE),
os.path.join(DATA_SET_PATH, TRAIN_FILE))
shutil.move(
os.path.join(DATA_SET_PATH, DATA_SET_FILE, TOURN_FILE),
os.path.join(DATA_SET_PATH, TOURN_FILE))
else:
logger.info("Found old data to use.")
training_data = pd.read_csv(
'%s/%s' % (DATA_SET_PATH, TRAIN_FILE), header=0)
tournament_data = pd.read_csv(
'%s/%s' % (DATA_SET_PATH, TOURN_FILE), header=0)
napi.set_data(tournament_data, training_data)
features = [f for f in list(training_data) if "feature" in f]
features = features[:len(features) //
2] # just use half, speed things up a bit
X, Y = training_data[features], training_data[
"target_bernie"] # hardcode to target bernie for now
x_prediction = tournament_data[features]
ids = tournament_data["id"]
clfs = [
RandomForestClassifier(
n_estimators=15,
max_features=1,
max_depth=2,
n_jobs=1,
criterion='entropy',
random_state=42),
XGBClassifier(
learning_rate=0.1,
subsample=0.4,
max_depth=2,
n_estimators=20,
nthread=1,
seed=42),
DecisionTreeClassifier(max_depth=5, random_state=42),
MLPClassifier(alpha=1, hidden_layer_sizes=(25, 25), random_state=42),
GaussianNB(),
QuadraticDiscriminantAnalysis(tol=1.0e-3),
# last item can have multiple jobs since it may be the last to be processed so we have an extra core
LogisticRegression(
n_jobs=2,
solver='sag',
C=1,
tol=1e-2,
random_state=42,
max_iter=50)
]
before = time.time()
fit_all(clfs, X, Y)
logger.info('all clfs fit() took %.2fs' % (time.time() - before))
before = time.time()
uploads_wait_for_legit = predict_and_upload_legit(napi, clfs, x_prediction,
ids)
logger.info(
'all legit clfs predict_proba() took %.2fs' % (time.time() - before))
before = time.time()
uploads_wait_for_mix = predict_and_upload_mix(napi, clfs, tournament_data,
x_prediction, ids)
logger.info(
'all mix clfs predict_proba() took %.2fs' % (time.time() - before))
legit_submission_ids = list()
mix_submission_ids = list()
before = time.time()
for f in futures.as_completed(uploads_wait_for_legit):
legit_submission_ids.append(f.result())
logger.info('await legit uploads took %.2fs' % (time.time() - before))
before = time.time()
for f in futures.as_completed(uploads_wait_for_mix):
mix_submission_ids.append(f.result())
logger.info('await mix uploads took %.2fs' % (time.time() - before))
n_passed_concordance = get_concordance(
napi, legit_submission_ids)
if len(n_passed_concordance) != len(clfs):
logger.error('legit passed concordance %s/%s' %
(len(n_passed_concordance), len(clfs)))
sys.exit(1)
else:
logger.info('all legit tests passed!')
n_passed_concordance = get_concordance(
napi, mix_submission_ids)
if len(n_passed_concordance) > 0:
logger.error('mix passed concordance %s/%s' %
(len(n_passed_concordance), len(clfs)))
sys.exit(1)
else:
logger.info('all mix tests passed!')
sys.exit(0)
def get_concordance(napi, _submission_ids):
submission_ids = _submission_ids.copy()
n_passed_concordance = set()
while True:
statuses = list()
for submission_id in submission_ids:
statuses.append(
upload_executor.submit(check_status, napi, submission_id))
check_later = list()
for f in futures.as_completed(statuses):
submission_id = f.result()['id']
concordance = f.result()['result']['concordance']
if concordance['pending']:
check_later.append(f.result()['id'])
if concordance['value']:
n_passed_concordance.add(submission_id)
if len(check_later) == 0:
break
submission_ids.clear()
submission_ids = check_later.copy()
return n_passed_concordance
def check_status(napi, submission_id):
try:
return {
'id': submission_id,
'result': napi.submission_status(submission_id)
}
except Exception as e:
logger.exception(traceback.format_exc())
logger.error('could not check submission status: %s' % str(e))
sys.exit(1)
def fit_all(clfs: list, X, Y):
wait_for = list()
for clf in clfs:
wait_for.append(clf_executor.submit(fit_clf, X, Y, clf))
before = time.time()
for _ in futures.as_completed(wait_for):
pass
logger.info('await fitting took %.2fs' % (time.time() - before))
def fit_clf(X, Y, clf):
before = time.time()
clf_str = str(clf).split("(")[0]
clf.fit(X, Y)
time_taken = '%.2fs' % (time.time() - before)
logger.info('fit() took %s%s (%s)' % (time_taken, ' ' *
(9 - len(time_taken)), clf_str))
return clf_str
def predict_and_upload_legit(napi, clfs: list, x_prediction, ids):
wait_for = list()
upload_wait_for = list()
for clf in clfs:
wait_for.append(
clf_executor.submit(predict_and_upload_one_legit, upload_wait_for,
napi, clf, x_prediction, ids))
before = time.time()
for _ in futures.as_completed(wait_for):
pass
logger.info('await legit predictions took %.2fs' % (time.time() - before))
return upload_wait_for
def predict_and_upload_one_legit(upload_wait_for: list, napi, clf,
x_prediction, ids):
clf_str = str(clf).split("(")[0]
before = time.time()
y_prediction = clf.predict_proba(x_prediction)
after = time.time()
out = os.path.join(test_csv, "{}-legit.csv".format(clf_str))
time_taken = '%.2fs' % (after - before)
logger.info('predict_proba() took %s%s (%s)' %
(time_taken, ' ' * (9 - len(time_taken)), out))
upload_wait_for.append(
upload_executor.submit(upload_one_legit, y_prediction, ids, out, napi))
def upload_one_legit(y_prediction, ids, out: str, napi):
try:
results = y_prediction[:, 1]
results_df =
|
pd.DataFrame(data={'probability': results})
|
pandas.DataFrame
|
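# --- Illustrative sketch (not part of the files above/below) ---
# A guess at the shape of the payload built in upload_one_legit(): a probability
# column (second column of predict_proba) paired with the tournament ids.
# Names and values here are invented for illustration.
import pandas as pd

ids = pd.Series(["id1", "id2", "id3"], name="id")
probability = pd.Series([0.48, 0.53, 0.51], name="probability")
submission = pd.concat([ids, probability], axis=1)
print(submission)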
# -*- coding: utf-8 -*-
"""
Connectome-informed reservoir - Echo-State Network
=================================================
This example demonstrates how to use the conn2res toolbox to
perform a memory task using a human connectome-informed
Echo-State network while playing with the dynamics of the reservoir
(Jaeger, 2000).
"""
###############################################################################
# First let's import the connectivity matrix we are going to use to define the
# connections of the reservoir. For this we will be using the human connectome
# parcellated into 1015 brain regions following the Desikan Killiany atlas
# (Desikan, et al., 2006).
import os
import numpy as np
PROJ_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
DATA_DIR = os.path.join(PROJ_DIR, 'examples', 'data')
# load connectivity data
conn = np.load(os.path.join(DATA_DIR, 'connectivity.npy'))
# select one subject
subj_id = 0
conn = conn[:,:,subj_id]
n_reservoir_nodes = len(conn)
# scale connectivity weights between [0,1]
conn = (conn-conn.min())/(conn.max()-conn.min())
# normalize connectivity matrix by the spectral radius.
from scipy.linalg import eigh
ew, _ = eigh(conn)
conn = conn/np.max(ew)
###############################################################################
# Second let's get the data to perform the task. We first generate the data and
# then we split it into training and test sets. 'x' corresponds to the input
# signals and 'y' corresponds to the output labels.
from conn2res import iodata
from conn2res import reservoir, coding
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
tasks = iodata.get_available_tasks()
for task in tasks[:]:
x, y = iodata.fetch_dataset(task)
# y = iodata.encode_labels(y)
n_samples = x.shape[0]
print(f'n_observations = {n_samples}')
n_features = x.shape[1]
print(f'n_features = {n_features}')
try: n_labels = y.shape[1]
except: n_labels = 1
print(f'n_labels = {n_labels}')
fig, axs = plt.subplots(2,1, figsize=(10,10), sharex=True)
axs = axs.ravel()
axs[0].plot(x)
axs[0].set_ylabel('Inputs')
axs[1].plot(y)
axs[1].set_ylabel('Outputs')
plt.suptitle(task)
plt.show()
plt.close()
# split data into training and test sets
x_train, x_test = iodata.split_dataset(x)
y_train, y_test = iodata.split_dataset(y)
###############################################################################
# Third we will simulate the dynamics of the reservoir using the previously
# generated input signal x (x_train and x_test).
# define set of input nodes
ctx = np.load(os.path.join(DATA_DIR, 'cortical.npy'))
subctx_nodes = np.where(ctx == 0)[0] # we use subcortical regions as input nodes
input_nodes = np.random.choice(subctx_nodes, n_features) # we select a random set of input nodes
output_nodes = np.where(ctx == 1)[0] # we use cortical regions as output nodes
# create input connectivity matrix, which defines the connections
# between the input layer (source nodes where the input signal is
# coming from) and the input nodes of the reservoir.
w_in = np.zeros((n_features, n_reservoir_nodes))
w_in[np.ix_(np.arange(n_features), input_nodes)] = 0.1 # factor that modulates the activation state of the reservoir
# We will use resting-state networks as readout modules. These intrinsic networks
# define different sets of output nodes
rsn_mapping = np.load(os.path.join(DATA_DIR, 'rsn_mapping.npy'))
rsn_mapping = rsn_mapping[output_nodes] # we select the mapping only for output nodes
# evaluate network performance across various dynamical regimes
# we do so by varying the value of alpha
alphas = np.linspace(0,2,11) #np.linspace(0,2,41)
df_subj = []
for alpha in alphas[1:]:
print(f'\n----------------------- alpha = {alpha} -----------------------')
# instantiate an Echo State Network object
ESN = reservoir.EchoStateNetwork(w_ih=w_in,
w_hh=alpha*conn.copy(),
activation_function='tanh',
)
# simulate reservoir states; select only output nodes.
rs_train = ESN.simulate(ext_input=x_train)[:,output_nodes]
rs_test = ESN.simulate(ext_input=x_test)[:,output_nodes]
# perform task
df = coding.encoder(reservoir_states=(rs_train, rs_test),
target=(y_train, y_test),
readout_modules=rsn_mapping,
# pttn_lens=()
)
df['alpha'] = np.round(alpha, 3)
# reorganize the columns
if 'module' in df.columns:
df_subj.append(df[['module', 'n_nodes', 'alpha', 'score']])
else:
df_subj.append(df[['alpha', 'score']])
df_subj =
|
pd.concat(df_subj, ignore_index=True)
|
pandas.concat
|
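# --- Illustrative sketch (not part of the files above/below) ---
# The per-alpha results above are accumulated as a list of DataFrames and then
# stacked into one frame; the same pattern with toy scores:
import pandas as pd

frames = [pd.DataFrame({"alpha": [a], "score": [a / 2]}) for a in (0.2, 0.4, 0.6)]
print(pd.concat(frames, ignore_index=True))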
import warnings
import pandas as pd
import numpy as np
from bokeh.plotting import figure
from bokeh.models import ColumnDataSource
from bokeh.models.widgets import Button
from bokeh.models.callbacks import CustomJS
from bokeh.models.layouts import Column
from bokeh.io import output_file, show
from astropy import units as u
from astropy.utils.exceptions import AstropyDeprecationWarning
from astropy.coordinates import SkyCoord, Distance
from dustmaps.bayestar import BayestarWebQuery
from gaia.gaia import calculate_distance_from_parallax
from opencluster import PACKAGEDIR
def interactive_CMD(specs,cid1='g_SDSS',cid2='i_SDSS'):
'''
Simplistic tool to create an interactive
bokeh plot where outliers can be marked and saved in
'/home/ekaterina/Documents/appaloosa/stars_shortlist/share/temp'
'''
# Create some random data and put it into a ColumnDataSource
s = specs.set_index('EPIC')
s = s[[cid1, cid2, 'e_'+cid1,'e_'+cid2]].dropna(how='any')
x = list(s[cid1]-s[cid2])
y = list(s[cid2])
size = list(np.sqrt(s['e_'+cid1]**2+s['e_'+cid2]**2)*100.)
z = list(s.index.values)
source_data = ColumnDataSource(data=dict(x=x, y=y,desc=z))
# Create a button that saves the coordinates of selected data points to a file
savebutton = Button(label="Save", button_type="success")
savebutton.callback = CustomJS(args=dict(source_data=source_data), code="""
var inds = source_data.selected['1d'].indices;
var data = source_data.data;
var out = "";
for (i = 0; i < inds.length; i++) {
out += data['desc'][inds[i]] + " ";
}
var file = new Blob([out], {type: 'text/plain'});
var elem = window.document.createElement('a');
elem.href = window.URL.createObjectURL(file);
elem.download = 'selected-data.txt';
document.body.appendChild(elem);
elem.click();
document.body.removeChild(elem);
""")
# Plot the data and save the html file
p = figure(plot_width=800, plot_height=400,
#y_range=(20,7),
tools="lasso_select, reset, hover",)
p.circle(x='x', y='y', source=source_data, fill_alpha=0.8)#add size='desc' to visualize uncertainties on mag
p.xaxis.axis_label = '{}-{}'.format(cid1,cid2)
p.yaxis.axis_label = cid1
plot = Column(p, savebutton)
output_file("test.html")
show(plot)
return
def correct_for_extinction(df, dustmap='bayestar2019', thresh=10):
'''Correct for dust extinction using 3D dustmaps.
Parameters:
--------------
df : DataFrame
stellar attributes including Gaia, 2MASS, PS1
dustmap : str
Which dustmap to use. Check dustmap doc for
more dustmaps. Default: Bayestar2019
'''
dft = df.loc[:,df.columns.str.contains("Gaia")].rename(columns = lambda x : str(x)[:-5])
dft = calculate_distance_from_parallax(dft, check_GoF=False)
# Fill in the median cluster distance for targets without Gaia parallax:
dft.loc[dft['distance'].isnull(),"distance"] = dft['distance'].median()
d = np.abs(dft["distance"].values) * u.pc
ra = dft.ra.values * u.deg
dec = dft.dec.values * u.deg
# validate distance, RA, and Dec
assert len(d) == dft.shape[0]
assert len(ra) == dft.shape[0]
assert len(dec) == dft.shape[0]
ext, ext_err, flags = query_dustmap_w_percentiles(d, ra, dec, dustmap=dustmap)
# Add results from dustmap query to stars:
df['{}_ext'.format(dustmap)] = ext
df['{}_ext_err'.format(dustmap)] = ext_err
df['{}_flags'.format(dustmap)] = flags
#print(list(zip(df['{}_flags'.format(dustmap)].tolist(),d)))
# Apply flags
df.loc[df['{}_flags'.format(dustmap)] > thresh, #i.e. star too close or too far, or algorithm did not converge
['{}_ext_err'.format(dustmap),
'{}_ext'.format(dustmap)] ] = np.nan
# Convert to ext values for 2MASS and PanSTARRS
# This table is Table 1 in http://argonaut.skymaps.info/usage
conversion = dict(zip(['g_PS1', 'r_PS1', 'i_PS1', 'z_PS1',
'y_PS1', 'J_2MASS', 'H_2MASS', 'K_2MASS'],
[3.518, 2.617, 1.971, 1.549,
1.263, 0.7927, 0.4690, 0.3026]))
for key in conversion:
# This nans everything that does not have extinction values given:
# m_band_intrinsic = m_band_observed - extinction_value * conversion_factor
df[key] -= (conversion[key] * df['{}_ext'.format(dustmap)])
# Propagate uncertainty in quadrature on extinction value to photometry:
df['e_{}'.format(key)] = np.sqrt(df['e_{}'.format(key)]**2 +
(conversion[key] * df['{}_ext_err'.format(dustmap)])**2)
return df
def query_dustmap_w_percentiles(d, ra, dec, dustmap='bayestar2019'):
'''Query 3D dustmaps. Cite Green (2018) if
you use them. This func queries the Bayestar2019
dust map remotely in default mode.
(The web interface takes the same arguments
as the local interface.)
Parameters:
-----------
d : 1d array with astropy units
distance along the line of sight
ra : 1d array with astropy units
RA in ICRS
dec: 1d array with astropy units
Declination ICRS
Return:
---------
ext : 1-d array of floats
Extinction value from given dustmap
ext_err : 1-d array of floats
uncertainty on ext
    flags : 1-d array of ints
        bit flags, summed per target:
        1 : uncertainties are symmetric below 0.10
        2 : uncertainties are symmetric below 0.05
        4 : no extinction value could be computed
        8 : target too close or too far for a reliable distance
        16 : algorithm did not converge
'''
with warnings.catch_warnings():
# silence warnings from astropy
warnings.filterwarnings('ignore', category=RuntimeWarning, append=True)
warnings.filterwarnings('ignore', category=AstropyDeprecationWarning, append=True)
# Query dustmaps frm Bayestar2019:
coords = SkyCoord(ra, dec, distance=d, frame='icrs')
q = BayestarWebQuery(version=dustmap)
E, quality = q(coords, mode='percentile', pct=[36,50,84], return_flags=True)
# Make output numpy-ish
E = np.array(E)
Efl = np.nan_to_num(E)
#print(quality)
# Flag outputs that either have asymmetric uncertainties, or are NaNs;
    flags = (np.abs(2 * Efl[:,1]-Efl[:,0]-Efl[:,2]) < 0.1).astype(int) * 1 #acceptable uncertainty
    flags += (np.abs(2 * Efl[:,1]-Efl[:,0]-Efl[:,2]) < 0.05).astype(int) * 2 #low uncertainty
flags[np.isnan(E[:,1])] += 4 #flags 1 and 2 failed to compute anything
flags[~quality["reliable_dist"]] += 8 #too close or too far target
flags[~quality["converged"]] += 16 #Algorithm did not converge
# define extinction value and uncertainty as mean of percentiles:
ext = E[:,1]
ext_err = (E[:,2]-E[:,0]) / 2.
return ext, ext_err, flags
def K_2MASS_to_KBB(K_2MASS, J_K_BB, e_K_2MASS=0., e_J_K_BB=0.):
'''
Kwargs are not yet tested!
Parameters:
-----------
K_2_MASS: Series of floats
K_s 2MASS magnitude
J_K_BB : Series of floats
J-K Bessel and Brett
e_K_2MASS : float
uncertainty on K_s 2MASS magnitude
e_JKBB : float
uncertainty on J-K Bessel and Brett color
Return:
--------
K_BB : K in Bessel and Brett system
e_K_BB : uncertainty on K_BB
'''
e_K_BB = e_K_2MASS**2 + 0.007**2 + J_K_BB**2 * 0.005**2 + 0.001**2 * e_J_K_BB**2
K_BB = K_2MASS + 0.039 - 0.001 * J_K_BB
return K_BB, e_K_BB
def color_2MASS_to_BB(df):
'''
Convert 2MASS J-K, H-K, and J-H to Bessel and Brett 1988
J-K, H-K, J-H, and K using relations from Carpenter2001.
'''
def e_A_B(row, A, B, e_A, e_B):
'''Calculate uncertainty on BB color.'''
e_AB = np.sqrt(e_A**2 + e_B**2)
return np.sqrt(e_AB**2 +
((A-B) * row.a1_sigma / row.a1)**2 +
row.a0_sigma**2) / row.a1
conv = pd.read_csv('{}/static/BB_to_TWOMASS.csv'.format(PACKAGEDIR))
for index, row in conv.iterrows():
b1, b2 = list(row.color)
dfb1, dfb2 = b1 + '_2MASS', b2 + '_2MASS'
e_dfb1, e_dfb2 = 'e_' + b1 + '_2MASS', 'e_' + b2 + '_2MASS'
f = lambda x: (x - row.a0) / row.a1
df['{}_{}_BB'.format(b1,b2)] = (df[dfb1] - df[dfb2]).apply(f)
df['e_{}_{}_BB'.format(b1,b2)] = e_A_B(row, df[dfb1], df[dfb2], df[e_dfb1], df[e_dfb2])
df['K_BB'], df['e_K_BB'] = K_2MASS_to_KBB(df.K_2MASS, df.J_K_BB)
return df
def color_BB_to_Johnson(df):
'''
Convert Bessel/Brett J-K, H-K, and J-H to Johnson
J-K, H-K, J-H, and K using relations from
Bessel and Brett 1988.
'''
conv = pd.read_csv('{}/static/BB_to_Johnson.csv'.format(PACKAGEDIR))
for index, row in conv.iterrows():
b1, b2 = list(row.color) # "JH" -> ["J", "H"]
bbcol = '{}_{}_BB'.format(b1,b2)
e_bbcol = 'e_{}_{}_BB'.format(b1,b2)
f = lambda x: row.a1 * x + row.a0
df['{}_{}_Johnson'.format(b1,b2)] = df[bbcol].apply(f)
e_f = lambda e_x: row.a1 * e_x
df['e_{}_{}_Johnson'.format(b1,b2)] = df[e_bbcol].apply(e_f)
df['K_Johnson'] = df['K_BB']
df['e_K_Johnson'] = df['e_K_BB']
df['J_Johnson'] = df.J_K_Johnson+df.K_Johnson
df['H_Johnson'] = df.H_K_Johnson+df.K_Johnson
df['e_J_Johnson'] = np.sqrt(df.e_J_K_Johnson**2 + df.e_K_Johnson**2)
df['e_H_Johnson'] = np.sqrt(df.e_H_K_Johnson**2 + df.e_K_Johnson**2)
return df
def color_2MASS_to_Johnson(df):
'''
Wrap up the full conversion needed to use color-temperature
relations in Boyajian+2013.
Individual functions are tested.
'''
df = color_2MASS_to_BB(df)
df = color_BB_to_Johnson(df)
return df
def Teff_Boyajian(df):
'''
Apply color-temperature relations from Boyajian et al. (2013).
    Drop values if they fall out of range.
'''
df = color_2MASS_to_Johnson(df)
colorcols = dict(zip(list('JHKgrizyBV'),
['J_Johnson','H_Johnson','K_Johnson',
'g_SDSS','r_SDSS','i_SDSS',
'z_SDSS','y_SDSS','B_K2','V_K2']))
bm = pd.read_csv('{}/static/boyajian13_optimized_for_Teff_calculation.csv'.format(PACKAGEDIR), skiprows=15)
for index, row in bm.iterrows():
b1, b2 = list(row.color)
dfb1, dfb2 = colorcols[b1], colorcols[b2]
x = df[dfb1] - df[dfb2]
feh = df['FeH']
x_err = np.sqrt(df["e_" + dfb1]**2 + df["e_" + dfb2]**2)
#apply range for each CTR:
x[((x < row.mag_min) | (x > row.mag_max))] = np.nan
# apply [Fe/H] range
# Boyajian's values are centered on solar with a spread about (-0.25; 0.25)
x[((feh < -.25) | (feh > .25))] = np.nan
teff = row.a0 + row.a1 * x + row.a2 * x**2 + row.a3 * x**3
Teffdes = 'Teff_{}_{}_Boy'.format(b1, b2)
df[Teffdes] = teff
df['e_' + Teffdes] = np.sqrt((row.a1 + row.a2 * 2 * x + row.a3 * 3 * x**2)**2 * x_err**2 +
(row.sigma_percent / 100. * teff)**2)
print("Number of {} ".format(Teffdes), df[Teffdes].count())
return df
def Teff_Apsis(df, av_uncertainty=175., trange=(4099,6750)):
"""Use Apsis (Andrae et al. 2018) Teff
values using the median uncertainty
in a region with reliable estimates
according to the paper.
Parameters:
------------
df : DataFrame
Return:
------
DataFrame
"""
# use Teffs from Apsis catalog
cols = ["Teff_Apsis", "e_Teff_Apsis"]
df[cols[0]] = df["teff_val_Gaia"]
df[cols[1]] = av_uncertainty
    # remove all values that fall outside of reliable regions
condition = (df[cols[0]] > trange[0]) & (df[cols[0]] < trange[1]) & (df.BPRP_Gaia < 2.)
df.loc[~condition, cols] = np.nan
return df
def mann_formula(x, p):
'''
Parameters:
------------
x : 4-tuple of arrays of floats
0: colors,
1: J-H or Fe/H optional,
2: uncertainty on colors,
3: uncertainty on J-H or Fe/H
p : Series
coefficients from Mann+2015 Table 2
'''
sig_phot_squared = (p.b + 2 * x[0] * p.c + 3 * x[0]**2 * p.d + 4 * x[0]**3 * p.e)**2 * x[2]**2 * p.Teff_factor**2
if p.formula == 4:
teff = p.a + x[0]*p.b + x[0]**2 * p.c + x[0]**3 * p.d + x[0]**4 * p.e
elif p.formula == 6:
teff = p.a + x[0]*p.b + x[0]**2 * p.c + x[0]**3 * p.d + x[0]**4 * p.e + x[1]*p.JH_FeH
sig_phot_squared += p.JH_FeH**2 * x[3]**2 * p.Teff_factor**2
elif p.formula == 7:
teff = p.a + x[0]*p.b + x[0]**2 * p.c + x[0]**3 * p.d + x[0]**4 * p.e + x[1]*p.JH_FeH + x[1]**2 * p.JH2
sig_phot_squared += (p.JH_FeH + p.JH2 * 2)**2 * x[3]**2 * p.Teff_factor**2
# if no uncertainties on colors are given, use 500 K to blow up uncertainties
sig_phot_squared[np.isnan(sig_phot_squared)] = 250000
sig_teff = np.sqrt(sig_phot_squared + p.add_in_quadrature_K**2 + p.sigma**2)
return teff * p.Teff_factor, sig_teff
def Teff_Mann(df):
'''
Apply color-temperature relations from the Erratum to Mann+2015.
Gaia uncertainties on extinction NOT included.
'''
mm = pd.read_csv('{}/static/mann15_optimized_for_Teff_calculation.csv'.format(PACKAGEDIR))
colorcols = dict(zip(['r','z','J','BP','RP', 'E_BP_RP'],
['r_SDSS','z_SDSS', 'J_2MASS',
'BP_Gaia','RP_Gaia','e_bp_min_rp_val_Gaia']))
# the last one is for extinction
for index, row in mm.iterrows():
#if Gaia apply corrected BP-RP
if row.c1=="BP":
color = df["BPRP_Gaia_corr"]
err = df["e_BPRP_Gaia_corr"]
else:
color = df[colorcols[row.c1]] - df[colorcols[row.c2]]
err = np.sqrt(df["e_" + colorcols[row.c1]]**2 + df["e_" + colorcols[row.c2]]**2)
if row.extra == 'isJH':
extra = df.J_2MASS - df.H_2MASS
extra_err = np.sqrt(df.e_J_2MASS**2 + df.e_H_2MASS**2)
elif row.extra == 'FeH':
extra = df.FeH
extra_err = df.e_FeH
else:
extra =
|
pd.Series()
|
pandas.Series
|
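# --- Illustrative sketch (not part of the files above/below) ---
# The colour-temperature relations above evaluate a polynomial in a colour
# Series; a toy version with invented coefficients (not Boyajian/Mann values):
import pandas as pd

color = pd.Series([0.5, 1.0, 1.5])
a0, a1, a2 = 8000.0, -2000.0, 150.0   # hypothetical coefficients
print(a0 + a1 * color + a2 * color**2)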
# AUTOGENERATED! DO NOT EDIT! File to edit: notebooks/04_Create_Acs_Indicators.ipynb (unless otherwise specified).
__all__ = ['getColName', 'getColByName', 'addKey', 'nullIfEqual', 'sumInts', 'age5', 'age18', 'age24', 'age64', 'age65',
'bahigher', 'carpool', 'drvalone', 'elheat', 'empl', 'fam', 'female', 'femhhs', 'heatgas', 'hisp', 'hh25inc',
'hh40inc', 'hh60inc', 'hh75inc', 'hhchpov', 'hhm75', 'hhs', 'hsdipl', 'lesshs', 'male', 'mhhi', 'drvalone',
'novhcl', 'nohhint', 'othercom', 'paa', 'p2more', 'pasi', 'pubtran', 'pwhite', 'sclemp', 'tpop', 'trav14',
'trav14', 'trav45', 'trav44', 'unempr', 'unempr', 'walked', 'createAcsIndicator']
# Cell
#@title Run This Cell: Misc Function Declarations
# These functions right here are used in the calculations below.
# Finds a column matching a substring
def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0]
def getColByName (df, col): return df[getColName(df, col)]
# Pulls a column from one dataset into a new dataset.
# This is not a crosswalk. calls getColByName()
def addKey(df, fi, col):
key = getColName(df, col)
val = getColByName(df, col)
fi[key] = val
return fi
# Return 0 if two specified columns are equal.
def nullIfEqual(df, c1, c2):
return df.apply(lambda x:
x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1)
# I'm thinking this doesn't need to be a function.
def sumInts(df): return df.sum(numeric_only=True)
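# Example (hypothetical frame): if df has a column named 'B01001_001E_Total',
# then getColName(df, '001E') returns 'B01001_001E_Total' and
# addKey(df, fi, '001E') copies that column into fi under the same name.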
# Cell
#@title Run This Cell: Create age5
#File: age5.py
#Author: <NAME>
#Date: 4/16/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B01001 - SEX BY AGE
# Universe: Total population
# Table Creates: tpop, female, male, age5 age18 age24 age64 age65
#purpose:
#input: #output:
import pandas as pd
import glob
def age5( df, columnsToInclude ):
fi = pd.DataFrame()
columns = ['B01001_027E_Total_Female_Under_5_years',
'B01001_003E_Total_Male_Under_5_years',
'B01001_001E_Total' , 'tract']
columns.extend(columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['final'] = ( df[ 'B01001_003E_Total_Male_Under_5_years' ]
+ df[ 'B01001_027E_Total_Female_Under_5_years' ]
) / df['B01001_001E_Total'] * 100
return fi
# Cell
#@title Run This Cell: age18
#File: age18.py
#Author: <NAME>
#Date: 4/16/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B01001 - SEX BY AGE
# Universe: Total population
# Table Creates: tpop, female, male, age5 age18 age24 age64 age65
#purpose:
#input: #output:
import pandas as pd
import numpy
import glob
def age18( df, columnsToInclude ):
fi = pd.DataFrame()
columns = ['B01001_001E_Total',
'B01001_004E_Total_Male_5_to_9_years',
'B01001_005E_Total_Male_10_to_14_years' ,
'B01001_006E_Total_Male_15_to_17_years',
'B01001_028E_Total_Female_5_to_9_years',
'B01001_029E_Total_Female_10_to_14_years' ,
'B01001_030E_Total_Female_15_to_17_years']
columns = df.filter(regex='001E|004E|005E|006E|028E|029E|030E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['final'] = ( df.filter(regex='004E|005E|006E|028E|029E|030E').sum(axis=1)
) / df['B01001_001E_Total:'] * 100
return fi
# Cell
#@title Run This Cell: Create age24
#File: age24.py
#Author: <NAME>
#Date: 9/8/21
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B01001 - SEX BY AGE
# Universe: Total population
# Table Creates: tpop, female, male, age5 age18 age24 age64 age65
#purpose:
#input: #output:
import pandas as pd
import glob
def age24( df, columnsToInclude ):
fi = pd.DataFrame()
columns = ['B01001_007E_Total_Male_18_and_19_years',
'B01001_008E_Total_Male_20_years',
'B01001_009E_Total_Male_21_years' ,
'B01001_010E_Total_Male_22_to_24_years' ,
'B01001_031E_Total_Female_18_and_19_years' ,
'B01001_032E_Total_Female_20_years' ,
'B01001_033E_Total_Female_21_years' ,
'B01001_034E_Total_Female_22_to_24_years',
'tract']
columns.extend(columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['final'] = ( df[ 'B01001_007E_Total_Male_18_and_19_years' ]
+ df[ 'B01001_008E_Total_Male_20_years' ]
+ df[ 'B01001_009E_Total_Male_21_years' ]
+ df[ 'B01001_010E_Total_Male_22_to_24_years' ]
+ df[ 'B01001_031E_Total_Female_18_and_19_years' ]
+ df[ 'B01001_032E_Total_Female_20_years' ]
+ df[ 'B01001_033E_Total_Female_21_years' ]
+ df[ 'B01001_034E_Total_Female_22_to_24_years' ]
) / df['B01001_001E_Total'] * 100
return fi
# Cell
#@title Run This Cell: age64
import pandas as pd
import glob
def age64( df, columnsToInclude ):
fi = pd.DataFrame()
columns = df.filter(regex='012E|013E|014E|015E|016E|017E|018E|019E|036E|037E|038E|039E|040E|041E|042E|043E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['final'] = ( df.filter(regex='012E|013E|014E|015E|016E|017E|018E|019E|036E|037E|038E|039E|040E|041E|042E|043E').sum(axis=1)
) / df['B01001_001E_Total:'] * 100
return fi
# Cell
#@title Run This Cell: age65
import pandas as pd
import glob
def age65( df, columnsToInclude ):
fi = pd.DataFrame()
columns = df.filter(regex='001E|020E|021E|022E|023E|024E|025E|044E|045E|046E|047E|048E|049E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['final'] = ( df.filter(regex='020E|021E|022E|023E|024E|025E|044E|045E|046E|047E|048E|049E').sum(axis=1)
) / df['B01001_001E_Total:'] * 100
return fi
# Cell
#@title Run This Cell: bahigher
import pandas as pd
import glob
def bahigher( df, columnsToInclude ):
fi = pd.DataFrame()
columns = df.filter(regex='005E|006E|001E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['final'] = ( df.filter(regex='005E|006E').sum(axis=1)
) / df['B06009_001E'] * 100
return fi
# Cell
#@title Run This Cell: - carpool
import pandas as pd
import glob
def carpool( df, columnsToInclude ):
# Final Dataframe
fi = pd.DataFrame()
columns = df.filter(regex='001E|049E|017E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['final'] = ( df.filter(regex='B08101_017E').sum(axis=1)
) / ( df.filter(regex='B08101_001E|B08101_049E').sum(axis=1)
) * 100
return fi
# Cell
#@title Run This Cell: - drvalone
import pandas as pd
import glob
def drvalone( df, columnsToInclude ):
# Final Dataframe
fi = pd.DataFrame()
columns = df.filter(regex='001E|049E|009E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['final'] = ( df.filter(regex='B08101_009E').sum(axis=1)
) / ( df.filter(regex='B08101_001E|B08101_049E').sum(axis=1)
) * 100
return fi
# Cell
#@ title Run This Cell: -elheat
import pandas as pd
import glob
def elheat( df, columnsToInclude ):
fi = pd.DataFrame()
columns = df.filter(regex='B25040_004E|B25040_001E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['final'] = ( df.filter(regex='B25040_004E').sum(axis=1)
) / ( df.filter(regex='B25040_001E').sum(axis=1)
) * 100
return fi
# Cell
#@ title Run This Cell: -empl
import pandas as pd
import glob
def empl( df, columnsToInclude ):
fi = pd.DataFrame()
columns = df.filter(regex='001E|049E|009E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['final'] = ( df.filter(regex='B08101_009E').sum(axis=1)
) / ( df.filter(regex='B08101_001E|B08101_049E').sum(axis=1)
) * 100
return fi
# Cell
#@ title Run This Cell: -fam
import pandas as pd
import glob
def fam( df, columnsToInclude ):
fi = pd.DataFrame()
columns = df.filter(regex='001E|049E|009E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['final'] = ( df.filter(regex='B08101_009E').sum(axis=1)
) / ( df.filter(regex='B08101_001E|B08101_049E').sum(axis=1)
) * 100
return fi
# Cell
#@ title Run This Cell: -female
import pandas as pd
import glob
def female( df, columnsToInclude ):
fi = pd.DataFrame()
columns = df.filter(regex='001E|049E|009E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['female'] = ( df.filter(regex='B08101_009E').sum(axis=1)
) / ( df.filter(regex='B08101_001E|B08101_049E').sum(axis=1)
) * 100
return fi
# Cell
#@ title Run This Cell: -femhhs
import pandas as pd
import glob
def femhhs( df, columnsToInclude ):
fi = pd.DataFrame()
columns = df.filter(regex='001E|049E|009E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['femhhs'] = ( df.filter(regex='B08101_009E').sum(axis=1)
) / ( df.filter(regex='B08101_001E|B08101_049E').sum(axis=1)
) * 100
return fi
# Cell
#@ title Run This Cell: -heatgas
import pandas as pd
import glob
def heatgas( df, columnsToInclude ):
fi = pd.DataFrame()
columns = df.filter(regex='001E|049E|009E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['final'] = ( df.filter(regex='B08101_009E').sum(axis=1)
) / ( df.filter(regex='B08101_001E|B08101_049E').sum(axis=1)
) * 100
return fi
# Cell
#@title Run This Cell: hisp
import pandas as pd
import glob
def hisp( df, columnsToInclude ):
fi = pd.DataFrame()
columns = ['B03002_001E_Total',
'B03002_012E_Total_Hispanic_or_Latino']
columns = df.filter(regex='001E|012E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
print('addKey df',df.columns,'fi',fi.columns,'col: ', col)
fi = addKey(df, fi, col)
print(' ')
fi['final'] = ( df.filter(regex='012E').sum(axis=1)
) / df['B03002_001E_Total:'] * 100
return fi
# Cell
#@title Run This Cell: hh25inc
import pandas as pd
import glob
def hh25inc( df, columnsToInclude ):
df.columns = df.columns.str.replace(r"[$]", "")
fi = pd.DataFrame()
columns = ['B19001_001E_Total',
"B19001_002E_Total_Less_than_10,000",
"B19001_003E_Total_10,000_to_14,999",
"B19001_004E_Total_15,000_to_19,999",
"B19001_005E_Total_20,000_to_24,999"]
columns = df.filter(regex='002E|003E|004E|005E|001E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
print('addKey col: ', col, df.columns)
fi = addKey(df, fi, col)
fi['final'] = ( df.filter(regex='002E|003E|004E|005E').sum(axis=1)
) / df['B19001_001E_Total:'] * 100
return fi
# Cell
#@ title Run This Cell: -hh40inc
import pandas as pd
import glob
def hh40inc( df, columnsToInclude ):
fi = pd.DataFrame()
columns = df.filter(regex='001E|049E|009E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['final'] = ( df.filter(regex='B08101_009E').sum(axis=1)
) / ( df.filter(regex='B08101_001E|B08101_049E').sum(axis=1)
) * 100
return fi
# Cell
#@ title Run This Cell: -hh60inc
import pandas as pd
import glob
def hh60inc( df, columnsToInclude ):
fi = pd.DataFrame()
columns = df.filter(regex='001E|049E|009E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['final'] = ( df.filter(regex='B08101_009E').sum(axis=1)
) / ( df.filter(regex='B08101_001E|B08101_049E').sum(axis=1)
) * 100
return fi
# Cell
#@ title Run This Cell: -hh75inc
import pandas as pd
import glob
def hh75inc( df, columnsToInclude ):
fi = pd.DataFrame()
columns = df.filter(regex='001E|049E|009E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['final'] = ( df.filter(regex='B08101_009E').sum(axis=1)
) / ( df.filter(regex='B08101_001E|B08101_049E').sum(axis=1)
) * 100
return fi
# Cell
#@ title Run This Cell: -hhchpov
import pandas as pd
import glob
def hhchpov( df, columnsToInclude ):
fi = pd.DataFrame()
columns = df.filter(regex='001E|049E|009E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['final'] = ( df.filter(regex='B08101_009E').sum(axis=1)
) / ( df.filter(regex='B08101_001E|B08101_049E').sum(axis=1)
) * 100
return fi
# Cell
#@ title Run This Cell: -hhm75
import pandas as pd
import glob
def hhm75( df, columnsToInclude ):
fi = pd.DataFrame()
columns = df.filter(regex='001E|049E|009E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['final'] = ( df.filter(regex='B08101_009E').sum(axis=1)
) / ( df.filter(regex='B08101_001E|B08101_049E').sum(axis=1)
) * 100
return fi
# Cell
#@ title Run This Cell: -hhs
import pandas as pd
import glob
def hhs( df, columnsToInclude ):
fi = pd.DataFrame()
columns = df.filter(regex='001E|049E|009E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['final'] = ( df.filter(regex='B08101_009E').sum(axis=1)
) / ( df.filter(regex='B08101_001E|B08101_049E').sum(axis=1)
) * 100
return fi
# Cell
#@ title Run This Cell: -hsdipl
import pandas as pd
import glob
def hsdipl( df, columnsToInclude ):
fi = pd.DataFrame()
columns = df.filter(regex='001E|049E|009E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['final'] = ( df.filter(regex='B08101_009E').sum(axis=1)
) / ( df.filter(regex='B08101_001E|B08101_049E').sum(axis=1)
) * 100
return fi
# Cell
#@ title Run This Cell: -lesshs
import pandas as pd
import glob
def lesshs( df, columnsToInclude ):
fi = pd.DataFrame()
columns = df.filter(regex='001E|049E|009E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['final'] = ( df.filter(regex='B08101_009E').sum(axis=1)
) / ( df.filter(regex='B08101_001E|B08101_049E').sum(axis=1)
) * 100
return fi
# Cell
#@ title Run This Cell: -male
import pandas as pd
import glob
def male( df, columnsToInclude ):
fi = pd.DataFrame()
columns = df.filter(regex='001E|049E|009E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['final'] = ( df.filter(regex='B08101_009E').sum(axis=1)
) / ( df.filter(regex='B08101_001E|B08101_049E').sum(axis=1)
) * 100
return fi
# Cell
# @title Run This Cell : Create MHHI
#File: mhhi.py
#Author: <NAME>
#Date: 1/24/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B19001 - HOUSEHOLD INCOME IN THE PAST 12 MONTHS (IN 2016 INFLATION-ADJUSTED DOLLARS)
# Universe: Households
# Table Creates: hh25 hh40 hh60 hh75 hhm75, mhhi
#purpose: Produce the household income indicators (hh25, hh40, hh60, hh75, hhm75, mhhi)
#input:
#output:
import pandas as pd
import glob
def mhhi( df, columnsToInclude = [] ):
info = pd.DataFrame(
[
['B19001_002E', 0, 10000],
['B19001_003E', 10000, 4999 ],
['B19001_004E', 15000, 4999 ],
['B19001_005E', 20000, 4999 ],
['B19001_006E', 25000, 4999 ],
['B19001_007E', 30000, 4999],
['B19001_008E', 35000, 4999 ],
['B19001_009E', 40000, 4999 ],
['B19001_010E', 45000, 4999 ],
['B19001_011E', 50000, 9999 ],
['B19001_012E', 60000, 14999],
['B19001_013E', 75000, 24999 ],
['B19001_014E', 100000, 24999 ],
['B19001_015E', 125000, 24999 ],
['B19001_016E', 150000, 49000 ],
['B19001_017E', 200000, 1000000000000000000000000 ],
],
columns=['variable', 'lower', 'range']
)
# Final Dataframe
data_table = pd.DataFrame()
for index, row in info.iterrows():
data_table = addKey(df, data_table, row['variable'])
# Accumulate totals across the columns.
# Midpoint: divide the last column (column index 16) of the cumulative totals by two.
temp_table = data_table.cumsum(axis=1)
temp_table['midpoint'] = (temp_table.iloc[ : , -1 :] /2) # V3
temp_table['midpoint_index'] = False
temp_table['midpoint_index_value'] = False # Z3
temp_table['midpoint_index_lower'] = False # W3
temp_table['midpoint_index_range'] = False # X3
temp_table['midpoint_index_minus_one_cumulative_sum'] = False #Y3
# step 3 - csa_agg3: get the midpoint index by "when midpoint > agg[1] and midpoint <= agg[2] then 2"
# Get CSA Midpoint Index using the breakpoints in our info table.
for index, row in temp_table.iterrows():
# Get the index of the first column where our midpoint is greater than the columns value.
midpoint = row['midpoint']
midpoint_index = 0
# For each column (except the 6 columns we just created)
# The tract's midpoint was less than the first value, at column 'B19001_002E_Total_Less_than_$10,000'
if( midpoint < int(row[0]) or row[-6] == False ):
temp_table.loc[ index, 'midpoint_index' ] = 0
else:
for column in row.iloc[:-6]:
# set midpoint index to the column with the highest value possible that is under midpoint
if( midpoint >= int(column) ):
if midpoint==False: print (str(column) + ' - ' + str(midpoint))
temp_table.loc[ index, 'midpoint_index' ] = midpoint_index +1
midpoint_index += 1
# temp_table = temp_table.drop('Unassigned--Jail')
for index, row in temp_table.iterrows():
temp_table.loc[ index, 'midpoint_index_value' ] = data_table.loc[ index, data_table.columns[row['midpoint_index']] ]
temp_table.loc[ index, 'midpoint_index_lower' ] = info.loc[ row['midpoint_index'] ]['lower']
temp_table.loc[ index, 'midpoint_index_range' ] = info.loc[ row['midpoint_index'] ]['range']
temp_table.loc[ index, 'midpoint_index_minus_one_cumulative_sum'] = row[ row['midpoint_index']-1 ]
# This is our denominator, which can't be negative.
for index, row in temp_table.iterrows():
if row['midpoint_index_value']==False:
temp_table.at[index, 'midpoint_index_value']=1;
#~~~~~~~~~~~~~~~
# Step 3)
# Run the Calculation
# Calculation (SQL form) = midpoint_lower::numeric + midpoint_range::numeric * ((midpoint - midpoint_upto_agg) / nullif(midpoint_total, 0))
# Calculation (spreadsheet form) = W3 + X3 * ((V3 - Y3) / Z3)
# V3 -> midpoint of households == sum / 2
# W3 -> lower limit of the income range containing the midpoint of the household total == row['lower']
# X3 -> width of the interval containing the median == row['range']
# Z3 -> number of households within the interval containing the median == row['total']
# Y3 -> cumulative frequency up to, but NOT including, the median interval
#~~~~~~~~~~~~~~~
def finalCalc(x):
return ( x['midpoint_index_lower']+ x['midpoint_index_range']*(
( x['midpoint']-x['midpoint_index_minus_one_cumulative_sum'])/ x['midpoint_index_value'] )
)
temp_table['final'] = temp_table.apply(lambda x: finalCalc(x), axis=1)
temp_table[columnsToInclude] = df[columnsToInclude]
return temp_table
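# A worked example of the grouped-median interpolation above (illustrative numbers, not taken from any tract):
# suppose three income buckets hold 25, 35 and 40 households, so the cumulative counts are 25, 60, 100
# and the midpoint is 100 / 2 = 50. The midpoint falls in the second bucket
# (lower = 10000, range = 4999, count = 35, cumulative below = 25), so the estimated median is
# 10000 + 4999 * ((50 - 25) / 35) ~= 13571 -- exactly what finalCalc computes per row.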
# Cell
#@ title Run This Cell: -nilf
import pandas as pd
import glob
def nilf( df, columnsToInclude ):
fi = pd.DataFrame()
columns = df.filter(regex='001E|049E|009E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['final'] = ( df.filter(regex='B08101_009E').sum(axis=1)
) / ( df.filter(regex='B08101_001E|B08101_049E').sum(axis=1)
) * 100
return fi
# Cell
#@title Run This Cell: novhcl
import pandas as pd
import glob
def novhcl( df, columnsToInclude ):
fi = pd.DataFrame()
columns = ['B08201_002E_Total_No_vehicle_available','B08201_001E_Total']
columns = df.filter(regex='002E|003E|004E|005E|001E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
print('addKey df',df.columns,'fi',fi.columns,'col: ', col)
fi = addKey(df, fi, col)
fi['final'] = ( df.filter(regex='002E').sum(axis=1)
) / df['B08201_001E_Total:'] * 100
return fi
# Cell
#@title Run This Cell: nohhint
import pandas as pd
import glob
def nohhint( df, columnsToInclude ):
fi = pd.DataFrame()
columns = ['B28011_001E_Total',
'B28011_002E_Total_With_an_Internet_subscription',
'B28011_003E_Total_With_an_Internet_subscription_Dial-up_alone',
'B28011_004E_Total_With_an_Internet_subscription_Broadband_such_as_cable,_fiber_optic,_or_DSL',
'B28011_005E_Total_With_an_Internet_subscription_Satellite_Internet_service',
'B28011_006E_Total_With_an_Internet_subscription_Other_service',
'B28011_007E_Total_Internet_access_without_a_subscription',
'B28011_008E_Total_No_Internet_access']
columns = df.filter(regex='008E|001E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
print('addKey df',df.columns,'col: ', col)
fi = addKey(df, fi, col)
print(' ')
# Calculate
fi['nohhint'] = ( df.filter(regex='008E').sum(axis=1)
) / df['B28011_001E_Total:'] * 100
return fi
# Cell
#@ title Run This Cell: -othercom
import pandas as pd
import glob
def othercom( df, columnsToInclude ):
fi = pd.DataFrame()
columns = df.filter(regex='001E|049E|009E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['othercom'] = ( df.filter(regex='B08101_009E').sum(axis=1)
) / ( df.filter(regex='B08101_001E|B08101_049E').sum(axis=1)
) * 100
return fi
# Cell
#@title Run This Cell: paa
import pandas as pd
import glob
def paa( df, columnsToInclude ):
fi = pd.DataFrame()
columns = ['B03002_001E_Total:',
'B03002_004E_Total_Not_Hispanic_or_Latino_Black_or_African_American_alone']
columns = df.filter(regex='001E|004E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
print('addKey df',df.columns,'fi',fi.columns,'col: ', col)
fi = addKey(df, fi, col)
fi['paa'] = ( df.filter(regex='004E').sum(axis=1)
) / df['B03002_001E_Total:'] * 100
return fi
# Cell
#@title Run This Cell: -p2more
import pandas as pd
import glob
def p2more( df, columnsToInclude ):
fi = pd.DataFrame()
columns = df.filter(regex='001E|049E|009E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['final'] = ( df.filter(regex='B08101_009E').sum(axis=1)
) / ( df.filter(regex='B08101_001E|B08101_049E').sum(axis=1)
) * 100
return fi
# Cell
#@title Run This Cell: -pasi ***
import pandas as pd
import glob
def pasi( df, columnsToInclude ):
fi = pd.DataFrame()
columns = df.filter(regex='001E|049E|009E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['final'] = ( df.filter(regex='006E').sum(axis=1)
) / ( df.filter(regex='B08101_001E|B08101_049E').sum(axis=1)
) * 100
return fi
# Cell
#@title Run This Cell: -pubtran
import pandas as pd
import glob
def pubtran( df, columnsToInclude ):
fi =
|
pd.DataFrame()
|
pandas.DataFrame
|
from copy import deepcopy
from typing import List
from typing import Tuple
import numpy as np
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal
from etna.datasets import generate_ar_df
from etna.datasets.tsdataset import TSDataset
from etna.transforms import AddConstTransform
from etna.transforms import FilterFeaturesTransform
from etna.transforms import LagTransform
from etna.transforms import MaxAbsScalerTransform
from etna.transforms import OneHotEncoderTransform
from etna.transforms import SegmentEncoderTransform
from etna.transforms import TimeSeriesImputerTransform
@pytest.fixture()
def tsdf_with_exog(random_seed) -> TSDataset:
df_1 = pd.DataFrame.from_dict({"timestamp": pd.date_range("2021-02-01", "2021-07-01", freq="1d")})
df_2 = pd.DataFrame.from_dict({"timestamp": pd.date_range("2021-02-01", "2021-07-01", freq="1d")})
df_1["segment"] = "Moscow"
df_1["target"] = [x ** 2 + np.random.uniform(-2, 2) for x in list(range(len(df_1)))]
df_2["segment"] = "Omsk"
df_2["target"] = [x ** 0.5 + np.random.uniform(-2, 2) for x in list(range(len(df_2)))]
classic_df = pd.concat([df_1, df_2], ignore_index=True)
df = TSDataset.to_dataset(classic_df)
classic_df_exog = generate_ar_df(start_time="2021-01-01", periods=600, n_segments=2)
classic_df_exog.rename(columns={"target": "exog"}, inplace=True)
df_exog = TSDataset.to_dataset(classic_df_exog)
ts = TSDataset(df=df, df_exog=df_exog, freq="1D")
return ts
@pytest.fixture()
def df_and_regressors() -> Tuple[pd.DataFrame, pd.DataFrame, List[str]]:
timestamp = pd.date_range("2021-01-01", "2021-02-01")
df_1 = pd.DataFrame({"timestamp": timestamp, "target": 11, "segment": "1"})
df_2 = pd.DataFrame({"timestamp": timestamp[5:], "target": 12, "segment": "2"})
df = pd.concat([df_1, df_2], ignore_index=True)
df = TSDataset.to_dataset(df)
timestamp = pd.date_range("2020-12-01", "2021-02-11")
df_1 = pd.DataFrame({"timestamp": timestamp, "regressor_1": 1, "regressor_2": 2, "segment": "1"})
df_2 = pd.DataFrame({"timestamp": timestamp[5:], "regressor_1": 3, "regressor_2": 4, "segment": "2"})
df_exog = pd.concat([df_1, df_2], ignore_index=True)
df_exog = TSDataset.to_dataset(df_exog)
return df, df_exog, ["regressor_1", "regressor_2"]
@pytest.fixture()
def df_and_regressors_flat() -> Tuple[pd.DataFrame, pd.DataFrame]:
"""Return flat versions of df and df_exog."""
timestamp = pd.date_range("2021-01-01", "2021-02-01")
df_1 = pd.DataFrame({"timestamp": timestamp, "target": 11, "segment": "1"})
df_2 = pd.DataFrame({"timestamp": timestamp[5:], "target": 12, "segment": "2"})
df = pd.concat([df_1, df_2], ignore_index=True)
timestamp = pd.date_range("2020-12-01", "2021-02-11")
df_1 = pd.DataFrame(
{"timestamp": timestamp, "regressor_1": 1, "regressor_2": "3", "regressor_3": 5, "segment": "1"}
)
df_2 = pd.DataFrame(
{"timestamp": timestamp[5:], "regressor_1": 2, "regressor_2": "4", "regressor_3": 6, "segment": "2"}
)
df_exog = pd.concat([df_1, df_2], ignore_index=True)
df_exog["regressor_2"] = df_exog["regressor_2"].astype("category")
df_exog["regressor_3"] = df_exog["regressor_3"].astype("category")
return df, df_exog
@pytest.fixture
def ts_with_categoricals():
timestamp = pd.date_range("2021-01-01", "2021-01-05")
df_1 = pd.DataFrame({"timestamp": timestamp, "target": 11, "segment": "1"})
df_2 = pd.DataFrame({"timestamp": timestamp, "target": 12, "segment": "2"})
df = pd.concat([df_1, df_2], ignore_index=True)
df = TSDataset.to_dataset(df)
timestamp = pd.date_range("2021-01-01", "2021-01-06")
categorical_values = ["1", "2", "1", "2", "1", "2"]
df_1 = pd.DataFrame(
{"timestamp": timestamp, "regressor": categorical_values, "not_regressor": categorical_values, "segment": "1"}
)
df_2 = pd.DataFrame(
{"timestamp": timestamp, "regressor": categorical_values, "not_regressor": categorical_values, "segment": "2"}
)
df_exog = pd.concat([df_1, df_2], ignore_index=True)
df_exog = TSDataset.to_dataset(df_exog)
ts = TSDataset(df=df, freq="D", df_exog=df_exog, known_future=["regressor"])
return ts
@pytest.fixture()
def ts_future(example_reg_tsds):
future = example_reg_tsds.make_future(10)
return future
@pytest.fixture
def df_segments_int():
"""DataFrame with integer segments."""
timestamp = pd.date_range("2021-01-01", "2021-02-01")
df1 = pd.DataFrame({"timestamp": timestamp, "target": 3, "segment": 1})
df2 = pd.DataFrame({"timestamp": timestamp, "target": 4, "segment": 2})
df = pd.concat([df1, df2], ignore_index=True)
return df
def test_check_endings_error():
"""Check that _check_endings method raises exception if some segments end with nan."""
timestamp = pd.date_range("2021-01-01", "2021-02-01")
df1 = pd.DataFrame({"timestamp": timestamp, "target": 11, "segment": "1"})
df2 = pd.DataFrame({"timestamp": timestamp[:-5], "target": 12, "segment": "2"})
df = pd.concat([df1, df2], ignore_index=True)
df = TSDataset.to_dataset(df)
ts = TSDataset(df=df, freq="D")
with pytest.raises(ValueError):
ts._check_endings()
def test_check_endings_pass():
"""Check that _check_endings method passes if there is no nans at the end of all segments."""
timestamp = pd.date_range("2021-01-01", "2021-02-01")
df1 = pd.DataFrame({"timestamp": timestamp, "target": 11, "segment": "1"})
df2 =
|
pd.DataFrame({"timestamp": timestamp, "target": 12, "segment": "2"})
|
pandas.DataFrame
|
import sys
import pandas as pd
from rich import print
from rich.table import Table
from rich.box import MINIMAL
import bgheatmaps as bgh
from pathlib import Path
import brainrender
from myterial import blue_light, blue_lighter
sys.path.append("./")
from data.dbase import db_tables
brainrender.settings.BACKGROUND_COLOR = "white"
save_fld = Path(r"D:\Dropbox (UCL)\Rotation_vte\Locomotion\analysis\ephys")
"""
Gets the total number of units for each brain region across all recordings.
It prints the count in a nicely formatted table + creates a heatmap to display the numbers.
"""
# ---------------------------------------------------------------------------- #
# get/count units #
# ---------------------------------------------------------------------------- #
recordings = (db_tables.Recording).fetch(as_dict=True)
all_units = []
for recording in recordings:
cf = recording["recording_probe_configuration"]
units = db_tables.Unit.get_session_units(
recording["name"],
cf,
spikes=True,
firing_rate=False,
frate_window=100,
)
units["probe_configuration"] = [cf] * len(units)
units["recording"] = [recording["mouse_id"]] * len(units)
units_regions = []
for i, unit in units.iterrows():
if "RSP" in unit.brain_region:
units_regions.append("RSP")
elif "VISp" in unit.brain_region:
units_regions.append("VISp")
else:
units_regions.append(unit.brain_region)
units["brain_region"] = units_regions
# rsites = pd.DataFrame(
# (
# db_tables.Probe.RecordingSite
# & recording
# & f'probe_configuration="{cf}"'
# ).fetch()
# )
if not len(units):
continue
all_units.append(units)
all_units =
|
pd.concat(all_units)
|
pandas.concat
|
# Series of helper classes and functions specific to this project.
from collections import defaultdict
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
#---------- Classes ---------- #
class DroneAttributes(object):
"""Drone attributes as a function of time, derived from log file data."""
def __init__(self, log_file_name):
self._log_file_dict = None
self._load_log(log_file_name)
@property
def log_file_dict(self):
return self._log_file_dict
@property
def airspeed(self):
array_t = np.array(self.local_velocity.index)
array_airspeed_mag = np.linalg.norm(self.local_velocity, axis=1)
df = pd.DataFrame(array_airspeed_mag, index=array_t, columns=["mag"])
df.index.name = "t"
return df
@property
def airspeed_rate(self):
df = self.airspeed
# Keep only 1 row out of each 100 rows.
# This reduces problems of diverging derivatives when dividing by a very small time step.
df = df.iloc[::100,:]
t0 = df.index[:-1].values # All values, excluding the last one.
t1 = df.index[1:].values # All values, excluding the first one.
delta_t = t1-t0
airspeed_t0 = df.mag.iloc[0:-1].values # All values, excluding the last one.
airspeed_t1 = df.mag.iloc[1:].values # All values, excluding the first one.
delta_airspeed = airspeed_t1 - airspeed_t0
data = np.array([delta_t, delta_airspeed]).T
df = pd.DataFrame(data, index=t1, columns=["delta_t", "delta_airspeed"])
df.index.name = "t"
df = df[df.delta_t != 0] # Drop all lines where delta_t equals 0 (would cause NaN or Inf values)
df["mag"] = df["delta_airspeed"] / df["delta_t"]
df = df.drop(columns=["delta_t", "delta_airspeed"])
return df
@property
def global_position(self):
l = self._log_file_dict["GLOBAL_POSITION"]
df =
|
pd.DataFrame(l, columns=["t","lat","lon","alt"], dtype=np.float32)
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
"""<EMAIL>.
Description: one-hot encoding for the manufacturer name
"""
import os
from pyspark.sql import SparkSession
from dataparepare import *
from interfere import *
from pdu_feature import *
from pyspark.sql.types import *
from pyspark.sql.functions import pandas_udf, PandasUDFType
from pyspark.sql.functions import monotonically_increasing_id
from pyspark.sql.functions import regexp_replace
from pyspark.sql.functions import concat
from pyspark.sql.functions import broadcast
from pyspark.sql.functions import isnull
import pandas as pd
import numpy
from pyspark.sql import Window
from pyspark.sql.functions import rank
def prepare():
os.environ["PYSPARK_PYTHON"] = "python3"
# Read the data from the S3 bucket
spark = SparkSession.builder \
.master("yarn") \
.appName("CPA&GYC match refactor") \
.config("spark.driver.memory", "2g") \
.config("spark.executor.cores", "2") \
.config("spark.executor.instances", "2") \
.config("spark.executor.memory", "2g") \
.config('spark.sql.codegen.wholeStage', False) \
.config("spark.sql.execution.arrow.enabled", "true") \
.getOrCreate()
access_key = os.getenv("AWS_ACCESS_KEY_ID")
secret_key = os.getenv("AWS_SECRET_ACCESS_KEY")
if access_key is not None:
spark._jsc.hadoopConfiguration().set("fs.s3a.access.key", access_key)
spark._jsc.hadoopConfiguration().set("fs.s3a.secret.key", secret_key)
spark._jsc.hadoopConfiguration().set("fs.s3a.impl","org.apache.hadoop.fs.s3a.S3AFileSystem")
spark._jsc.hadoopConfiguration().set("com.amazonaws.services.s3.enableV4", "true")
# spark._jsc.hadoopConfiguration().set("fs.s3a.aws.credentials.provider","org.apache.hadoop.fs.s3a.BasicAWSCredentialsProvider")
spark._jsc.hadoopConfiguration().set("fs.s3a.endpoint", "s3.cn-northwest-1.amazonaws.com.cn")
return spark
@pandas_udf(DoubleType(), PandasUDFType.SCALAR)
def efftiveness_with_jaro_winkler_similarity_in_hc_mapping(cn, sn):
def jaro_similarity(s1, s2):
# First, store the length of the strings
# because they will be re-used several times.
# len_s1 = 0 if s1 is None else len(s1)
# len_s2 = 0 if s2 is None else len(s2)
len_s1, len_s2 = len(s1), len(s2)
# The upper bound of the distance for being a matched character.
match_bound = max(len_s1, len_s2) // 2 - 1
# Initialize the counts for matches and transpositions.
matches = 0 # no.of matched characters in s1 and s2
transpositions = 0 # no. of transpositions between s1 and s2
flagged_1 = [] # positions in s1 which are matches to some character in s2
flagged_2 = [] # positions in s2 which are matches to some character in s1
# Iterate through sequences, check for matches and compute transpositions.
for i in range(len_s1): # Iterate through each character.
upperbound = min(i + match_bound, len_s2 - 1)
lowerbound = max(0, i - match_bound)
for j in range(lowerbound, upperbound + 1):
if s1[i] == s2[j] and j not in flagged_2:
matches += 1
flagged_1.append(i)
flagged_2.append(j)
break
flagged_2.sort()
for i, j in zip(flagged_1, flagged_2):
if s1[i] != s2[j]:
transpositions += 1
if matches == 0:
return 0
else:
return (
1
/ 3
* (
matches / len_s1
+ matches / len_s2
+ (matches - transpositions // 2) / matches
)
)
def jaro_winkler_similarity(s1, s2, p=0.1, max_l=4):
if not 0 <= max_l * p <= 1:
print("The product `max_l * p` might not fall between [0,1].Jaro-Winkler similarity might not be between 0 and 1.")
# Compute the Jaro similarity
jaro_sim = jaro_similarity(s1, s2)
# Initialize the upper bound for the no. of prefixes.
# if user did not pre-define the upperbound,
# use shorter length between s1 and s2
# Compute the prefix matches.
l = 0
# zip() will automatically loop until the end of shorter string.
for s1_i, s2_i in zip(s1, s2):
if s1_i == s2_i:
l += 1
else:
break
if l == max_l:
break
# Return the similarity value as described in docstring.
return jaro_sim + (l * p * (1 - jaro_sim))
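# Hand-checked example of the two helpers above (using the defaults p=0.1, max_l=4):
# "MARTHA" vs "MARHTA" yields 6 matched characters and 2 flagged mismatches (1 transposition),
# so jaro_similarity = (1/3) * (6/6 + 6/6 + 5/6) ~= 0.944; the common prefix "MAR" (l = 3)
# lifts it to jaro_winkler_similarity ~= 0.944 + 3 * 0.1 * (1 - 0.944) ~= 0.961.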
frame = {
"NAME": cn,
"STANDARD_NAME": sn,
}
df =
|
pd.DataFrame(frame)
|
pandas.DataFrame
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import tempfile
import typing as tp
from pathlib import Path
from unittest.mock import patch
import pandas as pd
import numpy as np
from ...common import testing
from . import datasets
def test_get_dataset_filepath() -> None:
name = "__ Test Dataset __"
assert name not in datasets._NAMED_URLS
datasets._NAMED_URLS[name] = "file://{}".format(Path(__file__).absolute())
with patch("requests.get") as req:
req.return_value.content = b"blublu"
filepath = datasets.get_dataset_filepath(name)
assert filepath.exists()
with filepath.open("r") as f:
text = f.read()
try:
assert "blublu" in text, f"Found text:\n{text}" # this is my file :)
except AssertionError as e:
raise e
finally:
filepath.unlink()
np.testing.assert_raises(ValueError, datasets.get_dataset_filepath, "Bidule")
del datasets._NAMED_URLS[name]
assert not filepath.exists()
assert name not in datasets._NAMED_URLS # make sure it is clean
@testing.parametrized(
german_towns=(
"German towns",
"""# Hey, this is a comment
#
320.9 13024 346.5
320.9 13024 346.5
""",
[[320.9, 13024, 346.5], [320.9, 13024, 346.5]],
),
ruspini=(
"Ruspini",
""" 5 74
11 59""",
[[5, 74], [11, 59]],
),
)
def test_get_data(name: str, text: str, expected: tp.List[tp.List[float]]) -> None:
with tempfile.TemporaryDirectory() as tmp:
# create an example file
filepath = Path(tmp) / "example.txt"
with filepath.open("w") as f:
f.write(text)
# get the output
with patch("nevergrad.functions.mlda.datasets.get_dataset_filepath") as path_getter:
path_getter.return_value = filepath
output = datasets.get_data(name)
np.testing.assert_array_equal(output, expected)
@testing.parametrized(**{name: (name,) for name in datasets._NAMED_URLS})
def test_mocked_data(name: str) -> None:
with datasets.mocked_data(): # this test makes sure we are able to mock all data, so that xp tests can run
data = datasets.get_data(name)
assert isinstance(data, (np.ndarray, pd.DataFrame))
def test_make_perceptron_data() -> None:
for name, value in [("quadratic", 0.02028), ("sine", 0.14191), ("abs", 0.1424), ("heaviside", 1)]:
data = datasets.make_perceptron_data(name)
np.testing.assert_equal(data.shape, (50, 2))
np.testing.assert_almost_equal(data[28, 0], 0.1424)
np.testing.assert_almost_equal(data[28, 1], value, decimal=5, err_msg=f"Wrong value for {name}")
def test_xls_get_data() -> None:
with tempfile.TemporaryDirectory() as tmp:
# create an example file
filepath = Path(tmp) / "example.xls"
df =
|
pd.DataFrame(columns=["a", "b"], data=[[1, 2], [3, 4]])
|
pandas.DataFrame
|
import datetime as dt
import matplotlib.pyplot as plt
import lifetimes
import numpy as np
import os
import pandas as pd
import seaborn as sns
def numcard(x):
return x.nunique(), len(x)
def todateclean(x):
return pd.to_datetime(x, errors='coerce').dt.date.astype('datetime64')
"""
- info, shape, dtypes
- df.isnull().sum() #Check for null counts/ value_counts()
- Check for suspected imputed values (are there suspicious values of 0, like for Age?)
- change zeros to nans where appropriate
- Imputation of missing values
- handle stringified json
- df.dtypes # in case obj to (df.colname = df.colname.astype("category"))
- df['colname'] = pd.to_datetime(df['colname']).dt.date
- df.drop("colname", axis=1) # drop columns
- How balanced are the outcomes?
X = df.drop("diagnosis", axis=1) # just saying which axis again
Y = df["diagnosis"] # this is just a series now
col = X.columns # if we do type(col), it's an Index
X.isnull().sum() # this covers every column in the df.
def rangenorm(x):
return (x - x.mean())/(x.max() - x.min())
le = LabelEncoder()
le.fit(Y_norm)
"""
df = pd.read_csv("./ignoreland/onlineretail.csv")
df.info()
df.apply(lambda x: numcard(x))
datecols = ['InvoiceDate']
df.loc[:, datecols] = df.loc[:,datecols].apply(lambda x: todateclean(x))
dfnew = df[(df.Quantity>0) & (df.CustomerID.isnull()==False)]
dfnew['amt'] = dfnew['Quantity'] * dfnew['UnitPrice']
dfnew.describe()
from lifetimes.plotting import *
from lifetimes.utils import *
observation_period_end = '2011-12-09'
monetary_value_col = 'amt'
modeldata = summary_data_from_transaction_data(dfnew,
'CustomerID',
'InvoiceDate',
monetary_value_col=monetary_value_col,
observation_period_end=observation_period_end)
modeldata.head()
modeldata.info() # 4 floats.
# Eyeball distribution of frequency (calculated)
modeldata['frequency'].plot(kind='hist', bins=50)
print(modeldata['frequency'].describe())
print(modeldata['recency'].describe())
print(sum(modeldata['frequency'] == 0)/float(len(modeldata)))
##### Lec21
from lifetimes import BetaGeoFitter
# similar to lifelines
bgf = BetaGeoFitter(penalizer_coef=0.0) # no regularization param.
bgf.fit(modeldata['frequency'], modeldata['recency'], modeldata['T'])
print(bgf)
# See https://www.youtube.com/watch?v=guj2gVEEx4s and
# https://www.youtube.com/watch?v=gx6oHqpRgpY
## residual lifetime value is more useful construct
from lifetimes.plotting import plot_frequency_recency_matrix
plot_frequency_recency_matrix(bgf)
from lifetimes.plotting import plot_probability_alive_matrix
plot_probability_alive_matrix(bgf)
# lec 24:
# set an outer time boundary and predict cumulative purchases by that time
t = 10 # from now until now+t periods
modeldata['predicted_purchases'] = \
bgf.conditional_expected_number_of_purchases_up_to_time(t,
modeldata['frequency'],
modeldata['recency'],
modeldata['T'])
modeldata.sort_values(by='predicted_purchases').tail(5)
modeldata.sort_values(by='predicted_purchases').head(5)
# lec 25: validation of model
from lifetimes.plotting import plot_period_transactions
plot_period_transactions(bgf) # this plot shows very clearly the model performance
# in terms of transaction volume fit
# Lec 26: splitting into train and test (by time period)
summary_cal_holdout = calibration_and_holdout_data(df,
'CustomerID',
'InvoiceDate',
calibration_period_end='2011-06-08',
observation_period_end='2011-12-09')
summary_cal_holdout.head()
bgf.fit(summary_cal_holdout['frequency_cal'],
summary_cal_holdout['recency_cal'],
summary_cal_holdout['T_cal'])
from lifetimes.plotting import plot_calibration_purchases_vs_holdout_purchases
plot_calibration_purchases_vs_holdout_purchases(bgf, summary_cal_holdout)
from lifetimes.plotting import plot_history_alive
days_since_birth = 365
fig = plt.figure(figsize=(12,8))
id = 14621 # choose a customer id
sp_trans = df.loc[df['CustomerID'] == id] # specific customer's covariates
plot_history_alive(bgf, days_since_birth, sp_trans, 'InvoiceDate')
# Lec28: Subsetting to customers who repurchase.
returning_customers_summary = modeldata[modeldata['frequency']>0]
returning_customers_summary.head()
returning_customers_summary.shape
# Lec 29: gamma-gamma model for LTV
# Note: good practice to confirm small/no apparent corr for frequency and mean trxn value
# Rev per trxn: predict total monetary value.
# The Beta param for the gamma model of total spend is itself assumed gamma distributed
# that is where the name comes from.
# the expectation of total spend for person i is calculated in empirical-Bayes fashion, as a weighted
# mean of population average and the sample mean for person i.
# eq 5 in http://www.brucehardie.com/notes/025/gamma_gamma.pdf shows the arithmetic
# https://antonsruberts.github.io/lifetimes-CLV/ also great additional code.
# derivation here: http://www.brucehardie.com/notes/025/gamma_gamma.pdf
# Output of ggf fitter:
# p = the 'alpha' param in the gamma dist: E(Z|p, v) = p/v. Alpha adds upon convolution.
# q = the alpha param in the gamma dist of v -- v is gamma(q, gam) in the pop
# v = the 'beta' param in gamma dist. constant upon convolution.
# -- Note that v varies among customers (ie, is gamma distributed)
from lifetimes import GammaGammaFitter
ggf = GammaGammaFitter(penalizer_coef=0.0)
ggf.fit(returning_customers_summary['frequency'],
returning_customers_summary['monetary_value'])
ggf.summary
ggf.conditional_expected_average_profit(modeldata['frequency'],
modeldata['monetary_value'])
# cond_exp_avg_profit => gives prediction of mean trxn value.
a0 = returning_customers_summary['monetary_value'].shape[0] # 2790 customers
# Total spend:
a1 = returning_customers_summary['monetary_value'].sum()
# Total time units (here, days) with purchase:
a2 = returning_customers_summary['frequency'].sum()
# Mean monetary value (over all purchase days), roughly equal to estimated v
returning_customers_summary['monetary_value'].mean()
ggf.summary
p_here = ggf.summary.iloc[0,0]
q_here = ggf.summary.iloc[1,0]
v_here = ggf.summary.iloc[2,0] # model says 486; empirical average is 477.
money_per_customer = a1/a0
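# A minimal sketch of the empirical-Bayes weighting described above (eq. 5 of the Fader/Hardie
# gamma-gamma note), assuming lifetimes' p/q/v parameterization in which the population mean
# spend is p*v/(q-1): the conditional mean spend is a weighted average of that population mean
# and the customer's observed mean, with the weight shifting toward the observed mean as the
# purchase count x grows. The customer picked below (row 0) is arbitrary, purely for illustration.
x_0 = returning_customers_summary['frequency'].iloc[0]
mbar_0 = returning_customers_summary['monetary_value'].iloc[0]
w_pop = (q_here - 1) / (p_here * x_0 + q_here - 1)
eb_mean_spend_0 = w_pop * (p_here * v_here / (q_here - 1)) + (1 - w_pop) * mbar_0
# This should line up with ggf.conditional_expected_average_profit evaluated for that same customer.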
###############
# review, per documentation:
bgf.summary
# r, alpha = shape, scale for gamma dist that represents sum (convolution) of purchase rates
# a = alpha param for beta dist of churn
# b = beta param for beta dist of churn
x = np.random.gamma(.784, 49.28,10000) # r, alpha, n
bgf.summary.loc["a",:][0]/ (bgf.summary.loc["b",:][0] + bgf.summary.loc["a",:][0])
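# a/(a+b) above is the mean of the Beta(a, b) dropout distribution, i.e. the model's average
# per-transaction-opportunity probability that a customer becomes inactive.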
###################################
# lec31: other models
dfnew.dtypes
dfnew_train = dfnew[dfnew.InvoiceDate < '2011-11-09']
dfnew_test = dfnew[dfnew.InvoiceDate >= '2011-11-09']
dfnew_test.shape
dfnew_train.shape
maxdate = dfnew_train.InvoiceDate.max()
mindate = dfnew_train.InvoiceDate.min()
dfnew_train['duration'] = (maxdate - dfnew_train.InvoiceDate)/np.timedelta64(1,'D')
dfsum1 = dfnew_train.groupby(['CustomerID'])['duration'].min().reset_index()
dfsum1.rename(columns = {'duration':'lasttime'}, inplace=True) # time from lasttime to now
dfsum2 = dfnew_train.groupby(['CustomerID'])['duration'].max().reset_index()
dfsum2.rename(columns = {'duration':'firsttime'}, inplace=True) # time from firsttime to now
dfnew_train['freq'] = 1
dfsum3 = dfnew_train.groupby(['CustomerID'])['freq'].sum().reset_index() # count of transactions by customer
dfnew_train['freq3m'] = 1
dfsum4 = dfnew_train[dfnew_train['duration'] < 91].groupby(['CustomerID'])['freq3m'].sum().reset_index()
# now let's merge the four customer-level datasets together.
# pd.concat would use the indexes as join keys, so we use pd.merge on CustomerID (via reduce) instead.
from functools import reduce
dfs = [dfsum1, dfsum2, dfsum3, dfsum4]
dfsum = reduce(lambda left, right: pd.merge(left, right, on=['CustomerID'], how='outer'), dfs)
dfsum.shape
[_ for _ in map(lambda x: x.shape, dfs)]
dfsum.head()
###################
other_data = pd.read_csv("./ignoreland/oth.csv")
other_data.head()
dfsum =
|
pd.merge(dfsum, other_data, on=['CustomerID'], how='left')
|
pandas.merge
|
import re
import numpy as np
import pytest
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
import pandas as pd
from pandas import IntervalIndex, MultiIndex, RangeIndex
import pandas.util.testing as tm
def test_labels_dtypes():
# GH 8456
i = MultiIndex.from_tuples([("A", 1), ("A", 2)])
assert i.codes[0].dtype == "int8"
assert i.codes[1].dtype == "int8"
i = MultiIndex.from_product([["a"], range(40)])
assert i.codes[1].dtype == "int8"
i = MultiIndex.from_product([["a"], range(400)])
assert i.codes[1].dtype == "int16"
i = MultiIndex.from_product([["a"], range(40000)])
assert i.codes[1].dtype == "int32"
i = pd.MultiIndex.from_product([["a"], range(1000)])
assert (i.codes[0] >= 0).all()
assert (i.codes[1] >= 0).all()
def test_values_boxed():
tuples = [
(1, pd.Timestamp("2000-01-01")),
(2, pd.NaT),
(3, pd.Timestamp("2000-01-03")),
(1, pd.Timestamp("2000-01-04")),
(2, pd.Timestamp("2000-01-02")),
(3, pd.Timestamp("2000-01-03")),
]
result = pd.MultiIndex.from_tuples(tuples)
expected = construct_1d_object_array_from_listlike(tuples)
tm.assert_numpy_array_equal(result.values, expected)
# Check that code branches for boxed values produce identical results
tm.assert_numpy_array_equal(result.values[:4], result[:4].values)
def test_values_multiindex_datetimeindex():
# Test to ensure we hit the boxing / nobox part of MI.values
ints = np.arange(10 ** 18, 10 ** 18 + 5)
naive = pd.DatetimeIndex(ints)
# TODO(GH-24559): Remove the FutureWarning
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
aware = pd.DatetimeIndex(ints, tz="US/Central")
idx = pd.MultiIndex.from_arrays([naive, aware])
result = idx.values
outer = pd.DatetimeIndex([x[0] for x in result])
tm.assert_index_equal(outer, naive)
inner = pd.DatetimeIndex([x[1] for x in result])
tm.assert_index_equal(inner, aware)
# n_lev > n_lab
result = idx[:2].values
outer = pd.DatetimeIndex([x[0] for x in result])
tm.assert_index_equal(outer, naive[:2])
inner = pd.DatetimeIndex([x[1] for x in result])
tm.assert_index_equal(inner, aware[:2])
def test_values_multiindex_periodindex():
# Test to ensure we hit the boxing / nobox part of MI.values
ints = np.arange(2007, 2012)
pidx = pd.PeriodIndex(ints, freq="D")
idx = pd.MultiIndex.from_arrays([ints, pidx])
result = idx.values
outer = pd.Int64Index([x[0] for x in result])
tm.assert_index_equal(outer, pd.Int64Index(ints))
inner = pd.PeriodIndex([x[1] for x in result])
tm.assert_index_equal(inner, pidx)
# n_lev > n_lab
result = idx[:2].values
outer = pd.Int64Index([x[0] for x in result])
tm.assert_index_equal(outer, pd.Int64Index(ints[:2]))
inner = pd.PeriodIndex([x[1] for x in result])
tm.assert_index_equal(inner, pidx[:2])
def test_consistency():
# need to construct an overflow
major_axis = list(range(70000))
minor_axis = list(range(10))
major_codes = np.arange(70000)
minor_codes = np.repeat(range(10), 7000)
# the fact that it works means it's consistent
index = MultiIndex(
levels=[major_axis, minor_axis], codes=[major_codes, minor_codes]
)
# inconsistent
major_codes = np.array([0, 0, 1, 1, 1, 2, 2, 3, 3])
minor_codes = np.array([0, 1, 0, 1, 1, 0, 1, 0, 1])
index = MultiIndex(
levels=[major_axis, minor_axis], codes=[major_codes, minor_codes]
)
assert index.is_unique is False
def test_hash_collisions():
# non-smoke test that we don't get hash collisions
index = MultiIndex.from_product(
[np.arange(1000), np.arange(1000)], names=["one", "two"]
)
result = index.get_indexer(index.values)
tm.assert_numpy_array_equal(result, np.arange(len(index), dtype="intp"))
for i in [0, 1, len(index) - 2, len(index) - 1]:
result = index.get_loc(index[i])
assert result == i
def test_dims():
pass
def take_invalid_kwargs():
vals = [["A", "B"], [pd.Timestamp("2011-01-01"), pd.Timestamp("2011-01-02")]]
idx = pd.MultiIndex.from_product(vals, names=["str", "dt"])
indices = [1, 2]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
with pytest.raises(TypeError, match=msg):
idx.take(indices, foo=2)
msg = "the 'out' parameter is not supported"
with pytest.raises(ValueError, match=msg):
idx.take(indices, out=indices)
msg = "the 'mode' parameter is not supported"
with pytest.raises(ValueError, match=msg):
idx.take(indices, mode="clip")
def test_isna_behavior(idx):
# should not segfault GH5123
# NOTE: if MI representation changes, may make sense to allow
# isna(MI)
msg = "isna is not defined for MultiIndex"
with pytest.raises(NotImplementedError, match=msg):
pd.isna(idx)
def test_large_multiindex_error():
# GH12527
df_below_1000000 = pd.DataFrame(
1, index=pd.MultiIndex.from_product([[1, 2], range(499999)]), columns=["dest"]
)
with pytest.raises(KeyError, match=r"^\(-1, 0\)$"):
df_below_1000000.loc[(-1, 0), "dest"]
with pytest.raises(KeyError, match=r"^\(3, 0\)$"):
df_below_1000000.loc[(3, 0), "dest"]
df_above_1000000 = pd.DataFrame(
1, index=pd.MultiIndex.from_product([[1, 2], range(500001)]), columns=["dest"]
)
with pytest.raises(KeyError, match=r"^\(-1, 0\)$"):
df_above_1000000.loc[(-1, 0), "dest"]
with pytest.raises(KeyError, match=r"^\(3, 0\)$"):
df_above_1000000.loc[(3, 0), "dest"]
def test_million_record_attribute_error():
# GH 18165
r = list(range(1000000))
df = pd.DataFrame(
{"a": r, "b": r}, index=pd.MultiIndex.from_tuples([(x, x) for x in r])
)
msg = "'Series' object has no attribute 'foo'"
with pytest.raises(AttributeError, match=msg):
df["a"].foo()
def test_can_hold_identifiers(idx):
key = idx[0]
assert idx._can_hold_identifiers_and_holds_name(key) is True
def test_metadata_immutable(idx):
levels, codes = idx.levels, idx.codes
# shouldn't be able to set at either the top level or base level
mutable_regex = re.compile("does not support mutable operations")
with pytest.raises(TypeError, match=mutable_regex):
levels[0] = levels[0]
with pytest.raises(TypeError, match=mutable_regex):
levels[0][0] = levels[0][0]
# ditto for labels
with pytest.raises(TypeError, match=mutable_regex):
codes[0] = codes[0]
with pytest.raises(ValueError, match="assignment destination is read-only"):
codes[0][0] = codes[0][0]
# and for names
names = idx.names
with pytest.raises(TypeError, match=mutable_regex):
names[0] = names[0]
def test_level_setting_resets_attributes():
ind = pd.MultiIndex.from_arrays([["A", "A", "B", "B", "B"], [1, 2, 1, 2, 3]])
assert ind.is_monotonic
ind.set_levels([["A", "B"], [1, 3, 2]], inplace=True)
# if this fails, probably didn't reset the cache correctly.
assert not ind.is_monotonic
def test_rangeindex_fallback_coercion_bug():
# GH 12893
foo = pd.DataFrame(np.arange(100).reshape((10, 10)))
bar = pd.DataFrame(np.arange(100).reshape((10, 10)))
df = pd.concat({"foo": foo.stack(), "bar": bar.stack()}, axis=1)
df.index.names = ["fizz", "buzz"]
str(df)
expected = pd.DataFrame(
{"bar": np.arange(100), "foo": np.arange(100)},
index=pd.MultiIndex.from_product(
[range(10), range(10)], names=["fizz", "buzz"]
),
)
tm.assert_frame_equal(df, expected, check_like=True)
result = df.index.get_level_values("fizz")
expected = pd.Int64Index(np.arange(10), name="fizz").repeat(10)
|
tm.assert_index_equal(result, expected)
|
pandas.util.testing.assert_index_equal
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy import integrate, optimize
from scipy.signal import savgol_filter
from dane import population as popu
dias_restar = 4 # The most recent days of data that are not taken into account
dias_pred = 31 # Days over which the short-term prediction will be made
media_movil = 4 # Days averaged in the series to mitigate errors in the data
Ciudades_dicc = {'Bog': 'Bogotá D.C.', 'Mde': 'Medellín', 'Cal': 'Cali', 'Brr': 'Barranquilla',
'Ctg': 'Cartagena de Indias'}
Ciudades = ['Bog','Mde','Cal', 'Brr', 'Ctg']
Covid_Col = pd.read_csv("https://www.datos.gov.co/api/views/gt2j-8ykr/rows.csv?accessType=DOWNLOAD", sep=',',
encoding='utf-8', low_memory=False)
def limpieza_datos():
# Covid_Col=pd.read_csv("C:\Users\danie\DS\vagrant4docker-master\laboratorios\covid-19-guaya-kilera\Casos_positivos_de_COVID-19_en_Colombia.csv", sep=',', encoding='utf-8', low_memory=False)
Covid_Col.drop(['ID de caso', 'Código DIVIPOLA', 'Departamento o Distrito ', 'País de procedencia', 'Tipo',
'Codigo departamento',
'Codigo pais', 'Tipo recuperación', 'Pertenencia etnica', 'Nombre grupo etnico', 'atención'],
axis=1, inplace=True)
Covid_Col['FIS'] = Covid_Col['FIS'].replace('Asintomático', np.nan)
Covid_Col['FIS'] =
|
pd.to_datetime(Covid_Col['FIS'].str[:10])
|
pandas.to_datetime
|
"""
Test file for analysis of pNEUMA data
"""
from pneumapackage.settings import *
import pneumapackage.compute as cp
from pneumapackage.__init__ import read_pickle, write_pickle, path_data, path_results
import pneumapackage.iodata as rd
import test_network as tn
import test_data as td
import numpy as np
import pandas as pd
import leuvenmapmatching.util.dist_euclidean as distxy
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection
from tqdm.contrib import tenumerate
from tqdm import tqdm
import os
"""
Up until now we have a list of dataframes (trajectories), each with a column giving the matched edge in the extracted OSM network.
The line trajectories can be used to count vehicles crossing specific locations in the network, keeping the individual
information for more specific aggregations afterwards (augmented loop detector data).
Place detectors on the edges of the used network and select specific edges
--> using QGIS to manually select the needed edge ids.
Input parameters:
- 20 m = width of detector edges, make sure they span the whole road
- 10 m = distance from intersection
- True = place double virtual loops
- 1 m = loop distance
- 2 = number of detectors on every link
"""
def test_crossings(line_traj, df_det, **kwargs):
df_crossings = cp.vehicle_crossings(line_traj, df_det, **kwargs)
return df_crossings
def get_traj_crossings(track, df_crossings):
df_crossings = df_crossings[~df_crossings[track].isna()][track]
return df_crossings
def get_vehicle_types(group_id):
pid, _, _ = td.get_hdf_names(group_id)
hdf_path = rd.get_hdf_path()
vehicle_types = rd.get_from_hdf(hdf_path, key_id=pid, result='ids')
vehicle_types = vehicle_types.loc[:, ['track_id', 'type']]
return vehicle_types
def case_configuration(group_id, det_obj, edges):
"""
Get all the needed information of the detectors for the chosen edges, as well as only those trajectories that map
onto one of the edges.
Parameters
----------
group_id : identifier of the trajectory group to load
det_obj : detector object holding the virtual detectors and their features
edges : edge ids of the extracted OSM network to restrict the selection to
Returns
-------
ds : detector selection for the chosen edges
ds_ft : detector features for the chosen edges
lt : line trajectories that map onto one of the chosen edges
"""
ds = det_obj.detector_selection(edges)
id_ft_pan = list(set(det_obj.features.index.get_level_values(0)) & set(edges))
id_ft_pan.sort()
ds_ft = det_obj.features.loc[(id_ft_pan,)]
ds_ft.attrs = det_obj.features.attrs
lt = td.get_lt(group_id=group_id, edges=edges, gdf=True)
return ds, ds_ft, lt
def edges_crossings(group_id, crossing_edges, case_number=1, det_obj=None, bearing_difference=90, strict_match=True,
folder=path_results):
dataset_name = rd.get_path_dict()['groups'][group_id].replace('/', '_')
try:
df_ft = read_pickle(f'features_crossing_{dataset_name}_bd{bearing_difference}_case{case_number}',
os.path.join(folder, 'crossings'))
df_det = read_pickle(f'detectors_crossing_{dataset_name}_bd{bearing_difference}_case{case_number}',
os.path.join(folder, 'crossings'))
except FileNotFoundError:
if det_obj is None:
det_obj = tn.test_detectors(tn.test_network(), path_data)
ds, ds_ft, lt = case_configuration(group_id, det_obj, edges=crossing_edges)
# Determine crossings
df_ft = cp.vehicle_crossings(lt, ds_ft, bearing_difference=bearing_difference, strict_match=strict_match)
df_det = cp.vehicle_crossings(lt, ds, bearing_difference=bearing_difference, strict_match=strict_match)
write_pickle(df_ft, f'features_crossing_{dataset_name}_bd{bearing_difference}_case{case_number}',
os.path.join(folder, 'crossings'))
write_pickle(df_det, f'detectors_crossing_{dataset_name}_bd{bearing_difference}_case{case_number}',
os.path.join(folder, 'crossings'))
return df_ft, df_det, dataset_name
def signal_timings(df_crossings, time_rows=('t1', 't2'), time_step=1000):
df_det = df_crossings.sort_index()
df_det = df_det.reset_index()
df_sel = df_det.loc[df_det['detector'].isin(list(time_rows))]
df_sel.set_index(['edge', 'detector'], inplace=True)
df_sel = df_sel.transpose()
max_time = int(max(df_sel.max()) + time_step)
df_cycle = {'time_step': [], 'passing': []}
for t in tqdm(range(time_step, max_time, time_step)):
df = df_sel[(df_sel >= (t - time_step)) & (df_sel < t)]
df_cycle['time_step'].append(t), df_cycle['passing'].append(df.count().values)
df_cycle = pd.DataFrame(df_cycle['passing'], index=df_cycle['time_step'], columns=df_sel.columns)
df_cycle.index.name = 'time_step'
# df_cycle = df_st.mask(df_st > 0, 1)
# df_cum = df_st.cumsum()
return df_cycle
def cycle_times(df_cycle, edge, column=None, thresh_filter=10000, filter_step=3, thresh=5000):
if column is None:
column = 't2'
tmp = df_cycle.loc[:, edge].copy()
tmp.loc[:, 'green'] = 0
tmp.loc[:, 'edge'] = edge
step = list(set(np.diff(tmp.index)))[0]
tmp2 = tmp.loc[list(set(np.r_[tmp[tmp[column] > 0].index, tmp.index[0], tmp.index[-1]]))].copy()
tmp2.sort_index(inplace=True)
tmp2.reset_index(inplace=True)
tmp2.loc[:, 'filter_b'] = tmp2.loc[:, 'time_step'].diff(filter_step)
tmp2.loc[:, 'filter_a'] = abs(tmp2.loc[:, 'time_step'].diff(-filter_step))
filter_index = tmp2.loc[(tmp2.filter_b > thresh_filter) & (tmp2.filter_a > thresh_filter)].index
tmp2.loc[filter_index, 'filter_b'] = 0
tmp2.loc[filter_index, 'filter_a'] = 0
tmp2 = tmp2[~tmp2.index.isin(tmp2[(tmp2.filter_a == 0) & (tmp2.filter_b == 0)].index)]
tmp2.loc[:, 'before'] = tmp2.loc[:, 'time_step'].diff(1)
tmp2.loc[:, 'after'] = abs(tmp2.loc[:, 'time_step'].diff(-1))
tmp2.loc[tmp2.before <= thresh, 'before'] = 0
tmp2.loc[tmp2.after <= thresh, 'after'] = 0
tmp2 = tmp2.loc[tmp2[column] > 0]
tmp2.loc[:, 'green_start'] = 0
tmp2.loc[:, 'green_end'] = 0
tmp2.loc[tmp2.before > 0, 'green_start'] = 1
tmp2.loc[tmp2.after > 0, 'green_end'] = 1
tmp2 = tmp2[tmp2.index.isin(tmp2[(tmp2.green_start > 0) | (tmp2.green_end > 0)].index)]
if len(tmp2.loc[(tmp2.green_start > 0) & (tmp2.green_end > 0)]):
print('Adjust filters')
raise ValueError('Invalid instances detected')
tmp2 = tmp2[~tmp2.index.isin(tmp2[(tmp2.green_start > 0) & (tmp2.green_end > 0)].index)]
tmp2.set_index('time_step', inplace=True)
tmp2.loc[:, 'green_time'] = 0
tmp2.loc[:, 'red_time'] = tmp2.before
index_greens = []
ls_tmp = []
row = 0
for i, j in tmp2.iterrows():
if row == 0:
if j['green_end'] > 0:
index_greens.extend(np.arange(tmp.index[0], i + step, step).tolist())
tmp2.loc[i, 'green_time'] = i - tmp.index[0] - step
else:
ls_tmp.append(i)
row += 1
elif row == len(tmp2) - 1:
if j['green_start'] > 0:
index_greens.extend(np.arange(i, tmp.index[-1] + step, step).tolist())
tmp2.loc[i, 'green_time'] = tmp.index[-1] - i
else:
ls_tmp.append(i)
index_greens.extend(np.arange(ls_tmp[0], ls_tmp[1] + step, step).tolist())
tmp2.loc[i, 'green_time'] = ls_tmp[1] - ls_tmp[0]
ls_tmp = []
else:
if j['green_end'] > 0:
ls_tmp.append(i)
index_greens.extend(np.arange(ls_tmp[0], ls_tmp[1] + step, step).tolist())
tmp2.loc[i, 'green_time'] = ls_tmp[1] - ls_tmp[0]
ls_tmp = []
else:
ls_tmp.append(i)
row += 1
tmp.loc[index_greens, 'green'] = 1
return tmp2, tmp
def create_cumulative(tuple_crossings, edge_selection, turn='other', time_step=1000, plot=False, statistics=False):
assert turn in ['incoming', 'outgoing', 'turn_right', 'turn_left', 'straight', 'other']
df_det = tuple_crossings[1]
data_title = tuple_crossings[2]
df_det = df_det.sort_index()
df_sel = df_det.loc[edge_selection, :]
df = df_sel.dropna(axis=1)
df = df.transpose()
max_time = int(max(df.max()) + time_step)
df_st = {'time_step': [], 'count': []}
df_tt = df.astype('float64')
df_tt = df_tt.assign(travel_time=df_tt[edge_selection[-1]] - df_tt[edge_selection[0]])
for t in range(time_step, max_time, time_step):
tmp = df[(df >= (t - time_step)) & (df < t)]
df_st['time_step'].append(t), df_st['count'].append(tmp.count().values)
df_st = pd.DataFrame(df_st['count'], index=df_st['time_step'],
columns=[f'count_{i[0]}_{i[1]}' for i in edge_selection])
df_st = df_st.assign(veh_diff=df_st[f'count_{edge_selection[0][0]}_{edge_selection[0][1]}'] -
df_st[f'count_{edge_selection[-1][0]}_{edge_selection[-1][1]}'])
for i in edge_selection:
df_st.loc[:, f'cumulative_{i[0]}_{i[1]}'] = df_st[f'count_{i[0]}_{i[1]}'].cumsum()
df_tt = df_tt.assign(travel_time_sec=df_tt.travel_time / 1000)
if statistics:
print(f'Basic statistics of travel time from {edge_selection[0]} to {edge_selection[-1]}: '
f'{df_tt.travel_time_sec.describe()}')
if plot:
fig, ax = plt.subplots(figsize=(10, 8))
ind_link = 0
for i in edge_selection:
ax.plot(df_st.index / 1000, df_st[f'count_{i[0]}_{i[1]}'],
color=qual_colorlist[ind_link], label=f'{i[0]}_{i[1]}')
ind_link += 1
ax.grid(True)
ax.legend()
ax.set_xlabel('Time [s]')
ax.set_ylabel('Vehicles passing [veh]')
plt.close()
fig, ax = plt.subplots()
ind_link = 0
for i in edge_selection:
ax.plot(df_st.index / 1000, df_st[f'count_{i[0]}_{i[1]}'].cumsum(), color=qual_colorlist[ind_link],
label=f'{i[0]}_{i[1]}')
ind_link += 1
ax.grid(True)
ax.legend()
ax.set_xlabel('Time [s]')
ax.set_ylabel('Cumulative count [veh]')
ax.set_title(f'{data_title}_{turn}')
fig.savefig(f'{data_title}_{edge_selection[0][0]}_{edge_selection[-1][0]}_{turn}')
fig, ax = plt.subplots()
ax.plot(df_st.index / 1000, df_st['veh_diff'].cumsum(), label='vehicle accumulation',
color=qual_colorlist[0])
ax.plot(df_st.index / 1000, df_st[f'count_{edge_selection[-1][0]}_{edge_selection[-1][1]}'],
label='vehicles passing downstream',
color=qual_colorlist[5])
ax.grid(True)
ax.legend()
ax.set_xlabel('Time [s]')
ax.set_ylabel('Vehicles [veh]')
ax.set_title(f'{data_title} {turn} accumulation')
fig.savefig(f'{data_title}_{edge_selection[0][0]}_{edge_selection[-1][0]}_accumulation')
return df_st, df_tt
def test_cycle_times(group_id, crossing_edges, **kwargs):
_, df_crossings, _ = edges_crossings(group_id, crossing_edges, **kwargs)
df_cycle = signal_timings(df_crossings)
return df_cycle
def test_cumulative(group_id, crossing_edges, edge_selection, **kwargs):
tuple_crossings = edges_crossings(group_id, crossing_edges, **kwargs)
df_st, df_tt = create_cumulative(tuple_crossings, edge_selection)
return df_st, df_tt
def create_output_table(df_crossing_sel, group_id, save_csv=False, filename=None):
hdf_path = rd.get_hdf_path()
group_path = rd.get_group(group_id)
df_dict = {'track_id': [], 'from': [], 'to': [], 't_from': [], 't_to': [], 'delta_t': []}
for i in df_crossing_sel:
df = df_crossing_sel[i][df_crossing_sel[i].notna()]
if len(df) % 2 == 0:
nr = int(len(df) / 2)
df = df.sort_values()
df_idx = df.index.get_level_values(0).to_list()
df_val = df.values
df_dict['track_id'].extend([i] * nr)
df_dict['from'].extend(df_idx[::2])
df_dict['t_from'].extend(df_val[::2])
df_dict['to'].extend(df_idx[1::2])
df_dict['t_to'].extend(df_val[1::2])
df_dict['delta_t'].extend(df_val[1::2] - df_val[::2])
else:
continue
df = pd.DataFrame(df_dict)
tr_id = rd.get_from_hdf(hdf_path, key_id=group_path + '/all_id', result='ids')
df = df.merge(tr_id[['track_id', 'type']], how='left', on='track_id')
if save_csv:
fn = filename
if filename is None:
fn = 'traj_data.csv'
df.to_csv(path_data + fn)
return df
def create_xt(edge, group_id, network_df, crossing_edges, show_det=None, plot=False, colormap='gist_rainbow',
lines=False, veh_type=None, psize=1, bearing_difference=90,
strict_match=True, folder=path_results, **kwargs):
edge_length = network_df.loc[network_df['_id'] == edge, 'length'].values[0]
vt_str = "all"
_, df_det, data_title = edges_crossings(group_id, crossing_edges, bearing_difference=bearing_difference,
strict_match=strict_match)
df_sel = df_det.loc[edge]
df_sel = df_sel.dropna(axis=1, how='any')
lt = td.get_lt_from_id(df_sel.columns.to_list(), group_id=group_id, gdf=False, **kwargs)
df_xt = pd.DataFrame()
s = {'pos_start': [], 'pos_end': [], 'track_id': []}
e1 = (network_df.loc[network_df['_id'] == edge, ['x1', 'y1']].values[0])
e2 = (network_df.loc[network_df['_id'] == edge, ['x2', 'y2']].values[0])
df_transpose = df_sel.transpose()
c1 = [(xy) for xy in zip(df_transpose.loc[:, 'cross_x1'], df_transpose.loc[:, 'cross_y1'])]
c2 = [(xy) for xy in zip(df_transpose.loc[:, 'cross_x2'], df_transpose.loc[:, 'cross_y2'])]
for ind, el in enumerate(lt.index.get_level_values(0).unique()):
tmp = lt.loc[(el, slice(df_sel.loc['rid1', el], int(df_sel.loc['rid2', el] + 1))), :].copy()
tmp2 = tmp.apply(help_proj, e1=e1, e2=e2, axis=1)
_, t1 = distxy.project(e1, e2, c1[ind])
_, t2 = distxy.project(e1, e2, c2[ind])
s['pos_start'].append(t1), s['pos_end'].append(t2), s['track_id'].append(el)
# tmp['proj'] = tmp2
tmp_dist, _, tmp_proj = zip(*tmp2)
tmp['lateral_dist'] = tmp_dist
tmp['proj'] = tmp_proj
tmp['avg_speed'] = tmp['line_length_yx'] * 3.6
df_xt =
|
pd.concat([df_xt, tmp], axis=0)
|
pandas.concat
|
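# A minimal, self-contained sketch (illustrative names, not part of the code
# above) of the row-appending pattern used for df_xt: collect per-track chunks
# and stack them with pd.concat.
import pandas as pd

chunks = [pd.DataFrame({'track_id': [i], 'proj': [i * 0.5]}) for i in range(3)]
stacked = pd.DataFrame()
for chunk in chunks:
    stacked = pd.concat([stacked, chunk], axis=0)
# Building the list first and calling pd.concat(chunks, ignore_index=True)
# once is usually faster than concatenating inside the loop.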
# -*- coding: utf-8 -*-
import operator
import pytest
import numpy as np
from pandas.compat import range
import pandas as pd
import pandas.util.testing as tm
from pandas.tests.frame.common import _check_mixed_float, _check_mixed_int
# -------------------------------------------------------------------
# Comparisons
class TestFrameComparisons(object):
def test_flex_comparison_nat(self):
# GH 15697, GH 22163 df.eq(pd.NaT) should behave like df == pd.NaT,
# and _definitely_ not be NaN
df = pd.DataFrame([pd.NaT])
result = df == pd.NaT
# result.iloc[0, 0] is a np.bool_ object
assert result.iloc[0, 0].item() is False
result = df.eq(pd.NaT)
assert result.iloc[0, 0].item() is False
result = df != pd.NaT
assert result.iloc[0, 0].item() is True
result = df.ne(pd.NaT)
assert result.iloc[0, 0].item() is True
def test_mixed_comparison(self):
# GH 13128, GH 22163 != datetime64 vs non-dt64 should be False,
# not raise TypeError
# (this appears to be fixed before #22163, not sure when)
df = pd.DataFrame([['1989-08-01', 1], ['1989-08-01', 2]])
other = pd.DataFrame([['a', 'b'], ['c', 'd']])
result = df == other
assert not result.any().any()
result = df != other
assert result.all().all()
def test_df_boolean_comparison_error(self):
# GH#4576, GH#22880
# comparing DataFrame against list/tuple with len(obj) matching
# len(df.columns) is supported as of GH#22800
df = pd.DataFrame(np.arange(6).reshape((3, 2)))
expected = pd.DataFrame([[False, False],
[True, False],
[False, False]])
result = df == (2, 2)
tm.assert_frame_equal(result, expected)
result = df == [2, 2]
tm.assert_frame_equal(result, expected)
def test_df_float_none_comparison(self):
df = pd.DataFrame(np.random.randn(8, 3), index=range(8),
columns=['A', 'B', 'C'])
result = df.__eq__(None)
assert not result.any().any()
def test_df_string_comparison(self):
df = pd.DataFrame([{"a": 1, "b": "foo"}, {"a": 2, "b": "bar"}])
mask_a = df.a > 1
tm.assert_frame_equal(df[mask_a], df.loc[1:1, :])
tm.assert_frame_equal(df[-mask_a], df.loc[0:0, :])
mask_b = df.b == "foo"
tm.assert_frame_equal(df[mask_b], df.loc[0:0, :])
tm.assert_frame_equal(df[-mask_b], df.loc[1:1, :])
@pytest.mark.parametrize('opname', ['eq', 'ne', 'gt', 'lt', 'ge', 'le'])
def test_df_flex_cmp_constant_return_types(self, opname):
# GH 15077, non-empty DataFrame
df = pd.DataFrame({'x': [1, 2, 3], 'y': [1., 2., 3.]})
const = 2
result = getattr(df, opname)(const).get_dtype_counts()
tm.assert_series_equal(result, pd.Series([2], ['bool']))
@pytest.mark.parametrize('opname', ['eq', 'ne', 'gt', 'lt', 'ge', 'le'])
def test_df_flex_cmp_constant_return_types_empty(self, opname):
# GH 15077 empty DataFrame
df =
|
pd.DataFrame({'x': [1, 2, 3], 'y': [1., 2., 3.]})
|
pandas.DataFrame
|
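# A short, hedged illustration of the flexible comparison methods exercised in
# the tests above: df.eq/df.ne mirror the == and != operators and return
# boolean DataFrames.
import pandas as pd

df_demo = pd.DataFrame({'x': [1, 2, 3], 'y': [1., 2., 3.]})
assert df_demo.eq(2).equals(df_demo == 2)
assert (df_demo.ne(2).dtypes == bool).all()  # every result column is boolean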
import pandas as pd
## convert to time
df['date'] =
|
pd.to_datetime(df['ts'],unit='ms')
|
pandas.to_datetime
|
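# A minimal standalone example of the conversion above: the 'ts' column holds
# epoch milliseconds, and unit='ms' turns them into datetime64 values.
import pandas as pd

df_ts = pd.DataFrame({'ts': [1609459200000, 1609459260000]})
df_ts['date'] = pd.to_datetime(df_ts['ts'], unit='ms')
print(df_ts['date'].iloc[0])  # 2021-01-01 00:00:00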
import sys
from Bio import SeqIO
import pandas as pd
##########################################################################
sys.stderr = sys.stdout = open(snakemake.log[0], "w")
##########################################################################
tsv_missing = snakemake.input.tsv_missing
faa_missing = snakemake.input.faa_missing
tsv_virsorter = snakemake.input.tsv_virsorter
faa_virsorter = snakemake.input.faa_virsorter
transl_table = snakemake.input.translation_table
transl_dict =
|
pd.read_table(transl_table, index_col=0)
|
pandas.read_table
|
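# A hedged sketch of the lookup table read above: pd.read_table parses
# tab-separated text, and index_col=0 turns the first column into the index so
# entries can be fetched with .loc. The column names below are made up.
import io
import pandas as pd

example_tsv = io.StringIO("old_id\tnew_id\nprot_1\tgenome_A_0001\nprot_2\tgenome_A_0002\n")
lookup = pd.read_table(example_tsv, index_col=0)
print(lookup.loc['prot_1', 'new_id'])  # genome_A_0001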
from unittest import TestCase
from unittest.mock import Mock, patch
import copulas
import numpy as np
import pandas as pd
import pytest
from copulas import univariate
from rdt.transformers.null import NullTransformer
from rdt.transformers.numerical import (
BayesGMMTransformer, GaussianCopulaTransformer, NumericalBoundedTransformer,
NumericalRoundedBoundedTransformer, NumericalRoundedTransformer, NumericalTransformer)
class TestNumericalTransformer(TestCase):
def test___init__super_attrs(self):
"""super() arguments are properly passed and set as attributes."""
nt = NumericalTransformer(dtype='int', nan='mode', null_column=False)
assert nt.dtype == 'int'
assert nt.nan == 'mode'
assert nt.null_column is False
def test_get_output_types(self):
"""Test the ``get_output_types`` method when a null column is created.
When a null column is created, this method should apply the ``_add_prefix``
method to the following dictionary of output types:
output_types = {
'value': 'float',
'is_null': 'float'
}
Setup:
- initialize a ``NumericalTransformer`` transformer which:
- sets ``self.null_transformer`` to a ``NullTransformer`` where
``self.null_column`` is True.
- sets ``self.column_prefix`` to a string.
Output:
- the ``output_types`` dictionary, but with the ``self.column_prefix``
added to the beginning of the keys.
"""
# Setup
transformer = NumericalTransformer()
transformer.null_transformer = NullTransformer(fill_value='fill')
transformer.null_transformer._null_column = True
transformer.column_prefix = 'a#b'
# Run
output = transformer.get_output_types()
# Assert
expected = {
'a#b.value': 'float',
'a#b.is_null': 'float'
}
assert output == expected
def test_is_composition_identity_null_transformer_true(self):
"""Test the ``is_composition_identity`` method with a ``null_transformer``.
When the attribute ``null_transformer`` is not None and a null column is not created,
this method should simply return False.
Setup:
- initialize a ``NumericalTransformer`` transformer which sets
``self.null_transformer`` to a ``NullTransformer`` where
``self.null_column`` is False.
Output:
- False
"""
# Setup
transformer = NumericalTransformer()
transformer.null_transformer = NullTransformer(fill_value='fill')
# Run
output = transformer.is_composition_identity()
# Assert
assert output is False
def test_is_composition_identity_null_transformer_false(self):
"""Test the ``is_composition_identity`` method without a ``null_transformer``.
When the attribute ``null_transformer`` is None, this method should return
the value stored in the ``COMPOSITION_IS_IDENTITY`` attribute.
Setup:
- initialize a ``NumericalTransformer`` transformer which sets
``self.null_transformer`` to None.
Output:
- the value stored in ``self.COMPOSITION_IS_IDENTITY``.
"""
# Setup
transformer = NumericalTransformer()
transformer.null_transformer = None
# Run
output = transformer.is_composition_identity()
# Assert
assert output is True
def test__learn_rounding_digits_more_than_15_decimals(self):
"""Test the _learn_rounding_digits method with more than 15 decimals.
If the data has more than 15 decimals, None should be returned.
Input:
- An array that contains floats with more than 15 decimals.
Output:
- None
"""
data = np.random.random(size=10).round(20)
output = NumericalTransformer._learn_rounding_digits(data)
assert output is None
def test__learn_rounding_digits_less_than_15_decimals(self):
"""Test the _learn_rounding_digits method with less than 15 decimals.
If the data has less than 15 decimals, the maximum number of decimals
should be returned.
Input:
- An array that contains floats with a maximum of 3 decimals and a NaN.
Output:
- 3
"""
data = np.array([10, 0., 0.1, 0.12, 0.123, np.nan])
output = NumericalTransformer._learn_rounding_digits(data)
assert output == 3
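# A minimal sketch (not rdt's actual implementation) of the idea behind these
# tests for the positive-decimals case: find the smallest digit count whose
# rounding leaves all finite values unchanged, within float tolerance.
import numpy as np

def sketch_learn_rounding_digits(data, max_digits=15):
    values = np.asarray(data, dtype=float)
    values = values[np.isfinite(values)]  # drop NaN/inf, as the tests above do
    for digits in range(max_digits + 1):
        if np.allclose(values, values.round(digits)):
            return digits
    return None

assert sketch_learn_rounding_digits([10, 0., 0.1, 0.12, 0.123, np.nan]) == 3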
def test__learn_rounding_digits_negative_decimals_float(self):
"""Test the _learn_rounding_digits method with floats multiples of powers of 10.
If the data has all multiples of 10, 100, or any other higher power of 10,
the output is the negative number of decimals representing the corresponding
power of 10.
Input:
- An array that contains floats that are multiples of powers of 10, 100 and 1000
and a NaN.
Output:
- -1
"""
data = np.array([1230., 12300., 123000., np.nan])
output = NumericalTransformer._learn_rounding_digits(data)
assert output == -1
def test__learn_rounding_digits_negative_decimals_integer(self):
"""Test the _learn_rounding_digits method with integers multiples of powers of 10.
If the data has all multiples of 10, 100, or any other higher power of 10,
the output is the negative number of decimals representing the corresponding
power of 10.
Input:
- An array that contains integers that are multiples of powers of 10, 100 and 1000
and a NaN.
Output:
- -1
"""
data = np.array([1230, 12300, 123000, np.nan])
output = NumericalTransformer._learn_rounding_digits(data)
assert output == -1
def test__learn_rounding_digits_all_nans(self):
"""Test the _learn_rounding_digits method with data that is all NaNs.
If the data is all NaNs, expect that the output is None.
Input:
- An array of NaNs.
Output:
- None
"""
data = np.array([np.nan, np.nan, np.nan, np.nan])
output = NumericalTransformer._learn_rounding_digits(data)
assert output is None
def test__fit(self):
"""Test the ``_fit`` method with numpy.array.
Validate that the ``_dtype`` and ``.null_transformer.fill_value`` attributes
are set correctly.
Setup:
- initialize a ``NumericalTransformer`` with the ``nan`` parameter set to ``'nan'``.
Input:
- a pandas dataframe containing a None.
Side effect:
- it sets the ``null_transformer.fill_value``.
- it sets the ``_dtype``.
"""
# Setup
data = pd.DataFrame([1.5, None, 2.5], columns=['a'])
transformer = NumericalTransformer(dtype=float, nan='nan')
# Run
transformer._fit(data)
# Asserts
expect_fill_value = 'nan'
assert transformer.null_transformer.fill_value == expect_fill_value
expect_dtype = float
assert transformer._dtype == expect_dtype
def test__fit_rounding_none(self):
"""Test _fit rounding parameter with ``None``
If the rounding parameter is set to ``None``, the ``_fit`` method
should not set its ``rounding`` or ``_rounding_digits`` instance
variables.
Input:
- An array with floats rounded to one decimal and a None value
Side Effect:
- ``rounding`` and ``_rounding_digits`` continue to be ``None``
"""
# Setup
data = pd.DataFrame([1.5, None, 2.5], columns=['a'])
# Run
transformer = NumericalTransformer(dtype=float, nan='nan', rounding=None)
transformer._fit(data)
# Asserts
assert transformer.rounding is None
assert transformer._rounding_digits is None
def test__fit_rounding_int(self):
"""Test _fit rounding parameter with int
If the rounding parameter is set to an int, the ``_fit`` method
should set its ``rounding`` and ``_rounding_digits`` instance
variables to the provided int.
Input:
- An array with floats rounded to one decimal and a None value
Side Effect:
- ``rounding`` and ``_rounding_digits`` are the provided int
"""
# Setup
data = pd.DataFrame([1.5, None, 2.5], columns=['a'])
expected_digits = 3
# Run
transformer = NumericalTransformer(dtype=float, nan='nan', rounding=expected_digits)
transformer._fit(data)
# Asserts
assert transformer.rounding == expected_digits
assert transformer._rounding_digits == expected_digits
def test__fit_rounding_auto(self):
"""Test _fit rounding parameter with ``'auto'``
If the ``rounding`` parameter is set to ``'auto'``,
``_fit`` should learn the ``_rounding_digits`` to be the max
number of decimal places seen in the data.
Input:
- Array of floats with up to 4 decimals
Side Effect:
- ``_rounding_digits`` is set to 4
"""
# Setup
data = pd.DataFrame([1, 2.1, 3.12, 4.123, 5.1234, 6.123, 7.12, 8.1, 9], columns=['a'])
# Run
transformer = NumericalTransformer(dtype=float, nan='nan', rounding='auto')
transformer._fit(data)
# Asserts
assert transformer._rounding_digits == 4
def test__fit_rounding_auto_large_numbers(self):
"""Test _fit rounding parameter with ``'auto'``
If the ``rounding`` parameter is set to ``'auto'``
and the data is very large, ``_fit`` should learn
``_rounding_digits`` to be the biggest number of 0s
to round to that keeps the data the same.
Input:
- Array of data with numbers between 10^10 and 10^20
Side Effect:
- ``_rounding_digits`` is set to the minimum exponent seen in the data
"""
# Setup
exponents = [np.random.randint(10, 20) for i in range(10)]
big_numbers = [10**exponents[i] for i in range(10)]
data = pd.DataFrame(big_numbers, columns=['a'])
# Run
transformer = NumericalTransformer(dtype=float, nan='nan', rounding='auto')
transformer._fit(data)
# Asserts
assert transformer._rounding_digits == -min(exponents)
def test__fit_rounding_auto_max_decimals(self):
"""Test _fit rounding parameter with ``'auto'``
If the ``rounding`` parameter is set to ``'auto'``,
``_fit`` should learn the ``_rounding_digits`` to be the max
number of decimal places seen in the data. The max
amount of decimals that floats can be accurately compared
with is 15. If the input data has values with more than
14 decimals, we will not be able to accurately learn the
number of decimal places required, so we do not round.
Input:
- Array with a value that has 15 decimals
Side Effect:
- ``_rounding_digits`` is set to ``None``
"""
# Setup
data = pd.DataFrame([0.000000000000001], columns=['a'])
# Run
transformer = NumericalTransformer(dtype=float, nan='nan', rounding='auto')
transformer._fit(data)
# Asserts
assert transformer._rounding_digits is None
def test__fit_rounding_auto_max_inf(self):
"""Test _fit rounding parameter with ``'auto'``
If the ``rounding`` parameter is set to ``'auto'``,
and the data contains infinite values, ``_fit`` should
learn the ``_rounding_digits`` to be the min
number of decimal places seen in the data with
the infinite values filtered out.
Input:
- Array with ``np.inf`` as a value
Side Effect:
- ``_rounding_digits`` is set to max seen in rest of data
"""
# Setup
data = pd.DataFrame([15000, 4000, 60000, np.inf], columns=['a'])
# Run
transformer = NumericalTransformer(dtype=float, nan='nan', rounding='auto')
transformer._fit(data)
# Asserts
assert transformer._rounding_digits == -3
def test__fit_rounding_auto_max_zero(self):
"""Test _fit rounding parameter with ``'auto'``
If the ``rounding`` parameter is set to ``'auto'``,
and the max in the data is 0, ``_fit`` should
learn the ``_rounding_digits`` to be 0.
Input:
- Array with 0 as max value
Side Effect:
- ``_rounding_digits`` is set to 0
"""
# Setup
data = pd.DataFrame([0, 0, 0], columns=['a'])
# Run
transformer = NumericalTransformer(dtype=float, nan='nan', rounding='auto')
transformer._fit(data)
# Asserts
assert transformer._rounding_digits == 0
def test__fit_rounding_auto_max_negative(self):
"""Test _fit rounding parameter with ``'auto'``
If the ``rounding`` parameter is set to ``'auto'``,
and the max in the data is negative, the ``_fit`` method
should learn ``_rounding_digits`` to be the min number
of digits seen in those negative values.
Input:
- Array with negative max value
Side Effect:
- ``_rounding_digits`` is set to min number of digits in array
"""
# Setup
data = pd.DataFrame([-500, -220, -10], columns=['a'])
# Run
transformer = NumericalTransformer(dtype=float, nan='nan', rounding='auto')
transformer._fit(data)
# Asserts
assert transformer._rounding_digits == -1
def test__fit_min_max_none(self):
"""Test _fit min and max parameters with ``None``
If the min and max parameters are set to ``None``,
the ``_fit`` method should not set its ``min`` or ``max``
instance variables.
Input:
- Array of floats and null values
Side Effect:
- ``_min_value`` and ``_max_value`` stay ``None``
"""
# Setup
data = pd.DataFrame([1.5, None, 2.5], columns=['a'])
# Run
transformer = NumericalTransformer(dtype=float, nan='nan',
min_value=None, max_value=None)
transformer._fit(data)
# Asserts
assert transformer._min_value is None
assert transformer._max_value is None
def test__fit_min_max_int(self):
"""Test _fit min and max parameters with int values
If the min and max parameters are set to an int,
the ``_fit`` method should not change them.
Input:
- Array of floats and null values
Side Effect:
- ``_min_value`` and ``_max_value`` remain unchanged
"""
# Setup
data = pd.DataFrame([1.5, None, 2.5], columns=['a'])
# Run
transformer = NumericalTransformer(dtype=float, nan='nan',
min_value=1, max_value=10)
transformer._fit(data)
# Asserts
assert transformer._min_value == 1
assert transformer._max_value == 10
def test__fit_min_max_auto(self):
"""Test _fit min and max parameters with ``'auto'``
If the min or max parameters are set to ``'auto'``
the ``_fit`` method should learn them from the
_fitted data.
Input:
- Array of floats and null values
Side Effect:
- ``_min_value`` and ``_max_value`` are learned
"""
# Setup
data = pd.DataFrame([-100, -5000, 0, None, 100, 4000], columns=['a'])
# Run
transformer = NumericalTransformer(dtype=float, nan='nan',
min_value='auto', max_value='auto')
transformer._fit(data)
# Asserts
assert transformer._min_value['a'] == -5000
assert transformer._max_value['a'] == 4000
def test__transform(self):
"""Test the ``_transform`` method.
Validate that this method calls the ``self.null_transformer.transform`` method once.
Setup:
- create an instance of a ``NumericalTransformer`` and set ``self.null_transformer``
to a ``NullTransformer``.
Input:
- a pandas series.
Output:
- the transformed numpy array.
"""
# Setup
data = pd.Series([1, 2, 3])
transformer = NumericalTransformer()
transformer.null_transformer = Mock()
# Run
transformer._transform(data)
# Assert
assert transformer.null_transformer.transform.call_count == 1
def test__reverse_transform_rounding_none(self):
"""Test ``_reverse_transform`` when ``rounding`` is ``None``
The data should not be rounded at all.
Input:
- Random array of floats between 0 and 1
Output:
- Input array
"""
# Setup
data = np.random.random(10)
# Run
transformer = NumericalTransformer(dtype=float, nan=None)
transformer._rounding_digits = None
result = transformer._reverse_transform(data)
# Assert
np.testing.assert_array_equal(result, data)
def test__reverse_transform_rounding_none_integer(self):
"""Test ``_reverse_transform`` when ``rounding`` is ``None`` and the dtype is integer.
The data should be rounded to 0 decimals and returned as integer values.
Input:
- Array of multiple float values with decimals.
Output:
- Input array rounded and converted to integers.
"""
# Setup
data = np.array([0., 1.2, 3.45, 6.789])
# Run
transformer = NumericalTransformer(dtype=np.int64, nan=None)
transformer._rounding_digits = None
transformer._dtype = np.int64
result = transformer._reverse_transform(data)
# Assert
expected = np.array([0, 1, 3, 7])
np.testing.assert_array_equal(result, expected)
def test__reverse_transform_rounding_none_with_nulls(self):
"""Test ``_reverse_transform`` when ``rounding`` is ``None`` and there are nulls.
The data should not be rounded at all.
Input:
- 2d Array of multiple float values with decimals and a column setting at least 1 null.
Output:
- First column of the input array as entered, replacing the indicated value with a NaN.
"""
# Setup
data = [
[0., 0.],
[1.2, 0.],
[3.45, 1.],
[6.789, 0.],
]
data = pd.DataFrame(data, columns=['a', 'b'])
# Run
transformer = NumericalTransformer()
null_transformer = Mock()
null_transformer.reverse_transform.return_value = np.array([0., 1.2, np.nan, 6.789])
transformer.null_transformer = null_transformer
transformer._rounding_digits = None
transformer._dtype = float
result = transformer._reverse_transform(data)
# Assert
expected = np.array([0., 1.2, np.nan, 6.789])
np.testing.assert_array_equal(result, expected)
def test__reverse_transform_rounding_none_with_nulls_dtype_int(self):
"""Test ``_reverse_transform`` when rounding is None, dtype is int and there are nulls.
The data should be rounded to 0 decimals and returned as float values with
nulls in the right place.
Input:
- 2d Array of multiple float values with decimals and a column setting at least 1 null.
Output:
- First column of the input array rounded, replacing the indicated value with a NaN,
and kept as float values.
"""
# Setup
data = np.array([
[0., 0.],
[1.2, 0.],
[3.45, 1.],
[6.789, 0.],
])
# Run
transformer = NumericalTransformer()
null_transformer = Mock()
null_transformer.reverse_transform.return_value = np.array([0., 1.2, np.nan, 6.789])
transformer.null_transformer = null_transformer
transformer._rounding_digits = None
transformer._dtype = int
result = transformer._reverse_transform(data)
# Assert
expected = np.array([0., 1., np.nan, 7.])
np.testing.assert_array_equal(result, expected)
def test__reverse_transform_rounding_positive_rounding(self):
"""Test ``_reverse_transform`` when ``rounding`` is a positive int
The data should round to the maximum number of decimal places
set in the ``_rounding_digits`` value.
Input:
- Array with decimals
Output:
- Same array rounded to the provided number of decimal places
"""
# Setup
data = np.array([1.1111, 2.2222, 3.3333, 4.44444, 5.555555])
# Run
transformer = NumericalTransformer(dtype=float, nan=None)
transformer._rounding_digits = 2
result = transformer._reverse_transform(data)
# Assert
expected_data = np.array([1.11, 2.22, 3.33, 4.44, 5.56])
np.testing.assert_array_equal(result, expected_data)
def test__reverse_transform_rounding_negative_rounding_int(self):
"""Test ``_reverse_transform`` when ``rounding`` is a negative int
The data should round to the number set in the ``_rounding_digits``
attribute and remain ints.
Input:
- Array with floats above 100
Output:
- Same array rounded to the provided number of 0s
- Array should be of type int
"""
# Setup
data = np.array([2000.0, 120.0, 3100.0, 40100.0])
# Run
transformer = NumericalTransformer(dtype=int, nan=None)
transformer._dtype = int
transformer._rounding_digits = -3
result = transformer._reverse_transform(data)
# Assert
expected_data = np.array([2000, 0, 3000, 40000])
np.testing.assert_array_equal(result, expected_data)
assert result.dtype == int
def test__reverse_transform_rounding_negative_rounding_float(self):
"""Test ``_reverse_transform`` when ``rounding`` is a negative int
The data should round to the number set in the ``_rounding_digits``
attribute and remain floats.
Input:
- Array with larger numbers
Output:
- Same array rounded to the provided number of 0s
- Array should be of type float
"""
# Setup
data = np.array([2000.0, 120.0, 3100.0, 40100.0])
# Run
transformer = NumericalTransformer(dtype=float, nan=None)
transformer._rounding_digits = -3
result = transformer._reverse_transform(data)
# Assert
expected_data = np.array([2000.0, 0.0, 3000.0, 40000.0])
np.testing.assert_array_equal(result, expected_data)
assert result.dtype == float
def test__reverse_transform_rounding_zero(self):
"""Test ``_reverse_transform`` when ``rounding`` is a negative int
The data should round to the number set in the ``_rounding_digits``
attribute.
Input:
- Array with larger numbers
Output:
- Same array rounded to the provided number of 0s
"""
# Setup
data = np.array([2000.554, 120.2, 3101, 4010])
# Run
transformer = NumericalTransformer(dtype=float, nan=None)
transformer._rounding_digits = 0
result = transformer._reverse_transform(data)
# Assert
expected_data = np.array([2001, 120, 3101, 4010])
np.testing.assert_array_equal(result, expected_data)
def test__reverse_transform_min_no_max(self):
"""Test _reverse_transform with ``min_value`` set
The ``_reverse_transform`` method should clip any values below
the ``min_value`` if it is set.
Input:
- Array with values below the min and infinitely high values
Output:
- Array with low values clipped to min
"""
# Setup
data = np.array([-np.inf, -5000, -301, -250, 0, 125, 400, np.inf])
# Run
transformer = NumericalTransformer(dtype=float, nan=None)
transformer._min_value = -300
result = transformer._reverse_transform(data)
# Asserts
expected_data = np.array([-300, -300, -300, -250, 0, 125, 400, np.inf])
np.testing.assert_array_equal(result, expected_data)
def test__reverse_transform_max_no_min(self):
"""Test _reverse_transform with ``max_value`` set
The ``_reverse_transform`` method should clip any values above
the ``max_value`` if it is set.
Input:
- Array with values above the max and infinitely low values
Output:
- Array with values clipped to max
"""
# Setup
data = np.array([-np.inf, -5000, -301, -250, 0, 125, 401, np.inf])
# Run
transformer = NumericalTransformer(dtype=float, nan=None)
transformer._max_value = 400
result = transformer._reverse_transform(data)
# Asserts
expected_data = np.array([-np.inf, -5000, -301, -250, 0, 125, 400, 400])
np.testing.assert_array_equal(result, expected_data)
def test__reverse_transform_min_and_max(self):
"""Test _reverse_transform with ``min_value`` and ``max_value`` set
The ``_reverse_transform`` method should clip any values above
the ``max_value`` and any values below the ``min_value``.
Input:
- Array with values above the max and below the min
Output:
- Array with out of bound values clipped to min and max
"""
# Setup
data = np.array([-np.inf, -5000, -301, -250, 0, 125, 401, np.inf])
# Run
transformer = NumericalTransformer(dtype=float, nan=None)
transformer._max_value = 400
transformer._min_value = -300
result = transformer._reverse_transform(data)
# Asserts
np.testing.assert_array_equal(result, np.array([-300, -300, -300, -250, 0, 125, 400, 400]))
def test__reverse_transform_min_and_max_with_nulls(self):
"""Test _reverse_transform with nulls and ``min_value`` and ``max_value`` set
The ``_reverse_transform`` method should clip any values above
the ``max_value`` and any values below the ``min_value``. Null values
should be replaced with ``np.nan``.
Input:
- 2d array where second column has some values over 0.5 representing null values
Output:
- Array with out of bounds values clipped and null values injected
"""
# Setup
data = np.array([
[-np.inf, 0],
[-5000, 0.1],
[-301, 0.8],
[-250, 0.4],
[0, 0],
[125, 1],
[401, 0.2],
[np.inf, 0.5]
])
clipped_data = np.array([
[-300, 0],
[-300, 0.1],
[-300, 0.8],
[-250, 0.4],
[0, 0],
[125, 1],
[400, 0.2],
[400, 0.5]
])
expected_data = np.array([-300, -300, np.nan, -250, 0, np.nan, 400, 400])
# Run
transformer = NumericalTransformer(dtype=float, nan='nan')
transformer._max_value = 400
transformer._min_value = -300
transformer.null_transformer = Mock()
transformer.null_transformer.reverse_transform.return_value = expected_data
result = transformer._reverse_transform(data)
# Asserts
null_transformer_calls = transformer.null_transformer.reverse_transform.mock_calls
np.testing.assert_array_equal(null_transformer_calls[0][1][0], clipped_data)
np.testing.assert_array_equal(result, expected_data)
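# A tiny illustration (not the transformer's own code) of the min/max clipping
# behaviour asserted above: out-of-range and infinite values are pulled back
# to the configured bounds.
import numpy as np

demo = np.array([-np.inf, -5000, -250, 401, np.inf])
print(np.clip(demo, -300, 400))  # [-300. -300. -250.  400.  400.]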
class TestNumericalBoundedTransformer(TestCase):
def test___init__(self):
"""super() arguments are properly passed and set as attributes."""
# Run
nt = NumericalBoundedTransformer(dtype='int', null_column=False)
# Assert
assert nt.dtype == 'int'
assert nt.nan == 'mean'
assert nt.null_column is False
assert nt.min_value == 'auto'
assert nt.max_value == 'auto'
assert nt.rounding is None
class TestNumericalRoundedTransformer(TestCase):
def test___init__(self):
"""super() arguments are properly passed and set as attributes."""
# Run
nt = NumericalRoundedTransformer(dtype='int', null_column=False)
# Assert
assert nt.dtype == 'int'
assert nt.nan == 'mean'
assert nt.null_column is False
assert nt.min_value is None
assert nt.max_value is None
assert nt.rounding == 'auto'
class TestNumericalRoundedBoundedTransformer(TestCase):
def test___init__(self):
"""super() arguments are properly passed and set as attributes."""
# Run
nt = NumericalRoundedBoundedTransformer(dtype='int', null_column=False)
# Assert
assert nt.dtype == 'int'
assert nt.nan == 'mean'
assert nt.null_column is False
assert nt.min_value == 'auto'
assert nt.max_value == 'auto'
assert nt.rounding == 'auto'
class TestGaussianCopulaTransformer:
def test___init__super_attrs(self):
"""super() arguments are properly passed and set as attributes."""
ct = GaussianCopulaTransformer(dtype='int', nan='mode', null_column=False)
assert ct.dtype == 'int'
assert ct.nan == 'mode'
assert ct.null_column is False
def test___init__str_distr(self):
"""If distribution is an str, it is resolved using the _DISTRIBUTIONS dict."""
ct = GaussianCopulaTransformer(distribution='univariate')
assert ct._distribution is copulas.univariate.Univariate
def test___init__non_distr(self):
"""If distribution is not an str, it is store as given."""
univariate = copulas.univariate.Univariate()
ct = GaussianCopulaTransformer(distribution=univariate)
assert ct._distribution is univariate
def test__get_distributions_copulas_not_installed(self):
"""Test the ``_get_distributions`` method when copulas is not installed.
Validate that this method raises the appropriate error message when copulas is
not installed.
Raise:
- ImportError('\n\nIt seems like `copulas` is not installed.\n'
'Please install it using:\n\n pip install rdt[copulas]')
"""
__py_import__ = __import__
def custom_import(name, *args):
if name == 'copulas':
raise ImportError('Simulate copulas not being importable.')
return __py_import__(name, *args)
with patch('builtins.__import__', side_effect=custom_import):
with pytest.raises(ImportError, match=r'pip install rdt\[copulas\]'):
GaussianCopulaTransformer._get_distributions()
def test__get_distributions(self):
"""Test the ``_get_distributions`` method.
Validate that this method returns the correct dictionary of distributions.
Setup:
- instantiate a ``GaussianCopulaTransformer``.
"""
# Setup
transformer = GaussianCopulaTransformer()
# Run
distributions = transformer._get_distributions()
# Assert
expected = {
'univariate': univariate.Univariate,
'parametric': (
univariate.Univariate, {
'parametric': univariate.ParametricType.PARAMETRIC,
},
),
'bounded': (
univariate.Univariate,
{
'bounded': univariate.BoundedType.BOUNDED,
},
),
'semi_bounded': (
univariate.Univariate,
{
'bounded': univariate.BoundedType.SEMI_BOUNDED,
},
),
'parametric_bounded': (
univariate.Univariate,
{
'parametric': univariate.ParametricType.PARAMETRIC,
'bounded': univariate.BoundedType.BOUNDED,
},
),
'parametric_semi_bounded': (
univariate.Univariate,
{
'parametric': univariate.ParametricType.PARAMETRIC,
'bounded': univariate.BoundedType.SEMI_BOUNDED,
},
),
'gaussian': univariate.GaussianUnivariate,
'gamma': univariate.GammaUnivariate,
'beta': univariate.BetaUnivariate,
'student_t': univariate.StudentTUnivariate,
'gaussian_kde': univariate.GaussianKDE,
'truncated_gaussian': univariate.TruncatedGaussian,
}
assert distributions == expected
def test__get_univariate_instance(self):
"""Test the ``_get_univariate`` method when the distribution is univariate.
Validate that a deepcopy of the distribution stored in ``self._distribution`` is returned.
Setup:
- create an instance of a ``GaussianCopulaTransformer`` with ``distribution`` set
to ``univariate.Univariate``.
Output:
- a copy of the value stored in ``self._distribution``.
"""
# Setup
distribution = copulas.univariate.Univariate()
ct = GaussianCopulaTransformer(distribution=distribution)
# Run
univariate = ct._get_univariate()
# Assert
assert univariate is not distribution
assert isinstance(univariate, copulas.univariate.Univariate)
assert dir(univariate) == dir(distribution)
def test__get_univariate_tuple(self):
"""Test the ``_get_univariate`` method when the distribution is a tuple.
When the distribution is passed as a tuple, it should return an instance
with the passed arguments.
Setup:
- create an instance of a ``GaussianCopulaTransformer`` and set
``distribution`` to a tuple.
Output:
- an instance of ``copulas.univariate.Univariate`` with the passed arguments.
"""
# Setup
distribution = (
copulas.univariate.Univariate,
{'candidates': 'a_candidates_list'}
)
ct = GaussianCopulaTransformer(distribution=distribution)
# Run
univariate = ct._get_univariate()
# Assert
assert isinstance(univariate, copulas.univariate.Univariate)
assert univariate.candidates == 'a_candidates_list'
def test__get_univariate_class(self):
"""Test the ``_get_univariate`` method when the distribution is a class.
When ``distribution`` is passed as a class, it should return an instance
without passing arguments.
Setup:
- create an instance of a ``GaussianCopulaTransformer`` and set ``distribution``
to ``univariate.Univariate``.
Output:
- an instance of ``copulas.univariate.Univariate`` without any arguments.
"""
# Setup
distribution = copulas.univariate.Univariate
ct = GaussianCopulaTransformer(distribution=distribution)
# Run
univariate = ct._get_univariate()
# Assert
assert isinstance(univariate, copulas.univariate.Univariate)
def test__get_univariate_error(self):
"""Test the ``_get_univariate`` method when ``distribution`` is invalid.
Validate that it raises an error if an invalid distribution is stored in
``distribution``.
Setup:
- create an instance of a ``GaussianCopulaTransformer`` and set ``self._distribution``
improperly.
Raise:
- TypeError(f'Invalid distribution: {distribution}')
"""
# Setup
distribution = 123
ct = GaussianCopulaTransformer(distribution=distribution)
# Run / Assert
with pytest.raises(TypeError):
ct._get_univariate()
def test__fit(self):
"""Test the ``_fit`` method.
Validate that ``_fit`` calls ``_get_univariate``.
Setup:
- create an instance of the ``GaussianCopulaTransformer``.
- mock the ``_get_univariate`` method.
Input:
- a pandas series of float values.
Side effect:
- call the `_get_univariate`` method.
"""
# Setup
data =
|
pd.Series([0.0, np.nan, 1.0])
|
pandas.Series
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import ahocorasick
import math
import os
import re
import sys
import shutil
import glob
import xlsxwriter
import subprocess
from functools import partial
from itertools import product, combinations
from subprocess import DEVNULL
from multiprocessing import Pool
from threading import Timer
import random
import pandas as pd
import tqdm
import primer3
from Bio import SeqIO, Seq
from Bio.Alphabet import IUPAC
from Bio.Data import IUPACData
from Bio.SeqRecord import SeqRecord
__author__ = "<NAME>"
__copyright__ = "Copyright 2019, <NAME>"
__license__ = "MIT"
__maintainer__ = "<NAME>"
__version__ = "1.0"
__email__ = "<EMAIL>"
__status__ = "Production"
class PRIMeval:
def __init__(self, run_id, max_primer_mismatches, max_probe_mismatches, max_product_size, cross_check, probes_only, method, dimer_check,
primerMonovalentCations, primerDivalentCations, primerDNTPs, primerConcentration, primerAnnealingTemp,
probeMonovalentCations, probeDivalentCations, probeDNTPs, probeConcentration, probeAnnealingTemp, prebuilt = ""):
# parameters
self.run_id = run_id
self.max_primer_mismatches = int(max_primer_mismatches)
self.max_probe_mismatches = int(max_probe_mismatches)
self.max_product_size = int(max_product_size)
self.max_mismatches = max(max_primer_mismatches, max_probe_mismatches)
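# Map the allowed mismatch count to search sensitivity: l and e feed bowtie's
# -l/-e options, qcov and perciden feed BLAST's -qcov_hsp_perc/-perc_identity.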
if self.max_mismatches == 0:
self.l, self.e, self.qcov, self.perciden = 5, 10, 100, 100
elif self.max_mismatches == 1:
self.l, self.e, self.qcov, self.perciden = 5, 40, 90, 90
elif self.max_mismatches == 2:
self.l, self.e, self.qcov, self.perciden = 5, 70, 85, 85
else:
self.l, self.e, self.qcov, self.perciden = 5, 100, 80, 80
self.prebuilt = str(prebuilt)
self.bowtie_dbs = ["list_of_prebuilt_dbs_in_prebuilt_folder"]
self.bowtie_runs = []
self.blast_db_name = "user_db"
self.bowtie_index_name = "bindex"
self.num_threads = 48
self.method = method
if dimer_check == "True":
self.dimer_check = True
else:
self.dimer_check = False
if cross_check == "True":
self.same_package = False
else:
self.same_package = True
if probes_only == "True":
self.probes_only = True
else:
self.probes_only = False
# Cross dimer check
self.primer_monovalent_cations = str(primerMonovalentCations)
self.primer_divalent_cations = str(primerDivalentCations)
self.primer_dntps = str(primerDNTPs)
self.primer_annealing_oligo = str(primerConcentration)
self.primer_annealing_temp = str(primerAnnealingTemp)
self.probe_monovalent_cations = str(probeMonovalentCations)
self.probe_divalent_cations = str(probeDivalentCations)
self.probe_dntps = str(probeDNTPs)
self.probe_annealing_oligo = str(probeConcentration)
self.probe_annealing_temp = str(probeAnnealingTemp)
self.cross_dimer_dfs = []
self.cross_dimer_dfs_dg = []
self.hairpin_dfs = []
# Aho-Corasick Automaton
self.aho = ahocorasick.Automaton()
# folders
self.base_folder = os.getcwd() + "/"
self.run_folder = self.base_folder + "runs/" + str(self.run_id) + "/"
self.input_folder = self.run_folder + "input/"
self.output_folder = self.run_folder + "output/"
self.tmp_folder = self.run_folder + "tmp/"
self.input_contigs = self.run_folder + "input/contigs/"
self.primer_input_folder = self.run_folder + "input/primers/"
self.probes_input_folder = self.run_folder + "input/probes/"
self.blast_db_folder = self.run_folder + "tmp/blastdb/"
self.prebuilt_genomes = self.base_folder + "prebuilt/genomes/"
self.prebuilt_bowtie = self.base_folder + "prebuilt/bowtie/"
# files
self.output_contigs = self.run_folder + "tmp/merged_contigs.fasta"
self.blast_output_tmp_file = self.run_folder + "tmp/blast_tmp_results.txt"
self.blast_output_file = self.run_folder + "tmp/blast_results.txt"
self.bowtie_output_tmp_file = self.run_folder + "tmp/bowtie_tmp_results.txt"
self.bowtie_output_file = self.run_folder + "tmp/bowtie_results.txt"
self.bowtie_index_folder = self.run_folder + "tmp/bowtie_index_folder/"
self.oligo_file = self.run_folder + "output/oligos.fasta"
self.results_all = self.run_folder + "output/results.csv"
self.results_wob = self.run_folder + "output/results_wobbled.csv"
self.results_dimers = self.run_folder + "output/results_dimers.xlsx"
# settings
self.blastdb_cmd = "/path/to/makeblastdb"
self.bowtie_build_cmd = "/path/to/bowtie-build"
self.blast_cmd = "/path/to/blastn"
self.bowtie_cmd = "/path/to/bowtie"
self.faidx_cmd = "/path/to/samtools faidx "
self.pd_col_hits = ["Sequence", "Type", "Name", "Package", "StartPos", "EndPos", "MismatchesTotal",
"Strand", "HitSequence", "Tm", "dG"]
self.pd_col_results = ["Sequence", "Contig", "Primer1", "Primer2", "Probe", "Primer1Package",
"Primer2Package", "ProbePackage", "StartPos1", "EndPos1", "StartPos2", "EndPos2",
"StartPos3", "EndPos3", "Primer1Tm", "Primer2Tm", "ProbeTm", "Primer1dG", "Primer2dG", "ProbedG", "ProductSize", "ProductTm", "NoMismatchesLeft", "NoMismatchesRight",
"NoMismatchesProbe", "MismatchesLeft", "MismatchesRight", "MismatchesProbe",
"Comment", "Product"]
self.blast_txt_params = "\"6 qseqid sseqid nident qlen length mismatch qstart qend sstart sseq sstrand " \
"send\""
self.blast_txt_fields = ["qseqid", "sseqid", "nident", "qlen", "length", "mismatch", "qstart", "qend",
"sstart", "sseq", "sstrand", "send"]
# return list of all possible sequences given an ambiguous DNA input
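# e.g. "AY" expands to ["AC", "AT"], since the IUPAC code Y stands for C or T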
def _extend_ambiguous_dna(self, seq):
d = IUPACData.ambiguous_dna_values  # maps each IUPAC code to the bases it represents
return list(map("".join, product(*map(d.get, seq))))
def _get_sequence(self, contig_file, wanted_contig, start, end, strand=1):
try:
command = self.faidx_cmd + contig_file + " '" + wanted_contig + ":" + str(start) + "-" + str(end) + "'"
call = subprocess.check_output(command, shell=True, stderr=subprocess.DEVNULL).decode().split("\n", 1)[1]
except:
try:
contig_file = self.prebuilt_genomes + wanted_contig.split("__contigname__", 1)[0] + ".fasta"
command = self.faidx_cmd + contig_file + " '" + wanted_contig + ":" + str(start) + "-" + str(end) + "'"
call = subprocess.check_output(command, shell=True, stderr=subprocess.DEVNULL).decode().split("\n", 1)[1]
except:
sys.exit("Failed retrieving: " + command)
call = re.sub("\n|\r", "", call)
sequence = Seq.Seq(call)
if strand == 1:
return sequence.upper()
else:
return sequence.reverse_complement().upper()
# Get a visual representation of mismatches between two sequences
def _mismatch_visualization(self, seq_a, seq_b):
seq_a, seq_b = seq_a.upper(), seq_b.upper()
mismatches = ""
if (len(seq_a) - len(seq_b) != 0):
return "Error"
for pos in range(0, len(seq_a)):
if seq_a[pos] != seq_b[pos]:
mismatches += "(" + seq_a[pos] + "/" + seq_b[pos] + ")"
else:
mismatches += "="
return mismatches
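# e.g. _mismatch_visualization("ACGT", "ACTT") returns "==(G/T)=": matching
# positions collapse to "=" and differing positions show both bases.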
def _prepare_folders(self):
if os.path.exists(self.output_folder):
shutil.rmtree(self.output_folder)
if os.path.exists(self.tmp_folder):
shutil.rmtree(self.tmp_folder)
# Create output and tmp folders
if not os.path.exists(self.output_folder):
os.makedirs(self.output_folder)
if not os.path.exists(self.tmp_folder):
os.makedirs(self.tmp_folder)
if not os.path.exists(self.bowtie_index_folder):
os.makedirs(self.bowtie_index_folder)
def _clean_up_folders(self):
# if os.path.exists(self.input_folder):
# shutil.rmtree(self.input_folder)
if os.path.exists(self.tmp_folder):
shutil.rmtree(self.tmp_folder)
# Rename primer and probes, create new sequences without IUPAC codes and save in file
# Used for dimer check: self.packages, packages, package, oligo_name
def _import_oligos(self, folder, oligotype):
packages = {}
primer_records = []
allowed_chars = "[^0-9a-zA-Z()'_\+-]+"
for file in os.listdir(folder):
if file.endswith(".fasta"):
package = file.rsplit(".fasta", 1)[0]
packages[package] = {}
sequences = SeqIO.parse(open(folder + file), "fasta")
for fasta in sequences:
m = re.search("[M,R,W,S,Y,K,V,H,D,B,N]", str(fasta.seq))
if m:
sequence_mutations = self._extend_ambiguous_dna(str(fasta.seq))
mutation_count = 0
for mutation in sequence_mutations:
mutation_count += 1
oligo_name = re.sub(allowed_chars, "_", fasta.description) + "_mut" + str(mutation_count)
packages[package][oligo_name] = str(mutation)
if oligotype == "probe":
rec = SeqRecord(Seq.Seq(mutation, IUPAC),
id=package + "^" + re.sub(allowed_chars, "_",
fasta.description) + "_mut" + str(
mutation_count) + "_probe", description="")
else:
rec = SeqRecord(Seq.Seq(mutation, IUPAC),
id=package + "^" + re.sub(allowed_chars, "_",
fasta.description) + "_mut" + str(
mutation_count), description="")
primer_records.append(rec)
else:
oligo_name = re.sub(allowed_chars, "_", fasta.description)
packages[package][oligo_name] = str(fasta.seq)
if oligotype == "probe":
rec = SeqRecord(fasta.seq, id=package + "^" + re.sub(allowed_chars, "_",
fasta.description) + "_probe",
description="")
else:
rec = SeqRecord(fasta.seq,
id=package + "^" + re.sub(allowed_chars, "_", fasta.description),
description="")
primer_records.append(rec)
output_handle = open(self.oligo_file, "a")
SeqIO.write(primer_records, output_handle, "fasta")
output_handle.close()
if oligotype == "primer":
self.primer_packages = packages
else:
self.probe_packages = packages
# Rename and merge contigs
def _import_contigs(self):
seq_records = []
for file in os.listdir(self.input_contigs):
# CHANGE: other file endings should also be possible (match the upload filenames permitted in Django)
if file.endswith(".fasta"):
base_contig_name = file.replace(".fasta", "")
for entry in SeqIO.parse(self.input_contigs + file, "fasta"):
my_new_id = base_contig_name + "__contigname__" + entry.id
seq_records.append(SeqRecord(entry.seq, id=my_new_id, description=""))
output_handle = open(self.output_contigs, "w")
SeqIO.write(seq_records, output_handle, "fasta")
output_handle.close()
command = self.faidx_cmd + self.output_contigs
subprocess.call(command, shell=True)
def _import_sequences(self):
if self.probes_only == False:
self._import_oligos(self.primer_input_folder, "primer")
self._import_oligos(self.probes_input_folder, "probe")
self._import_contigs()
def _create_blast_db(self):
command = self.blastdb_cmd + " -in " + self.output_contigs + " -dbtype nucl -out " + self.blast_db_folder + self.blast_db_name
subprocess.call(command, shell=True)
def _create_bowtie_index(self):
command = self.bowtie_build_cmd + " --threads " + str(
self.num_threads) + " -f " + self.output_contigs + " " + self.bowtie_index_folder + self.bowtie_index_name
subprocess.call(command, shell=True)
def _blast_call(self):
command = self.blast_cmd + " -db " + self.blast_db_folder + self.blast_db_name + " -query " + self.oligo_file + " -out " + \
self.blast_output_tmp_file + " -outfmt " + self.blast_txt_params + " -num_threads " + str(
self.num_threads) + " -evalue 200000 " \
"-qcov_hsp_perc " + str(self.qcov) + " -perc_identity " + str(self.perciden) + " -max_target_seqs 2000000 -word_size 4 -ungapped"
subprocess.call(command, shell=True)
with open(self.blast_output_file, "a") as out_file:
with open(self.blast_output_tmp_file) as in_file:
out_file.write(in_file.read())
def _bowtie_call(self, index_folder = "", index_name = ""):
mismatches = self.max_primer_mismatches if self.max_primer_mismatches >= self.max_probe_mismatches else self.max_probe_mismatches
if index_folder == "" and index_name == "":
if os.path.getsize(self.output_contigs) == 0:
return
index_folder = self.bowtie_index_folder
index_name = self.bowtie_index_name
command = self.bowtie_cmd + " -f -a -p " + str(self.num_threads) + " -n " + str(
mismatches) + " -l " + str(self.l) + " -e " + str(self.e) + " " + index_folder + index_name + " " + self.oligo_file + " " + self.bowtie_output_tmp_file
subprocess.call(command, shell=True)
with open(self.bowtie_output_file, "a") as out_file:
with open(self.bowtie_output_tmp_file) as in_file:
out_file.write(in_file.read())
def _specificity_calls(self):
for db in self.bowtie_runs:
self._bowtie_call(self.prebuilt_bowtie, db)
def _multiprocess_convert_bowtie_to_blast(self):
# in case no hits are returned
try:
df = pd.read_csv(self.bowtie_output_file, sep="\t", header=None)
except pd.errors.EmptyDataError:
df = pd.DataFrame(columns = ["", "", "", "", "", "", "", "", "", "", "", ""])
split_df = self.splitDataFrameIntoChunks(df)
func = partial(self._convert_bowtie_to_blast)
with Pool(self.num_threads) as p:
multiprocessing_results = list(tqdm.tqdm(p.imap(func, split_df), total=len(split_df)))
self.df_bowtie = pd.DataFrame(
columns=["qseqid", "sseqid", "nident", "qlen", "length", "mismatch", "qstart", "qend", "sstart", "sseq",
"sstrand", "send"])
self.df_bowtie = pd.concat(multiprocessing_results, ignore_index=True)
def _convert_bowtie_to_blast(self, df):
df_bowtie = pd.DataFrame(
columns=["qseqid", "sseqid", "nident", "qlen", "length", "mismatch", "qstart", "qend", "sstart", "sseq",
"sstrand", "send"])
for index, line in df.iterrows():
mismatch = str(line[7]).count(":")
if line[0].endswith("_probe") and mismatch > self.max_probe_mismatches:
continue
if not line[0].endswith("_probe") and mismatch > self.max_primer_mismatches:
continue
sstrand = "plus" if line[1] == "+" else "minus"
qseqid = line[0]
sseqid = line[2]
qlen = len(line[4])
length = qlen
qstart = 1
qend = qlen
sstart = int(line[3]) + 1
send = sstart + qlen - 1
nident = qlen - mismatch
if sstrand == "minus":
temp_swap = sstart
sstart = send
send = temp_swap
if mismatch == 0:
sseq = str(Seq.Seq(line[4]).reverse_complement())
else:
sseq = self._resolve_bowtie_mismtches(line[4], line[7], -1)
else:
if mismatch == 0:
sseq = line[4]
else:
sseq = self._resolve_bowtie_mismtches(line[4], line[7], 1)
df_bowtie.loc[len(df_bowtie)] = [str(qseqid), str(sseqid), str(nident), str(qlen), str(length),
str(mismatch), str(qstart),
str(qend), str(sstart), str(sseq), str(sstrand), str(send)]
return df_bowtie
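# Bowtie reports mismatches as comma-separated "offset:..." descriptors; the
# helper below substitutes the first base after each ":" into the sequence at
# that offset (sequence and base are reverse-complemented for minus-strand hits).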
def _resolve_bowtie_mismtches(self, sequence, mismatches, strand):
sequence = Seq.Seq(sequence) if strand == 1 else Seq.Seq(sequence).reverse_complement()
mismatches = mismatches.split(",")
for mismatch in mismatches:
position, base = mismatch.split(":", 1)
position = int(position)
base = base[0] if strand == 1 else Seq.Seq(base[0]).reverse_complement()
sequence = sequence[:position] + base + sequence[position+1:]
return str(sequence)
def _split_output(self):
if self.method == "blast":
# in case no hits are returned
try:
df_blast = pd.read_csv(self.blast_output_file, sep="\t", header=None)
except pd.errors.EmptyDataError:
df_blast = pd.DataFrame(columns = ["", "", "", "", "", "", "", "", "", "", "", ""])
self.df_blast_split = self.splitDataFrameIntoChunks(df_blast)
if self.method == "bowtie":
self.df_bowtie_split = self.splitDataFrameIntoChunks(self.df_bowtie)
if self.method == "aho-corasick":
self.df_aho_split = self.splitDataFrameIntoChunks(self.df_aho)
def splitDataFrameIntoChunks(self, df):
chunkSize = math.ceil(len(df) / self.num_threads)
if chunkSize == 0:
chunkSize = 1
listOfDf = list()
numberChunks = len(df) // chunkSize + 1
for i in range(numberChunks):
listOfDf.append(df[i * chunkSize:(i + 1) * chunkSize])
return listOfDf
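# e.g. len(df) == 100 with num_threads == 48 gives chunkSize == 3 and 34
# chunks, the last of which holds the single leftover row.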
def _multiprocess_split_files(self):
if self.method == "blast":
input_files = self.df_blast_split
if self.method == "bowtie":
input_files = self.df_bowtie_split
if self.method == "aho-corasick":
input_files = self.df_aho_split
func = partial(self._parse_blastlike_results_df)
with Pool(self.num_threads) as p:
multiprocessing_results = list(tqdm.tqdm(p.imap(func, input_files), total=len(input_files)))
self.hits = pd.concat(multiprocessing_results, ignore_index=True)
hits_output = self.hits.copy()
if len(hits_output.index) > 0:
hits_output[['Sequence', 'Contig']] = hits_output['Sequence'].str.split("__contigname__", 1, expand=True)
hits_output = hits_output[
['Sequence', 'Contig', 'Type', 'Name', 'Package', 'StartPos', 'EndPos', 'MismatchesTotal', 'Strand',
'HitSequence', 'Tm', 'dG']]
tmp = hits_output['Name'].str.rsplit("_probe", 1, expand = True)
hits_output['Name'] = tmp[0]
hits_output.to_csv(self.output_folder + "all_hits.csv", index=False, sep=";")
def _process_probes_only(self):
probes_df = self.hits[(self.hits['Type'] == "Probe")]
if len(probes_df.index) > 0:
oligos_full_sequences = SeqIO.index(self.oligo_file, "fasta")
probes_df = probes_df.drop(columns = ['Type', 'Strand'])
probes_df = probes_df.rename(columns = {'Name': 'Probe', 'Package': 'ProbePackage', 'MismatchesTotal': 'NoMismatchesProbe'})
probes_df[['Sequence', 'Contig']] = probes_df['Sequence'].str.split("__contigname__", 1, expand = True)
probes_df['MismatchesProbe'] = probes_df.apply(lambda x: self._mismatch_visualization(oligos_full_sequences[x['ProbePackage'] + "^" + x['Probe']].seq, x['HitSequence']), axis=1)
probes_df = probes_df[['Sequence', 'Contig', 'Probe', 'ProbePackage', 'StartPos', 'EndPos', 'NoMismatchesProbe', 'MismatchesProbe', 'HitSequence', 'Tm' ,'dG']]
tmp = probes_df['Probe'].str.rsplit("_probe", 1, expand = True)
probes_df['Probe'] = tmp[0]
probes_df.to_csv(self.results_all, index=False, sep=";")
# parse wobbled primers
subset = probes_df[probes_df['Probe'].str.contains("_mut")]
subset_r = subset.replace(['_mut([0-9])+'], [''], regex=True)
# hits without mutations
unique = probes_df.merge(subset, indicator=True, how="outer")
unique = unique[unique['_merge'] == 'left_only']
unique = unique.drop("_merge", axis=1)
results2 = pd.DataFrame(columns=['Sequence', 'Contig', 'Probe', 'ProbePackage', 'StartPos', 'EndPos', 'NoMismatchesProbe', 'MismatchesProbe', 'HitSequence', 'Tm', 'dG'])
for s in subset_r.groupby(['Sequence', 'Contig', 'Probe', 'ProbePackage', 'StartPos', 'EndPos', 'HitSequence']).groups.items():
# Fields to be changed: NoMismatchesProbe, MismatchesProbe
sample = subset_r.loc[s[1]] # get one set
first_row = sample.iloc[0]
if len(sample) < 2:
results2.loc[len(results2)] = first_row
else:
mismatch_min, mismatch_max = min(sample['NoMismatchesProbe']), max(sample['NoMismatchesProbe'])
mismatches = mismatch_min if mismatch_min == mismatch_max else str(mismatch_min) + "-" + str(mismatch_max)
tm_min, tm_max = min(sample['Tm']), max(sample['Tm'])
tm = tm_min if tm_min == tm_max else str(tm_min) + "-" + str(tm_max)
dg_min, dg_max = min(sample['dG']), max(sample['dG'])
dg = dg_min if dg_min == dg_max else str(dg_min) + "-" + str(dg_max)
# Get first row and then replace values of first row (all other fields are identifical)
results2.loc[len(results2)] = [first_row['Sequence'], first_row['Contig'], first_row['Probe'], first_row['ProbePackage'],
first_row['StartPos'], first_row['EndPos'], mismatches, '', first_row['HitSequence'], tm, dg]
wobbled = pd.concat([unique, results2])
wobbled.to_csv(self.results_wob, index=False, sep=";")
def _parse_blastlike_results_df(self, blast_df):
oligos_full_sequences = SeqIO.index(self.oligo_file, "fasta")
hits = pd.DataFrame(columns=self.pd_col_hits)
for index, line in blast_df.iterrows():
if self.method == "aho-corasick":
if line[0].endswith("_probe") and int(line[5]) > self.max_probe_mismatches:
continue
if not line[0].endswith("_probe") and int(line[5]) > self.max_primer_mismatches:
continue
new_package = line[0].split("^", 1)[0]
new_qresult = line[0].split("^", 1)[1]
hit_strand = 1 if line[10] == "plus" else -1
mismatches_total = int(line[5])
hit_seq = line[9]
type = "Probe" if line[0].endswith("_probe") == True else "Primer"
if hit_strand == -1:
temp_swap = int(line[8])
sstart = int(line[11])
send = temp_swap
else:
sstart = int(line[8])
send = int(line[11])
tm, dg = self._calc_thermal_parameters(str(oligos_full_sequences[line[0]].seq.reverse_complement()), hit_seq, type)
hits.loc[len(hits)] = [line[1], type, new_qresult, new_package, sstart, send,
mismatches_total, hit_strand, hit_seq, tm, dg]
else:
mismatches_left = int(line[6])
mismatches_right = int(line[3]) - int(line[7])
mismatches_middle = int(line[3]) - int(line[2]) - mismatches_left - mismatches_right
mismatches_total = mismatches_left + mismatches_right + mismatches_middle
if line[0].endswith("_probe") and mismatches_total > self.max_probe_mismatches:
continue
if not line[0].endswith("_probe") and mismatches_total > self.max_primer_mismatches:
continue
new_package = line[0].split("^", 1)[0]
new_qresult = line[0].split("^", 1)[1]
hit_strand = 1 if line[10] == "plus" else -1
type = "Probe" if line[0].endswith("_probe") == True else "Primer"
correct_start = mismatches_left - 1 if hit_strand == 1 else mismatches_right if hit_strand == -1 else 0
correct_end = mismatches_right if hit_strand == 1 else mismatches_left - 1 if hit_strand == -1 else 0
if hit_strand == -1:
temp_swap = int(line[8])
sstart = int(line[11]) - correct_start
send = temp_swap + correct_end
else:
sstart = int(line[8]) - correct_start
send = int(line[11]) + correct_end
if mismatches_left > 0 or mismatches_right > 0:
hit_seq = self._get_sequence(self.output_contigs, line[1], sstart, send, hit_strand)
else:
hit_seq = line[9]
tm, dg = self._calc_thermal_parameters(str(oligos_full_sequences[line[0]].seq.reverse_complement()), hit_seq, type)
hits.loc[len(hits)] = [line[1], type, new_qresult, new_package, sstart, send,
mismatches_total, hit_strand, hit_seq, tm, dg]
return hits
def _multiprocess_hits(self):
objects = []
groups = []
num_groups = math.ceil(len(self.hits[self.hits['Type'] == "Primer"].groupby("Sequence").groups.items()) / self.num_threads)
i = 1
for s in self.hits[self.hits['Type'] == "Primer"].groupby("Sequence").groups.items():
if i > num_groups:
groups.append(objects)
objects = []
i = 1
objects.append(self.hits.loc[s[1]])
i += 1
groups.append(objects)
multiprocessing_results = []
func = partial(self._parse_hits)
with Pool(self.num_threads) as p:
multiprocessing_results = list(tqdm.tqdm(p.imap(func, groups), total=len(groups)))
self.results = pd.concat(multiprocessing_results, ignore_index=True)
def _parse_hits(self, groups):
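# Pair primer hits on the same target into candidate amplicons: left primer on the plus strand, right primer on
# the minus strand, product no longer than max_product_size and (optionally) both primers from the same package.
# Probe hits falling inside the amplicon are attached; Tm, dG and mismatch visualizations are recorded per row.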
oligos_full_sequences = SeqIO.index(self.oligo_file, "fasta")
results = pd.DataFrame(columns=self.pd_col_results)
for df in groups:
start = df['StartPos'].values.tolist()
end = df['EndPos'].values.tolist()
a = [(y - x, (i, j)) for i, x in enumerate(start) for j, y in enumerate(end) if
(y - x) > 0 and (y - x) <= self.max_product_size and i != j]
if len(a) == 0:
continue
for product_size, (left, right) in a:
if df['Strand'].iloc[left] == df['Strand'].iloc[right] or df['Strand'].iloc[left] == -1:
continue
if self.same_package and df['Package'].iloc[left] != df['Package'].iloc[right]:
continue
sequence, contig = df['Sequence'].iloc[left].split("__contigname__", 1)
product = self._get_sequence(self.output_contigs, df['Sequence'].iloc[left], df['StartPos'].iloc[left],
df['EndPos'].iloc[right], 1)
tmp_left_name = df["Package"].iloc[left] + "^" + df['Name'].iloc[left]
mismatches1 = self._mismatch_visualization(oligos_full_sequences[tmp_left_name].seq,
df['HitSequence'].iloc[left])
tmp_right_name = df["Package"].iloc[right] + "^" + df['Name'].iloc[right]
mismatches2 = self._mismatch_visualization(oligos_full_sequences[tmp_right_name].seq,
df['HitSequence'].iloc[right])
probe_matches = self.hits[(self.hits['Type'] == "Probe") &
(self.hits['Sequence'] == df['Sequence'].iloc[left]) &
(self.hits['StartPos'] >= df['StartPos'].iloc[left]) &
(self.hits['EndPos'] <= df['EndPos'].iloc[right])]
if self.same_package:
probe_package_match = str(df['Package'].iloc[left]) + "_probes"
probe_matches = probe_matches[(probe_matches['Package'] == probe_package_match)]
no_probe_hits = len(probe_matches.index)
product_tm = self._calc_Tm(str(product), "Product")
if no_probe_hits == 0:
# save matching primers, no matching probe found
results.loc[len(results)] = [sequence, contig, df['Name'].iloc[left], df['Name'].iloc[right], "",
df['Package'].iloc[left], df['Package'].iloc[right], "",
df['StartPos'].iloc[left], df['EndPos'].iloc[left],
df['StartPos'].iloc[right], df['EndPos'].iloc[right], "", "",
df['Tm'].iloc[left], df['Tm'].iloc[right], "", df['dG'].iloc[left], df['dG'].iloc[right], "",
int(product_size+1), product_tm, df['MismatchesTotal'].iloc[left],
df['MismatchesTotal'].iloc[right], "", mismatches1, mismatches2, "",
"", str(product)]
else:
for index, row in probe_matches.iterrows():
tmp_probe_name = row['Package'] + "^" + row['Name']
probe_mivi = self._mismatch_visualization(oligos_full_sequences[tmp_probe_name].seq,
row['HitSequence'])
comment = "More than 1 probe binding to amplicon." if no_probe_hits > 1 else ""
# save matching primers and matching probes, multiple probes per primer pair possible
results.loc[len(results)] = [sequence, contig, df['Name'].iloc[left], df['Name'].iloc[right],
row['Name'], df['Package'].iloc[left], df['Package'].iloc[right],
row['Package'], df['StartPos'].iloc[left], df['EndPos'].iloc[left],
df['StartPos'].iloc[right], df['EndPos'].iloc[right],
row['StartPos'], row['EndPos'],
df['Tm'].iloc[left], df['Tm'].iloc[right], row['Tm'], df['dG'].iloc[left], df['dG'].iloc[right], row['dG'],
int(product_size+1), product_tm,
df['MismatchesTotal'].iloc[left],
df['MismatchesTotal'].iloc[right], row['MismatchesTotal'],
mismatches1, mismatches2, probe_mivi, comment, str(product)]
return results
def _parse_results_to_wobbled(self):
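# Collapse result rows that stem from "_mut" (wobbled) oligo variants into one row per primer/probe combination;
# rows without mutated oligos are kept as-is and both sets are concatenated into self.wobbled.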
subset = self.results[
self.results['Primer1'].str.contains("_mut") | self.results['Primer2'].str.contains("_mut")]
subset_r = subset.replace(['_mut([0-9])+'], [''], regex=True)
# results without mutations
unique = self.results.merge(subset, indicator=True, how="outer")
unique = unique[unique['_merge'] == 'left_only']
unique = unique.drop("_merge", axis=1)
results2 = pd.DataFrame(columns=self.pd_col_results)
for s in subset_r.groupby(
['Sequence', 'Contig', 'Primer1', 'Primer2', 'Probe', 'Primer1Package', 'Primer2Package',
'ProbePackage',
'StartPos1', 'EndPos1', 'StartPos2', 'EndPos2',
'StartPos3', 'EndPos3', 'ProductSize', 'Comment',
'Product']).groups.items():
# Fields to be changed: NoMismatchesLeft, NoMismatchesRight, MismatchesLeft, MismatchesRight, NoMismatchesProbe, MismatchesProbe
sample = subset_r.loc[s[1]] # get one set
first_row = sample.iloc[0]
if len(sample) < 2:
results2.loc[len(results2)] = first_row
else:
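# Report each variable field across the wobbled variants as a "min-max" range, or as a single value when all variants agree.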
primer_left_min, primer_left_max = min(sample['NoMismatchesLeft']), max(sample['NoMismatchesLeft'])
primer_left = primer_left_min if primer_left_min == primer_left_max else str(primer_left_min) + "-" + str(primer_left_max)
primer_right_min, primer_right_max = min(sample['NoMismatchesRight']), max(sample['NoMismatchesRight'])
primer_right = primer_right_min if primer_right_min == primer_right_max else str(primer_right_min) + "-" + str(primer_right_max)
probe_min, probe_max = min(sample['NoMismatchesProbe']), max(sample['NoMismatchesProbe'])
probe = probe_min if probe_min == probe_max else str(probe_min) + "-" + str(probe_max)
primer1tm_min, primer1tm_max = min(sample['Primer1Tm']), max(sample['Primer1Tm'])
primer1tm = primer1tm_min if primer1tm_min == primer1tm_max else str(primer1tm_min) + "-" + str(primer1tm_max)
primer2tm_min, primer2tm_max = min(sample['Primer2Tm']), max(sample['Primer2Tm'])
primer2tm = primer2tm_min if primer2tm_min == primer2tm_max else str(primer2tm_min) + "-" + str(primer2tm_max)
primer1dg_min, primer1dg_max = min(sample['Primer1dG']), max(sample['Primer1dG'])
primer1dg = primer1dg_min if primer1dg_min == primer1dg_max else str(primer1dg_min) + "-" + str(primer1dg_max)
primer2dg_min, primer2dg_max = min(sample['Primer2dG']), max(sample['Primer2dG'])
primer2dg = primer2dg_min if primer2dg_min == primer2dg_max else str(primer2dg_min) + "-" + str(primer2dg_max)
producttm_min, producttm_max = min(sample['ProductTm']), max(sample['ProductTm'])
producttm = producttm_min if producttm_min == producttm_max else str(producttm_min) + "-" + str(producttm_max)
probetm_min, probetm_max = min(sample['ProbeTm']), max(sample['ProbeTm'])
probetm = probetm_min if probetm_min == probetm_max else str(probetm_min) + "-" + str(probetm_max)
probedg_min, probedg_max = min(sample['ProbedG']), max(sample['ProbedG'])
probedg = probedg_min if probedg_min == probedg_max else str(probedg_min) + "-" + str(probedg_max)
# Get first row and then replace values of first row (all other fields are identical)
results2.loc[len(results2)] = [first_row['Sequence'], first_row['Contig'], first_row['Primer1'],
first_row['Primer2'], first_row['Probe'],
first_row['Primer1Package'], first_row['Primer2Package'],
first_row['ProbePackage'],
first_row['StartPos1'], first_row['EndPos1'],
first_row['StartPos2'], first_row['EndPos2'], first_row['StartPos3'],
first_row['EndPos3'], primer1tm, primer2tm, probetm, primer1dg, primer2dg, probedg, first_row['ProductSize'], producttm, primer_left,
primer_right, probe, '', '',
first_row['MismatchesProbe'], first_row['Comment'], first_row['Product']]
self.wobbled = pd.concat([unique, results2], ignore_index=True)  # DataFrame.append was removed in pandas 2.0
def _save_results(self):
if len(self.results.index) > 0:
tmp = self.results['Probe'].str.rsplit("_probe", n=1, expand=True)  # n must be passed as a keyword in pandas 2.0+
self.results['Probe'] = tmp[0]
self.results.to_csv(self.results_all, index=False, sep=";")
def _save_wobbled_results(self):
self.wobbled.to_csv(self.results_wob, index=False, sep=";")
def _add_oligos_to_automaton(self):
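# Register every oligo, plus all variants with up to the allowed number of mismatches, as search words in the
# Aho-Corasick automaton; the reverse complement of each variant is added as well so both strands can be matched.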
sequences = SeqIO.parse(open(self.oligo_file), "fasta")
for fasta in sequences:
name = str(fasta.id)
max_mismatches = self.max_probe_mismatches if name.endswith("_probe") else self.max_primer_mismatches
i = 0
while i <= max_mismatches:
for count, elem in enumerate(self._generate_primer_mismatches(fasta.seq, i)):
self.aho.add_word(elem.upper(), (name + "__" + str(count), elem.upper()))
for count, elem in enumerate(self._generate_primer_mismatches(fasta.seq.reverse_complement(), i)):
self.aho.add_word(elem.upper(), (name + "__rc__" + str(count), elem.upper()))
i += 1
def _generate_primer_mismatches(self, s, d=1):
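# Yield all sequences that differ from s at exactly d positions (substitutions over the ACGT alphabet only).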
N = len(s)
letters = 'ACGT'
pool = list(s)
for indices in combinations(range(N), d):
for replacements in product(letters, repeat=d):
skip = False
for i, a in zip(indices, replacements):
if pool[i] == a: skip = True
if skip: continue
keys = dict(zip(indices, replacements))
yield ''.join([pool[i] if i not in indices else keys[i] for i in range(N)])
def _multiprocess_string_search(self):
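# Split the contigs into roughly num_threads chunks and run the Aho-Corasick search on each chunk in a separate
# worker process; the per-chunk results are concatenated into a BLAST-like DataFrame (self.df_aho).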
objects = []
groups = []
num_seq = sum(1 for x in SeqIO.parse(open(self.output_contigs), "fasta"))
num_groups = math.ceil(num_seq / self.num_threads)
# dynamic group allocation does not always work
# num_groups = 256
i = 1
for fasta in SeqIO.parse(open(self.output_contigs), "fasta"):
if i > num_groups:
groups.append(objects)
objects = []
i = 1
objects.append([str(fasta.seq).upper(), str(fasta.id)])
i += 1
groups.append(objects)
multiprocessing_results = []
func = partial(self._string_search_match)
with Pool(self.num_threads) as p:
multiprocessing_results = list(tqdm.tqdm(p.imap(func, groups), total=len(groups)))
self.df_aho = pd.concat(multiprocessing_results, ignore_index=True)
self.df_aho = self.df_aho.sort_values(['qseqid', 'mismatch'])
def _string_search_match(self, groups):
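# Scan each contig with the Aho-Corasick automaton and emit one BLAST-like row per match; coordinates are
# converted to 1-based positions and "__rc__" words are mapped back to minus-strand hits of the original oligo.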
aho = self.aho
aho.make_automaton()
oligos_full_sequences = SeqIO.index(self.oligo_file, "fasta")
df = pd.DataFrame(
columns=["qseqid", "sseqid", "nident", "qlen", "length", "mismatch", "qstart", "qend", "sstart", "sseq",
"sstrand", "send"])
for object in groups:
haystack = object[0]
fasta = object[1]
for end_pos, (mutated_oligo_name, mutated_oligo_sequence) in aho.iter(haystack):
print("mutated oligo sequence: " + str(mutated_oligo_sequence))
start_pos = end_pos - len(mutated_oligo_sequence)
length = len(mutated_oligo_sequence)
if "__rc__" in mutated_oligo_name:
oligo_name = mutated_oligo_name.rsplit("__rc__", 1)[0]
oligo_seq = str(oligos_full_sequences[oligo_name].seq).upper()
strand = "minus"
start = end_pos + 1
end = start_pos + 2
sseq = self._get_sequence(self.output_contigs, str(fasta), end, start, -1)
mismatches = self._number_of_mismatches(sseq, oligo_seq)
nident = length - mismatches
else:
oligo_name = mutated_oligo_name.rsplit("__", 1)[0]
oligo_seq = str(oligos_full_sequences[oligo_name].seq).upper()
strand = "plus"
start = start_pos + 2
end = end_pos + 1
sseq = self._get_sequence(self.output_contigs, str(fasta), start, end, 1)
mismatches = self._number_of_mismatches(sseq, oligo_seq)
nident = length - mismatches
df.loc[len(df)] = [str(oligo_name), str(fasta), str(nident), str(length), str(length), str(mismatches),
str("1"), str(length), str(start), str(sseq), str(strand), str(end)]
return df
def _number_of_mismatches(self, s1, s2):
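# Hamming distance over the overlapping length of the two sequences.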
return sum(c1 != c2 for c1, c2 in zip(s1, s2))
def _run_cross_dimer_check(self):
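# For each primer package, build all self-pairs and unordered cross-pairs of its oligos and check them for dimer formation in parallel.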
for package in self.primer_packages:
oligo_list = []
for oligo_a in self.primer_packages[package]:
oligo_list.append((package, oligo_a, oligo_a, self.primer_packages[package][oligo_a], self.primer_packages[package][oligo_a]))
for oligo_a, oligo_b in combinations(self.primer_packages[package], 2):
oligo_list.append((package, oligo_a, oligo_b, self.primer_packages[package][oligo_a], self.primer_packages[package][oligo_b]))
pool = Pool(self.num_threads)
multiprocessing_results = []
func = partial(self._cross_dimer_check)
multiprocessing_results = list(tqdm.tqdm(pool.map(func, oligo_list), total=len(oligo_list)))
pool.close()
pool.join()
df = pd.concat(multiprocessing_results, ignore_index=True)
import json
import numpy as np
import pandas as pd
from pprint import pprint
from preprocessing import AbstractPreprocessor, preproc
import os
def simpleSplit(text):
return text.split()
# Reads JSON from the repo files. Every line is a valid JSON object, but the whole document is not.
def repo_read_json(filename, lemma = True):
with open(filename, "r") as fd:
line = fd.readline()
while line != "":
if line.find("bibo:abstract") != -1:
jsonobj = json.loads(line)
# print(jsonobj)
id = jsonobj["identifier"]
pretitle = jsonobj["bibo:shortTitle"]
title = preproc(pretitle, to_lemmatize=lemma)
abstract = jsonobj["bibo:abstract"]
# print(str(id) + "Type of id: " + str(type(id)))
# print(title + "Type of title: " + str(type(title)))
# print(abstract + "Type of abstract: " + str(type(abstract)))
print(id, pretitle, title, abstract, sep="\n")
line = fd.readline()
class AbstractExtracter():
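# Intended to collect (id, title, abstract) records from the given files into a pandas DataFrame,
# processing the text with the supplied preprocessor (simpleSplit by default).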
def __init__(self, filenames=None, preprocessor=simpleSplit):
self.filenames = filenames
self.preprocessor = preprocessor
# self.columns = ["id", "title", "abstract"]
# self.df = pd.DataFrame(columns=self.columns)
self.df = pd.DataFrame()