#=========== packages/modules that are used here ==============================
import re
import tweepy as tw
import pandas as pd
from datetime import datetime, timedelta
from os import path
import matplotlib.pyplot as plt
import matplotlib as mpl
import matplotlib.dates as mdates
import statsmodels.api as sm
import stock_price as sp
import sentiment_analysis as sa
#================ code =======================================================
def clean_tweet_text(tweet):
'''
Cleans a tweet's text by removing @mentions, '#' marks, retweet (RT) prefixes,
empty lines, and hyperlinks.
Parameters
----------
tweet(string): a twitter tweet
Returns
-------
Returns a cleaned tweet text.
'''
tweet = re.sub(r'@[A-Za-z0-9]+','', tweet) # remove @mentions
tweet = re.sub(r'#', '', tweet) # Remove '#' hash tag
tweet = re.sub(r'RT [@:]', '', tweet) # remove RT
tweet = re.sub(r'\n', '', tweet) # remove an empty line
tweet = re.sub('https?://[A-Za-z0-9./]+','', tweet) # remove hyperlink
return tweet
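# Illustrative usage of clean_tweet_text (not part of the original script):
# clean_tweet_text("RT @user: Great earnings! #AAPL https://t.co/abc")
# returns ' Great earnings! AAPL ' -- the mention, '#' mark, 'RT :' prefix and
# the link are stripped by the regexes above; surrounding spaces are kept.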
def collect_tweet(ticker):
'''
Imports tweets about a stock over the past 8 days, up to 200 tweets per day,
and saves the cleaned tweets as a CSV file.
Parameters
----------
ticker(string): a ticker symbol
Returns
-------
Returns the Twitter output CSV file name.
'''
# output file name
date_today = datetime.today()
today_str = date_today.strftime("%Y%m%d")
fout = ticker + '_Tweets_' + today_str + '.csv'
# if output file already exists, stop and return the filename
if path.exists(fout) == True:
return fout
# add your Twitter API key info
consumerKey=''
consumerSecret = ''
accessToken =''
accessTokenSecret =''
# set up the authentication object
auth = tw.OAuthHandler(consumerKey, consumerSecret)
auth.set_access_token(accessToken, accessTokenSecret)
api = tw.API(auth, wait_on_rate_limit = True)
# Collect tweets with keyword, English language for the past 8 days
search_word = ticker + " " + sp.get_co_short_name(ticker) #keywords
tweets_list = []
# search up to 200 tweets per day, for past 8 days
for i in range(0, 8):
date_until = datetime.today() - timedelta(days= i)
date_until_str = date_until.strftime("%Y-%m-%d") #end date for search
date_since = date_until - timedelta(days= 1)
date_since_str = date_since.strftime("%Y-%m-%d") #start date for search
# collect tweets up to 200 tweets per day
tweets = tw.Cursor(api.search,
q = search_word,
lang = "en",
since = date_since_str,
until = date_until_str,
result_type = "mixed",
tweet_mode = 'extended').items(200)
for tweet in tweets:
#tweet date
tweet_date = tweet.created_at.strftime("%Y-%m-%d")
#get full text
if tweet.truncated == True:
full_tweet_text = tweet.retweeted_status.full_text
else:
full_tweet_text = tweet.full_text
#clean text
clean_text = clean_tweet_text(full_tweet_text)
tweets_list.append([tweet_date, clean_text])
# save in a pandas DataFrame
df = pd.DataFrame(tweets_list, columns = ['Date','Tweet Text'])
# save in csv
df.to_csv(fout, index = False, encoding='utf-8-sig')
return fout
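# Illustrative call (assumes valid Twitter API credentials were filled in above):
# collect_tweet('AAPL') gathers up to 200 tweets/day for 8 days and returns a
# file name of the form 'AAPL_Tweets_YYYYMMDD.csv' dated with today's date.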
def calc_twitter_sa(ticker):
'''
Calculates sentiment scores for all stock tweets and adds the scores (positive,
negative, neutral, compound) as 4 new columns to the original CSV file.
Parameters
----------
ticker(string): a ticker symbol
Returns
-------
Returns the Twitter output CSV file name.
'''
# output file name
date_today = datetime.today()
today_str = date_today.strftime("%Y%m%d")
fint = ticker + '_Tweets_' + today_str + '.csv'
# if twitter file N/A, collect tweets first.
if path.exists(fint) == False:
collect_tweet(ticker)
#load tweet data
df = pd.read_csv(fint)
# if a column called 'Compound' already exists in the file, sentiment scores
# have already been added, so return the file name and stop.
if 'Compound' in df.columns:
return fint
# Get the sentiment scores for each trading day
compound = [] # compound score
neg = [] # negative score
neu = [] # neutral score
pos = [] # positive score
# calculate compound, negative, neutral, positive for each day by loop
for i in range(0, len(df['Tweet Text'])):
SIA = sa.getSIA(df['Tweet Text'][i])
compound.append(SIA['compound'])
neg.append(SIA['neg'])
neu.append(SIA['neu'])
pos.append(SIA['pos'])
#store the sentiment scores in the data frame
df['Compound'] = compound
df['Negative'] = neg
df['Neutral'] = neu
df['Positive'] = pos
#save in csv
df.to_csv(fint, index = False, encoding='utf-8-sig')
return fint
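# Note: calc_twitter_sa assumes sentiment_analysis.getSIA returns a dict with
# 'neg', 'neu', 'pos' and 'compound' keys -- the same shape as NLTK VADER's
# SentimentIntensityAnalyzer().polarity_scores(text) output.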
def merge_twitter_price(ticker):
'''
Merges tweet sentiment score with stock price during the past 8 days.
Parameters
----------
ticker(string): a ticker symbol
Returns
-------
Returns the merged DataFrame that contains date, stock price and tweet
sentiment score (compound score).
'''
date_today = datetime.today()
from_date = date_today - timedelta(days = 8)
today_str = date_today.strftime("%Y-%m-%d") #today's date
from_date_str = from_date.strftime("%Y-%m-%d") #date that is 8 days ago
#tweet file name
fint_1 = ticker + '_Tweets_' + today_str.replace('-','') + '.csv'
#stock price file name, where from_date is 8 days ago, and to_date is today.
fint_2 = ticker + '_HisPrice_' + from_date_str.replace('-','') +\
'_' + today_str.replace('-','') + '.csv'
print("\n loading tweets about " + ticker +" ... (this can take a while)")
#collect tweets and calculate sentiment scores if tweet file N/A
if path.exists(fint_1) == False:
calc_twitter_sa(ticker)
#get stock price if price file N/A
if path.exists(fint_2) == False:
sp.load_stock_price(ticker, from_date_str, today_str)
#add price movement direction and change in % for the price file
sp.add_price_move(ticker, from_date_str,today_str)
#load twitter data
df_twitter = pd.read_csv(fint_1, index_col = 0)
# get average Compound score by date and save it as a new DataFrame
df_twitter_new = df_twitter.groupby(['Date']).agg({'Compound':'mean',
})
#load price data
df_price = pd.read_csv(fint_2, index_col = 0)
# inner join by date
# both frames use Date as their index, so join on the index
df_twitter_price = df_price.merge(df_twitter_new, how = 'inner',
left_index = True, right_index = True)
return df_twitter_price
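# Illustrative shape of the merged frame (column names depend on the files
# written by the stock_price module): a Date index, price columns such as
# 'Open', 'Adj Close' and 'PriceChg', plus the daily mean 'Compound' score.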
def plot_twitter_sa_price(ticker):
'''
Plots tweet sentiment score vs stock price movement over the past 8 days
Parameters
----------
ticker(string): a ticker symbol
'''
# gets the merged data about tweet sentiment and price over the past 8 days
df = merge_twitter_price(ticker)
# convert index from text to date type
df.index = pd.to_datetime(df.index)
# create graph
mpl.rcParams.update(mpl.rcParamsDefault)
fig, ax = plt.subplots(figsize=(6, 3))
# plot price change percentage over time
ax.set_xlabel('Date').set_size(10)
ax.set_ylabel('Price Change %', color = 'brown')
# bar chart for price change in %
ax.bar(df.index, df['PriceChg'], color = 'lightsalmon', label='Price Change %' )
# plot polarity score percentage over time on a secondary y-axis
ax2 = ax.twinx()
ax2.plot(df.index, df['Compound'], color = 'royalblue',label='Compound Score')
ax2.set_ylabel('Compound Score', color ='royalblue')
# set axis limit and format labels/ticks
ax.xaxis.set_major_formatter(mdates.DateFormatter('%b-%d'))
ax.set_title(ticker.upper() +" - price change% vs tweets' sentiment").set_size(10)
ax.tick_params(axis= "x", labelsize = 8)
ax.tick_params(axis= "y", labelsize = 8)
ax2.tick_params(axis= "y", labelsize = 8)
# display a single legend when there are multiple y-axes
handles,labels = [],[]
for ax in fig.axes:
for h,l in zip(*ax.get_legend_handles_labels()):
handles.append(h)
labels.append(l)
plt.legend(handles,labels, loc = 2, prop={'size': 8})
fig.tight_layout()
plt.show()
def model_tweet_sa_price(ticker):
'''
Analyzes the significance of tweet sentiment score on stock price
during the past 8 days.
Parameters
----------
ticker(string): a ticker symbol
'''
# load the merged dataset that contains price and tweet sentiment scores
df = merge_twitter_price(ticker)
#pick opening price and Compound (polarity) score as X variables
X = df[['Open','Compound']]
#pick adj close price as outcome variable
Y = df['Adj Close']
X = sm.add_constant(X) # adding a constant
#assign OLS model
model = sm.OLS(Y, X).fit()
predictions = model.predict(X)
#print model summary
print_model = model.summary()
print(print_model)
# plot
mpl.rcParams.update(mpl.rcParamsDefault)
df.index = pd.to_datetime(df.index)
fig, ax = plt.subplots(figsize=(6, 3))
#plot actual stock price
ax.plot(df.index, Y.values, '-', color = 'royalblue', label = 'actual closing price')
#plot model stock price
ax.plot(df.index, predictions , '--*', color = 'darkorange', label = 'model closing price')
# format labels and ticks
ax.set_ylabel('Price').set_size(10)
ax.set_xlabel('Date').set_size(10)
ax.tick_params(axis = "x", labelsize = 8 , rotation = 0)
ax.xaxis.set_major_formatter(mdates.DateFormatter('%m-%d'))
ax.tick_params(axis = "y", labelsize = 8 )
ax.set_title(ticker +': Actual closing price vs OLS Model price').set_size(10)
plt.legend(loc=4, prop={"size":8})
plt.tight_layout()
plt.show()
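# Besides the printed summary, the fitted statsmodels results object exposes
# e.g. model.params (coefficients), model.pvalues and model.rsquared, which
# can be inspected programmatically if needed.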
def plot_twitter_sa(ticker):
'''
Creates a plot that displays a stock's tweets' sentiment score
movement over the past 8 days
Parameters
----------
ticker(string): a ticker symbol
'''
# tweet output file name
date_today = datetime.today()
today_str = date_today.strftime("%Y%m%d")
fint = ticker + '_Tweets_' + today_str + '.csv'
print("\n loading tweets about " + ticker + " ... (this can take a while)")
#collect tweets and calculate sentiment score if tweet file N/A
if path.exists(fint) == False:
calc_twitter_sa(ticker)
# load tweet dataset
df = pd.read_csv(fint)
#
# Copyright 2015 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tests for the zipline.assets package
"""
from contextlib import contextmanager
from datetime import timedelta
from functools import partial
import pickle
import sys
from types import GetSetDescriptorType
from unittest import TestCase
import uuid
import warnings
from nose_parameterized import parameterized
from numpy import full, int32, int64
import pandas as pd
from pandas.util.testing import assert_frame_equal
from six import PY2, viewkeys
import sqlalchemy as sa
from zipline.assets import (
Asset,
Equity,
Future,
AssetDBWriter,
AssetFinder,
)
from zipline.assets.synthetic import (
make_commodity_future_info,
make_rotating_equity_info,
make_simple_equity_info,
)
from six import itervalues, integer_types
from toolz import valmap
from zipline.assets.asset_writer import (
check_version_info,
write_version_info,
_futures_defaults,
SQLITE_MAX_VARIABLE_NUMBER,
)
from zipline.assets.asset_db_schema import ASSET_DB_VERSION
from zipline.assets.asset_db_migrations import (
downgrade
)
from zipline.errors import (
EquitiesNotFound,
FutureContractsNotFound,
MultipleSymbolsFound,
MultipleValuesFoundForField,
MultipleValuesFoundForSid,
NoValueForSid,
AssetDBVersionError,
SidsNotFound,
SymbolNotFound,
AssetDBImpossibleDowngrade,
ValueNotFoundForField,
)
from zipline.testing import (
all_subindices,
empty_assets_db,
parameter_space,
tmp_assets_db,
)
from zipline.testing.predicates import assert_equal
from zipline.testing.fixtures import (
WithAssetFinder,
ZiplineTestCase,
WithTradingCalendars,
)
from zipline.utils.range import range
@contextmanager
def build_lookup_generic_cases(asset_finder_type):
"""
Generate test cases for the type of asset finder specified by
asset_finder_type for test_lookup_generic.
"""
unique_start = pd.Timestamp('2013-01-01', tz='UTC')
unique_end = pd.Timestamp('2014-01-01', tz='UTC')
dupe_0_start = pd.Timestamp('2013-01-01', tz='UTC')
dupe_0_end = dupe_0_start + timedelta(days=1)
dupe_1_start = pd.Timestamp('2013-01-03', tz='UTC')
dupe_1_end = dupe_1_start + timedelta(days=1)
equities = pd.DataFrame.from_records(
[
{
'sid': 0,
'symbol': 'duplicated',
'start_date': dupe_0_start.value,
'end_date': dupe_0_end.value,
'exchange': 'TEST',
},
{
'sid': 1,
'symbol': 'duplicated',
'start_date': dupe_1_start.value,
'end_date': dupe_1_end.value,
'exchange': 'TEST',
},
{
'sid': 2,
'symbol': 'unique',
'start_date': unique_start.value,
'end_date': unique_end.value,
'exchange': 'TEST',
},
],
index='sid'
)
fof14_sid = 10000
futures = pd.DataFrame.from_records(
[
{
'sid': fof14_sid,
'symbol': 'FOF14',
'root_symbol': 'FO',
'start_date': unique_start.value,
'end_date': unique_end.value,
'exchange': 'FUT',
},
],
index='sid'
)
root_symbols = pd.DataFrame({
'root_symbol': ['FO'],
'root_symbol_id': [1],
'exchange': ['CME'],
})
with tmp_assets_db(
equities=equities, futures=futures, root_symbols=root_symbols) \
as assets_db:
finder = asset_finder_type(assets_db)
dupe_0, dupe_1, unique = assets = [
finder.retrieve_asset(i)
for i in range(3)
]
fof14 = finder.retrieve_asset(fof14_sid)
cf = finder.create_continuous_future(
root_symbol=fof14.root_symbol, offset=0, roll_style='volume',
)
dupe_0_start = dupe_0.start_date
dupe_1_start = dupe_1.start_date
yield (
##
# Scalars
# Asset object
(finder, assets[0], None, assets[0]),
(finder, assets[1], None, assets[1]),
(finder, assets[2], None, assets[2]),
# int
(finder, 0, None, assets[0]),
(finder, 1, None, assets[1]),
(finder, 2, None, assets[2]),
# Duplicated symbol with resolution date
(finder, 'DUPLICATED', dupe_0_start, dupe_0),
(finder, 'DUPLICATED', dupe_1_start, dupe_1),
# Unique symbol, with or without resolution date.
(finder, 'UNIQUE', unique_start, unique),
(finder, 'UNIQUE', None, unique),
# Futures
(finder, 'FOF14', None, fof14),
# Future symbols should be unique, but including an as_of date
# makes sure that code path is exercised.
(finder, 'FOF14', unique_start, fof14),
# Futures int
(finder, fof14_sid, None, fof14),
# Future symbols should be unique, but including an as_of date
# makes sure that code path is exercised.
(finder, fof14_sid, unique_start, fof14),
# ContinuousFuture
(finder, cf, None, cf),
##
# Iterables
# Iterables of Asset objects.
(finder, assets, None, assets),
(finder, iter(assets), None, assets),
# Iterables of ints
(finder, (0, 1), None, assets[:-1]),
(finder, iter((0, 1)), None, assets[:-1]),
# Iterables of symbols.
(finder, ('DUPLICATED', 'UNIQUE'), dupe_0_start, [dupe_0, unique]),
(finder, ('DUPLICATED', 'UNIQUE'), dupe_1_start, [dupe_1, unique]),
# Mixed types
(finder,
('DUPLICATED', 2, 'UNIQUE', 1, dupe_1),
dupe_0_start,
[dupe_0, assets[2], unique, assets[1], dupe_1]),
# Futures and Equities
(finder, ['FOF14', 0], None, [fof14, assets[0]]),
# ContinuousFuture and Equity
(finder, [cf, 0], None, [cf, assets[0]]),
)
class AssetTestCase(TestCase):
# Dynamically list the Asset properties we want to test.
asset_attrs = [name for name, value in vars(Asset).items()
if isinstance(value, GetSetDescriptorType)]
# Very wow
asset = Asset(
1337,
symbol="DOGE",
asset_name="DOGECOIN",
start_date=pd.Timestamp('2013-12-08 9:31AM', tz='UTC'),
end_date=pd.Timestamp('2014-06-25 11:21AM', tz='UTC'),
first_traded=pd.Timestamp('2013-12-08 9:31AM', tz='UTC'),
auto_close_date=pd.Timestamp('2014-06-26 11:21AM', tz='UTC'),
exchange='THE MOON',
)
asset3 = Asset(3, exchange="test")
asset4 = Asset(4, exchange="test")
asset5 = Asset(5, exchange="still testing")
def test_asset_object(self):
the_asset = Asset(5061, exchange="bar")
self.assertEquals({5061: 'foo'}[the_asset], 'foo')
self.assertEquals(the_asset, 5061)
self.assertEquals(5061, the_asset)
self.assertEquals(the_asset, the_asset)
self.assertEquals(int(the_asset), 5061)
self.assertEquals(str(the_asset), 'Asset(5061)')
def test_to_and_from_dict(self):
asset_from_dict = Asset.from_dict(self.asset.to_dict())
for attr in self.asset_attrs:
self.assertEqual(
getattr(self.asset, attr), getattr(asset_from_dict, attr),
)
def test_asset_is_pickleable(self):
asset_unpickled = pickle.loads(pickle.dumps(self.asset))
for attr in self.asset_attrs:
self.assertEqual(
getattr(self.asset, attr), getattr(asset_unpickled, attr),
)
def test_asset_comparisons(self):
s_23 = Asset(23, exchange="test")
s_24 = Asset(24, exchange="test")
self.assertEqual(s_23, s_23)
self.assertEqual(s_23, 23)
self.assertEqual(23, s_23)
self.assertEqual(int32(23), s_23)
self.assertEqual(int64(23), s_23)
self.assertEqual(s_23, int32(23))
self.assertEqual(s_23, int64(23))
# Check all int types (includes long on py2):
for int_type in integer_types:
self.assertEqual(int_type(23), s_23)
self.assertEqual(s_23, int_type(23))
self.assertNotEqual(s_23, s_24)
self.assertNotEqual(s_23, 24)
self.assertNotEqual(s_23, "23")
self.assertNotEqual(s_23, 23.5)
self.assertNotEqual(s_23, [])
self.assertNotEqual(s_23, None)
# Compare to a value that doesn't fit into a platform int:
self.assertNotEqual(s_23, sys.maxsize + 1)
self.assertLess(s_23, s_24)
self.assertLess(s_23, 24)
self.assertGreater(24, s_23)
self.assertGreater(s_24, s_23)
def test_lt(self):
self.assertTrue(self.asset3 < self.asset4)
self.assertFalse(self.asset4 < self.asset4)
self.assertFalse(self.asset5 < self.asset4)
def test_le(self):
self.assertTrue(self.asset3 <= self.asset4)
self.assertTrue(self.asset4 <= self.asset4)
self.assertFalse(self.asset5 <= self.asset4)
def test_eq(self):
self.assertFalse(self.asset3 == self.asset4)
self.assertTrue(self.asset4 == self.asset4)
self.assertFalse(self.asset5 == self.asset4)
def test_ge(self):
self.assertFalse(self.asset3 >= self.asset4)
self.assertTrue(self.asset4 >= self.asset4)
self.assertTrue(self.asset5 >= self.asset4)
def test_gt(self):
self.assertFalse(self.asset3 > self.asset4)
self.assertFalse(self.asset4 > self.asset4)
self.assertTrue(self.asset5 > self.asset4)
def test_type_mismatch(self):
if sys.version_info.major < 3:
self.assertIsNotNone(self.asset3 < 'a')
self.assertIsNotNone('a' < self.asset3)
else:
with self.assertRaises(TypeError):
self.asset3 < 'a'
with self.assertRaises(TypeError):
'a' < self.asset3
class TestFuture(WithAssetFinder, ZiplineTestCase):
@classmethod
def make_futures_info(cls):
return pd.DataFrame.from_dict(
{
2468: {
'symbol': 'OMH15',
'root_symbol': 'OM',
'notice_date': pd.Timestamp('2014-01-20', tz='UTC'),
'expiration_date': pd.Timestamp('2014-02-20', tz='UTC'),
'auto_close_date': pd.Timestamp('2014-01-18', tz='UTC'),
'tick_size': .01,
'multiplier': 500.0,
'exchange': "TEST",
},
0: {
'symbol': 'CLG06',
'root_symbol': 'CL',
'start_date': pd.Timestamp('2005-12-01', tz='UTC'),
'notice_date': pd.Timestamp('2005-12-20', tz='UTC'),
'expiration_date': pd.Timestamp('2006-01-20', tz='UTC'),
'multiplier': 1.0,
'exchange': 'TEST',
},
},
orient='index',
)
@classmethod
def init_class_fixtures(cls):
super(TestFuture, cls).init_class_fixtures()
cls.future = cls.asset_finder.lookup_future_symbol('OMH15')
cls.future2 = cls.asset_finder.lookup_future_symbol('CLG06')
def test_str(self):
strd = str(self.future)
self.assertEqual("Future(2468 [OMH15])", strd)
def test_repr(self):
reprd = repr(self.future)
self.assertIn("Future", reprd)
self.assertIn("2468", reprd)
self.assertIn("OMH15", reprd)
self.assertIn("root_symbol=%s'OM'" % ('u' if PY2 else ''), reprd)
self.assertIn(
"notice_date=Timestamp('2014-01-20 00:00:00+0000', tz='UTC')",
reprd,
)
self.assertIn(
"expiration_date=Timestamp('2014-02-20 00:00:00+0000'",
reprd,
)
self.assertIn(
"auto_close_date=Timestamp('2014-01-18 00:00:00+0000'",
reprd,
)
self.assertIn("tick_size=0.01", reprd)
self.assertIn("multiplier=500", reprd)
def test_reduce(self):
assert_equal(
pickle.loads(pickle.dumps(self.future)).to_dict(),
self.future.to_dict(),
)
def test_to_and_from_dict(self):
dictd = self.future.to_dict()
for field in _futures_defaults.keys():
self.assertTrue(field in dictd)
from_dict = Future.from_dict(dictd)
self.assertTrue(isinstance(from_dict, Future))
self.assertEqual(self.future, from_dict)
def test_root_symbol(self):
self.assertEqual('OM', self.future.root_symbol)
def test_lookup_future_symbol(self):
"""
Test the lookup_future_symbol method.
"""
om = TestFuture.asset_finder.lookup_future_symbol('OMH15')
self.assertEqual(om.sid, 2468)
self.assertEqual(om.symbol, 'OMH15')
self.assertEqual(om.root_symbol, 'OM')
self.assertEqual(om.notice_date, pd.Timestamp('2014-01-20', tz='UTC'))
self.assertEqual(om.expiration_date,
pd.Timestamp('2014-02-20', tz='UTC'))
self.assertEqual(om.auto_close_date,
pd.Timestamp('2014-01-18', tz='UTC'))
cl = TestFuture.asset_finder.lookup_future_symbol('CLG06')
self.assertEqual(cl.sid, 0)
self.assertEqual(cl.symbol, 'CLG06')
self.assertEqual(cl.root_symbol, 'CL')
self.assertEqual(cl.start_date, pd.Timestamp('2005-12-01', tz='UTC'))
self.assertEqual(cl.notice_date, pd.Timestamp('2005-12-20', tz='UTC'))
self.assertEqual(cl.expiration_date,
pd.Timestamp('2006-01-20', tz='UTC'))
with self.assertRaises(SymbolNotFound):
TestFuture.asset_finder.lookup_future_symbol('')
with self.assertRaises(SymbolNotFound):
TestFuture.asset_finder.lookup_future_symbol('#&?!')
with self.assertRaises(SymbolNotFound):
TestFuture.asset_finder.lookup_future_symbol('FOOBAR')
with self.assertRaises(SymbolNotFound):
TestFuture.asset_finder.lookup_future_symbol('XXX99')
class AssetFinderTestCase(WithTradingCalendars, ZiplineTestCase):
asset_finder_type = AssetFinder
def write_assets(self, **kwargs):
self._asset_writer.write(**kwargs)
def init_instance_fixtures(self):
super(AssetFinderTestCase, self).init_instance_fixtures()
conn = self.enter_instance_context(empty_assets_db())
self._asset_writer = AssetDBWriter(conn)
self.asset_finder = self.asset_finder_type(conn)
def test_blocked_lookup_symbol_query(self):
# we will try to query for more variables than sqlite supports
# to make sure we are properly chunking on the client side
as_of = pd.Timestamp('2013-01-01', tz='UTC')
# we need more sids than we can query from sqlite
nsids = SQLITE_MAX_VARIABLE_NUMBER + 10
sids = range(nsids)
frame = pd.DataFrame.from_records(
[
{
'sid': sid,
'symbol': 'TEST.%d' % sid,
'start_date': as_of.value,
'end_date': as_of.value,
'exchange': uuid.uuid4().hex
}
for sid in sids
]
)
self.write_assets(equities=frame)
assets = self.asset_finder.retrieve_equities(sids)
assert_equal(viewkeys(assets), set(sids))
def test_lookup_symbol_delimited(self):
as_of = pd.Timestamp('2013-01-01', tz='UTC')
frame = pd.DataFrame.from_records(
[
{
'sid': i,
'symbol': 'TEST.%d' % i,
'company_name': "company%d" % i,
'start_date': as_of.value,
'end_date': as_of.value,
'exchange': uuid.uuid4().hex
}
for i in range(3)
]
)
self.write_assets(equities=frame)
finder = self.asset_finder
asset_0, asset_1, asset_2 = (
finder.retrieve_asset(i) for i in range(3)
)
# we do it twice to catch caching bugs
for i in range(2):
with self.assertRaises(SymbolNotFound):
finder.lookup_symbol('TEST', as_of)
with self.assertRaises(SymbolNotFound):
finder.lookup_symbol('TEST1', as_of)
# '@' is not a supported delimiter
with self.assertRaises(SymbolNotFound):
finder.lookup_symbol('TEST@1', as_of)
# Adding an unnecessary fuzzy shouldn't matter.
for fuzzy_char in ['-', '/', '_', '.']:
self.assertEqual(
asset_1,
finder.lookup_symbol('TEST%s1' % fuzzy_char, as_of)
)
def test_lookup_symbol_fuzzy(self):
metadata = pd.DataFrame.from_records([
{'symbol': 'PRTY_HRD', 'exchange': "TEST"},
{'symbol': 'BRKA', 'exchange': "TEST"},
{'symbol': 'BRK_A', 'exchange': "TEST"},
])
self.write_assets(equities=metadata)
finder = self.asset_finder
dt = pd.Timestamp('2013-01-01', tz='UTC')
# Try combos of looking up PRTYHRD with and without a time or fuzzy
# Both non-fuzzys get no result
with self.assertRaises(SymbolNotFound):
finder.lookup_symbol('PRTYHRD', None)
with self.assertRaises(SymbolNotFound):
finder.lookup_symbol('PRTYHRD', dt)
# Both fuzzys work
self.assertEqual(0, finder.lookup_symbol('PRTYHRD', None, fuzzy=True))
self.assertEqual(0, finder.lookup_symbol('PRTYHRD', dt, fuzzy=True))
# Try combos of looking up PRTY_HRD, all returning sid 0
self.assertEqual(0, finder.lookup_symbol('PRTY_HRD', None))
self.assertEqual(0, finder.lookup_symbol('PRTY_HRD', dt))
self.assertEqual(0, finder.lookup_symbol('PRTY_HRD', None, fuzzy=True))
self.assertEqual(0, finder.lookup_symbol('PRTY_HRD', dt, fuzzy=True))
# Try combos of looking up BRKA, all returning sid 1
self.assertEqual(1, finder.lookup_symbol('BRKA', None))
self.assertEqual(1, finder.lookup_symbol('BRKA', dt))
self.assertEqual(1, finder.lookup_symbol('BRKA', None, fuzzy=True))
self.assertEqual(1, finder.lookup_symbol('BRKA', dt, fuzzy=True))
# Try combos of looking up BRK_A, all returning sid 2
self.assertEqual(2, finder.lookup_symbol('BRK_A', None))
self.assertEqual(2, finder.lookup_symbol('BRK_A', dt))
self.assertEqual(2, finder.lookup_symbol('BRK_A', None, fuzzy=True))
self.assertEqual(2, finder.lookup_symbol('BRK_A', dt, fuzzy=True))
def test_lookup_symbol_change_ticker(self):
T = partial(pd.Timestamp, tz='utc')
metadata = pd.DataFrame.from_records(
[
# sid 0
{
'symbol': 'A',
'asset_name': 'Asset A',
'start_date': T('2014-01-01'),
'end_date': T('2014-01-05'),
'exchange': "TEST",
},
{
'symbol': 'B',
'asset_name': 'Asset B',
'start_date': T('2014-01-06'),
'end_date': T('2014-01-10'),
'exchange': "TEST",
},
# sid 1
{
'symbol': 'C',
'asset_name': 'Asset C',
'start_date': T('2014-01-01'),
'end_date': T('2014-01-05'),
'exchange': "TEST",
},
{
'symbol': 'A', # claiming the unused symbol 'A'
'asset_name': 'Asset A',
'start_date': T('2014-01-06'),
'end_date': T('2014-01-10'),
'exchange': "TEST",
},
],
index=[0, 0, 1, 1],
)
self.write_assets(equities=metadata)
finder = self.asset_finder
# note: these assertions walk forward in time, starting at assertions
# about ownership before the start_date and ending with assertions
# after the end_date; new assertions should be inserted in the correct
# locations
# no one held 'A' before 01
with self.assertRaises(SymbolNotFound):
finder.lookup_symbol('A', T('2013-12-31'))
# no one held 'C' before 01
with self.assertRaises(SymbolNotFound):
finder.lookup_symbol('C', T('2013-12-31'))
for asof in pd.date_range('2014-01-01', '2014-01-05', tz='utc'):
# from 01 through 05 sid 0 held 'A'
A_result = finder.lookup_symbol('A', asof)
assert_equal(
A_result,
finder.retrieve_asset(0),
msg=str(asof),
)
# The symbol and asset_name should always be the last held values
assert_equal(A_result.symbol, 'B')
assert_equal(A_result.asset_name, 'Asset B')
# from 01 through 05 sid 1 held 'C'
C_result = finder.lookup_symbol('C', asof)
assert_equal(
C_result,
finder.retrieve_asset(1),
msg=str(asof),
)
# The symbol and asset_name should always be the last held values
assert_equal(C_result.symbol, 'A')
assert_equal(C_result.asset_name, 'Asset A')
# no one held 'B' before 06
with self.assertRaises(SymbolNotFound):
finder.lookup_symbol('B', T('2014-01-05'))
# no one held 'C' after 06, however, no one has claimed it yet
# so it still maps to sid 1
assert_equal(
finder.lookup_symbol('C', T('2014-01-07')),
finder.retrieve_asset(1),
)
for asof in pd.date_range('2014-01-06', '2014-01-11', tz='utc'):
# from 06 through 10 sid 0 held 'B'
# we test through the 11th because sid 1 is the last to hold 'B'
# so it should ffill
B_result = finder.lookup_symbol('B', asof)
assert_equal(
B_result,
finder.retrieve_asset(0),
msg=str(asof),
)
assert_equal(B_result.symbol, 'B')
assert_equal(B_result.asset_name, 'Asset B')
# from 06 through 10 sid 1 held 'A'
# we test through the 11th because sid 1 is the last to hold 'A'
# so it should ffill
A_result = finder.lookup_symbol('A', asof)
assert_equal(
A_result,
finder.retrieve_asset(1),
msg=str(asof),
)
assert_equal(A_result.symbol, 'A')
assert_equal(A_result.asset_name, 'Asset A')
def test_lookup_symbol(self):
# Incrementing by two so that start and end dates for each
# generated Asset don't overlap (each Asset's end_date is the
# day after its start date.)
dates = pd.date_range('2013-01-01', freq='2D', periods=5, tz='UTC')
df = pd.DataFrame.from_records(
[
{
'sid': i,
'symbol': 'existing',
'start_date': date.value,
'end_date': (date + timedelta(days=1)).value,
'exchange': 'NYSE',
}
for i, date in enumerate(dates)
]
)
self.write_assets(equities=df)
finder = self.asset_finder
for _ in range(2): # Run checks twice to test for caching bugs.
with self.assertRaises(SymbolNotFound):
finder.lookup_symbol('NON_EXISTING', dates[0])
with self.assertRaises(MultipleSymbolsFound):
finder.lookup_symbol('EXISTING', None)
for i, date in enumerate(dates):
# Verify that we correctly resolve multiple symbols using
# the supplied date
result = finder.lookup_symbol('EXISTING', date)
self.assertEqual(result.symbol, 'EXISTING')
self.assertEqual(result.sid, i)
def test_fail_to_write_overlapping_data(self):
df = pd.DataFrame.from_records(
[
{
'sid': 1,
'symbol': 'multiple',
'start_date': pd.Timestamp('2010-01-01'),
'end_date': pd.Timestamp('2012-01-01'),
'exchange': 'NYSE'
},
# Same as asset 1, but with a later end date.
{
'sid': 2,
'symbol': 'multiple',
'start_date': pd.Timestamp('2010-01-01'),
'end_date': pd.Timestamp('2013-01-01'),
'exchange': 'NYSE'
},
# Same as asset 1, but with a later start_date
{
'sid': 3,
'symbol': 'multiple',
'start_date': pd.Timestamp('2011-01-01'),
'end_date': pd.Timestamp('2012-01-01'),
'exchange': 'NYSE'
},
]
)
with self.assertRaises(ValueError) as e:
self.write_assets(equities=df)
self.assertEqual(
str(e.exception),
"Ambiguous ownership for 1 symbol, multiple assets held the"
" following symbols:\n"
"MULTIPLE:\n"
" intersections: (('2010-01-01 00:00:00', '2012-01-01 00:00:00'),"
" ('2011-01-01 00:00:00', '2012-01-01 00:00:00'))\n"
" start_date end_date\n"
" sid \n"
" 1 2010-01-01 2012-01-01\n"
" 2 2010-01-01 2013-01-01\n"
" 3 2011-01-01 2012-01-01"
)
def test_lookup_generic(self):
"""
Ensure that lookup_generic works with various permutations of inputs.
"""
with build_lookup_generic_cases(self.asset_finder_type) as cases:
for finder, symbols, reference_date, expected in cases:
results, missing = finder.lookup_generic(symbols,
reference_date)
self.assertEqual(results, expected)
self.assertEqual(missing, [])
def test_lookup_none_raises(self):
"""
If lookup_symbol is vectorized across multiple symbols, and one of them
is None, want to raise a TypeError.
"""
with self.assertRaises(TypeError):
self.asset_finder.lookup_symbol(None, pd.Timestamp('2013-01-01'))
def test_lookup_mult_are_one(self):
"""
Ensure that multiple symbols that return the same sid are collapsed to
a single returned asset.
"""
date = pd.Timestamp('2013-01-01', tz='UTC')
df = pd.DataFrame.from_records(
[
{
'sid': 1,
'symbol': symbol,
'start_date': date.value,
'end_date': (date + timedelta(days=30)).value,
'exchange': 'NYSE',
}
for symbol in ('FOOB', 'FOO_B')
]
)
self.write_assets(equities=df)
finder = self.asset_finder
# If we are able to resolve this with any result, it means that we did not
# raise a MultipleSymbolsFound error.
result = finder.lookup_symbol('FOO/B', date + timedelta(1), fuzzy=True)
self.assertEqual(result.sid, 1)
def test_endless_multiple_resolves(self):
"""
Situation:
1. Asset 1 w/ symbol FOOB changes to FOO_B, and then is delisted.
2. Asset 2 is listed with symbol FOO_B.
If someone asks for FOO_B with fuzzy matching after 2 has been listed,
they should be able to correctly get 2.
"""
date = pd.Timestamp('2013-01-01', tz='UTC')
df = pd.DataFrame.from_records(
[
{
'sid': 1,
'symbol': 'FOOB',
'start_date': date.value,
'end_date': date.max.value,
'exchange': 'NYSE',
},
{
'sid': 1,
'symbol': 'FOO_B',
'start_date': (date + timedelta(days=31)).value,
'end_date': (date + timedelta(days=60)).value,
'exchange': 'NYSE',
},
{
'sid': 2,
'symbol': 'FOO_B',
'start_date': (date + timedelta(days=61)).value,
'end_date': date.max.value,
'exchange': 'NYSE',
},
]
)
self.write_assets(equities=df)
finder = self.asset_finder
# If we are able to resolve this with any result, it means that we did not
# raise a MultipleSymbolsFound error.
result = finder.lookup_symbol(
'FOO/B',
date + timedelta(days=90),
fuzzy=True
)
self.assertEqual(result.sid, 2)
def test_lookup_generic_handle_missing(self):
data = pd.DataFrame.from_records(
[
{
'sid': 0,
'symbol': 'real',
'start_date': pd.Timestamp('2013-1-1', tz='UTC'),
'end_date': pd.Timestamp('2014-1-1', tz='UTC'),
'exchange': 'TEST',
},
{
'sid': 1,
'symbol': 'also_real',
'start_date': pd.Timestamp('2013-1-1', tz='UTC'),
'end_date': pd.Timestamp('2014-1-1', tz='UTC'),
'exchange': 'TEST',
},
# Sid whose end date is before our query date. We should
# still correctly find it.
{
'sid': 2,
'symbol': 'real_but_old',
'start_date': pd.Timestamp('2002-1-1', tz='UTC'),
'end_date': pd.Timestamp('2003-1-1', tz='UTC'),
'exchange': 'TEST',
},
# Sid whose start_date is **after** our query date. We should
# **not** find it.
{
'sid': 3,
'symbol': 'real_but_in_the_future',
'start_date': pd.Timestamp('2014-1-1', tz='UTC'),
'end_date': pd.Timestamp('2020-1-1', tz='UTC'),
'exchange': 'THE FUTURE',
},
]
)
self.write_assets(equities=data)
finder = self.asset_finder
results, missing = finder.lookup_generic(
['REAL', 1, 'FAKE', 'REAL_BUT_OLD', 'REAL_BUT_IN_THE_FUTURE'],
pd.Timestamp('2013-02-01', tz='UTC'),
)
self.assertEqual(len(results), 3)
self.assertEqual(results[0].symbol, 'REAL')
self.assertEqual(results[0].sid, 0)
self.assertEqual(results[1].symbol, 'ALSO_REAL')
self.assertEqual(results[1].sid, 1)
self.assertEqual(results[2].symbol, 'REAL_BUT_OLD')
self.assertEqual(results[2].sid, 2)
self.assertEqual(len(missing), 2)
self.assertEqual(missing[0], 'FAKE')
self.assertEqual(missing[1], 'REAL_BUT_IN_THE_FUTURE')
def test_security_dates_warning(self):
# Build an asset with an end_date
eq_end = pd.Timestamp('2012-01-01', tz='UTC')
equity_asset = Equity(1, symbol="TESTEQ", end_date=eq_end,
exchange="TEST")
# Catch all warnings
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered
warnings.simplefilter("always")
equity_asset.security_start_date
equity_asset.security_end_date
equity_asset.security_name
# Verify the warning
self.assertEqual(3, len(w))
for warning in w:
self.assertTrue(issubclass(warning.category,
DeprecationWarning))
def test_map_identifier_index_to_sids(self):
# Build an empty finder and some Assets
dt = pd.Timestamp('2014-01-01', tz='UTC')
finder = self.asset_finder
asset1 = Equity(1, symbol="AAPL", exchange="TEST")
asset2 = Equity(2, symbol="GOOG", exchange="TEST")
asset200 = Future(200, symbol="CLK15", exchange="TEST")
asset201 = Future(201, symbol="CLM15", exchange="TEST")
# Check for correct mapping and types
pre_map = [asset1, asset2, asset200, asset201]
post_map = finder.map_identifier_index_to_sids(pre_map, dt)
self.assertListEqual([1, 2, 200, 201], post_map)
for sid in post_map:
self.assertIsInstance(sid, int)
# Change order and check mapping again
pre_map = [asset201, asset2, asset200, asset1]
post_map = finder.map_identifier_index_to_sids(pre_map, dt)
self.assertListEqual([201, 2, 200, 1], post_map)
def test_compute_lifetimes(self):
num_assets = 4
trading_day = self.trading_calendar.day
first_start = pd.Timestamp('2015-04-01', tz='UTC')
frame = make_rotating_equity_info(
num_assets=num_assets,
first_start=first_start,
frequency=trading_day,
periods_between_starts=3,
asset_lifetime=5
)
self.write_assets(equities=frame)
finder = self.asset_finder
all_dates = pd.date_range(
start=first_start,
end=frame.end_date.max(),
freq=trading_day,
)
for dates in all_subindices(all_dates):
expected_with_start_raw = full(
shape=(len(dates), num_assets),
fill_value=False,
dtype=bool,
)
expected_no_start_raw = full(
shape=(len(dates), num_assets),
fill_value=False,
dtype=bool,
)
for i, date in enumerate(dates):
it = frame[['start_date', 'end_date']].itertuples()
for j, start, end in it:
# This way of doing the checks is redundant, but very
# clear.
if start <= date <= end:
expected_with_start_raw[i, j] = True
if start < date:
expected_no_start_raw[i, j] = True
expected_with_start = pd.DataFrame(
data=expected_with_start_raw,
index=dates,
columns=frame.index.values,
)
result = finder.lifetimes(dates, include_start_date=True)
assert_frame_equal(result, expected_with_start)
expected_no_start = pd.DataFrame(
data=expected_no_start_raw,
index=dates,
columns=frame.index.values,
)
result = finder.lifetimes(dates, include_start_date=False)
assert_frame_equal(result, expected_no_start)
def test_sids(self):
# Ensure that the sids property of the AssetFinder is functioning
self.write_assets(equities=make_simple_equity_info(
[0, 1, 2],
pd.Timestamp('2014-01-01'),
pd.Timestamp('2014-01-02'),
))
self.assertEqual({0, 1, 2}, set(self.asset_finder.sids))
def test_lookup_by_supplementary_field(self):
equities = pd.DataFrame.from_records(
[
{
'sid': 0,
'symbol': 'A',
'start_date': pd.Timestamp('2013-1-1', tz='UTC'),
'end_date': pd.Timestamp('2014-1-1', tz='UTC'),
'exchange': 'TEST',
},
{
'sid': 1,
'symbol': 'B',
'start_date': pd.Timestamp('2013-1-1', tz='UTC'),
'end_date': pd.Timestamp('2014-1-1', tz='UTC'),
'exchange': 'TEST',
},
{
'sid': 2,
'symbol': 'C',
'start_date': pd.Timestamp('2013-7-1', tz='UTC'),
'end_date': pd.Timestamp('2014-1-1', tz='UTC'),
'exchange': 'TEST',
},
]
)
equity_supplementary_mappings = pd.DataFrame.from_records(
[
{
'sid': 0,
'field': 'ALT_ID',
'value': '100000000',
'start_date': pd.Timestamp('2013-1-1', tz='UTC'),
'end_date': pd.Timestamp('2013-6-28', tz='UTC'),
},
{
'sid': 1,
'field': 'ALT_ID',
'value': '100000001',
'start_date': pd.Timestamp('2013-1-1', tz='UTC'),
'end_date': pd.Timestamp('2014-1-1', tz='UTC'),
},
{
'sid': 0,
'field': 'ALT_ID',
'value': '100000002',
'start_date': pd.Timestamp('2013-7-1', tz='UTC'),
'end_date': pd.Timestamp('2014-1-1', tz='UTC'),
},
{
'sid': 2,
'field': 'ALT_ID',
'value': '100000000',
'start_date': pd.Timestamp('2013-7-1', tz='UTC'),
'end_date': pd.Timestamp('2014-1-1', tz='UTC'),
},
]
)
self.write_assets(
equities=equities,
equity_supplementary_mappings=equity_supplementary_mappings,
)
af = self.asset_finder
# Before sid 0 has changed ALT_ID.
dt = pd.Timestamp('2013-6-28', tz='UTC')
asset_0 = af.lookup_by_supplementary_field('ALT_ID', '100000000', dt)
self.assertEqual(asset_0.sid, 0)
asset_1 = af.lookup_by_supplementary_field('ALT_ID', '100000001', dt)
self.assertEqual(asset_1.sid, 1)
# We don't know about this ALT_ID yet.
with self.assertRaisesRegexp(
ValueNotFoundForField,
"Value '{}' was not found for field '{}'.".format(
'100000002',
'ALT_ID',
)
):
af.lookup_by_supplementary_field('ALT_ID', '100000002', dt)
# After all assets have ended.
dt = pd.Timestamp('2014-01-02', tz='UTC')
asset_2 = af.lookup_by_supplementary_field('ALT_ID', '100000000', dt)
self.assertEqual(asset_2.sid, 2)
asset_1 = af.lookup_by_supplementary_field('ALT_ID', '100000001', dt)
self.assertEqual(asset_1.sid, 1)
asset_0 = af.lookup_by_supplementary_field('ALT_ID', '100000002', dt)
self.assertEqual(asset_0.sid, 0)
# At this point both sids 0 and 2 have held this value, so an
# as_of_date is required.
expected_in_repr = (
"Multiple occurrences of the value '{}' found for field '{}'."
).format('100000000', 'ALT_ID')
with self.assertRaisesRegexp(
MultipleValuesFoundForField,
expected_in_repr,
):
af.lookup_by_supplementary_field('ALT_ID', '100000000', None)
def test_get_supplementary_field(self):
equities = pd.DataFrame.from_records(
[
{
'sid': 0,
'symbol': 'A',
'start_date': pd.Timestamp('2013-1-1', tz='UTC'),
'end_date': pd.Timestamp('2014-1-1', tz='UTC'),
'exchange': 'TEST',
},
{
'sid': 1,
'symbol': 'B',
'start_date': pd.Timestamp('2013-1-1', tz='UTC'),
'end_date': pd.Timestamp('2014-1-1', tz='UTC'),
'exchange': 'TEST',
},
{
'sid': 2,
'symbol': 'C',
'start_date': pd.Timestamp('2013-7-1', tz='UTC'),
'end_date': pd.Timestamp('2014-1-1', tz='UTC'),
'exchange': 'TEST',
},
]
)
equity_supplementary_mappings = pd.DataFrame.from_records(
[
{
'sid': 0,
'field': 'ALT_ID',
'value': '100000000',
'start_date': pd.Timestamp('2013-1-1', tz='UTC'),
'end_date': pd.Timestamp('2013-6-28', tz='UTC'),
},
{
'sid': 1,
'field': 'ALT_ID',
'value': '100000001',
'start_date': pd.Timestamp('2013-1-1', tz='UTC'),
'end_date': pd.Timestamp('2014-1-1', tz='UTC'),
},
{
'sid': 0,
'field': 'ALT_ID',
'value': '100000002',
'start_date': pd.Timestamp('2013-7-1', tz='UTC'),
'end_date': pd.Timestamp('2014-1-1', tz='UTC'),
},
{
'sid': 2,
'field': 'ALT_ID',
'value': '100000000',
'start_date': pd.Timestamp('2013-7-1', tz='UTC'),
'end_date': pd.Timestamp('2014-1-1', tz='UTC'),
},
]
)
self.write_assets(
equities=equities,
equity_supplementary_mappings=equity_supplementary_mappings,
)
finder = self.asset_finder
# Before sid 0 has changed ALT_ID and sid 2 has started.
dt = pd.Timestamp('2013-6-28', tz='UTC')
for sid, expected in [(0, '100000000'), (1, '100000001')]:
self.assertEqual(
finder.get_supplementary_field(sid, 'ALT_ID', dt),
expected,
)
# Since sid 2 has not yet started, we don't know about its
# ALT_ID.
with self.assertRaisesRegexp(
NoValueForSid,
"No '{}' value found for sid '{}'.".format('ALT_ID', 2),
):
finder.get_supplementary_field(2, 'ALT_ID', dt),
# After all assets have ended.
dt = pd.Timestamp('2014-01-02', tz='UTC')
for sid, expected in [
(0, '100000002'), (1, '100000001'), (2, '100000000'),
]:
self.assertEqual(
finder.get_supplementary_field(sid, 'ALT_ID', dt),
expected,
)
# Sid 0 has historically held two values for ALT_ID by this dt.
with self.assertRaisesRegexp(
MultipleValuesFoundForSid,
"Multiple '{}' values found for sid '{}'.".format('ALT_ID', 0),
):
finder.get_supplementary_field(0, 'ALT_ID', None),
def test_group_by_type(self):
equities = make_simple_equity_info(
range(5),
start_date=pd.Timestamp('2014-01-01')
from pandas_datareader import data as web
import pandas as pd
import datetime as dt
import numpy as np
import requests
import yfinance as yf  # used below via yf.Tickers(); missing from the original imports
http_header = {
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.75 Safari/537.36",
"X-Requested-With": "XMLHttpRequest"
}
class PriceReader:
def __init__(self, brTickerList, usTickerList, startDate='2018-01-01'):
self.brTickerList = brTickerList
self.usTickerList = usTickerList
self.startDate = startDate
self.fillDate = dt.datetime.today().strftime('%m-%d-%Y')
self.df = pd.DataFrame(columns=['Date'])
def load(self):
# Read BR market data
if((self.brTickerList != None) and (len(self.brTickerList) > 0)):
self.df = self.readData(self.brTickerList, self.startDate).reset_index()
self.df.columns = self.df.columns.str.replace('.SA', '', regex=False)  # strip the '.SA' suffix literally
# Read US Market data
if((self.usTickerList != None) and (len(self.usTickerList) > 0)):
self.df = self.df.merge(self.readUSData(self.usTickerList, self.startDate).reset_index(), how='outer', on='Date')
self.df = self.df.set_index('Date').sort_index()
# self.df.to_csv('debug.csv', sep='\t')
indexList = ['^BVSP', '^GSPC', 'BRLUSD=X']
self.brlIndex = self.readUSData(indexList, self.startDate).reset_index()
self.brlIndex.rename(columns={'^BVSP':'IBOV', '^GSPC':'S&P500', 'BRLUSD=X':'USD'}, inplace=True)
self.brlIndex = self.brlIndex.set_index('Date')
# display(self.brlIndex)
def setFillDate(self, date):
self.fillDate = date
def fillCurrentValue(self, row):
row['PRICE'] = self.getCurrentValue(row['SYMBOL'], self.fillDate)
return row
def readData(self, code, startDate='2018-01-01'):
s=''
for c in code:
s += c + '.SA '
tks = yf.Tickers(s)
dfs = tks.history(start=startDate)[['Close']]
dfs.columns = dfs.columns.droplevel()
return dfs
def readUSData(self, code, startDate='2018-01-01'):
s=''
for c in code:
s += c + ' '
tks = yf.Tickers(s)
dfs = tks.history(start=startDate)[['Close']]
dfs.columns = dfs.columns.droplevel()
return dfs
def getHistory(self, code, start='2018-01-01'):
return self.df.loc[start:][code]
def getCurrentValue(self, code, date=None):
if(date == None):
return self.df.iloc[-1][code]
available, date = self.checkLastAvailable(self.df, date, code)
if available:
return self.df.loc[date][code]
return self.df.iloc[0][code]
def getIndexHistory(self, code, end):
ret = self.brlIndex.loc[:end][code]
return ret.dropna()
def getIndexCurrentValue(self, code, date=None):
if(date == None):
return self.brlIndex.iloc[-1][code]
available,date = self.checkLastAvailable(self.brlIndex, date, code)
if available:
return self.brlIndex.loc[date][code]
return self.brlIndex.iloc[0][code]
def checkLastAvailable(self, dtframe, loockDate, field):
date = pd.to_datetime(loockDate)
day = pd.Timedelta(1, unit='d')
#Look for last available date
while((not (date in dtframe.index)) or pd.isna(dtframe.loc[date][field])):
date = date - day
if(date < dtframe.index[0]):
return False,0
return True,date
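# Illustrative: if the frame has a row for 2021-03-12 (Friday) but none for
# 2021-03-14 (Sunday), checkLastAvailable(df, '2021-03-14', 'AAPL') steps back
# one day at a time and returns (True, Timestamp('2021-03-12')); it returns
# (False, 0) only when it walks past the first date in the index.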
# -------------------------------------------------------------------------------------------------
class ADVFN_Page:
urlDict = [{ 'url': 'https://br.advfn.com/bolsa-de-valores/bovespa/{}/dividendos/historico-de-proventos', 'index': 5 },
{ 'url': 'https://br.advfn.com/bolsa-de-valores/bovespa/{}/dividendos', 'index': 6}]
def read(self, ticker):
res = pd.DataFrame()
for attempt in range(2):
url = self.urlDict[attempt]['url'].format(ticker)
r = requests.get(url, headers=http_header)
# print(url, urlDict[attempt]['index'])
try:
rawTable = pd.read_html(r.text, thousands='.',decimal=',')[self.urlDict[attempt]['index']]
# display(rawTable)
if(len(rawTable.columns) < 5):
raise
except:
continue
res = rawTable
if ('Mês de Referência' in res.columns):
res.rename(columns={'Mês de Referência':'Tipo do Provento'}, inplace=True)
res['Tipo do Provento'] = 'Dividendo'
res.rename(columns={'Tipo do Provento':'OPERATION', 'Data-Com':'DATE', 'Pagamento':'PAYDATE', 'Valor':'PRICE', 'Dividend Yield':'YIELD'}, inplace=True)
break
return res
class Fundamentus_Page:
urlDict = [ { 'url': 'https://www.fundamentus.com.br/proventos.php?papel={}&tipo=2', 'index': 0 },
{ 'url': 'https://www.fundamentus.com.br/fii_proventos.php?papel={}&tipo=2', 'index': 0}]
def read(self, ticker):
res = pd.DataFrame()
# if (ticker != 'SMLS3'):
# return res
for attempt in range(2):
url = self.urlDict[attempt]['url'].format(ticker)
r = requests.get(url, headers=http_header)
# print(url, self.urlDict[attempt]['index'])
try:
rawTable = pd.read_html(r.text, thousands='.',decimal=',')[self.urlDict[attempt]['index']]
# print(rawTable)
if(len(rawTable.columns) < 4):
raise
except:
continue
res = rawTable
if('Por quantas ações' in res.columns):
res['Valor'] /= res['Por quantas ações']
if ('Última Data Com' in res.columns):
res.rename(columns={'Última Data Com':'Data'}, inplace=True)
res.rename(columns={'Tipo':'OPERATION', 'Data':'DATE', 'Data de Pagamento':'PAYDATE', 'Valor':'PRICE'}, inplace=True)
break
# print(res)
return res
class DividendReader:
def __init__(self, dataFrame, startDate='2018-01-01'):
self.brTickerList = dataFrame[dataFrame['TYPE'] == 'Ação']['SYMBOL'].unique()
self.usTickerList = dataFrame[dataFrame['TYPE'] == 'STOCK']['SYMBOL'].unique()
self.fiiList = dataFrame[dataFrame['TYPE'] == 'FII']['SYMBOL'].unique()
self.startDate=startDate
self.df = pd.DataFrame(columns=['SYMBOL', 'PRICE', 'PAYDATE'])
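# NOTE: Python keeps only the last definition of __init__, so the DataFrame-based
# constructor above is shadowed by the ticker-list constructor below.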
def __init__(self, brTickers, fiiTickers, usTickers, startDate='2018-01-01'):
self.brTickerList = brTickers
self.usTickerList = usTickers
self.fiiList = fiiTickers
self.startDate = startDate
self.df = pd.DataFrame(columns=['SYMBOL', 'DATE', 'PRICE', 'PAYDATE'])
import json
import os
import pandas as pd
from corai_error import Error_type_setter, Error_not_allowed_input
from corai_util.tools.src.function_iterable import sorted_alphanumeric
from corai_util.tools.src.function_json import zip_json, unzip_json
class Estimator(object):
"""
Semantics:
Class Estimator is an adaptor around pandas DataFrames.
We use pandas since it is widespread and easy to use.
We store the data in the following way: each column is one feature, each row one estimation.
The objective of it is to automatise some behavior one could have regarding some containers.
For example, always plotting the same objects in the same way, or the same estimates.
In addition to dataframes, an estimator also allows storing additional information.
Currently, pandas does not offer such functionality.
For this reason, Estimator is a common ground for all classes derived from Estimator.
It is good practice to put the names of the columns / features in the class object, as a safeguard.
"""
CORE_COL = set() # we use a set in order to avoid redundancy. However, DataFrames require lists, so we always convert it with list(...) when building the DataFrame (see __init__ below).
def __init__(self, df=None, *args, **kwargs):
# args and kwargs for the child super() method. Do not forget them in child classes.
if df is not None:
# test that the columns of the df are the right one, corresponding to the class argument.
# the fact that we use self. ensures that we use polymorphism.
self.df = df # we do that first to check that it is a dataframe.
if self.CORE_COL.issubset(df.columns):
super().__init__()
else:
raise Error_type_setter("Problem, the columns of the dataframe do not match the predefined ones.")
# if no df, we create an empty one.
else:
self.df = pd.DataFrame(columns=list(self.CORE_COL))
super().__init__()
def __repr__(self):
# this is for the print function.
# We want the estimator to inherit the properties from DF!
return repr(self.df)
# section ######################################################################
# #############################################################################
# constructors
@classmethod
def from_csv(cls, path, **kwargs):
"""
Semantics:
Constructor estimator with a path.
Args:
path: string, path to a CSV file/txt.
kwargs: additional key words argument for pd.read_csv
Returns: new estimator.
"""
if os.path.getsize(path) <= 0:
raise Error_not_allowed_input("The input csv file cannot be empty")
return cls(df=pd.read_csv(path, **kwargs)) # calling the constructor of the class.
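# Illustrative usage (MyEstimator is a hypothetical subclass defining CORE_COL):
# est = MyEstimator.from_csv('results.csv')
# est.df          # underlying pandas DataFrame, one row per estimation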
@classmethod
def from_json(cls, path, **kwargs):
"""
Semantics:
Read json dataframe and construct the object.
The json must not contain any extra attributes.
In case one wants the constructor to add extra/meta attributes to the child estimator:
- override from_json with a function calling from_json_attributes and retrieve the attributes,
- then call super().from_json to create the estimator,
- finally add to the estimator the attributes.
- at the end, the function needs to rewrite the original json back together.
use the parameter compressed if one wants to allow compressing / decompression.
Args:
path: The path where to retrieve the dataframe from. Extension json needed.
Returns:
Void
Postcondition:
the json at path remains identical.
Examples of overriding:
def from_json(cls, path, compressed=True):
attrs = super().from_json_attributes(path, compressed)
estimator = super().from_json(path)
estimator.name = attrs['name']
estimator.to_json(path=path, compress=compressed)
return estimator
"""
if os.path.getsize(path) <= 0:
raise Error_not_allowed_input("The input json file cannot be empty")
dataframe = pd.read_json(path, orient='split')
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 23 10:59:53 2018
@author: <NAME>
this code predicts whether a passenger survived the Titanic shipwreck
"""
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
dataset= pd.read_csv('train.csv')
#using seaborn to visualize which columns have missing values
null_value=sns.heatmap(dataset.isnull(), yticklabels=False, cmap='viridis', cbar=False)
#using seaborn to count the number of survivors based on sex
sns.countplot(x='Survived', hue='Sex', data=dataset)
#using seaborn to count the number of survivors based on class
sns.countplot(x='Survived', hue='Pclass', data=dataset)
#distribution plot of ages
dataset['Age'].plot.hist()
#using seaborn to count the number of siblings/spouses (SibSp)
sns.countplot(x='SibSp', data=dataset)
#distribution plot of fare
dataset['Fare'].plot.hist()
#find the missing age based on the Pclass
dataset.groupby('Pclass').mean()
def get_age(cols):
Age=cols[0]
Pclass=cols[1]
if pd.isnull(Age):
if Pclass ==1:
return 38
elif Pclass == 2:
return 29
else:
return 25
else:
return Age
dataset['Age'] = dataset[['Age', 'Pclass']].apply(get_age, axis=1)
#drop this column since there are so many missing values
dataset.drop('Cabin', axis=1, inplace=True)
#converting categorical variables to dummy or indicator variables
sex = pd.get_dummies(dataset['Sex'], drop_first=True)
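# With drop_first=True the two-level 'Sex' column collapses to a single 0/1
# indicator (typically named 'male'); 'female' is implied when 'male' == 0,
# which avoids the dummy-variable trap in the model.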
# BOILERPLATE
# -------------------------------------->
import sys
sys.path.insert(0, './src')
# -------------------------------------->
import numpy as np
import json
import pandas as pd
from conversation_analytics_toolkit import transformation as pathflow_transformation
def _load_test_logs_file(file):
with open('./test/conversation_analytics_toolkit/testdata/'+ file) as f:
try:
data = json.load(f)
except ValueError:
data = []
df = pd.DataFrame.from_records(data)
return df
def test_to_canonical_WA_empty_input_df():
df = _load_test_logs_file('df_logs_empty.json')
workspace_nodes = pd.read_csv('./test/conversation_analytics_toolkit/testdata/workspace_nodes.csv')
import threading
import time
import datetime
import pandas as pd
from functools import reduce, wraps
from datetime import datetime, timedelta
import numpy as np
from scipy.stats import zscore
import model.queries as qrs
from model.NodesMetaData import NodesMetaData
import utils.helpers as hp
from utils.helpers import timer
import parquet_creation as pcr
import glob
import os
import dask
import dask.dataframe as dd
class Singleton(type):
def __init__(cls, name, bases, attibutes):
cls._dict = {}
cls._registered = []
def __call__(cls, dateFrom=None, dateTo=None, *args):
print('* OBJECT DICT ', len(cls._dict), cls._dict)
if (dateFrom is None) or (dateTo is None):
defaultDT = hp.defaultTimeRange()
dateFrom = defaultDT[0]
dateTo = defaultDT[1]
if (dateFrom, dateTo) in cls._dict:
print('** OBJECT EXISTS', cls, dateFrom, dateTo)
instance = cls._dict[(dateFrom, dateTo)]
else:
print('** OBJECT DOES NOT EXIST', cls, dateFrom, dateTo)
if (len(cls._dict) > 0) and ([dateFrom, dateTo] != cls._registered):
print('*** provide the latest and start thread', cls, dateFrom, dateTo)
instance = cls._dict[list(cls._dict.keys())[-1]]
refresh = threading.Thread(target=cls.nextPeriodData, args=(dateFrom, dateTo, *args))
refresh.start()
elif ([dateFrom, dateTo] == cls._registered):
print('*** provide the latest', cls, dateFrom, dateTo)
instance = cls._dict[list(cls._dict.keys())[-1]]
elif (len(cls._dict) == 0):
print('*** no data yet, refresh and wait', cls, dateFrom, dateTo)
cls.nextPeriodData(dateFrom, dateTo, *args)
instance = cls._dict[(dateFrom, dateTo)]
# keep only a few objects in memory
if len(cls._dict) >= 2:
cls._dict.pop(list(cls._dict.keys())[0])
return instance
def nextPeriodData(cls, dateFrom, dateTo, *args):
print(f'**** thread started for {cls}')
cls._registered = [dateFrom, dateTo]
instance = super().__call__(dateFrom, dateTo, *args)
cls._dict[(dateFrom, dateTo)] = instance
print(f'**** thread finished for {cls}')
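# Illustrative behaviour of the Singleton metaclass (dates are hypothetical):
# a = GeneralDataLoader('2021-01-01 00:00', '2021-01-02 00:00')
# b = GeneralDataLoader('2021-01-01 00:00', '2021-01-02 00:00')
# a is b          # True: the instance is cached under (dateFrom, dateTo)
# A call with a new, unregistered range returns the latest cached instance
# immediately and builds the new one in a background thread via nextPeriodData().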
class Updater(object):
def __init__(self):
self.StartThread()
@timer
def UpdateAllData(self):
print()
print(f'{datetime.now()} New data is on its way at {datetime.utcnow()}')
print('Active threads:',threading.active_count())
# query period must be the same for all data loaders
defaultDT = hp.defaultTimeRange()
GeneralDataLoader(defaultDT[0], defaultDT[1])
SiteDataLoader(defaultDT[0], defaultDT[1])
PrtoblematicPairsDataLoader(defaultDT[0], defaultDT[1])
SitesRanksDataLoader(defaultDT[0], defaultDT[1])
self.lastUpdated = hp.roundTime(datetime.utcnow())
self.StartThread()
def StartThread(self):
thread = threading.Timer(3600, self.UpdateAllData) # 1hour
thread.daemon = True
thread.start()
class ParquetUpdater(object):
def __init__(self):
self.StartThread()
@timer
def Update(self):
print('Starting Parquet Updater')
limit = pcr.limit
indices = pcr.indices
files = glob.glob('..\parquet\*')
print('files',files)
file_end = str(int(limit*24))
print('end of file trigger',file_end)
for f in files:
if f.endswith(file_end):
os.remove(f)
files = glob.glob('..\parquet\*')
print('files2',files)
for idx in indices:
j=int((limit*24)-1)
print('idx',idx,'j',j)
for f in files[::-1]:
file_end = str(idx)
end = file_end+str(j)
print('f',f,'end',end)
if f.endswith(end):
new_name = file_end+str(j+1)
head = '..\parquet\\'
final = head+new_name
print('f',f,'final',final)
os.rename(f,final)
j -= 1
jobs = []
limit = 1/24
timerange = pcr.queryrange(limit)
for idx in indices:
thread = threading.Thread(target=pcr.btwfunc,args=(idx,timerange))
jobs.append(thread)
for j in jobs:
j.start()
for j in jobs:
j.join()
# print('Finished Querying')
for idx in indices:
filenames = pcr.ReadParquet(idx,limit)
if idx == 'ps_packetloss':
print(filenames)
plsdf = dd.read_parquet(filenames).compute()
print('Before drops',len(plsdf))
plsdf = plsdf.drop_duplicates()
print('After Drops',len(plsdf))
print('packetloss\n',plsdf)
if idx == 'ps_owd':
owddf = dd.read_parquet(filenames).compute()
print('owd\n',owddf)
if idx == 'ps_retransmits':
rtmdf = dd.read_parquet(filenames).compute()
print('retransmits\n',rtmdf)
if idx == 'ps_throughput':
trpdf = dd.read_parquet(filenames).compute()
print('throughput\n',trpdf)
print('dask df complete')
self.lastUpdated = hp.roundTime(datetime.utcnow())
self.StartThread()
def StartThread(self):
thread = threading.Timer(3600, self.Update) # 1hour
thread.daemon = True
thread.start()
class GeneralDataLoader(object, metaclass=Singleton):
def __init__(self, dateFrom, dateTo):
self.dateFrom = dateFrom
self.dateTo = dateTo
self.lastUpdated = None
self.pls = pd.DataFrame()
self.owd = pd.DataFrame()
self.thp = pd.DataFrame()
self.rtm = pd.DataFrame()
self.UpdateGeneralInfo()
@property
def dateFrom(self):
return self._dateFrom
@dateFrom.setter
def dateFrom(self, value):
self._dateFrom = int(time.mktime(datetime.strptime(value, "%Y-%m-%d %H:%M").timetuple())*1000)
@property
def dateTo(self):
return self._dateTo
@dateTo.setter
def dateTo(self, value):
self._dateTo = int(time.mktime(datetime.strptime(value, "%Y-%m-%d %H:%M").timetuple())*1000)
@property
def lastUpdated(self):
return self._lastUpdated
@lastUpdated.setter
def lastUpdated(self, value):
self._lastUpdated = value
@timer
def UpdateGeneralInfo(self):
# print("last updated: {0}, new start: {1} new end: {2} ".format(self.lastUpdated, self.dateFrom, self.dateTo))
self.pls = NodesMetaData('ps_packetloss', self.dateFrom, self.dateTo).df
self.owd = NodesMetaData('ps_owd', self.dateFrom, self.dateTo).df
self.thp = NodesMetaData('ps_throughput', self.dateFrom, self.dateTo).df
self.rtm = NodesMetaData('ps_retransmits', self.dateFrom, self.dateTo).df
self.latency_df = pd.merge(self.pls, self.owd, how='outer')
self.throughput_df = pd.merge(self.thp, self.rtm, how='outer')
all_df = pd.merge(self.latency_df, self.throughput_df, how='outer')
self.all_df = all_df.drop_duplicates()
self.pls_related_only = self.pls[self.pls['host_in_ps_meta'] == True]
self.owd_related_only = self.owd[self.owd['host_in_ps_meta'] == True]
self.thp_related_only = self.thp[self.thp['host_in_ps_meta'] == True]
self.rtm_related_only = self.rtm[self.rtm['host_in_ps_meta'] == True]
self.latency_df_related_only = self.latency_df[self.latency_df['host_in_ps_meta'] == True]
self.throughput_df_related_only = self.throughput_df[self.throughput_df['host_in_ps_meta'] == True]
self.all_df_related_only = self.all_df[self.all_df['host_in_ps_meta'] == True]
self.all_tested_pairs = self.getAllTestedPairs()
self.lastUpdated = datetime.now()
def getAllTestedPairs(self):
all_df = self.all_df[['host', 'ip']]
df = pd.DataFrame(qrs.queryAllTestedPairs([self.dateFrom, self.dateTo]))
df = pd.merge(all_df, df, left_on='ip', right_on='src', how='right')
df = pd.merge(all_df, df, left_on='ip', right_on='dest', how='right', suffixes=('_dest', '_src'))
df.drop_duplicates(keep='first', inplace=True)
df = df.sort_values(['host_src', 'host_dest'])
df['host_dest'] = df['host_dest'].fillna('N/A')
df['host_src'] = df['host_src'].fillna('N/A')
df['source'] = df[['host_src', 'src']].apply(lambda x: ': '.join(x), axis=1)
df['destination'] = df[['host_dest', 'dest']].apply(lambda x: ': '.join(x), axis=1)
# df = df.sort_values(by=['host_src', 'host_dest'], ascending=False)
df = df[['host_dest', 'host_src', 'idx', 'src', 'dest', 'source', 'destination']]
return df
class SiteDataLoader(object, metaclass=Singleton):
genData = GeneralDataLoader()
def __init__(self, dateFrom, dateTo):
self.dateFrom = dateFrom
self.dateTo = dateTo
self.UpdateSiteData()
def UpdateSiteData(self):
# print('UpdateSiteData >>> ', self.dateFrom, self.dateTo)
pls_site_in_out = self.InOutDf("ps_packetloss", self.genData.pls_related_only)
self.pls_data = pls_site_in_out['data']
self.pls_dates = pls_site_in_out['dates']
owd_site_in_out = self.InOutDf("ps_owd", self.genData.owd_related_only)
self.owd_data = owd_site_in_out['data']
self.owd_dates = owd_site_in_out['dates']
thp_site_in_out = self.InOutDf("ps_throughput", self.genData.thp_related_only)
self.thp_data = thp_site_in_out['data']
self.thp_dates = thp_site_in_out['dates']
rtm_site_in_out = self.InOutDf("ps_retransmits", self.genData.rtm_related_only)
self.rtm_data = rtm_site_in_out['data']
self.rtm_dates = rtm_site_in_out['dates']
self.latency_df_related_only = self.genData.latency_df_related_only
self.throughput_df_related_only = self.genData.throughput_df_related_only
self.sites = self.orderSites()
@timer
def InOutDf(self, idx, idx_df):
print(idx)
in_out_values = []
time_list = hp.GetTimeRanges(self.dateFrom, self.dateTo)
for t in ['dest_host', 'src_host']:
meta_df = idx_df.copy()
df = pd.DataFrame(qrs.queryDailyAvg(idx, t, time_list[0], time_list[1])).reset_index()
df['index'] = pd.to_datetime(df['index'], unit='ms').dt.strftime('%d/%m')
df = df.transpose()
header = df.iloc[0]
df = df[1:]
df.columns = ['day-3', 'day-2', 'day-1', 'day']
meta_df = pd.merge(meta_df, df, left_on="host", right_index=True)
three_days_ago = meta_df.groupby('site').agg({'day-3': lambda x: x.mean(skipna=False)}, axis=1).reset_index()
two_days_ago = meta_df.groupby('site').agg({'day-2': lambda x: x.mean(skipna=False)}, axis=1).reset_index()
one_day_ago = meta_df.groupby('site').agg({'day-1': lambda x: x.mean(skipna=False)}, axis=1).reset_index()
today = meta_df.groupby('site').agg({'day': lambda x: x.mean(skipna=False)}, axis=1).reset_index()
site_avg_df = reduce(lambda x,y: pd.merge(x,y, on='site', how='outer'), [three_days_ago, two_days_ago, one_day_ago, today])
site_avg_df.set_index('site', inplace=True)
change = site_avg_df.pct_change(axis='columns')
site_avg_df = pd.merge(site_avg_df, change, left_index=True, right_index=True, suffixes=('_val', ''))
site_avg_df['direction'] = 'IN' if t == 'dest_host' else 'OUT'
in_out_values.append(site_avg_df)
site_df = pd.concat(in_out_values).reset_index()
site_df = site_df.round(2)
return {"data": site_df,
"dates": header}
def orderSites(self):
problematic = []
problematic.extend(self.thp_data.nsmallest(20, ['day-3_val', 'day-2_val', 'day-1_val', 'day_val'])['site'].values)
problematic.extend(self.rtm_data.nlargest(20, ['day-3_val', 'day-2_val', 'day-1_val', 'day_val'])['site'].values)
problematic.extend(self.pls_data.nlargest(20, ['day-3_val', 'day-2_val', 'day-1_val', 'day_val'])['site'].values)
problematic.extend(self.owd_data.nlargest(20, ['day-3_val', 'day-2_val', 'day-1_val', 'day_val'])['site'].values)
problematic = list(set(problematic))
all_df = self.genData.all_df_related_only.copy()
all_df['has_problems'] = all_df['site'].apply(lambda x: True if x in problematic else False)
sites = all_df.sort_values(by='has_problems', ascending=False).drop_duplicates(['site'])['site'].values
return sites
class ProblematicPairsDataLoader(object, metaclass=Singleton):
gobj = GeneralDataLoader()
LIST_IDXS = ['ps_packetloss', 'ps_owd', 'ps_retransmits', 'ps_throughput']
def __init__(self, dateFrom, dateTo):
self.dateFrom = dateFrom
self.dateTo = dateTo
self.all_df = self.gobj.all_df_related_only[['ip', 'is_ipv6', 'host', 'site', 'admin_email', 'admin_name', 'ip_in_ps_meta',
'host_in_ps_meta', 'host_index', 'site_index', 'host_meta', 'site_meta']].sort_values(by=['ip_in_ps_meta', 'host_in_ps_meta', 'ip'], ascending=False)
self.df = self.markNodes()
@timer
def buildProblems(self, idx):
print('buildProblems...',idx)
data = []
intv = int(hp.CalcMinutes4Period(self.dateFrom, self.dateTo)/60)
time_list = hp.GetTimeRanges(self.dateFrom, self.dateTo, intv)
for i in range(len(time_list)-1):
data.extend(qrs.query4Avg(idx, time_list[i], time_list[i+1]))
return data
@timer
def getPercentageMeasuresDone(self, grouped, tempdf):
measures_done = tempdf.groupby('hash').agg({'doc_count':'sum'})
def findRatio(row, total_minutes):
if pd.isna(row['doc_count']):
count = '0'
else: count = str(round((row['doc_count']/total_minutes)*100))+'%'
return count
one_test_per_min = hp.CalcMinutes4Period(self.dateFrom, self.dateTo)
measures_done['tests_done'] = measures_done.apply(lambda x: findRatio(x, one_test_per_min), axis=1)
grouped = pd.merge(grouped, measures_done, on='hash', how='left')
return grouped
# @timer
def markNodes(self):
df = pd.DataFrame()
for idx in hp.INDECES:
tempdf = pd.DataFrame(self.buildProblems(idx))
grouped = tempdf.groupby(['src', 'dest', 'hash']).agg({'value': lambda x: x.mean(skipna=False)}, axis=1).reset_index()
grouped = self.getRelHosts(grouped)
# zscore based on each pair's values
tempdf['zscore'] = tempdf.groupby('hash')['value'].apply(lambda x: (x - x.mean())/x.std())
# add max zscore so that it is possible to order by worst
max_z = tempdf.groupby('hash').agg({'zscore':'max'}).rename(columns={'zscore':'max_hash_zscore'})
grouped = pd.merge(grouped, max_z, on='hash', how='left')
# zscore based on the whole dataset
grouped['zscore'] = grouped[['value']].apply(lambda x: (x - x.mean())/x.std())
grouped['idx'] = idx
# calculate the percentage of measures based on the assumption that ideally measures are done once every minute
grouped = self.getPercentageMeasuresDone(grouped, tempdf)
# this is not accurate since we have some cases with 4-5 times more tests than expected
# avg_numtests = tempdf.groupby('hash').agg({'doc_count':'mean'}).values[0][0]
# Add flags for some general problems
if (idx == 'ps_packetloss'):
grouped['all_packets_lost'] = grouped['hash'].apply(lambda x: 1 if x in grouped[grouped['value']==1]['hash'].values else 0)
else: grouped['all_packets_lost'] = -1
def checkThreshold(value):
if (idx == 'ps_packetloss'):
if value > 0.05:
return 1
return 0
elif (idx == 'ps_owd'):
if value > 1000 or value < 0:
return 1
return 0
elif (idx == 'ps_throughput'):
if round(value/1e+6, 2) < 25:
return 1
return 0
elif (idx == 'ps_retransmits'):
if value > 100000:
return 1
return 0
grouped['threshold_reached'] = grouped['value'].apply(lambda row: checkThreshold(row))
grouped['has_bursts'] = grouped['hash'].apply(lambda x: 1
if x in tempdf[tempdf['zscore']>5]['hash'].values
else 0)
grouped['src_not_in'] = grouped['hash'].apply(lambda x: 1
if x in grouped[grouped['src'].isin(self.all_df['ip']) == False]['hash'].values
else 0)
grouped['dest_not_in'] = grouped['hash'].apply(lambda x: 1
if x in grouped[grouped['dest'].isin(self.all_df['ip']) == False]['hash'].values
else 0)
grouped['measures'] = grouped['doc_count'].astype(str)+'('+grouped['tests_done'].astype(str)+')'
df = df.append(grouped, ignore_index=True)
df.fillna('N/A', inplace=True)
print(f'Total number of hashes: {len(df)}')
return df
@timer
def getValues(self, probdf):
# probdf = markNodes()
df = pd.DataFrame(columns=['timestamp', 'value', 'idx', 'hash'])
time_list = hp.GetTimeRanges(self.dateFrom, self.dateTo)
for item in probdf[['src', 'dest', 'idx']].values:
tempdf = pd.DataFrame(qrs.queryAllValues(item[2], item, time_list[0], time_list[1]))
tempdf['idx'] = item[2]
tempdf['hash'] = item[0]+"-"+item[1]
tempdf['src'] = item[0]
tempdf['dest'] = item[1]
tempdf.rename(columns={hp.getValueField(item[2]): 'value'}, inplace=True)
df = df.append(tempdf, ignore_index=True)
return df
@timer
def getRelHosts(self, probdf):
df1 = pd.merge(self.all_df[['host', 'ip', 'site']], probdf[['src', 'hash']], left_on='ip', right_on='src', how='right')
df2 = pd.merge(self.all_df[['host', 'ip', 'site']], probdf[['dest', 'hash']], left_on='ip', right_on='dest', how='right')
df = pd.merge(df1, df2, on=['hash'], suffixes=('_src', '_dest'), how='inner')
df = df[df.duplicated(subset=['hash'])==False]
df = df.drop(columns=['ip_src', 'ip_dest'])
df = pd.merge(probdf, df, on=['hash', 'src', 'dest'], how='left')
return df
class SitesRanksDataLoader(metaclass=Singleton):
def __init__(self, dateFrom, dateTo):
self.dateFrom = dateFrom
self.dateTo = dateTo
self.all_df = GeneralDataLoader().all_df_related_only
self.locdf = pd.DataFrame.from_dict(qrs.queryNodesGeoLocation(), orient='index').reset_index().rename(columns={'index':'ip'})
self.measures = pd.DataFrame()
self.df = self.calculateRank()
def FixMissingLocations(self):
df = pd.merge(self.all_df, self.locdf, left_on=['ip'], right_on=['ip'], how='left')
df = df.drop(columns=['site_y', 'host_y']).rename(columns={'site_x': 'site', 'host_x': 'host'})
df["lat"] = pd.to_numeric(df["lat"])
df["lon"] = pd.to_numeric(df["lon"])
for i, row in df.iterrows():
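# row['lat'] != row['lat'] is a NaN test: NaN never compares equal to itself.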
if row['lat'] != row['lat'] or row['lat'] is None:
site = row['site']
host = row['host']
lon = df[(df['site']==site)&(df['lon'].notnull())].agg({'lon':'mean'})['lon']
lat = df[(df['site']==site)&(df['lat'].notnull())].agg({'lat':'mean'})['lat']
if lat!=lat or lon!=lon:
lon = df[(df['host']==host)&(df['lon'].notnull())].agg({'lon':'mean'})['lon']
lat = df[(df['host']==host)&(df['lat'].notnull())].agg({'lat':'mean'})['lat']
df.loc[i, 'lon'] = lon
df.loc[i, 'lat'] = lat
return df
def queryData(self, idx):
data = []
intv = int(hp.CalcMinutes4Period(self.dateFrom, self.dateTo)/60)
time_list = hp.GetTimeRanges(self.dateFrom, self.dateTo, intv)
for i in range(len(time_list)-1):
data.extend(qrs.query4Avg(idx, time_list[i], time_list[i+1]))
return data
def calculateRank(self):
df = | pd.DataFrame() | pandas.DataFrame |
# Copyright (c) 2017 pandas-gbq Authors All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
# -*- coding: utf-8 -*-
import datetime
import decimal
from io import StringIO
import textwrap
from unittest import mock
import db_dtypes
import numpy
import pandas
import pandas.testing
import pytest
from pandas_gbq import exceptions
from pandas_gbq.features import FEATURES
from pandas_gbq import load
def load_method(bqclient, api_method):
if not FEATURES.bigquery_has_from_dataframe_with_csv and api_method == "load_csv":
return bqclient.load_table_from_file
return bqclient.load_table_from_dataframe
def test_encode_chunk_with_unicode():
"""Test that a dataframe containing unicode can be encoded as a file.
See: https://github.com/pydata/pandas-gbq/issues/106
"""
df = pandas.DataFrame(
numpy.random.randn(6, 4), index=range(6), columns=list("ABCD")
)
df["s"] = "信用卡"
csv_buffer = load.encode_chunk(df)
csv_bytes = csv_buffer.read()
csv_string = csv_bytes.decode("utf-8")
assert "信用卡" in csv_string
def test_encode_chunk_with_floats():
"""Test that floats in a dataframe are encoded with at most 17 significant
figures.
See: https://github.com/pydata/pandas-gbq/issues/192 and
https://github.com/pydata/pandas-gbq/issues/326
"""
input_csv = textwrap.dedent(
"""01/01/17 23:00,0.14285714285714285,4
01/02/17 22:00,1.05148,3
01/03/17 21:00,1.05153,2
01/04/17 20:00,3.141592653589793,1
01/05/17 19:00,2.0988936657440586e+43,0
"""
)
input_df = pandas.read_csv(
StringIO(input_csv), header=None, float_precision="round_trip"
)
csv_buffer = load.encode_chunk(input_df)
round_trip = pandas.read_csv(csv_buffer, header=None, float_precision="round_trip")
pandas.testing.assert_frame_equal(
round_trip,
input_df,
check_exact=True,
)
def test_encode_chunk_with_newlines():
"""See: https://github.com/pydata/pandas-gbq/issues/180"""
df = pandas.DataFrame({"s": ["abcd", "ef\ngh", "ij\r\nkl"]})
csv_buffer = load.encode_chunk(df)
csv_bytes = csv_buffer.read()
csv_string = csv_bytes.decode("utf-8")
assert "abcd" in csv_string
assert '"ef\ngh"' in csv_string
assert '"ij\r\nkl"' in csv_string
def test_split_dataframe():
df = pandas.DataFrame(numpy.random.randn(6, 4), index=range(6))
chunks = list(load.split_dataframe(df, chunksize=2))
assert len(chunks) == 3
remaining, chunk = chunks[0]
assert remaining == 4
assert len(chunk.index) == 2
def test_encode_chunks_with_chunksize_none():
df = pandas.DataFrame(numpy.random.randn(6, 4), index=range(6))
chunks = list(load.split_dataframe(df))
assert len(chunks) == 1
remaining, chunk = chunks[0]
assert remaining == 0
assert len(chunk.index) == 6
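# A minimal sketch of the generator contract the two tests above exercise:
# split_dataframe yields (rows_remaining, chunk) tuples and treats chunksize=None
# as "one chunk". This is an illustration written against those assertions, not
# pandas-gbq's actual implementation.
def _split_dataframe_sketch(dataframe, chunksize=None):
    if chunksize is None:
        yield 0, dataframe
        return
    remaining = len(dataframe.index)
    for start in range(0, len(dataframe.index), chunksize):
        chunk = dataframe.iloc[start : start + chunksize]
        remaining -= len(chunk.index)
        yield remaining, chunk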
def test_load_csv_from_dataframe_allows_client_to_generate_schema(mock_bigquery_client):
import google.cloud.bigquery
df = pandas.DataFrame({"int_col": [1, 2, 3]})
destination = google.cloud.bigquery.TableReference.from_string(
"my-project.my_dataset.my_table"
)
_ = list(
load.load_csv_from_dataframe(
mock_bigquery_client, df, destination, None, None, None
)
)
mock_load = mock_bigquery_client.load_table_from_dataframe
assert mock_load.called
_, kwargs = mock_load.call_args
assert "job_config" in kwargs
assert kwargs["job_config"].schema is None
def test_load_csv_from_file_generates_schema(mock_bigquery_client):
import google.cloud.bigquery
df = pandas.DataFrame(
{
"int_col": [1, 2, 3],
"bool_col": [True, False, True],
"float_col": [0.0, 1.25, -2.75],
"string_col": ["a", "b", "c"],
"datetime_col": pandas.Series(
[
"2021-12-21 13:28:40.123789",
"2000-01-01 11:10:09",
"2040-10-31 23:59:59.999999",
],
dtype="datetime64[ns]",
),
"timestamp_col": pandas.Series(
[
"2021-12-21 13:28:40.123789",
"2000-01-01 11:10:09",
"2040-10-31 23:59:59.999999",
],
dtype="datetime64[ns]",
).dt.tz_localize(datetime.timezone.utc),
}
)
destination = google.cloud.bigquery.TableReference.from_string(
"my-project.my_dataset.my_table"
)
_ = list(
load.load_csv_from_file(mock_bigquery_client, df, destination, None, None, None)
)
mock_load = mock_bigquery_client.load_table_from_file
assert mock_load.called
_, kwargs = mock_load.call_args
assert "job_config" in kwargs
sent_schema = kwargs["job_config"].schema
assert len(sent_schema) == len(df.columns)
assert sent_schema[0].name == "int_col"
assert sent_schema[0].field_type == "INTEGER"
assert sent_schema[1].name == "bool_col"
assert sent_schema[1].field_type == "BOOLEAN"
assert sent_schema[2].name == "float_col"
assert sent_schema[2].field_type == "FLOAT"
assert sent_schema[3].name == "string_col"
assert sent_schema[3].field_type == "STRING"
# TODO: Disambiguate TIMESTAMP from DATETIME based on if column is
# localized or at least use field type from table metadata. See:
# https://github.com/googleapis/python-bigquery-pandas/issues/450
assert sent_schema[4].name == "datetime_col"
assert sent_schema[4].field_type == "TIMESTAMP"
assert sent_schema[5].name == "timestamp_col"
assert sent_schema[5].field_type == "TIMESTAMP"
@pytest.mark.parametrize(
["bigquery_has_from_dataframe_with_csv", "api_method"],
[(True, "load_parquet"), (True, "load_csv"), (False, "load_csv")],
)
def test_load_chunks_omits_policy_tags(
monkeypatch, mock_bigquery_client, bigquery_has_from_dataframe_with_csv, api_method
):
"""Ensure that policyTags are omitted.
We don't want to change the policyTags via a load job, as this can cause
403 error. See: https://github.com/googleapis/python-bigquery/pull/557
"""
import google.cloud.bigquery
monkeypatch.setattr(
type(FEATURES),
"bigquery_has_from_dataframe_with_csv",
mock.PropertyMock(return_value=bigquery_has_from_dataframe_with_csv),
)
df = pandas.DataFrame({"col1": [1, 2, 3]})
destination = google.cloud.bigquery.TableReference.from_string(
"my-project.my_dataset.my_table"
)
schema = {
"fields": [
{"name": "col1", "type": "INT64", "policyTags": {"names": ["tag1", "tag2"]}}
]
}
_ = list(
load.load_chunks(
mock_bigquery_client, df, destination, schema=schema, api_method=api_method
)
)
mock_load = load_method(mock_bigquery_client, api_method=api_method)
assert mock_load.called
_, kwargs = mock_load.call_args
assert "job_config" in kwargs
sent_field = kwargs["job_config"].schema[0].to_api_repr()
assert "policyTags" not in sent_field
def test_load_chunks_with_invalid_api_method():
with pytest.raises(ValueError, match="Got unexpected api_method:"):
load.load_chunks(None, None, None, api_method="not_a_thing")
def test_load_parquet_allows_client_to_generate_schema(mock_bigquery_client):
import google.cloud.bigquery
df = pandas.DataFrame({"int_col": [1, 2, 3]})
destination = google.cloud.bigquery.TableReference.from_string(
"my-project.my_dataset.my_table"
)
load.load_parquet(mock_bigquery_client, df, destination, None, None)
mock_load = mock_bigquery_client.load_table_from_dataframe
assert mock_load.called
_, kwargs = mock_load.call_args
assert "job_config" in kwargs
assert kwargs["job_config"].schema is None
def test_load_parquet_with_bad_conversion(mock_bigquery_client):
import google.cloud.bigquery
import pyarrow
mock_bigquery_client.load_table_from_dataframe.side_effect = (
pyarrow.lib.ArrowInvalid()
)
df = pandas.DataFrame({"int_col": [1, 2, 3]})
destination = google.cloud.bigquery.TableReference.from_string(
"my-project.my_dataset.my_table"
)
with pytest.raises(exceptions.ConversionError):
load.load_parquet(mock_bigquery_client, df, destination, None, None)
@pytest.mark.parametrize(
("numeric_type",),
(
("NUMERIC",),
("DECIMAL",),
("BIGNUMERIC",),
("BIGDECIMAL",),
("numeric",),
("decimal",),
("bignumeric",),
("bigdecimal",),
),
)
def test_cast_dataframe_for_parquet_w_float_numeric(numeric_type):
dataframe = pandas.DataFrame(
{
"row_num": [0, 1, 2],
"num_col": pandas.Series(
# Very much not recommend as the whole point of NUMERIC is to
# be more accurate than a floating point number, but tested to
# keep compatibility with CSV-based uploads. See:
# https://github.com/googleapis/python-bigquery-pandas/issues/421
[1.25, -1.25, 42.5],
dtype="float64",
),
"row_num_2": [0, 1, 2],
},
# Use multiple columns to ensure column order is maintained.
columns=["row_num", "num_col", "row_num_2"],
)
schema = {
"fields": [
{"name": "num_col", "type": numeric_type},
{"name": "not_in_df", "type": "IGNORED"},
]
}
result = load.cast_dataframe_for_parquet(dataframe, schema)
expected = pandas.DataFrame(
{
"row_num": [0, 1, 2],
"num_col": pandas.Series(
[decimal.Decimal(1.25), decimal.Decimal(-1.25), decimal.Decimal(42.5)],
dtype="object",
),
"row_num_2": [0, 1, 2],
},
columns=["row_num", "num_col", "row_num_2"],
)
pandas.testing.assert_frame_equal(result, expected)
def test_cast_dataframe_for_parquet_w_string_date():
dataframe = pandas.DataFrame(
{
"row_num": [0, 1, 2],
"date_col": pandas.Series(
["2021-04-17", "1999-12-31", "2038-01-19"],
dtype="object",
),
"row_num_2": [0, 1, 2],
},
# Use multiple columns to ensure column order is maintained.
columns=["row_num", "date_col", "row_num_2"],
)
schema = {
"fields": [
{"name": "date_col", "type": "DATE"},
{"name": "not_in_df", "type": "IGNORED"},
]
}
result = load.cast_dataframe_for_parquet(dataframe, schema)
expected = pandas.DataFrame(
{
"row_num": [0, 1, 2],
"date_col": pandas.Series(
["2021-04-17", "1999-12-31", "2038-01-19"],
dtype=db_dtypes.DateDtype(),
),
"row_num_2": [0, 1, 2],
},
columns=["row_num", "date_col", "row_num_2"],
)
pandas.testing.assert_frame_equal(result, expected)
def test_cast_dataframe_for_parquet_ignores_repeated_fields():
dataframe = pandas.DataFrame(
{
"row_num": [0, 1, 2],
"repeated_col": pandas.Series(
[
[datetime.date(2021, 4, 17)],
[datetime.date(199, 12, 31)],
[datetime.date(2038, 1, 19)],
],
dtype="object",
),
"row_num_2": [0, 1, 2],
},
# Use multiple columns to ensure column order is maintained.
columns=["row_num", "repeated_col", "row_num_2"],
)
expected = dataframe.copy()
schema = {"fields": [{"name": "repeated_col", "type": "DATE", "mode": "REPEATED"}]}
result = load.cast_dataframe_for_parquet(dataframe, schema)
pandas.testing.assert_frame_equal(result, expected)
def test_cast_dataframe_for_parquet_w_null_fields():
dataframe = | pandas.DataFrame({"int_col": [0, 1, 2], "str_col": ["a", "b", "c"]}) | pandas.DataFrame |
import pandas as pd
import numpy as np
import math
import os
from scipy.interpolate import interp1d
import time
from sklearn.ensemble import RandomForestRegressor
import xgboost as xgb
from lightgbm import LGBMRegressor
from catboost import CatBoostRegressor
from information_measures import *
from joblib import Parallel, delayed
#from arch import arch_model
def rmspe(y_true, y_pred):
return (np.sqrt(np.mean(np.square((y_true - y_pred) / y_true))))
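# Worked example: y_true = [100, 200], y_pred = [110, 180] gives relative errors of
# -0.10 and 0.10, so rmspe = sqrt(mean([0.01, 0.01])) = 0.10.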
def log_return(list_stock_prices): # Stock prices are estimated through wap values
return np.log(list_stock_prices).diff()
def realized_volatility(series_log_return):
return np.sqrt(np.sum(series_log_return**2))
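# realized_volatility computes sqrt(sum of squared log returns) over the window;
# note there is no mean subtraction and no annualisation factor here.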
def compute_wap(book_pd):
wap = (book_pd['bid_price1'] * book_pd['ask_size1'] + book_pd['ask_price1'] * book_pd['bid_size1']) / (book_pd['bid_size1']+ book_pd['ask_size1'])
return wap
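# compute_wap is the level-1 weighted average price: the bid price is weighted by the ask
# size and the ask price by the bid size, so the estimate leans toward the side with more
# resting volume. Example: bid 10.0 (size 1), ask 10.2 (size 3) -> wap = (10.0*3 + 10.2*1)/4 = 10.05.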
def realized_volatility_from_book_pd(book_stock_time):
wap = compute_wap(book_stock_time)
returns = log_return(wap)
volatility = realized_volatility(returns)
return volatility
def realized_volatility_per_time_id(file_path, prediction_column_name):
df_book_data = pd.read_parquet(file_path)
# Estimate stock price per time point
df_book_data['wap'] = compute_wap(df_book_data)
# Compute log return from wap values per time_id
df_book_data['log_return'] = df_book_data.groupby(['time_id'])['wap'].apply(log_return)
df_book_data = df_book_data[~df_book_data['log_return'].isnull()]
# Compute the square root of the sum of log return squared to get realized volatility
df_realized_vol_per_stock = pd.DataFrame(df_book_data.groupby(['time_id'])['log_return'].agg(realized_volatility)).reset_index()
# Formatting
df_realized_vol_per_stock = df_realized_vol_per_stock.rename(columns = {'log_return':prediction_column_name})
stock_id = file_path.split('=')[1]
df_realized_vol_per_stock['row_id'] = df_realized_vol_per_stock['time_id'].apply(lambda x:f'{stock_id}-{x}')
return df_realized_vol_per_stock[['row_id',prediction_column_name]]
def past_realized_volatility_per_stock(list_file,prediction_column_name):
df_past_realized = pd.DataFrame()
for file in list_file:
df_past_realized = pd.concat([df_past_realized,
realized_volatility_per_time_id(file,prediction_column_name)])
return df_past_realized
def stupidForestPrediction(book_path_train,prediction_column_name,train_targets_pd,book_path_test):
naive_predictions_train = past_realized_volatility_per_stock(list_file=book_path_train,prediction_column_name=prediction_column_name)
df_joined_train = train_targets_pd.merge(naive_predictions_train[['row_id','pred']], on = ['row_id'], how = 'left')
X = np.array(df_joined_train['pred']).reshape(-1,1)
y = np.array(df_joined_train['target']).reshape(-1,)
regr = RandomForestRegressor(random_state=0)
regr.fit(X, y)
naive_predictions_test = past_realized_volatility_per_stock(list_file=book_path_test,prediction_column_name='target')
yhat = regr.predict(np.array(naive_predictions_test['target']).reshape(-1,1))
updated_predictions = naive_predictions_test.copy()
updated_predictions['target'] = yhat
return updated_predictions
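# The GARCH helpers below fit a GARCH(1, 1) model to per-bucket midprice log returns and take
# the square root of the summed forecast variances as the volatility prediction. They depend on
# arch_model from the `arch` package, whose import is commented out at the top of this file, so
# `from arch import arch_model` must be restored before calling them.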
def garch_fit_predict_volatility(returns_series, N=10000):
model = arch_model(returns_series * N, p=1, q=1)
model_fit = model.fit(update_freq=0, disp='off')
yhat = model_fit.forecast(horizon=600, reindex=False)
pred_volatility = np.sqrt(np.sum(yhat.variance.values)) / N
return pred_volatility
def garch_volatility_per_time_id(file_path, prediction_column_name):
# read the data
df_book_data = pd.read_parquet(file_path)
# calculate the midprice (not the WAP)
df_book_data['midprice'] =(df_book_data['bid_price1'] + df_book_data['ask_price1'])/2
# leave only WAP for now
df_book_data = df_book_data[['time_id', 'seconds_in_bucket', 'midprice']]
df_book_data = df_book_data.sort_values('seconds_in_bucket')
# make the book updates evenly spaced
df_book_data_evenly = pd.DataFrame({'time_id':np.repeat(df_book_data['time_id'].unique(), 600),
'second':np.tile(range(0,600), df_book_data['time_id'].nunique())})
df_book_data_evenly['second'] = df_book_data_evenly['second'].astype(np.int16)
df_book_data_evenly = df_book_data_evenly.sort_values('second')
df_book_data_evenly = pd.merge_asof(df_book_data_evenly,
df_book_data,
left_on='second',right_on='seconds_in_bucket',
by = 'time_id')
# Ordering for easier use
df_book_data_evenly = df_book_data_evenly[['time_id', 'second', 'midprice']]
df_book_data_evenly = df_book_data_evenly.sort_values(['time_id','second']).reset_index(drop=True)
# calculate log returns
df_book_data_evenly['log_return'] = df_book_data_evenly.groupby(['time_id'])['midprice'].apply(log_return)
df_book_data_evenly = df_book_data_evenly[~df_book_data_evenly['log_return'].isnull()]
# fit GARCH(1, 1) and predict the volatility of returns
df_garch_vol_per_stock = \
pd.DataFrame(df_book_data_evenly.groupby(['time_id'])['log_return'].agg(garch_fit_predict_volatility)).reset_index()
df_garch_vol_per_stock = df_garch_vol_per_stock.rename(columns = {'log_return':prediction_column_name})
# add row_id column to the data
stock_id = file_path.split('=')[1]
df_garch_vol_per_stock['row_id'] = df_garch_vol_per_stock['time_id'].apply(lambda x:f'{stock_id}-{x}')
# return the result
return df_garch_vol_per_stock[['row_id', prediction_column_name]]
def garch_volatility_per_stock(list_file, prediction_column_name):
df_garch_predicted = pd.DataFrame()
for file in list_file:
df_garch_predicted = pd.concat([df_garch_predicted,
garch_volatility_per_time_id(file, prediction_column_name)])
return df_garch_predicted
def entropy_from_book(book_stock_time,last_min):
if last_min < 10:
book_stock_time = book_stock_time[book_stock_time['seconds_in_bucket'] >= (600-last_min*60)]
if book_stock_time.empty == True or book_stock_time.shape[0] < 3:
return 0
wap = compute_wap(book_stock_time)
t_init = book_stock_time['seconds_in_bucket']
t_new = np.arange(np.min(t_init),np.max(t_init))
# Closest neighbour interpolation (no changes in wap between lines)
nearest = interp1d(t_init, wap, kind='nearest')
resampled_wap = nearest(t_new)
# Compute sample entropy
# sampleEntropy = nolds.sampen(resampled_wap)
sampleEntropy = sampen(resampled_wap)
return sampleEntropy
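# entropy_from_book above and entropy_from_wap below both resample the irregular book updates
# onto a 1-second grid with nearest-neighbour interpolation (one observation per second
# regardless of update frequency) and then compute sample entropy via sampen() from
# information_measures.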
def entropy_from_wap(wap,seconds,last_seconds):
if last_seconds < 600:
idx = np.where(seconds >= last_seconds)[0]
if len(idx) < 3:
return 0
else:
wap = wap[idx]
seconds = seconds[idx]
# Closest neighbour interpolation (no changes in wap between lines)
t_new = np.arange(np.min(seconds),np.max(seconds))
nearest = interp1d(seconds, wap, kind='nearest')
resampled_wap = nearest(t_new)
# Compute sample entropy
# sampleEntropy = nolds.sampen(resampled_wap)
sampleEntropy = sampen(resampled_wap)
# sampleEntropy = ApEn_new(resampled_wap,3,0.001)
return sampleEntropy
def linearFit(book_stock_time, last_min):
if last_min < 10:
book_stock_time = book_stock_time[book_stock_time['seconds_in_bucket'] >= (600-last_min*60)]
if book_stock_time.empty == True or book_stock_time.shape[0] < 2:
return 0
wap = np.array(compute_wap(book_stock_time))
t_init = book_stock_time['seconds_in_bucket']
return (wap[-1] - wap[0])/(np.max(t_init) - np.min(t_init))
def wapStat(book_stock_time, last_min):
if last_min < 10:
book_stock_time = book_stock_time[book_stock_time['seconds_in_bucket'] >= (600-last_min*60)]
if book_stock_time.empty == True or book_stock_time.shape[0] < 2:
return 0
wap = compute_wap(book_stock_time)
t_init = book_stock_time['seconds_in_bucket']
t_new = np.arange(np.min(t_init),np.max(t_init))
# Closest neighbour interpolation (no changes in wap between lines)
nearest = interp1d(t_init, wap, kind='nearest')
resampled_wap = nearest(t_new)
return np.std(resampled_wap)
def entropy_Prediction(book_path_train,prediction_column_name,train_targets_pd,book_path_test,all_stocks_ids,test_file):
# Compute features
book_features_encoded_test = computeFeatures_1(book_path_test,'test',test_file,all_stocks_ids)
book_features_encoded_train = computeFeatures_1(book_path_train,'train',train_targets_pd,all_stocks_ids)
X = book_features_encoded_train.drop(['row_id','target','stock_id'],axis=1)
y = book_features_encoded_train['target']
# Modeling
catboost_default = CatBoostRegressor(verbose=0)
catboost_default.fit(X,y)
# Predict
X_test = book_features_encoded_test.drop(['row_id','stock_id'],axis=1)
yhat = catboost_default.predict(X_test)
# Formatting
yhat_pd = pd.DataFrame(yhat,columns=['target'])
predictions = pd.concat([test_file,yhat_pd],axis=1)
return predictions
def computeFeatures_1(book_path,prediction_column_name,train_targets_pd,all_stocks_ids):
book_all_features = pd.DataFrame()
encoder = np.eye(len(all_stocks_ids))
stocks_id_list, row_id_list = [], []
volatility_list, entropy2_list = [], []
linearFit_list, linearFit5_list, linearFit2_list = [], [], []
wap_std_list, wap_std5_list, wap_std2_list = [], [], []
for file in book_path:
start = time.time()
book_stock = pd.read_parquet(file)
stock_id = file.split('=')[1]
print('stock id computing = ' + str(stock_id))
stock_time_ids = book_stock['time_id'].unique()
for time_id in stock_time_ids:
# Access book data at this time + stock
book_stock_time = book_stock[book_stock['time_id'] == time_id]
# Create feature matrix
stocks_id_list.append(stock_id)
row_id_list.append(str(f'{stock_id}-{time_id}'))
volatility_list.append(realized_volatility_from_book_pd(book_stock_time=book_stock_time))
entropy2_list.append(entropy_from_book(book_stock_time=book_stock_time,last_min=2))
linearFit_list.append(linearFit(book_stock_time=book_stock_time,last_min=10))
linearFit5_list.append(linearFit(book_stock_time=book_stock_time,last_min=5))
linearFit2_list.append(linearFit(book_stock_time=book_stock_time,last_min=2))
wap_std_list.append(wapStat(book_stock_time=book_stock_time,last_min=10))
wap_std5_list.append(wapStat(book_stock_time=book_stock_time,last_min=5))
wap_std2_list.append(wapStat(book_stock_time=book_stock_time,last_min=2))
print('Computing one stock entropy took', time.time() - start, 'seconds for stock ', stock_id)
# Merge targets
stocks_id_pd = pd.DataFrame(stocks_id_list,columns=['stock_id'])
row_id_pd = pd.DataFrame(row_id_list,columns=['row_id'])
volatility_pd = pd.DataFrame(volatility_list,columns=['volatility'])
entropy2_pd = pd.DataFrame(entropy2_list,columns=['entropy2'])
linearFit_pd = pd.DataFrame(linearFit_list,columns=['linearFit_coef'])
linearFit5_pd = pd.DataFrame(linearFit5_list,columns=['linearFit_coef5'])
linearFit2_pd = pd.DataFrame(linearFit2_list,columns=['linearFit_coef2'])
wap_std_pd = pd.DataFrame(wap_std_list,columns=['wap_std'])
wap_std5_pd = pd.DataFrame(wap_std5_list,columns=['wap_std5'])
wap_std2_pd = pd.DataFrame(wap_std2_list,columns=['wap_std2'])
book_all_features = pd.concat([stocks_id_pd,row_id_pd,volatility_pd,entropy2_pd,linearFit_pd,linearFit5_pd,linearFit2_pd,
wap_std_pd,wap_std5_pd,wap_std2_pd],axis=1)
# This line makes sure the predictions are aligned with the row_id in the submission file
book_all_features = train_targets_pd.merge(book_all_features, on = ['row_id'])
# Add encoded stock
encoded = list()
for i in range(book_all_features.shape[0]):
stock_id = book_all_features['stock_id'][i]
encoded_stock = encoder[np.where(all_stocks_ids == int(stock_id))[0],:]
encoded.append(encoded_stock)
encoded_pd = pd.DataFrame(np.array(encoded).reshape(book_all_features.shape[0],np.array(all_stocks_ids).shape[0]))
book_all_features_encoded = pd.concat([book_all_features, encoded_pd],axis=1)
return book_all_features_encoded
def calc_wap(df):
return (df['bid_price1'] * df['ask_size1'] + df['ask_price1'] * df['bid_size1']) / (df['bid_size1'] + df['ask_size1'])
def calc_wap2(df):
return (df['bid_price2'] * df['ask_size2'] + df['ask_price2'] * df['bid_size2']) / (df['bid_size2'] + df['ask_size2'])
def calc_wap3(df):
return (df['bid_price2'] * df['bid_size2'] + df['ask_price2'] * df['ask_size2']) / (df['bid_size2'] + df['ask_size2'])
def calc_wap4(df):
return (df['bid_price1'] * df['bid_size1'] + df['ask_price1'] * df['ask_size1']) / (df['bid_size1'] + df['ask_size1'])
def mid_price(df):
return df['bid_price1'] /2 + df['ask_price1'] / 2
def calc_rv_from_wap_numba(values, index):
log_return = np.diff(np.log(values))
realized_vol = np.sqrt(np.sum(np.square(log_return[1:])))
return realized_vol
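# calc_rv_from_wap_numba uses the (values, index) signature that pandas requires for user
# functions when GroupBy.agg is called with engine='numba' (as done further below); values
# and index arrive as NumPy arrays, so the reduction can be JIT-compiled. The index argument
# is unused but must be present.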
def load_book_data_by_id(stock_id,datapath,train_test):
file_to_read = os.path.join(datapath,'book_' + str(train_test) + str('.parquet'),'stock_id=' + str(stock_id))
df = pd.read_parquet(file_to_read)
return df
def load_trades_data_by_id(stock_id,datapath,train_test):
file_to_read = os.path.join(datapath,'trade_' + str(train_test) + str('.parquet'),'stock_id=' + str(stock_id))
df = pd.read_parquet(file_to_read)
return df
def entropy_from_df(df):
if df.shape[0] < 3:
return 0
t_init = df['seconds_in_bucket']
t_new = np.arange(np.min(t_init),np.max(t_init))
# Closest neighbour interpolation (no changes in wap between lines)
nearest = interp1d(t_init, df['wap'], kind='nearest')
resampled_wap = nearest(t_new)
# Compute sample entropy
# sampleEntropy = nolds.sampen(resampled_wap)
sampleEntropy = sampen(resampled_wap)
return sampleEntropy
def entropy_from_df2(df):
if df.shape[0] < 3:
return 0
t_init = df['seconds_in_bucket']
t_new = np.arange(np.min(t_init),np.max(t_init))
# Closest neighbour interpolation (no changes in wap between lines)
nearest = interp1d(t_init, df['wap2'], kind='nearest')
resampled_wap = nearest(t_new)
# Compute sample entropy
# sampleEntropy = nolds.sampen(resampled_wap)
sampleEntropy = sampen(resampled_wap)
return sampleEntropy
def entropy_from_df3(df):
if df.shape[0] < 3:
return 0
t_init = df['seconds_in_bucket']
t_new = np.arange(np.min(t_init),np.max(t_init))
# Closest neighbour interpolation (no changes in wap between lines)
nearest = interp1d(t_init, df['wap3'], kind='nearest')
resampled_wap = nearest(t_new)
# Compute sample entropy
sampleEntropy = sampen(resampled_wap)
return sampleEntropy
def financial_metrics(df):
wap_imbalance = np.mean(df['wap'] - df['wap2'])
price_spread = np.mean((df['ask_price1'] - df['bid_price1']) / ((df['ask_price1'] + df['bid_price1'])/2))
bid_spread = np.mean(df['bid_price1'] - df['bid_price2'])
ask_spread = np.mean(df['ask_price1'] - df['ask_price2'])  # TODO: take the absolute value
total_volume = np.mean((df['ask_size1'] + df['ask_size2']) + (df['bid_size1'] + df['bid_size2']))
volume_imbalance = np.mean(abs((df['ask_size1'] + df['ask_size2']) - (df['bid_size1'] + df['bid_size2'])))
return [wap_imbalance,price_spread,bid_spread,ask_spread,total_volume,volume_imbalance]
def financial_metrics_2(df):
wap_imbalance = df['wap'] - df['wap2']
price_spread = (df['ask_price1'] - df['bid_price1']) / ((df['ask_price1'] + df['bid_price1'])/2)
bid_spread = df['bid_price1'] - df['bid_price2']
ask_spread = df['ask_price1'] - df['ask_price2']  # TODO: take the absolute value
total_volume = (df['ask_size1'] + df['ask_size2']) + (df['bid_size1'] + df['bid_size2'])
volume_imbalance = abs((df['ask_size1'] + df['ask_size2']) - (df['bid_size1'] + df['bid_size2']))
# New features here
wap_imbalance_mean = np.mean(wap_imbalance)
wap_imbalance_sum = np.sum(wap_imbalance)
wap_imbalance_std = np.std(wap_imbalance)
wap_imbalance_max = np.max(wap_imbalance)
wap_imbalance_min = np.min(wap_imbalance)
price_spread_mean = np.mean(price_spread)
price_spread_sum = np.sum(price_spread)
price_spread_std = np.std(price_spread)
price_spread_max = np.max(price_spread)
price_spread_min = np.min(price_spread)
bid_spread_mean = np.mean(bid_spread)
bid_spread_sum = np.sum(bid_spread)
bid_spread_std = np.std(bid_spread)
bid_spread_max = np.max(bid_spread)
bid_spread_min = np.min(bid_spread)
ask_spread_mean = np.mean(ask_spread)
ask_spread_sum = np.sum(ask_spread)
ask_spread_std = np.std(ask_spread)
ask_spread_max = np.max(ask_spread)
ask_spread_min = np.min(ask_spread)
total_volume_mean = np.mean(total_volume)
total_volume_sum = np.sum(total_volume)
total_volume_std = np.std(total_volume)
total_volume_max = np.max(total_volume)
total_volume_min = np.min(total_volume)
volume_imbalance_mean = np.mean(volume_imbalance)
volume_imbalance_sum = np.sum(volume_imbalance)
volume_imbalance_std = np.std(volume_imbalance)
volume_imbalance_max = np.max(volume_imbalance)
volume_imbalance_min = np.min(volume_imbalance)
return [wap_imbalance_mean, price_spread_mean, bid_spread_mean, ask_spread_mean, total_volume_mean, volume_imbalance_mean,
        wap_imbalance_sum, price_spread_sum, bid_spread_sum, ask_spread_sum, total_volume_sum, volume_imbalance_sum,
        wap_imbalance_std, price_spread_std, bid_spread_std, ask_spread_std, total_volume_std, volume_imbalance_std,
        wap_imbalance_max, price_spread_max, bid_spread_max, ask_spread_max, total_volume_max, volume_imbalance_max,
        wap_imbalance_min, price_spread_min, bid_spread_min, ask_spread_min, total_volume_min, volume_imbalance_min]
def other_metrics(df):
if df.shape[0] < 2:
linearFit = 0
linearFit2 = 0
linearFit3 = 0
std_1 = 0
std_2 = 0
std_3 = 0
else:
linearFit = (df['wap'].iloc[-1] - df['wap'].iloc[0]) / ((np.max(df['seconds_in_bucket']) - np.min(df['seconds_in_bucket'])))
linearFit2 = (df['wap2'].iloc[-1] - df['wap2'].iloc[0]) / ((np.max(df['seconds_in_bucket']) - np.min(df['seconds_in_bucket'])))
linearFit3 = (df['wap3'].iloc[-1] - df['wap3'].iloc[0]) / ((np.max(df['seconds_in_bucket']) - np.min(df['seconds_in_bucket'])))
# Resampling
t_init = df['seconds_in_bucket']
t_new = np.arange(np.min(t_init),np.max(t_init))
# Closest neighbour interpolation (no changes in wap between lines)
nearest = interp1d(t_init, df['wap'], kind='nearest')
nearest2 = interp1d(t_init, df['wap2'], kind='nearest')
nearest3 = interp1d(t_init, df['wap3'], kind='nearest')
std_1 = np.std(nearest(t_new))
std_2 = np.std(nearest2(t_new))
std_3 = np.std(nearest3(t_new))
return [linearFit, linearFit2, linearFit3, std_1, std_2, std_3]
def load_book_data_by_id_kaggle(stock_id,train_test):
df = pd.read_parquet(f'../input/optiver-realized-volatility-prediction/book_{train_test}.parquet/stock_id={stock_id}')
return df
def load_trades_data_by_id_kaggle(stock_id,train_test):
df = pd.read_parquet(f'../input/optiver-realized-volatility-prediction/trade_{train_test}.parquet/stock_id={stock_id}')
return df
def computeFeatures_wEntropy(machine, dataset, all_stocks_ids, datapath):
list_rv, list_rv2, list_rv3 = [], [], []
list_ent, list_fin, list_fin2 = [], [], []
list_others, list_others2, list_others3 = [], [], []
for stock_id in range(127):
start = time.time()
if machine == 'local':
try:
book_stock = load_book_data_by_id(stock_id,datapath,dataset)
except:
continue
elif machine == 'kaggle':
try:
book_stock = load_book_data_by_id_kaggle(stock_id,dataset)
except:
continue
# Useful
all_time_ids_byStock = book_stock['time_id'].unique()
# Calculate wap for the book
book_stock['wap'] = calc_wap(book_stock)
book_stock['wap2'] = calc_wap2(book_stock)
book_stock['wap3'] = calc_wap3(book_stock)
# Calculate realized volatility
df_sub = book_stock.groupby('time_id')['wap'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub2 = book_stock.groupby('time_id')['wap2'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub3 = book_stock.groupby('time_id')['wap3'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub['time_id']]
df_sub = pd.concat([df_sub,df_sub2['wap2'],df_sub3['wap3']],axis=1)
df_sub = df_sub.rename(columns={'time_id':'row_id','wap': 'rv', 'wap2': 'rv2', 'wap3': 'rv3'})
# Calculate realized volatility last 5 min
isEmpty = book_stock.query(f'seconds_in_bucket >= 300').empty
if isEmpty == False:
df_sub_5 = book_stock.query(f'seconds_in_bucket >= 300').groupby(['time_id'])['wap'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub2_5 = book_stock.query(f'seconds_in_bucket >= 300').groupby(['time_id'])['wap2'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub3_5 = book_stock.query(f'seconds_in_bucket >= 300').groupby(['time_id'])['wap3'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub_5['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub_5['time_id']]
df_sub_5 = pd.concat([df_sub_5,df_sub2_5['wap2'],df_sub3_5['wap3']],axis=1)
df_sub_5 = df_sub_5.rename(columns={'time_id':'row_id','wap': 'rv_5', 'wap2': 'rv2_5', 'wap3': 'rv3_5'})
else: # 0 volatility
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
zero_rv = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv_5'])
zero_rv2 = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv2_5'])
zero_rv3 = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv3_5'])
df_sub_5 = pd.concat([times_pd,zero_rv,zero_rv2,zero_rv3],axis=1)
# Calculate realized volatility last 2 min
isEmpty = book_stock.query(f'seconds_in_bucket >= 480').empty
if isEmpty == False:
df_sub_2 = book_stock.query(f'seconds_in_bucket >= 480').groupby(['time_id'])['wap'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub2_2 = book_stock.query(f'seconds_in_bucket >= 480').groupby(['time_id'])['wap2'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub3_2 = book_stock.query(f'seconds_in_bucket >= 480').groupby(['time_id'])['wap3'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub_2['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub_2['time_id']]
df_sub_2 = pd.concat([df_sub_2,df_sub2_2['wap2'],df_sub3_2['wap3']],axis=1)
df_sub_2 = df_sub_2.rename(columns={'time_id':'row_id','wap': 'rv_2', 'wap2': 'rv2_2', 'wap3': 'rv3_2'})
else: # 0 volatility
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
zero_rv = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv_2'])
zero_rv2 = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv2_2'])
zero_rv3 = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv3_2'])
df_sub_2 = pd.concat([times_pd,zero_rv,zero_rv2,zero_rv3],axis=1)
list_rv.append(df_sub)
list_rv2.append(df_sub_5)
list_rv3.append(df_sub_2)
# Calculate other financial metrics from book
df_sub_book_feats = book_stock.groupby(['time_id']).apply(financial_metrics).to_frame().reset_index()
df_sub_book_feats = df_sub_book_feats.rename(columns={0:'embedding'})
df_sub_book_feats[['wap_imbalance','price_spread','bid_spread','ask_spread','total_vol','vol_imbalance']] = pd.DataFrame(df_sub_book_feats.embedding.tolist(), index=df_sub_book_feats.index)
df_sub_book_feats['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub_book_feats['time_id']]
df_sub_book_feats = df_sub_book_feats.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
isEmpty = book_stock.query(f'seconds_in_bucket >= 300').empty
if isEmpty == False:
df_sub_book_feats5 = book_stock.query(f'seconds_in_bucket >= 300').groupby(['time_id']).apply(financial_metrics).to_frame().reset_index()
df_sub_book_feats5 = df_sub_book_feats5.rename(columns={0:'embedding'})
df_sub_book_feats5[['wap_imbalance5','price_spread5','bid_spread5','ask_spread5','total_vol5','vol_imbalance5']] = pd.DataFrame(df_sub_book_feats5.embedding.tolist(), index=df_sub_book_feats5.index)
df_sub_book_feats5['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub_book_feats5['time_id']]
df_sub_book_feats5 = df_sub_book_feats5.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
else:
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
temp = pd.DataFrame([0],columns=['wap_imbalance5'])
temp2 = pd.DataFrame([0],columns=['price_spread5'])
temp3 = pd.DataFrame([0],columns=['bid_spread5'])
temp4 = pd.DataFrame([0],columns=['ask_spread5'])
temp5 = pd.DataFrame([0],columns=['total_vol5'])
temp6 = pd.DataFrame([0],columns=['vol_imbalance5'])
df_sub_book_feats5 = pd.concat([times_pd,temp,temp2,temp3,temp4,temp5,temp6],axis=1)
list_fin.append(df_sub_book_feats)
list_fin2.append(df_sub_book_feats5)
# Compute entropy
isEmpty = book_stock.query(f'seconds_in_bucket >= 480').empty
if isEmpty == False:
df_ent = book_stock.query(f'seconds_in_bucket >= 480').groupby(['time_id']).apply(entropy_from_df).to_frame().reset_index().fillna(0)
df_ent2 = book_stock.query(f'seconds_in_bucket >= 480').groupby(['time_id']).apply(entropy_from_df2).to_frame().reset_index().fillna(0)
df_ent3 = book_stock.query(f'seconds_in_bucket >= 480').groupby(['time_id']).apply(entropy_from_df3).to_frame().reset_index().fillna(0)
df_ent['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_ent['time_id']]
df_ent = df_ent.rename(columns={'time_id':'row_id',0:'entropy'})
df_ent2 = df_ent2.rename(columns={0:'entropy2'}).drop(['time_id'],axis=1)
df_ent3 = df_ent3.rename(columns={0:'entropy3'}).drop(['time_id'],axis=1)
df_ent = pd.concat([df_ent,df_ent2,df_ent3],axis=1)
else:
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
temp = pd.DataFrame([0],columns=['entropy'])
temp2 = pd.DataFrame([0],columns=['entropy2'])
temp3 = pd.DataFrame([0],columns=['entropy3'])
df_ent = pd.concat([times_pd,temp,temp2,temp3],axis=1)
list_ent.append(df_ent)
# Compute other metrics
df_others = book_stock.groupby(['time_id']).apply(other_metrics).to_frame().reset_index().fillna(0)
df_others = df_others.rename(columns={0:'embedding'})
df_others[['linearFit1_1','linearFit1_2','linearFit1_3','wap_std1_1','wap_std1_2','wap_std1_3']] = pd.DataFrame(df_others.embedding.tolist(), index=df_others.index)
df_others['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_others['time_id']]
df_others = df_others.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
list_others.append(df_others)
isEmpty = book_stock.query(f'seconds_in_bucket >= 300').empty
if isEmpty == False:
df_others2 = book_stock.query(f'seconds_in_bucket >= 300').groupby(['time_id']).apply(other_metrics).to_frame().reset_index().fillna(0)
df_others2 = df_others2.rename(columns={0:'embedding'})
df_others2[['linearFit2_1','linearFit2_2','linearFit2_3','wap_std2_1','wap_std2_2','wap_std2_3']] = pd.DataFrame(df_others2.embedding.tolist(), index=df_others2.index)
df_others2['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_others2['time_id']]
df_others2 = df_others2.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
else:
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
temp = pd.DataFrame([0],columns=['linearFit2_1'])
temp2 = pd.DataFrame([0],columns=['linearFit2_2'])
temp3 = pd.DataFrame([0],columns=['linearFit2_3'])
temp4 = pd.DataFrame([0],columns=['wap_std2_1'])
temp5 = pd.DataFrame([0],columns=['wap_std2_2'])
temp6 = pd.DataFrame([0],columns=['wap_std2_3'])
df_others2 = pd.concat([times_pd,temp,temp2,temp3,temp4,temp5,temp6],axis=1)
list_others2.append(df_others2)
isEmpty = book_stock.query(f'seconds_in_bucket >= 480').empty
if isEmpty == False:
df_others3 = book_stock.query(f'seconds_in_bucket >= 480').groupby(['time_id']).apply(other_metrics).to_frame().reset_index().fillna(0)
df_others3 = df_others3.rename(columns={0:'embedding'})
df_others3[['linearFit3_1','linearFit3_2','linearFit3_3','wap_std3_1','wap_std3_2','wap_std3_3']] = pd.DataFrame(df_others3.embedding.tolist(), index=df_others3.index)
df_others3['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_others3['time_id']]
df_others3 = df_others3.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
else:
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
temp = pd.DataFrame([0],columns=['linearFit3_1'])
temp2 = pd.DataFrame([0],columns=['linearFit3_2'])
temp3 = pd.DataFrame([0],columns=['linearFit3_3'])
temp4 = pd.DataFrame([0],columns=['wap_std3_1'])
temp5 = pd.DataFrame([0],columns=['wap_std3_2'])
temp6 = pd.DataFrame([0],columns=['wap_std3_3'])
df_others3 = pd.concat([times_pd,temp,temp2,temp3,temp4,temp5,temp6],axis=1)
list_others3.append(df_others3)
print('Computing one stock took', time.time() - start, 'seconds for stock ', stock_id)
# Create features dataframe
df_submission = pd.concat(list_rv)
df_submission2 = pd.concat(list_rv2)
df_submission3 = pd.concat(list_rv3)
df_ent_concat = pd.concat(list_ent)
df_fin_concat = pd.concat(list_fin)
df_fin2_concat = pd.concat(list_fin2)
df_others = pd.concat(list_others)
df_others2 = pd.concat(list_others2)
df_others3 = pd.concat(list_others3)
df_book_features = df_submission.merge(df_submission2, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_submission3, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_ent_concat, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_fin_concat, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_fin2_concat, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_others, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_others2, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_others3, on = ['row_id'], how='left').fillna(0)
# Add encoded stock
encoder = np.eye(len(all_stocks_ids))
encoded = list()
for i in range(df_book_features.shape[0]):
stock_id = int(df_book_features['row_id'][i].split('-')[0])
encoded_stock = encoder[np.where(all_stocks_ids == int(stock_id))[0],:]
encoded.append(encoded_stock)
encoded_pd = pd.DataFrame(np.array(encoded).reshape(df_book_features.shape[0],np.array(all_stocks_ids).shape[0]))
df_book_features_encoded = pd.concat([df_book_features, encoded_pd],axis=1)
return df_book_features_encoded
def computeFeatures_july(machine, dataset, all_stocks_ids, datapath):
list_rv, list_rv2, list_rv3 = [], [], []
list_ent, list_fin, list_fin2 = [], [], []
list_others, list_others2, list_others3 = [], [], []
for stock_id in range(127):
start = time.time()
if machine == 'local':
try:
book_stock = load_book_data_by_id(stock_id,datapath,dataset)
except:
continue
elif machine == 'kaggle':
try:
book_stock = load_book_data_by_id_kaggle(stock_id,dataset)
except:
continue
# Useful
all_time_ids_byStock = book_stock['time_id'].unique()
# Calculate wap for the book
book_stock['wap'] = calc_wap(book_stock)
book_stock['wap2'] = calc_wap2(book_stock)
book_stock['wap3'] = calc_wap3(book_stock)
# Calculate realized volatility
df_sub = book_stock.groupby('time_id')['wap'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub2 = book_stock.groupby('time_id')['wap2'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub3 = book_stock.groupby('time_id')['wap3'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub['time_id']]
df_sub = pd.concat([df_sub,df_sub2['wap2'],df_sub3['wap3']],axis=1)
df_sub = df_sub.rename(columns={'time_id':'row_id','wap': 'rv', 'wap2': 'rv2', 'wap3': 'rv3'})
# Calculate realized volatility last 5 min
isEmpty = book_stock.query(f'seconds_in_bucket >= 300').empty
if isEmpty == False:
df_sub_5 = book_stock.query(f'seconds_in_bucket >= 300').groupby(['time_id'])['wap'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub2_5 = book_stock.query(f'seconds_in_bucket >= 300').groupby(['time_id'])['wap2'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub3_5 = book_stock.query(f'seconds_in_bucket >= 300').groupby(['time_id'])['wap3'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub_5['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub_5['time_id']]
df_sub_5 = pd.concat([df_sub_5,df_sub2_5['wap2'],df_sub3_5['wap3']],axis=1)
df_sub_5 = df_sub_5.rename(columns={'time_id':'row_id','wap': 'rv_5', 'wap2': 'rv2_5', 'wap3': 'rv3_5'})
else: # 0 volatility
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
zero_rv = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv_5'])
zero_rv2 = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv2_5'])
zero_rv3 = pd.DataFrame(np.zeros((1,times_pd.shape[0])),columns=['rv3_5'])
df_sub_5 = pd.concat([times_pd,zero_rv,zero_rv2,zero_rv3],axis=1)
# Calculate realized volatility last 2 min
isEmpty = book_stock.query(f'seconds_in_bucket >= 480').empty
if isEmpty == False:
df_sub_2 = book_stock.query(f'seconds_in_bucket >= 480').groupby(['time_id'])['wap'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub2_2 = book_stock.query(f'seconds_in_bucket >= 480').groupby(['time_id'])['wap2'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub3_2 = book_stock.query(f'seconds_in_bucket >= 480').groupby(['time_id'])['wap3'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub_2['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub_2['time_id']]
df_sub_2 = pd.concat([df_sub_2,df_sub2_2['wap2'],df_sub3_2['wap3']],axis=1)
df_sub_2 = df_sub_2.rename(columns={'time_id':'row_id','wap': 'rv_2', 'wap2': 'rv2_2', 'wap3': 'rv3_2'})
else: # 0 volatility
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
            zero_rv = pd.DataFrame(np.zeros((times_pd.shape[0],1)),columns=['rv_2'])
            zero_rv2 = pd.DataFrame(np.zeros((times_pd.shape[0],1)),columns=['rv2_2'])
            zero_rv3 = pd.DataFrame(np.zeros((times_pd.shape[0],1)),columns=['rv3_2'])
df_sub_2 = pd.concat([times_pd,zero_rv,zero_rv2,zero_rv3],axis=1)
list_rv.append(df_sub)
list_rv2.append(df_sub_5)
list_rv3.append(df_sub_2)
# Calculate other financial metrics from book
df_sub_book_feats = book_stock.groupby(['time_id']).apply(financial_metrics_2).to_frame().reset_index()
df_sub_book_feats = df_sub_book_feats.rename(columns={0:'embedding'})
df_sub_book_feats[['wap_imbalance','price_spread','bid_spread','ask_spread','total_vol','vol_imbalance']] = pd.DataFrame(df_sub_book_feats.embedding.tolist(), index=df_sub_book_feats.index)
df_sub_book_feats['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub_book_feats['time_id']]
df_sub_book_feats = df_sub_book_feats.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
isEmpty = book_stock.query(f'seconds_in_bucket >= 300').empty
if isEmpty == False:
df_sub_book_feats5 = book_stock.query(f'seconds_in_bucket >= 300').groupby(['time_id']).apply(financial_metrics_2).to_frame().reset_index()
df_sub_book_feats5 = df_sub_book_feats5.rename(columns={0:'embedding'})
df_sub_book_feats5[['wap_imbalance5','price_spread5','bid_spread5','ask_spread5','total_vol5','vol_imbalance5']] = pd.DataFrame(df_sub_book_feats5.embedding.tolist(), index=df_sub_book_feats5.index)
df_sub_book_feats5['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub_book_feats5['time_id']]
df_sub_book_feats5 = df_sub_book_feats5.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
else:
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
temp = pd.DataFrame([0],columns=['wap_imbalance5'])
temp2 = pd.DataFrame([0],columns=['price_spread5'])
temp3 = pd.DataFrame([0],columns=['bid_spread5'])
temp4 = pd.DataFrame([0],columns=['ask_spread5'])
temp5 = pd.DataFrame([0],columns=['total_vol5'])
temp6 = pd.DataFrame([0],columns=['vol_imbalance5'])
df_sub_book_feats5 = pd.concat([times_pd,temp,temp2,temp3,temp4,temp5,temp6],axis=1)
list_fin.append(df_sub_book_feats)
list_fin2.append(df_sub_book_feats5)
# Compute other metrics
df_others = book_stock.groupby(['time_id']).apply(other_metrics).to_frame().reset_index().fillna(0)
df_others = df_others.rename(columns={0:'embedding'})
df_others[['linearFit1_1','linearFit1_2','linearFit1_3','wap_std1_1','wap_std1_2','wap_std1_3']] = pd.DataFrame(df_others.embedding.tolist(), index=df_others.index)
df_others['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_others['time_id']]
df_others = df_others.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
list_others.append(df_others)
isEmpty = book_stock.query(f'seconds_in_bucket >= 300').empty
if isEmpty == False:
df_others2 = book_stock.query(f'seconds_in_bucket >= 300').groupby(['time_id']).apply(other_metrics).to_frame().reset_index().fillna(0)
df_others2 = df_others2.rename(columns={0:'embedding'})
df_others2[['linearFit2_1','linearFit2_2','linearFit2_3','wap_std2_1','wap_std2_2','wap_std2_3']] = pd.DataFrame(df_others2.embedding.tolist(), index=df_others2.index)
df_others2['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_others2['time_id']]
df_others2 = df_others2.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
else:
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
temp = pd.DataFrame([0],columns=['linearFit2_1'])
temp2 = pd.DataFrame([0],columns=['linearFit2_2'])
temp3 = pd.DataFrame([0],columns=['linearFit2_3'])
temp4 = pd.DataFrame([0],columns=['wap_std2_1'])
temp5 = pd.DataFrame([0],columns=['wap_std2_2'])
temp6 = pd.DataFrame([0],columns=['wap_std2_3'])
df_others2 = pd.concat([times_pd,temp,temp2,temp3,temp4,temp5,temp6],axis=1)
list_others2.append(df_others2)
isEmpty = book_stock.query(f'seconds_in_bucket >= 480').empty
if isEmpty == False:
df_others3 = book_stock.query(f'seconds_in_bucket >= 480').groupby(['time_id']).apply(other_metrics).to_frame().reset_index().fillna(0)
df_others3 = df_others3.rename(columns={0:'embedding'})
df_others3[['linearFit3_1','linearFit3_2','linearFit3_3','wap_std3_1','wap_std3_2','wap_std3_3']] = pd.DataFrame(df_others3.embedding.tolist(), index=df_others3.index)
df_others3['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_others3['time_id']]
df_others3 = df_others3.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
else:
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
temp = pd.DataFrame([0],columns=['linearFit3_1'])
temp2 = pd.DataFrame([0],columns=['linearFit3_2'])
temp3 = pd.DataFrame([0],columns=['linearFit3_3'])
temp4 = pd.DataFrame([0],columns=['wap_std3_1'])
temp5 = pd.DataFrame([0],columns=['wap_std3_2'])
temp6 = pd.DataFrame([0],columns=['wap_std3_3'])
df_others3 = pd.concat([times_pd,temp,temp2,temp3,temp4,temp5,temp6],axis=1)
list_others3.append(df_others3)
print('Computing one stock took', time.time() - start, 'seconds for stock ', stock_id)
# Create features dataframe
df_submission = pd.concat(list_rv)
df_submission2 = pd.concat(list_rv2)
df_submission3 = pd.concat(list_rv3)
df_ent_concat = pd.concat(list_ent)
df_fin_concat = pd.concat(list_fin)
df_fin2_concat = pd.concat(list_fin2)
df_others = pd.concat(list_others)
df_others2 = pd.concat(list_others2)
df_others3 = pd.concat(list_others3)
df_book_features = df_submission.merge(df_submission2, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_submission3, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_ent_concat, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_fin_concat, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_fin2_concat, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_others, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_others2, on = ['row_id'], how='left').fillna(0)
df_book_features = df_book_features.merge(df_others3, on = ['row_id'], how='left').fillna(0)
# Add encoded stock
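    # np.eye builds an identity matrix whose rows act as one-hot vectors; each row_id's stock
    # prefix selects the row matching that stock's position in all_stocks_ids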
encoder = np.eye(len(all_stocks_ids))
encoded = list()
for i in range(df_book_features.shape[0]):
stock_id = int(df_book_features['row_id'][i].split('-')[0])
encoded_stock = encoder[np.where(all_stocks_ids == int(stock_id))[0],:]
encoded.append(encoded_stock)
encoded_pd = pd.DataFrame(np.array(encoded).reshape(df_book_features.shape[0],np.array(all_stocks_ids).shape[0]))
df_book_features_encoded = pd.concat([df_book_features, encoded_pd],axis=1)
return df_book_features_encoded
def computeFeatures_newTest_Laurent(machine, dataset, all_stocks_ids, datapath):
list_rv, list_rv2, list_rv3 = [], [], []
list_ent, list_fin, list_fin2 = [], [], []
list_others, list_others2, list_others3 = [], [], []
for stock_id in range(127):
start = time.time()
if machine == 'local':
try:
book_stock = load_book_data_by_id(stock_id,datapath,dataset)
except:
continue
elif machine == 'kaggle':
try:
book_stock = load_book_data_by_id_kaggle(stock_id,dataset)
except:
continue
        # Unique time_ids for this stock, reused below when zero-filling empty windows
all_time_ids_byStock = book_stock['time_id'].unique()
# Calculate wap for the entire book
book_stock['wap'] = calc_wap(book_stock)
book_stock['wap2'] = calc_wap2(book_stock)
book_stock['wap3'] = calc_wap3(book_stock)
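        # NOTE: 'wap4' and 'mid_price' reuse calc_wap2/calc_wap3; the source presumably intended
        # dedicated wap4/mid-price helpers, but the assignments are kept as written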
book_stock['wap4'] = calc_wap2(book_stock)
book_stock['mid_price'] = calc_wap3(book_stock)
# Calculate past realized volatility per time_id
df_sub = book_stock.groupby('time_id')['wap'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub2 = book_stock.groupby('time_id')['wap2'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub3 = book_stock.groupby('time_id')['wap3'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub4 = book_stock.groupby('time_id')['wap4'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub5 = book_stock.groupby('time_id')['mid_price'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub['time_id']]
df_sub = df_sub.rename(columns={'time_id':'row_id'})
df_sub = pd.concat([df_sub,df_sub2['wap2'],df_sub3['wap3'], df_sub4['wap4'], df_sub5['mid_price']],axis=1)
df_sub = df_sub.rename(columns={'wap': 'rv', 'wap2': 'rv2', 'wap3': 'rv3', 'wap4':'rv4','mid_price':'rv5'})
list_rv.append(df_sub)
# Query segments
bucketQuery480 = book_stock.query(f'seconds_in_bucket >= 480')
isEmpty480 = bucketQuery480.empty
bucketQuery300 = book_stock.query(f'seconds_in_bucket >= 300')
isEmpty300 = bucketQuery300.empty
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
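        # times_pd carries one row_id per time_id for this stock and is reused by both the
        # query-subset and zero-fill branches below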
# Calculate past realized volatility per time_id and query subset
if isEmpty300 == False:
df_sub_300 = bucketQuery300.groupby(['time_id'])['wap'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub2_300 = bucketQuery300.groupby(['time_id'])['wap2'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub3_300 = bucketQuery300.groupby(['time_id'])['wap3'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub4_300 = bucketQuery300.groupby(['time_id'])['wap4'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub5_300 = bucketQuery300.groupby(['time_id'])['mid_price'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub_300 = pd.concat([times_pd,df_sub_300['wap'],df_sub2_300['wap2'],df_sub3_300['wap3'],df_sub4_300['wap4'],df_sub5_300['mid_price']],axis=1)
            df_sub_300 = df_sub_300.rename(columns={'wap': 'rv_300', 'wap2': 'rv2_300', 'wap3': 'rv3_300', 'wap4':'rv4_300','mid_price':'rv5_300'})
else: # 0 volatility
            zero_rv = pd.DataFrame(np.zeros((times_pd.shape[0],1)),columns=['rv_300'])
            zero_rv2 = pd.DataFrame(np.zeros((times_pd.shape[0],1)),columns=['rv2_300'])
            zero_rv3 = pd.DataFrame(np.zeros((times_pd.shape[0],1)),columns=['rv3_300'])
            zero_rv4 = pd.DataFrame(np.zeros((times_pd.shape[0],1)),columns=['rv4_300'])
            zero_rv5 = pd.DataFrame(np.zeros((times_pd.shape[0],1)),columns=['rv5_300'])
df_sub_300 = pd.concat([times_pd,zero_rv,zero_rv2,zero_rv3,zero_rv4,zero_rv5],axis=1)
list_rv2.append(df_sub_300)
# Calculate realized volatility last 2 min
if isEmpty480 == False:
df_sub_480 = bucketQuery480.groupby(['time_id'])['wap'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub2_480 = bucketQuery480.groupby(['time_id'])['wap2'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub3_480 = bucketQuery480.groupby(['time_id'])['wap3'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub4_480 = bucketQuery480.groupby(['time_id'])['wap4'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub5_480 = bucketQuery480.groupby(['time_id'])['mid_price'].agg(calc_rv_from_wap_numba, engine='numba').to_frame().reset_index()
df_sub_480 = pd.concat([times_pd,df_sub_480['wap'],df_sub2_480['wap2'],df_sub3_480['wap3'],df_sub4_480['wap4'],df_sub5_480['mid_price']],axis=1)
            df_sub_480 = df_sub_480.rename(columns={'wap': 'rv_480', 'wap2': 'rv2_480', 'wap3': 'rv3_480', 'wap4':'rv4_480','mid_price':'rv5_480'})
else: # 0 volatility
            zero_rv = pd.DataFrame(np.zeros((times_pd.shape[0],1)),columns=['rv_480'])
            zero_rv2 = pd.DataFrame(np.zeros((times_pd.shape[0],1)),columns=['rv2_480'])
            zero_rv3 = pd.DataFrame(np.zeros((times_pd.shape[0],1)),columns=['rv3_480'])
            zero_rv4 = pd.DataFrame(np.zeros((times_pd.shape[0],1)),columns=['rv4_480'])
            zero_rv5 = pd.DataFrame(np.zeros((times_pd.shape[0],1)),columns=['rv5_480'])
df_sub_480 = pd.concat([times_pd,zero_rv,zero_rv2,zero_rv3,zero_rv4,zero_rv5],axis=1)
list_rv3.append(df_sub_480)
# Calculate other financial metrics from book
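        # financial_metrics is assumed to return six values per time_id; they land in a single
        # 'embedding' column that is expanded into named feature columns below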
df_sub_book_feats = book_stock.groupby(['time_id']).apply(financial_metrics).to_frame().reset_index()
df_sub_book_feats = df_sub_book_feats.rename(columns={0:'embedding'})
df_sub_book_feats[['wap_imbalance','price_spread','bid_spread','ask_spread','total_vol','vol_imbalance']] = pd.DataFrame(df_sub_book_feats.embedding.tolist(), index=df_sub_book_feats.index)
df_sub_book_feats['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub_book_feats['time_id']]
df_sub_book_feats = df_sub_book_feats.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
list_fin.append(df_sub_book_feats)
if isEmpty300 == False:
df_sub_book_feats_300 = book_stock.query(f'seconds_in_bucket >= 300').groupby(['time_id']).apply(financial_metrics).to_frame().reset_index()
df_sub_book_feats_300 = df_sub_book_feats_300.rename(columns={0:'embedding'})
df_sub_book_feats_300[['wap_imbalance5','price_spread5','bid_spread5','ask_spread5','total_vol5','vol_imbalance5']] = pd.DataFrame(df_sub_book_feats_300.embedding.tolist(), index=df_sub_book_feats_300.index)
df_sub_book_feats_300['time_id'] = [f'{stock_id}-{time_id}' for time_id in df_sub_book_feats_300['time_id']]
df_sub_book_feats_300 = df_sub_book_feats_300.rename(columns={'time_id':'row_id'}).drop(['embedding'],axis=1)
else:
times_pd = pd.DataFrame(all_time_ids_byStock,columns=['time_id'])
times_pd['time_id'] = [f'{stock_id}-{time_id}' for time_id in times_pd['time_id']]
times_pd = times_pd.rename(columns={'time_id':'row_id'})
temp = | pd.DataFrame([0],columns=['wap_imbalance5']) | pandas.DataFrame |
"""
Tests of Tax-Calculator utility functions.
"""
# CODING-STYLE CHECKS:
# pycodestyle test_utils.py
# pylint --disable=locally-disabled test_utils.py
#
# pylint: disable=missing-docstring,no-member,protected-access,too-many-lines
from __future__ import print_function
import os
import math
import random
import numpy as np
import pandas as pd
import pytest
# pylint: disable=import-error
from taxcalc import Policy, Records, Behavior, Calculator
from taxcalc.utils import (DIST_VARIABLES,
DIST_TABLE_COLUMNS, DIST_TABLE_LABELS,
DIFF_VARIABLES,
DIFF_TABLE_COLUMNS, DIFF_TABLE_LABELS,
SOI_AGI_BINS,
create_distribution_table, create_difference_table,
weighted_count_lt_zero, weighted_count_gt_zero,
weighted_count, weighted_sum, weighted_mean,
wage_weighted, agi_weighted,
expanded_income_weighted,
add_income_table_row_variable,
add_quantile_table_row_variable,
mtr_graph_data, atr_graph_data, dec_graph_data,
xtr_graph_plot, write_graph_file,
read_egg_csv, read_egg_json, delete_file,
bootstrap_se_ci,
certainty_equivalent,
ce_aftertax_expanded_income,
nonsmall_diffs,
quantity_response)
DATA = [[1.0, 2, 'a'],
[-1.0, 4, 'a'],
[3.0, 6, 'a'],
[2.0, 4, 'b'],
[3.0, 6, 'b']]
WEIGHT_DATA = [[1.0, 2.0, 10.0],
[2.0, 4.0, 20.0],
[3.0, 6.0, 30.0]]
DATA_FLOAT = [[1.0, 2, 'a'],
[-1.0, 4, 'a'],
[0.0000000001, 3, 'a'],
[-0.0000000001, 1, 'a'],
[3.0, 6, 'a'],
[2.0, 4, 'b'],
[0.0000000001, 3, 'b'],
[-0.0000000001, 1, 'b'],
[3.0, 6, 'b']]
def test_validity_of_name_lists():
assert len(DIST_TABLE_COLUMNS) == len(DIST_TABLE_LABELS)
Records.read_var_info()
assert set(DIST_VARIABLES).issubset(Records.CALCULATED_VARS | {'s006'})
extra_vars_set = set(['num_returns_StandardDed',
'num_returns_ItemDed',
'num_returns_AMT'])
assert (set(DIST_TABLE_COLUMNS) - set(DIST_VARIABLES)) == extra_vars_set
def test_create_tables(cps_subsample):
# pylint: disable=too-many-statements,too-many-branches
# create a current-law Policy object and Calculator object calc1
rec = Records.cps_constructor(data=cps_subsample)
pol = Policy()
calc1 = Calculator(policy=pol, records=rec)
calc1.calc_all()
# create a policy-reform Policy object and Calculator object calc2
reform = {2013: {'_II_rt1': [0.15]}}
pol.implement_reform(reform)
calc2 = Calculator(policy=pol, records=rec)
calc2.calc_all()
test_failure = False
# test creating various difference tables
diff = create_difference_table(calc1.dataframe(DIFF_VARIABLES),
calc2.dataframe(DIFF_VARIABLES),
'standard_income_bins', 'combined')
assert isinstance(diff, pd.DataFrame)
tabcol = 'pc_aftertaxinc'
expected = [np.nan,
np.nan,
-0.20,
-0.67,
-0.78,
-0.71,
-0.82,
-0.79,
-0.73,
-0.64,
-0.23,
-0.09,
-0.06,
-0.58]
if not np.allclose(diff[tabcol].values, expected,
atol=0.005, rtol=0.0, equal_nan=True):
test_failure = True
print('diff xbin', tabcol)
for val in diff[tabcol].values:
print('{:.2f},'.format(val))
diff = create_difference_table(calc1.dataframe(DIFF_VARIABLES),
calc2.dataframe(DIFF_VARIABLES),
'weighted_deciles', 'combined')
assert isinstance(diff, pd.DataFrame)
tabcol = 'tot_change'
expected = [0,
0,
121721713,
1799074733,
2655187813,
3306079845,
4468286112,
5576666034,
7188935504,
8314048550,
10398339206,
9129031991,
52957371499,
5726291219,
2821882221,
580858551]
if not np.allclose(diff[tabcol].values, expected,
atol=0.51, rtol=0.0):
test_failure = True
print('diff xdec', tabcol)
for val in diff[tabcol].values:
print('{:.0f},'.format(val))
tabcol = 'share_of_change'
expected = [0.00,
0.00,
0.23,
3.40,
5.01,
6.24,
8.44,
10.53,
13.57,
15.70,
19.64,
17.24,
100.00,
10.81,
5.33,
1.10]
if not np.allclose(diff[tabcol].values, expected,
atol=0.005, rtol=0.0):
test_failure = True
print('diff xdec', tabcol)
for val in diff[tabcol].values:
print('{:.2f},'.format(val))
tabcol = 'pc_aftertaxinc'
expected = [np.nan,
np.nan,
-0.15,
-0.76,
-0.78,
-0.75,
-0.79,
-0.79,
-0.79,
-0.72,
-0.68,
-0.28,
-0.58,
-0.53,
-0.23,
-0.06]
if not np.allclose(diff[tabcol].values, expected,
atol=0.005, rtol=0.0, equal_nan=True):
test_failure = True
print('diff xdec', tabcol)
for val in diff[tabcol].values:
print('{:.2f},'.format(val))
# test creating various distribution tables
dist, _ = calc2.distribution_tables(None, 'weighted_deciles')
assert isinstance(dist, pd.DataFrame)
tabcol = 'iitax'
expected = [0,
0,
-2439074403,
-1234901725,
-609273185,
2687658386,
19501356849,
29465049377,
48681577048,
88747972386,
163479377840,
709224809867,
1057504552440,
153548408569,
219064860852,
336611540446]
if not np.allclose(dist[tabcol].values, expected,
atol=0.5, rtol=0.0):
test_failure = True
print('dist xdec', tabcol)
for val in dist[tabcol].values:
print('{:.0f},'.format(val))
tabcol = 'num_returns_ItemDed'
expected = [0,
0,
326236,
1253241,
2240460,
2828475,
4741957,
5510030,
6883022,
8358806,
10667610,
12037635,
54847474,
5893249,
4820479,
1323906]
if not np.allclose(dist[tabcol].tolist(), expected,
atol=0.5, rtol=0.0):
test_failure = True
print('dist xdec', tabcol)
for val in dist[tabcol].values:
print('{:.0f},'.format(val))
tabcol = 'expanded_income'
expected = [0,
0,
87249858210,
258005174639,
369648687648,
482950933444,
637031080899,
799835240295,
1047137967700,
1349212863519,
1849316366473,
4236199144621,
11116587317446,
1362651371493,
1589763961227,
1283783811901]
if not np.allclose(dist[tabcol].tolist(), expected,
atol=0.5, rtol=0.0):
test_failure = True
print('dist xdec', tabcol)
for val in dist[tabcol].values:
print('{:.0f},'.format(val))
tabcol = 'aftertax_income'
expected = [0,
0,
82063918307,
234849286479,
336461183613,
435772857489,
559917984490,
697963511720,
906200715535,
1150438396510,
1516372357769,
3226734653812,
9146774865725,
1082675191375,
1250757557050,
893301905386]
if not np.allclose(dist[tabcol].tolist(), expected,
atol=0.5, rtol=0.0):
test_failure = True
print('dist xdec', tabcol)
for val in dist[tabcol].values:
print('{:.0f},'.format(val))
dist, _ = calc2.distribution_tables(None, 'standard_income_bins')
assert isinstance(dist, pd.DataFrame)
tabcol = 'iitax'
expected = [0,
0,
-822217116,
-2113487293,
-1785384383,
4299002729,
21451400591,
62343670148,
93389591704,
293234582500,
292465924986,
100158506284,
194882962290,
1057504552440]
if not np.allclose(dist[tabcol], expected,
atol=0.5, rtol=0.0):
test_failure = True
print('dist xbin', tabcol)
for val in dist[tabcol].values:
print('{:.0f},'.format(val))
tabcol = 'num_returns_ItemDed'
expected = [0,
0,
60455,
1302001,
2927384,
3350721,
4499431,
10181119,
8996491,
16350238,
6326459,
541189,
311987,
54847474]
if not np.allclose(dist[tabcol].tolist(), expected,
atol=0.5, rtol=0.0):
test_failure = True
print('dist xbin', tabcol)
for val in dist[tabcol].values:
print('{:.0f},'.format(val))
if test_failure:
assert 1 == 2
def test_diff_count_precision():
"""
Estimate bootstrap standard error and confidence interval for count
statistics ('tax_cut' and 'tax_inc') in difference table generated
using puf.csv input data taking no account of tbi privacy fuzzing and
assuming all filing units in each bin have the same weight. These
assumptions imply that the estimates produced here are likely to
over-estimate the precision of the count statistics.
Background information on unweighted number of filing units by bin:
DECILE BINS:
0 16268
1 14897
2 13620
3 15760
4 16426
5 18070
6 18348
7 19352
8 21051
9 61733 <--- largest unweighted bin count
A 215525
STANDARD BINS:
0 7081 <--- negative income bin is dropped in TaxBrain display
1 19355
2 22722
3 20098
4 17088
5 14515
6 24760
7 15875
8 25225
9 15123
10 10570 <--- smallest unweighted bin count
11 23113 <--- second largest unweighted WEBAPP bin count
A 215525
Background information on Trump2017.json reform used in TaxBrain run 16649:
STANDARD bin 10 ($500-1000 thousand) has weighted count of 1179 thousand;
weighted count of units with tax increase is 32 thousand.
So, the mean weight for all units in STANDARD bin 10 is 111.5421 and the
unweighted number with a tax increase is 287 assuming all units in that
bin have the same weight. (Note that 287 * 111.5421 is about 32,012.58,
which rounds to the 32 thousand shown in the TaxBrain difference table.)
STANDARD bin 11 ($1000+ thousand) has weighted count of 636 thousand;
weighted count of units with tax increase is 27 thousand.
So, the mean weight for all units in STANDARD bin 11 is about 27.517 and
the unweighted number with a tax increase is 981 assuming all units in
that bin have the same weight. (Note that 981 * 27.517 is about 26,994.18,
which rounds to the 27 thousand shown in the TaxBrain difference table.)
"""
dump = False # setting to True implies results printed and test fails
seed = 123456789
bs_samples = 1000
alpha = 0.025 # implies 95% confidence interval
# compute stderr and confidence interval for STANDARD bin 10 increase count
data_list = [111.5421] * 287 + [0.0] * (10570 - 287)
assert len(data_list) == 10570
data = np.array(data_list)
assert (data > 0).sum() == 287
data_estimate = np.sum(data) * 1e-3
assert abs((data_estimate / 32) - 1) < 0.0005
bsd = bootstrap_se_ci(data, seed, bs_samples, np.sum, alpha)
stderr = bsd['se'] * 1e-3
cilo = bsd['cilo'] * 1e-3
cihi = bsd['cihi'] * 1e-3
if dump:
res = '{}EST={:.1f} B={} alpha={:.3f} se={:.2f} ci=[ {:.2f} , {:.2f} ]'
print(
res.format('STANDARD-BIN10: ',
data_estimate, bs_samples, alpha, stderr, cilo, cihi)
)
assert abs((stderr / 1.90) - 1) < 0.0008
# NOTE: a se of 1.90 thousand implies that when comparing the difference
# in the weighted number of filing units in STANDARD bin 10 with a
# tax increase, the difference statistic has a bigger se (because
# the variance of the difference is the sum of the variances of the
# two point estimates). So, in STANDARD bin 10 if the point
# estimates both had se = 1.90, then the difference in the point
    #       estimates has a se = 2.687. This means that the difference
# would have to be over 5 thousand in order for there to be high
# confidence that the difference was different from zero in a
# statistically significant manner.
# Or put a different way, a difference of 1 thousand cannot be
# accurately detected while a difference of 10 thousand can be
# accurately detected.
assert abs((cilo / 28.33) - 1) < 0.0012
assert abs((cihi / 35.81) - 1) < 0.0012
# compute stderr and confidence interval for STANDARD bin 11 increase count
data_list = [27.517] * 981 + [0.0] * (23113 - 981)
assert len(data_list) == 23113
data = np.array(data_list)
assert (data > 0).sum() == 981
data_estimate = np.sum(data) * 1e-3
assert abs((data_estimate / 27) - 1) < 0.0005
bsd = bootstrap_se_ci(data, seed, bs_samples, np.sum, alpha)
stderr = bsd['se'] * 1e-3
cilo = bsd['cilo'] * 1e-3
cihi = bsd['cihi'] * 1e-3
if dump:
res = '{}EST={:.1f} B={} alpha={:.3f} se={:.2f} ci=[ {:.2f} , {:.2f} ]'
print(
res.format('STANDARD-BIN11: ',
data_estimate, bs_samples, alpha, stderr, cilo, cihi)
)
assert abs((stderr / 0.85) - 1) < 0.0040
# NOTE: a se of 0.85 thousand implies that when comparing the difference
# in the weighted number of filing units in STANDARD bin 11 with a
# tax increase, the difference statistic has a bigger se (because
# the variance of the difference is the sum of the variances of the
# two point estimates). So, in STANDARD bin 11 if point estimates
    #       both had se = 0.85, then the difference in the point estimates
# has a se = 1.20. This means that the difference would have to be
# over 2.5 thousand in order for there to be high confidence that the
# difference was different from zero in a statistically significant
# manner.
# Or put a different way, a difference of 1 thousand cannot be
# accurately detected while a difference of 10 thousand can be
# accurately detected.
assert abs((cilo / 25.37) - 1) < 0.0012
assert abs((cihi / 28.65) - 1) < 0.0012
# fail if doing dump
assert not dump
def test_weighted_count_lt_zero():
df1 = pd.DataFrame(data=DATA, columns=['tax_diff', 's006', 'label'])
grped = df1.groupby('label')
diffs = grped.apply(weighted_count_lt_zero, 'tax_diff')
exp = pd.Series(data=[4, 0], index=['a', 'b'])
exp.index.name = 'label'
pd.util.testing.assert_series_equal(exp, diffs)
df2 = pd.DataFrame(data=DATA_FLOAT, columns=['tax_diff', 's006', 'label'])
grped = df2.groupby('label')
diffs = grped.apply(weighted_count_lt_zero, 'tax_diff')
exp = pd.Series(data=[4, 0], index=['a', 'b'])
exp.index.name = 'label'
pd.util.testing.assert_series_equal(exp, diffs)
def test_weighted_count_gt_zero():
df1 = pd.DataFrame(data=DATA, columns=['tax_diff', 's006', 'label'])
grped = df1.groupby('label')
diffs = grped.apply(weighted_count_gt_zero, 'tax_diff')
exp = | pd.Series(data=[8, 10], index=['a', 'b']) | pandas.Series |
"""SQL io tests
The SQL tests are broken down in different classes:
- `PandasSQLTest`: base class with common methods for all test classes
- Tests for the public API (only tests with sqlite3)
- `_TestSQLApi` base class
- `TestSQLApi`: test the public API with sqlalchemy engine
- `TestSQLiteFallbackApi`: test the public API with a sqlite DBAPI
connection
- Tests for the different SQL flavors (flavor specific type conversions)
- Tests for the sqlalchemy mode: `_TestSQLAlchemy` is the base class with
common methods, `_TestSQLAlchemyConn` tests the API with a SQLAlchemy
Connection object. The different tested flavors (sqlite3, MySQL,
PostgreSQL) derive from the base class
- Tests for the fallback mode (`TestSQLiteFallback`)
"""
import csv
from datetime import date, datetime, time
from io import StringIO
import sqlite3
import warnings
import numpy as np
import pytest
from pandas.core.dtypes.common import is_datetime64_dtype, is_datetime64tz_dtype
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
Series,
Timestamp,
concat,
date_range,
isna,
to_datetime,
to_timedelta,
)
import pandas._testing as tm
import pandas.io.sql as sql
from pandas.io.sql import read_sql_query, read_sql_table
try:
import sqlalchemy
import sqlalchemy.schema
import sqlalchemy.sql.sqltypes as sqltypes
from sqlalchemy.ext import declarative
from sqlalchemy.orm import session as sa_session
SQLALCHEMY_INSTALLED = True
except ImportError:
SQLALCHEMY_INSTALLED = False
SQL_STRINGS = {
"create_iris": {
"sqlite": """CREATE TABLE iris (
"SepalLength" REAL,
"SepalWidth" REAL,
"PetalLength" REAL,
"PetalWidth" REAL,
"Name" TEXT
)""",
"mysql": """CREATE TABLE iris (
`SepalLength` DOUBLE,
`SepalWidth` DOUBLE,
`PetalLength` DOUBLE,
`PetalWidth` DOUBLE,
`Name` VARCHAR(200)
)""",
"postgresql": """CREATE TABLE iris (
"SepalLength" DOUBLE PRECISION,
"SepalWidth" DOUBLE PRECISION,
"PetalLength" DOUBLE PRECISION,
"PetalWidth" DOUBLE PRECISION,
"Name" VARCHAR(200)
)""",
},
"insert_iris": {
"sqlite": """INSERT INTO iris VALUES(?, ?, ?, ?, ?)""",
"mysql": """INSERT INTO iris VALUES(%s, %s, %s, %s, "%s");""",
"postgresql": """INSERT INTO iris VALUES(%s, %s, %s, %s, %s);""",
},
"create_test_types": {
"sqlite": """CREATE TABLE types_test_data (
"TextCol" TEXT,
"DateCol" TEXT,
"IntDateCol" INTEGER,
"IntDateOnlyCol" INTEGER,
"FloatCol" REAL,
"IntCol" INTEGER,
"BoolCol" INTEGER,
"IntColWithNull" INTEGER,
"BoolColWithNull" INTEGER
)""",
"mysql": """CREATE TABLE types_test_data (
`TextCol` TEXT,
`DateCol` DATETIME,
`IntDateCol` INTEGER,
`IntDateOnlyCol` INTEGER,
`FloatCol` DOUBLE,
`IntCol` INTEGER,
`BoolCol` BOOLEAN,
`IntColWithNull` INTEGER,
`BoolColWithNull` BOOLEAN
)""",
"postgresql": """CREATE TABLE types_test_data (
"TextCol" TEXT,
"DateCol" TIMESTAMP,
"DateColWithTz" TIMESTAMP WITH TIME ZONE,
"IntDateCol" INTEGER,
"IntDateOnlyCol" INTEGER,
"FloatCol" DOUBLE PRECISION,
"IntCol" INTEGER,
"BoolCol" BOOLEAN,
"IntColWithNull" INTEGER,
"BoolColWithNull" BOOLEAN
)""",
},
"insert_test_types": {
"sqlite": {
"query": """
INSERT INTO types_test_data
VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?)
""",
"fields": (
"TextCol",
"DateCol",
"IntDateCol",
"IntDateOnlyCol",
"FloatCol",
"IntCol",
"BoolCol",
"IntColWithNull",
"BoolColWithNull",
),
},
"mysql": {
"query": """
INSERT INTO types_test_data
VALUES("%s", %s, %s, %s, %s, %s, %s, %s, %s)
""",
"fields": (
"TextCol",
"DateCol",
"IntDateCol",
"IntDateOnlyCol",
"FloatCol",
"IntCol",
"BoolCol",
"IntColWithNull",
"BoolColWithNull",
),
},
"postgresql": {
"query": """
INSERT INTO types_test_data
VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
""",
"fields": (
"TextCol",
"DateCol",
"DateColWithTz",
"IntDateCol",
"IntDateOnlyCol",
"FloatCol",
"IntCol",
"BoolCol",
"IntColWithNull",
"BoolColWithNull",
),
},
},
"read_parameters": {
"sqlite": "SELECT * FROM iris WHERE Name=? AND SepalLength=?",
"mysql": 'SELECT * FROM iris WHERE `Name`="%s" AND `SepalLength`=%s',
"postgresql": 'SELECT * FROM iris WHERE "Name"=%s AND "SepalLength"=%s',
},
"read_named_parameters": {
"sqlite": """
SELECT * FROM iris WHERE Name=:name AND SepalLength=:length
""",
"mysql": """
SELECT * FROM iris WHERE
`Name`="%(name)s" AND `SepalLength`=%(length)s
""",
"postgresql": """
SELECT * FROM iris WHERE
"Name"=%(name)s AND "SepalLength"=%(length)s
""",
},
"create_view": {
"sqlite": """
CREATE VIEW iris_view AS
SELECT * FROM iris
"""
},
}
class MixInBase:
def teardown_method(self, method):
# if setup fails, there may not be a connection to close.
if hasattr(self, "conn"):
for tbl in self._get_all_tables():
self.drop_table(tbl)
self._close_conn()
class MySQLMixIn(MixInBase):
def drop_table(self, table_name):
cur = self.conn.cursor()
cur.execute(f"DROP TABLE IF EXISTS {sql._get_valid_mysql_name(table_name)}")
self.conn.commit()
def _get_all_tables(self):
cur = self.conn.cursor()
cur.execute("SHOW TABLES")
return [table[0] for table in cur.fetchall()]
def _close_conn(self):
from pymysql.err import Error
try:
self.conn.close()
except Error:
pass
class SQLiteMixIn(MixInBase):
def drop_table(self, table_name):
self.conn.execute(
f"DROP TABLE IF EXISTS {sql._get_valid_sqlite_name(table_name)}"
)
self.conn.commit()
def _get_all_tables(self):
c = self.conn.execute("SELECT name FROM sqlite_master WHERE type='table'")
return [table[0] for table in c.fetchall()]
def _close_conn(self):
self.conn.close()
class SQLAlchemyMixIn(MixInBase):
def drop_table(self, table_name):
sql.SQLDatabase(self.conn).drop_table(table_name)
def _get_all_tables(self):
meta = sqlalchemy.schema.MetaData(bind=self.conn)
meta.reflect()
table_list = meta.tables.keys()
return table_list
def _close_conn(self):
pass
class PandasSQLTest:
"""
Base class with common private methods for SQLAlchemy and fallback cases.
"""
def _get_exec(self):
if hasattr(self.conn, "execute"):
return self.conn
else:
return self.conn.cursor()
@pytest.fixture(params=[("data", "iris.csv")])
def load_iris_data(self, datapath, request):
import io
iris_csv_file = datapath(*request.param)
if not hasattr(self, "conn"):
self.setup_connect()
self.drop_table("iris")
self._get_exec().execute(SQL_STRINGS["create_iris"][self.flavor])
with io.open(iris_csv_file, mode="r", newline=None) as iris_csv:
r = csv.reader(iris_csv)
next(r) # skip header row
ins = SQL_STRINGS["insert_iris"][self.flavor]
for row in r:
self._get_exec().execute(ins, row)
def _load_iris_view(self):
self.drop_table("iris_view")
self._get_exec().execute(SQL_STRINGS["create_view"][self.flavor])
def _check_iris_loaded_frame(self, iris_frame):
pytype = iris_frame.dtypes[0].type
row = iris_frame.iloc[0]
assert issubclass(pytype, np.floating)
tm.equalContents(row.values, [5.1, 3.5, 1.4, 0.2, "Iris-setosa"])
def _load_test1_data(self):
columns = ["index", "A", "B", "C", "D"]
data = [
(
"2000-01-03 00:00:00",
0.980268513777,
3.68573087906,
-0.364216805298,
-1.15973806169,
),
(
"2000-01-04 00:00:00",
1.04791624281,
-0.0412318367011,
-0.16181208307,
0.212549316967,
),
(
"2000-01-05 00:00:00",
0.498580885705,
0.731167677815,
-0.537677223318,
1.34627041952,
),
(
"2000-01-06 00:00:00",
1.12020151869,
1.56762092543,
0.00364077397681,
0.67525259227,
),
]
self.test_frame1 = DataFrame(data, columns=columns)
def _load_test2_data(self):
df = DataFrame(
dict(
A=[4, 1, 3, 6],
B=["asd", "gsq", "ylt", "jkl"],
C=[1.1, 3.1, 6.9, 5.3],
D=[False, True, True, False],
E=["1990-11-22", "1991-10-26", "1993-11-26", "1995-12-12"],
)
)
df["E"] = to_datetime(df["E"])
self.test_frame2 = df
def _load_test3_data(self):
columns = ["index", "A", "B"]
data = [
("2000-01-03 00:00:00", 2 ** 31 - 1, -1.987670),
("2000-01-04 00:00:00", -29, -0.0412318367011),
("2000-01-05 00:00:00", 20000, 0.731167677815),
("2000-01-06 00:00:00", -290867, 1.56762092543),
]
self.test_frame3 = DataFrame(data, columns=columns)
def _load_raw_sql(self):
self.drop_table("types_test_data")
self._get_exec().execute(SQL_STRINGS["create_test_types"][self.flavor])
ins = SQL_STRINGS["insert_test_types"][self.flavor]
data = [
{
"TextCol": "first",
"DateCol": "2000-01-03 00:00:00",
"DateColWithTz": "2000-01-01 00:00:00-08:00",
"IntDateCol": 535852800,
"IntDateOnlyCol": 20101010,
"FloatCol": 10.10,
"IntCol": 1,
"BoolCol": False,
"IntColWithNull": 1,
"BoolColWithNull": False,
},
{
"TextCol": "first",
"DateCol": "2000-01-04 00:00:00",
"DateColWithTz": "2000-06-01 00:00:00-07:00",
"IntDateCol": 1356998400,
"IntDateOnlyCol": 20101212,
"FloatCol": 10.10,
"IntCol": 1,
"BoolCol": False,
"IntColWithNull": None,
"BoolColWithNull": None,
},
]
for d in data:
self._get_exec().execute(
ins["query"], [d[field] for field in ins["fields"]]
)
def _count_rows(self, table_name):
result = (
self._get_exec()
.execute(f"SELECT count(*) AS count_1 FROM {table_name}")
.fetchone()
)
return result[0]
def _read_sql_iris(self):
iris_frame = self.pandasSQL.read_query("SELECT * FROM iris")
self._check_iris_loaded_frame(iris_frame)
def _read_sql_iris_parameter(self):
query = SQL_STRINGS["read_parameters"][self.flavor]
params = ["Iris-setosa", 5.1]
iris_frame = self.pandasSQL.read_query(query, params=params)
self._check_iris_loaded_frame(iris_frame)
def _read_sql_iris_named_parameter(self):
query = SQL_STRINGS["read_named_parameters"][self.flavor]
params = {"name": "Iris-setosa", "length": 5.1}
iris_frame = self.pandasSQL.read_query(query, params=params)
self._check_iris_loaded_frame(iris_frame)
def _to_sql(self, method=None):
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", method=method)
assert self.pandasSQL.has_table("test_frame1")
num_entries = len(self.test_frame1)
num_rows = self._count_rows("test_frame1")
assert num_rows == num_entries
# Nuke table
self.drop_table("test_frame1")
def _to_sql_empty(self):
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1.iloc[:0], "test_frame1")
def _to_sql_fail(self):
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="fail")
assert self.pandasSQL.has_table("test_frame1")
msg = "Table 'test_frame1' already exists"
with pytest.raises(ValueError, match=msg):
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="fail")
self.drop_table("test_frame1")
def _to_sql_replace(self):
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="fail")
# Add to table again
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="replace")
assert self.pandasSQL.has_table("test_frame1")
num_entries = len(self.test_frame1)
num_rows = self._count_rows("test_frame1")
assert num_rows == num_entries
self.drop_table("test_frame1")
def _to_sql_append(self):
# Nuke table just in case
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="fail")
# Add to table again
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="append")
assert self.pandasSQL.has_table("test_frame1")
num_entries = 2 * len(self.test_frame1)
num_rows = self._count_rows("test_frame1")
assert num_rows == num_entries
self.drop_table("test_frame1")
def _to_sql_method_callable(self):
check = [] # used to double check function below is really being used
def sample(pd_table, conn, keys, data_iter):
check.append(1)
data = [dict(zip(keys, row)) for row in data_iter]
conn.execute(pd_table.table.insert(), data)
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", method=sample)
assert self.pandasSQL.has_table("test_frame1")
assert check == [1]
num_entries = len(self.test_frame1)
num_rows = self._count_rows("test_frame1")
assert num_rows == num_entries
# Nuke table
self.drop_table("test_frame1")
def _roundtrip(self):
self.drop_table("test_frame_roundtrip")
self.pandasSQL.to_sql(self.test_frame1, "test_frame_roundtrip")
result = self.pandasSQL.read_query("SELECT * FROM test_frame_roundtrip")
result.set_index("level_0", inplace=True)
# result.index.astype(int)
result.index.name = None
tm.assert_frame_equal(result, self.test_frame1)
def _execute_sql(self):
# drop_sql = "DROP TABLE IF EXISTS test" # should already be done
iris_results = self.pandasSQL.execute("SELECT * FROM iris")
row = iris_results.fetchone()
tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, "Iris-setosa"])
def _to_sql_save_index(self):
df = DataFrame.from_records(
[(1, 2.1, "line1"), (2, 1.5, "line2")], columns=["A", "B", "C"], index=["A"]
)
self.pandasSQL.to_sql(df, "test_to_sql_saves_index")
ix_cols = self._get_index_columns("test_to_sql_saves_index")
assert ix_cols == [["A"]]
def _transaction_test(self):
with self.pandasSQL.run_transaction() as trans:
trans.execute("CREATE TABLE test_trans (A INT, B TEXT)")
class DummyException(Exception):
pass
# Make sure when transaction is rolled back, no rows get inserted
ins_sql = "INSERT INTO test_trans (A,B) VALUES (1, 'blah')"
try:
with self.pandasSQL.run_transaction() as trans:
trans.execute(ins_sql)
raise DummyException("error")
except DummyException:
# ignore raised exception
pass
res = self.pandasSQL.read_query("SELECT * FROM test_trans")
assert len(res) == 0
# Make sure when transaction is committed, rows do get inserted
with self.pandasSQL.run_transaction() as trans:
trans.execute(ins_sql)
res2 = self.pandasSQL.read_query("SELECT * FROM test_trans")
assert len(res2) == 1
# -----------------------------------------------------------------------------
# -- Testing the public API
class _TestSQLApi(PandasSQLTest):
"""
Base class to test the public API.
    From this, two classes are derived to run these tests for both the
sqlalchemy mode (`TestSQLApi`) and the fallback mode
(`TestSQLiteFallbackApi`). These tests are run with sqlite3. Specific
tests for the different sql flavours are included in `_TestSQLAlchemy`.
Notes:
    flavor can always be passed even in SQLAlchemy mode; it should be
    correctly ignored.
    We don't use drop_table because that isn't part of the public api.
"""
flavor = "sqlite"
mode: str
def setup_connect(self):
self.conn = self.connect()
@pytest.fixture(autouse=True)
def setup_method(self, load_iris_data):
self.load_test_data_and_sql()
def load_test_data_and_sql(self):
self._load_iris_view()
self._load_test1_data()
self._load_test2_data()
self._load_test3_data()
self._load_raw_sql()
def test_read_sql_iris(self):
iris_frame = sql.read_sql_query("SELECT * FROM iris", self.conn)
self._check_iris_loaded_frame(iris_frame)
def test_read_sql_view(self):
iris_frame = sql.read_sql_query("SELECT * FROM iris_view", self.conn)
self._check_iris_loaded_frame(iris_frame)
def test_to_sql(self):
sql.to_sql(self.test_frame1, "test_frame1", self.conn)
assert sql.has_table("test_frame1", self.conn)
def test_to_sql_fail(self):
sql.to_sql(self.test_frame1, "test_frame2", self.conn, if_exists="fail")
assert sql.has_table("test_frame2", self.conn)
msg = "Table 'test_frame2' already exists"
with pytest.raises(ValueError, match=msg):
sql.to_sql(self.test_frame1, "test_frame2", self.conn, if_exists="fail")
def test_to_sql_replace(self):
sql.to_sql(self.test_frame1, "test_frame3", self.conn, if_exists="fail")
# Add to table again
sql.to_sql(self.test_frame1, "test_frame3", self.conn, if_exists="replace")
assert sql.has_table("test_frame3", self.conn)
num_entries = len(self.test_frame1)
num_rows = self._count_rows("test_frame3")
assert num_rows == num_entries
def test_to_sql_append(self):
sql.to_sql(self.test_frame1, "test_frame4", self.conn, if_exists="fail")
# Add to table again
sql.to_sql(self.test_frame1, "test_frame4", self.conn, if_exists="append")
assert sql.has_table("test_frame4", self.conn)
num_entries = 2 * len(self.test_frame1)
num_rows = self._count_rows("test_frame4")
assert num_rows == num_entries
def test_to_sql_type_mapping(self):
sql.to_sql(self.test_frame3, "test_frame5", self.conn, index=False)
result = sql.read_sql("SELECT * FROM test_frame5", self.conn)
tm.assert_frame_equal(self.test_frame3, result)
def test_to_sql_series(self):
s = Series(np.arange(5, dtype="int64"), name="series")
sql.to_sql(s, "test_series", self.conn, index=False)
s2 = sql.read_sql_query("SELECT * FROM test_series", self.conn)
tm.assert_frame_equal(s.to_frame(), s2)
def test_roundtrip(self):
sql.to_sql(self.test_frame1, "test_frame_roundtrip", con=self.conn)
result = sql.read_sql_query("SELECT * FROM test_frame_roundtrip", con=self.conn)
# HACK!
result.index = self.test_frame1.index
result.set_index("level_0", inplace=True)
result.index.astype(int)
result.index.name = None
tm.assert_frame_equal(result, self.test_frame1)
def test_roundtrip_chunksize(self):
sql.to_sql(
self.test_frame1,
"test_frame_roundtrip",
con=self.conn,
index=False,
chunksize=2,
)
result = sql.read_sql_query("SELECT * FROM test_frame_roundtrip", con=self.conn)
tm.assert_frame_equal(result, self.test_frame1)
def test_execute_sql(self):
# drop_sql = "DROP TABLE IF EXISTS test" # should already be done
iris_results = sql.execute("SELECT * FROM iris", con=self.conn)
row = iris_results.fetchone()
tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, "Iris-setosa"])
def test_date_parsing(self):
# Test date parsing in read_sql
# No Parsing
df = sql.read_sql_query("SELECT * FROM types_test_data", self.conn)
assert not issubclass(df.DateCol.dtype.type, np.datetime64)
df = sql.read_sql_query(
"SELECT * FROM types_test_data", self.conn, parse_dates=["DateCol"]
)
assert issubclass(df.DateCol.dtype.type, np.datetime64)
assert df.DateCol.tolist() == [
pd.Timestamp(2000, 1, 3, 0, 0, 0),
pd.Timestamp(2000, 1, 4, 0, 0, 0),
]
df = sql.read_sql_query(
"SELECT * FROM types_test_data",
self.conn,
parse_dates={"DateCol": "%Y-%m-%d %H:%M:%S"},
)
assert issubclass(df.DateCol.dtype.type, np.datetime64)
assert df.DateCol.tolist() == [
pd.Timestamp(2000, 1, 3, 0, 0, 0),
pd.Timestamp(2000, 1, 4, 0, 0, 0),
]
df = sql.read_sql_query(
"SELECT * FROM types_test_data", self.conn, parse_dates=["IntDateCol"]
)
assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
assert df.IntDateCol.tolist() == [
pd.Timestamp(1986, 12, 25, 0, 0, 0),
pd.Timestamp(2013, 1, 1, 0, 0, 0),
]
df = sql.read_sql_query(
"SELECT * FROM types_test_data", self.conn, parse_dates={"IntDateCol": "s"}
)
assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
assert df.IntDateCol.tolist() == [
pd.Timestamp(1986, 12, 25, 0, 0, 0),
pd.Timestamp(2013, 1, 1, 0, 0, 0),
]
df = sql.read_sql_query(
"SELECT * FROM types_test_data",
self.conn,
parse_dates={"IntDateOnlyCol": "%Y%m%d"},
)
assert issubclass(df.IntDateOnlyCol.dtype.type, np.datetime64)
assert df.IntDateOnlyCol.tolist() == [
pd.Timestamp("2010-10-10"),
pd.Timestamp("2010-12-12"),
]
def test_date_and_index(self):
# Test case where same column appears in parse_date and index_col
df = sql.read_sql_query(
"SELECT * FROM types_test_data",
self.conn,
index_col="DateCol",
parse_dates=["DateCol", "IntDateCol"],
)
assert issubclass(df.index.dtype.type, np.datetime64)
assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
def test_timedelta(self):
# see #6921
df = to_timedelta(Series(["00:00:01", "00:00:03"], name="foo")).to_frame()
with tm.assert_produces_warning(UserWarning):
df.to_sql("test_timedelta", self.conn)
result = sql.read_sql_query("SELECT * FROM test_timedelta", self.conn)
tm.assert_series_equal(result["foo"], df["foo"].astype("int64"))
def test_complex_raises(self):
df = DataFrame({"a": [1 + 1j, 2j]})
msg = "Complex datatypes not supported"
with pytest.raises(ValueError, match=msg):
df.to_sql("test_complex", self.conn)
@pytest.mark.parametrize(
"index_name,index_label,expected",
[
# no index name, defaults to 'index'
(None, None, "index"),
# specifying index_label
(None, "other_label", "other_label"),
# using the index name
("index_name", None, "index_name"),
# has index name, but specifying index_label
("index_name", "other_label", "other_label"),
# index name is integer
(0, None, "0"),
# index name is None but index label is integer
(None, 0, "0"),
],
)
def test_to_sql_index_label(self, index_name, index_label, expected):
temp_frame = DataFrame({"col1": range(4)})
temp_frame.index.name = index_name
query = "SELECT * FROM test_index_label"
sql.to_sql(temp_frame, "test_index_label", self.conn, index_label=index_label)
frame = sql.read_sql_query(query, self.conn)
assert frame.columns[0] == expected
def test_to_sql_index_label_multiindex(self):
temp_frame = DataFrame(
{"col1": range(4)},
index=MultiIndex.from_product([("A0", "A1"), ("B0", "B1")]),
)
# no index name, defaults to 'level_0' and 'level_1'
sql.to_sql(temp_frame, "test_index_label", self.conn)
frame = sql.read_sql_query("SELECT * FROM test_index_label", self.conn)
assert frame.columns[0] == "level_0"
assert frame.columns[1] == "level_1"
# specifying index_label
sql.to_sql(
temp_frame,
"test_index_label",
self.conn,
if_exists="replace",
index_label=["A", "B"],
)
frame = sql.read_sql_query("SELECT * FROM test_index_label", self.conn)
assert frame.columns[:2].tolist() == ["A", "B"]
# using the index name
temp_frame.index.names = ["A", "B"]
sql.to_sql(temp_frame, "test_index_label", self.conn, if_exists="replace")
frame = sql.read_sql_query("SELECT * FROM test_index_label", self.conn)
assert frame.columns[:2].tolist() == ["A", "B"]
# has index name, but specifying index_label
sql.to_sql(
temp_frame,
"test_index_label",
self.conn,
if_exists="replace",
index_label=["C", "D"],
)
frame = sql.read_sql_query("SELECT * FROM test_index_label", self.conn)
assert frame.columns[:2].tolist() == ["C", "D"]
msg = "Length of 'index_label' should match number of levels, which is 2"
with pytest.raises(ValueError, match=msg):
sql.to_sql(
temp_frame,
"test_index_label",
self.conn,
if_exists="replace",
index_label="C",
)
def test_multiindex_roundtrip(self):
df = DataFrame.from_records(
[(1, 2.1, "line1"), (2, 1.5, "line2")],
columns=["A", "B", "C"],
index=["A", "B"],
)
df.to_sql("test_multiindex_roundtrip", self.conn)
result = sql.read_sql_query(
"SELECT * FROM test_multiindex_roundtrip", self.conn, index_col=["A", "B"]
)
tm.assert_frame_equal(df, result, check_index_type=True)
def test_integer_col_names(self):
df = DataFrame([[1, 2], [3, 4]], columns=[0, 1])
sql.to_sql(df, "test_frame_integer_col_names", self.conn, if_exists="replace")
def test_get_schema(self):
create_sql = sql.get_schema(self.test_frame1, "test", con=self.conn)
assert "CREATE" in create_sql
def test_get_schema_dtypes(self):
float_frame = DataFrame({"a": [1.1, 1.2], "b": [2.1, 2.2]})
dtype = sqlalchemy.Integer if self.mode == "sqlalchemy" else "INTEGER"
create_sql = sql.get_schema(
float_frame, "test", con=self.conn, dtype={"b": dtype}
)
assert "CREATE" in create_sql
assert "INTEGER" in create_sql
def test_get_schema_keys(self):
frame = DataFrame({"Col1": [1.1, 1.2], "Col2": [2.1, 2.2]})
create_sql = sql.get_schema(frame, "test", con=self.conn, keys="Col1")
constraint_sentence = 'CONSTRAINT test_pk PRIMARY KEY ("Col1")'
assert constraint_sentence in create_sql
# multiple columns as key (GH10385)
create_sql = sql.get_schema(
self.test_frame1, "test", con=self.conn, keys=["A", "B"]
)
constraint_sentence = 'CONSTRAINT test_pk PRIMARY KEY ("A", "B")'
assert constraint_sentence in create_sql
def test_chunksize_read(self):
df = DataFrame(np.random.randn(22, 5), columns=list("abcde"))
df.to_sql("test_chunksize", self.conn, index=False)
# reading the query in one time
res1 = sql.read_sql_query("select * from test_chunksize", self.conn)
# reading the query in chunks with read_sql_query
res2 = DataFrame()
i = 0
sizes = [5, 5, 5, 5, 2]
for chunk in sql.read_sql_query(
"select * from test_chunksize", self.conn, chunksize=5
):
res2 = concat([res2, chunk], ignore_index=True)
assert len(chunk) == sizes[i]
i += 1
tm.assert_frame_equal(res1, res2)
        # reading the table in chunks with read_sql_table
if self.mode == "sqlalchemy":
res3 = DataFrame()
i = 0
sizes = [5, 5, 5, 5, 2]
for chunk in sql.read_sql_table("test_chunksize", self.conn, chunksize=5):
res3 = concat([res3, chunk], ignore_index=True)
assert len(chunk) == sizes[i]
i += 1
tm.assert_frame_equal(res1, res3)
def test_categorical(self):
# GH8624
# test that categorical gets written correctly as dense column
df = DataFrame(
{
"person_id": [1, 2, 3],
"person_name": ["<NAME>", "<NAME>", "<NAME>"],
}
)
df2 = df.copy()
df2["person_name"] = df2["person_name"].astype("category")
df2.to_sql("test_categorical", self.conn, index=False)
res = sql.read_sql_query("SELECT * FROM test_categorical", self.conn)
tm.assert_frame_equal(res, df)
def test_unicode_column_name(self):
# GH 11431
df = DataFrame([[1, 2], [3, 4]], columns=["\xe9", "b"])
df.to_sql("test_unicode", self.conn, index=False)
def test_escaped_table_name(self):
# GH 13206
df = DataFrame({"A": [0, 1, 2], "B": [0.2, np.nan, 5.6]})
df.to_sql("d1187b08-4943-4c8d-a7f6", self.conn, index=False)
res = sql.read_sql_query("SELECT * FROM `d1187b08-4943-4c8d-a7f6`", self.conn)
tm.assert_frame_equal(res, df)
@pytest.mark.single
@pytest.mark.skipif(not SQLALCHEMY_INSTALLED, reason="SQLAlchemy not installed")
class TestSQLApi(SQLAlchemyMixIn, _TestSQLApi):
"""
Test the public API as it would be used directly
Tests for `read_sql_table` are included here, as this is specific for the
sqlalchemy mode.
"""
flavor = "sqlite"
mode = "sqlalchemy"
def connect(self):
return sqlalchemy.create_engine("sqlite:///:memory:")
def test_read_table_columns(self):
# test columns argument in read_table
sql.to_sql(self.test_frame1, "test_frame", self.conn)
cols = ["A", "B"]
result = sql.read_sql_table("test_frame", self.conn, columns=cols)
assert result.columns.tolist() == cols
def test_read_table_index_col(self):
# test columns argument in read_table
sql.to_sql(self.test_frame1, "test_frame", self.conn)
result = sql.read_sql_table("test_frame", self.conn, index_col="index")
assert result.index.names == ["index"]
result = sql.read_sql_table("test_frame", self.conn, index_col=["A", "B"])
assert result.index.names == ["A", "B"]
result = sql.read_sql_table(
"test_frame", self.conn, index_col=["A", "B"], columns=["C", "D"]
)
assert result.index.names == ["A", "B"]
assert result.columns.tolist() == ["C", "D"]
def test_read_sql_delegate(self):
iris_frame1 = sql.read_sql_query("SELECT * FROM iris", self.conn)
iris_frame2 = sql.read_sql("SELECT * FROM iris", self.conn)
tm.assert_frame_equal(iris_frame1, iris_frame2)
iris_frame1 = sql.read_sql_table("iris", self.conn)
iris_frame2 = sql.read_sql("iris", self.conn)
tm.assert_frame_equal(iris_frame1, iris_frame2)
def test_not_reflect_all_tables(self):
# create invalid table
qry = """CREATE TABLE invalid (x INTEGER, y UNKNOWN);"""
self.conn.execute(qry)
qry = """CREATE TABLE other_table (x INTEGER, y INTEGER);"""
self.conn.execute(qry)
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# Trigger a warning.
| sql.read_sql_table("other_table", self.conn) | pandas.io.sql.read_sql_table |
#!/usr/bin/env python
"""
restructure-land.py
===================
Restructures land data for the cdm-lite.
Function
--------
For a given <batch_id>:
- get list of input files
- gather years
- for each <year>:
- get input files
- create dataframe list
- derive output and log file paths
- CHECK: if `success_file` exists: exit
- CHECK: all time fields include a real value
- concatenate dataframes into one
- CHECK: final dataframe is the same size as the sum of its components
- write to `output_file`
- IF FAILURE: write `failure_file`
- IF SUCCESS: write `success_file`
"""
import os, re, glob
import random, time
import pandas as pd
import click
# Output pattern = /gws/nopw/j04/c3s311a_lot2/data/ingest/r202001/land/cdmlite/<report_type>/<yyyy>/<report_type>-<yyyy>-<batch_id>.psv
BASE_OUTPUT_DIR = '/gws/nopw/j04/c3s311a_lot2/data/ingest/r202001/land/cdmlite'
BASE_LOG_DIR = '/gws/smf/j04/c3s311a_lot2/ingest/log/r202001/cdmlite/prep/land'
# For logging
VERBOSE = 0
DRY_RUN = False
time_field = 'date_time'
out_fields = ['observation_id', 'data_policy_licence', 'date_time', 'date_time_meaning',
'observation_duration', 'longitude', 'latitude', 'report_type',
'height_above_surface', 'observed_variable', 'units', 'observation_value',
'value_significance', 'platform_type', 'station_type', 'primary_station_id', 'station_name',
'quality_flag', 'location']
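# Columns written to the output PSV, assumed to follow the CDM-lite observation-table layout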
from height_handler import fix_land_height
from land_batcher import LandBatcher
nap = random.randint(10, 180)
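# 'nap' is presumably used elsewhere to stagger concurrent batch jobs by sleeping a random number of seconds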
batcher = None
def _get_batcher():
global batcher
if not batcher:
batcher = LandBatcher()
return batcher
def get_df(paths, year):
"""
Reads a list of paths, parses data into DataFrames, filters by year, then
Concatenates them together.
Returns: (DataFrame, [list of sub-DataFrames])
"""
data_frames = [pd.read_csv(f, sep='|', parse_dates=[time_field]) for f in paths]
# Only keep the required `year`
print(f'[INFO] Lengths of data frames before filtering years: {[len(_) for _ in data_frames]}')
data_frames = [_[_.date_time.dt.year == year] for _ in data_frames]
print(f'[INFO] Lengths of data frames AFTER filtering years: {[len(_) for _ in data_frames]}')
# Drop duplicates
[_.drop_duplicates(inplace=True) for _ in data_frames]
# droppers = [_ for _ in data_frames[0].columns if _ not in fields]
# data_frames = [_.drop(columns=droppers) for _ in data_frames]
df = pd.concat(data_frames)
# Drop duplicates in concatenated DataFrame
df.drop_duplicates(inplace=True)
return df, data_frames
def get_report_type(batch_id):
_batcher = _get_batcher()
report_type = str(_batcher.get_report_type(batch_id))
return report_type
def get_output_paths(batch_id, year):
# BASE/<report_type>/<yyyy>/<report_type>-<yyyy>-<batch_id>.psv
report_type = get_report_type(batch_id)
year_file = f'{report_type}-{year}-{batch_id}.psv'
gzip_file = f'{year_file}'
year = str(year)
success_dir = os.path.join(BASE_LOG_DIR, 'success', report_type)
failure_dir = os.path.join(BASE_LOG_DIR, 'failure', report_type)
output_dir = os.path.join(BASE_OUTPUT_DIR, report_type, year)
for _ in success_dir, failure_dir, output_dir:
if not os.path.isdir(_):
os.makedirs(_)
d = {'output_path': os.path.join(output_dir, gzip_file),
'success_path': os.path.join(success_dir, year_file),
'failure_path': os.path.join(failure_dir, year_file)
}
return d
def log(log_type, outputs, msg=''):
log_path = outputs[f'{log_type}_path']
if not DRY_RUN:
with open(log_path, 'w') as writer:
writer.write(msg)
log_level = {'success': 'INFO', 'failure': 'ERROR'}[log_type]
message = msg or f'Wrote: {log_path}'
print(f'[{log_level}] {message}')
if log_type == 'success':
print(f'[{log_level}] Wrote success file: {log_path}')
def _set_platform_type(x):
if pd.isnull(x['platform_type']):
return 'NULL'
return x['platform_type']
def _equal_or_slightly_less(a, b, threshold=5):
if a == b: return True
if (b - a) < 0 or (b - a) > threshold:
return False
    print(f'[WARN] Length of main DataFrame ({a}) does not equal total length of component DataFrames ({b}); difference is within the threshold of {threshold}.')
return True
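# For example, given the logic above: _equal_or_slightly_less(98, 100) warns and
# returns True (difference of 2 is within the default threshold of 5), while
# _equal_or_slightly_less(90, 100) returns False (difference of 10 exceeds it).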
def process_year(batch_id, year, files):
"""
"""
print(f'[INFO] Working on {year} for: {batch_id}')
outputs = get_output_paths(batch_id, year)
# CHECK: if `success_file` exists: return
if os.path.isfile(outputs['success_path']):
print(f'[INFO] Success file exists: {outputs["success_path"]}')
return
if VERBOSE:
print(f'[INFO] Reading files:')
for _ in files:
print(f'\tINPUT FILE: {_}')
else:
        print(f'[INFO] Reading input files: {files[0]}, etc.')
df, _partial_dfs = get_df(files, year)
# CHECK: lengths of concatenated df equals sum of individual dfs
l_df = len(df)
l_partial_dfs = sum([len(_) for _ in _partial_dfs])
if not _equal_or_slightly_less(l_df, l_partial_dfs):
        log('failure', outputs, f'Concatenated DataFrame length ({l_df}) and summed component lengths ({l_partial_dfs}) differ by more than the allowed threshold')
return
del _partial_dfs
# Fix column errors
column_name_mapper = {'data_policy_licence ': 'data_policy_licence'}
df.rename(columns=column_name_mapper, inplace=True)
# Make sure the time field is time
df[time_field] = pd.to_datetime(df[time_field], utc=True)
# CHECK: all time fields include a real value
obs_ids_of_bad_time_fields = df[df[time_field].isnull()]['observation_id'].unique().tolist()
if len(obs_ids_of_bad_time_fields) > 0:
log('failure', outputs, f'Some fields had missing value for {time_field}. Observation IDs were: '
f'{obs_ids_of_bad_time_fields}')
return
# Add height column
fix_land_height(df)
# Modify platform type where it is not defined
    df['platform_type'] = df.apply(_set_platform_type, axis=1)
# Set 'report_type'
report_type = get_report_type(batch_id)
df['report_type'] = report_type
# Add the location column
df['location'] = df.apply(lambda x: 'SRID=4326;POINT({:.3f} {:.3f})'.format(x['longitude'], x['latitude']), axis=1)
# Write output file
if not DRY_RUN:
print(f'[INFO] Writing output file: {outputs["output_path"]}')
try:
df.to_csv(outputs['output_path'], sep='|', index=False, float_format='%.3f',
columns=out_fields, date_format='%Y-%m-%d %H:%M:%S%z')
log('success', outputs, msg=f'Wrote: {outputs["output_path"]}')
# Remove any previous failure file if exists
failure_file = outputs['failure_path']
if os.path.isfile(failure_file):
os.remove(failure_file)
except Exception as err:
            log('failure', outputs, f'Could not write output to PSV file: {err}')
else:
print('[INFO] Not writing output in DRY RUN mode.')
def _read_years_from_gzipped_psv(fpath):
print(f'[INFO] Reading: {fpath} to detect years.')
    df = pd.read_csv(fpath, sep='|')
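    # Assumed completion: the source is truncated after the read, so this return is a
    # hedged sketch of the likely intent -- the sorted set of years found in the time field.
    return sorted(pd.to_datetime(df[time_field], utc=True).dt.year.unique())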
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler
from mabwiser.mab import MAB, LearningPolicy, NeighborhoodPolicy
from tests.test_base import BaseTest
class MABTest(BaseTest):
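    """Input-type coverage for predict() and predict_expectation(): decisions and
    rewards given as plain lists, pandas Series, or numpy arrays should all be
    accepted across the context-free (MABTest.lps) and parametric (MABTest.para_lps)
    learning policies, which are presumably defined on the shared BaseTest class."""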
#################################################
# Test context free predict() method
################################################
def test_arm_list_int(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=[1, 1, 1, 2, 2, 2, 3, 3, 3],
rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
for lp in MABTest.para_lps:
self.predict(arms=[1, 2, 3],
decisions=[1, 1, 1, 2, 2, 2, 3, 3, 3],
rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
seed=123456,
num_run=1,
is_predict=True)
def test_arm_list_str(self):
for lp in MABTest.lps:
self.predict(arms=["A", "B", "C"],
decisions=["A", "A", "A", "B", "B", "B", "C", "C", "C"],
rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
for lp in MABTest.para_lps:
self.predict(arms=["A", "B", "C"],
decisions=["A", "A", "A", "B", "B", "B", "C", "C", "C"],
rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
seed=123456,
num_run=1,
is_predict=True)
def test_decision_series(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=pd.Series([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
for lp in MABTest.para_lps:
self.predict(arms=[1, 2, 3],
decisions=pd.Series([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
def test_reward_series(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=[1, 1, 1, 2, 2, 2, 3, 3, 3],
rewards=pd.Series([0, 0, 0, 0, 0, 0, 1, 1, 1]),
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
for lp in MABTest.para_lps:
self.predict(arms=[1, 2, 3],
decisions=[1, 1, 1, 2, 2, 2, 3, 3, 3],
rewards=pd.Series([0, 0, 0, 0, 0, 0, 1, 1, 1]),
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
def test_decision_reward_series(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=pd.Series([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=pd.Series([0, 0, 0, 0, 0, 0, 1, 1, 1]),
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
for lp in MABTest.para_lps:
self.predict(arms=[1, 2, 3],
decisions=pd.Series([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=pd.Series([0, 0, 0, 0, 0, 0, 1, 1, 1]),
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
def test_decision_array(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=np.array([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
for lp in MABTest.para_lps:
self.predict(arms=[1, 2, 3],
decisions=np.array([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
def test_reward_array(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=[1, 1, 1, 2, 2, 2, 3, 3, 3],
rewards=np.array([0, 0, 0, 0, 0, 0, 1, 1, 1]),
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
for lp in MABTest.para_lps:
self.predict(arms=[1, 2, 3],
decisions=[1, 1, 1, 2, 2, 2, 3, 3, 3],
rewards=np.array([0, 0, 0, 0, 0, 0, 1, 1, 1]),
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
def test_decision_reward_array(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=np.array([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=np.array([0, 0, 0, 0, 0, 0, 1, 1, 1]),
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
for lp in MABTest.para_lps:
self.predict(arms=[1, 2, 3],
decisions=np.array([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=np.array([0, 0, 0, 0, 0, 0, 1, 1, 1]),
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
def test_decision_series_reward_array(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=pd.Series([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=np.array([0, 0, 0, 0, 0, 0, 1, 1, 1]),
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
for lp in MABTest.para_lps:
self.predict(arms=[1, 2, 3],
decisions=pd.Series([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=np.array([0, 0, 0, 0, 0, 0, 1, 1, 1]),
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
def test_decision_array_reward_series(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=np.array([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=pd.Series([0, 0, 0, 0, 0, 0, 1, 1, 1]),
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
for lp in MABTest.para_lps:
self.predict(arms=[1, 2, 3],
decisions=np.array([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=pd.Series([0, 0, 0, 0, 0, 0, 1, 1, 1]),
context_history=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1], [0, 0, 1, 0, 0],
[0, 2, 2, 3, 5], [1, 3, 1, 1, 1], [0, 0, 0, 0, 0],
[0, 1, 4, 3, 5], [0, 1, 2, 4, 5], [1, 2, 1, 1, 3]],
contexts=[[0, 1, 2, 3, 5], [1, 1, 1, 1, 1]],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=True)
#################################################
# Test context free predict_expectation() method
################################################
def test_exp_arm_list_int(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=[1, 1, 1, 2, 2, 2, 3, 3, 3],
rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=False)
def test_exp_arm_list_str(self):
for lp in MABTest.lps:
self.predict(arms=["A", "B", "C"],
decisions=["A", "A", "A", "B", "B", "B", "C", "C", "C"],
rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=False)
def test_exp_decision_series(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=pd.Series([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=False)
def test_exp_reward_series(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=[1, 1, 1, 2, 2, 2, 3, 3, 3],
rewards=pd.Series([0, 0, 0, 0, 0, 0, 1, 1, 1]),
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=False)
def test_exp_decision_reward_series(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=pd.Series([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=pd.Series([0, 0, 0, 0, 0, 0, 1, 1, 1]),
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=False)
def test_exp_decision_array(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=np.array([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=[0, 0, 0, 0, 0, 0, 1, 1, 1],
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=False)
def test_exp_reward_array(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=[1, 1, 1, 2, 2, 2, 3, 3, 3],
rewards=np.array([0, 0, 0, 0, 0, 0, 1, 1, 1]),
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=False)
def test_exp_decision_reward_array(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
decisions=np.array([1, 1, 1, 2, 2, 2, 3, 3, 3]),
rewards=np.array([0, 0, 0, 0, 0, 0, 1, 1, 1]),
learning_policy=lp,
seed=123456,
num_run=1,
is_predict=False)
def test_exp_decision_series_reward_array(self):
for lp in MABTest.lps:
self.predict(arms=[1, 2, 3],
                         decisions=pd.Series([1, 1, 1, 2, 2, 2, 3, 3, 3]),
                         rewards=np.array([0, 0, 0, 0, 0, 0, 1, 1, 1]),  # remaining arguments assumed from the matching non-expectation test; the source is truncated here
                         learning_policy=lp,
                         seed=123456,
                         num_run=1,
                         is_predict=False)
# pylint: disable-msg=E1101,W0612
from datetime import datetime, time, timedelta, date
import sys
import os
import operator
from distutils.version import LooseVersion
import nose
import numpy as np
randn = np.random.randn
from pandas import (Index, Series, TimeSeries, DataFrame,
isnull, date_range, Timestamp, Period, DatetimeIndex,
Int64Index, to_datetime, bdate_range, Float64Index)
import pandas.core.datetools as datetools
import pandas.tseries.offsets as offsets
import pandas.tseries.tools as tools
import pandas.tseries.frequencies as fmod
import pandas as pd
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from pandas.tslib import NaT, iNaT
import pandas.lib as lib
import pandas.tslib as tslib
import pandas.index as _index
from pandas.compat import range, long, StringIO, lrange, lmap, zip, product
import pandas.core.datetools as dt
from numpy.random import rand
from numpy.testing import assert_array_equal
from pandas.util.testing import assert_frame_equal
import pandas.compat as compat
import pandas.core.common as com
from pandas import concat
from pandas import _np_version_under1p7
from numpy.testing.decorators import slow
def _skip_if_no_pytz():
try:
import pytz
except ImportError:
raise nose.SkipTest("pytz not installed")
def _skip_if_has_locale():
import locale
lang, _ = locale.getlocale()
if lang is not None:
raise nose.SkipTest("Specific locale is set {0}".format(lang))
class TestTimeSeriesDuplicates(tm.TestCase):
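    """Behaviour of a Series/DatetimeIndex whose index contains duplicate timestamps."""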
_multiprocess_can_split_ = True
def setUp(self):
dates = [datetime(2000, 1, 2), datetime(2000, 1, 2),
datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 3), datetime(2000, 1, 3),
datetime(2000, 1, 4), datetime(2000, 1, 4),
datetime(2000, 1, 4), datetime(2000, 1, 5)]
self.dups = Series(np.random.randn(len(dates)), index=dates)
def test_constructor(self):
tm.assert_isinstance(self.dups, TimeSeries)
tm.assert_isinstance(self.dups.index, DatetimeIndex)
def test_is_unique_monotonic(self):
self.assertFalse(self.dups.index.is_unique)
def test_index_unique(self):
uniques = self.dups.index.unique()
expected = DatetimeIndex([datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 4), datetime(2000, 1, 5)])
self.assertEqual(uniques.dtype, 'M8[ns]') # sanity
self.assertTrue(uniques.equals(expected))
self.assertEqual(self.dups.index.nunique(), 4)
# #2563
self.assertTrue(isinstance(uniques, DatetimeIndex))
dups_local = self.dups.index.tz_localize('US/Eastern')
dups_local.name = 'foo'
result = dups_local.unique()
expected = DatetimeIndex(expected, tz='US/Eastern')
self.assertTrue(result.tz is not None)
self.assertEqual(result.name, 'foo')
self.assertTrue(result.equals(expected))
# NaT
arr = [ 1370745748 + t for t in range(20) ] + [iNaT]
idx = DatetimeIndex(arr * 3)
self.assertTrue(idx.unique().equals(DatetimeIndex(arr)))
self.assertEqual(idx.nunique(), 21)
arr = [ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT]
idx = DatetimeIndex(arr * 3)
self.assertTrue(idx.unique().equals(DatetimeIndex(arr)))
self.assertEqual(idx.nunique(), 21)
def test_index_dupes_contains(self):
d = datetime(2011, 12, 5, 20, 30)
ix = DatetimeIndex([d, d])
self.assertTrue(d in ix)
def test_duplicate_dates_indexing(self):
ts = self.dups
uniques = ts.index.unique()
for date in uniques:
result = ts[date]
mask = ts.index == date
total = (ts.index == date).sum()
expected = ts[mask]
if total > 1:
assert_series_equal(result, expected)
else:
assert_almost_equal(result, expected[0])
cp = ts.copy()
cp[date] = 0
expected = Series(np.where(mask, 0, ts), index=ts.index)
assert_series_equal(cp, expected)
self.assertRaises(KeyError, ts.__getitem__, datetime(2000, 1, 6))
# new index
ts[datetime(2000,1,6)] = 0
self.assertEqual(ts[datetime(2000,1,6)], 0)
def test_range_slice(self):
idx = DatetimeIndex(['1/1/2000', '1/2/2000', '1/2/2000', '1/3/2000',
'1/4/2000'])
ts = Series(np.random.randn(len(idx)), index=idx)
result = ts['1/2/2000':]
expected = ts[1:]
assert_series_equal(result, expected)
result = ts['1/2/2000':'1/3/2000']
expected = ts[1:4]
assert_series_equal(result, expected)
def test_groupby_average_dup_values(self):
result = self.dups.groupby(level=0).mean()
expected = self.dups.groupby(self.dups.index).mean()
assert_series_equal(result, expected)
def test_indexing_over_size_cutoff(self):
import datetime
# #1821
old_cutoff = _index._SIZE_CUTOFF
try:
_index._SIZE_CUTOFF = 1000
# create large list of non periodic datetime
dates = []
sec = datetime.timedelta(seconds=1)
half_sec = datetime.timedelta(microseconds=500000)
d = datetime.datetime(2011, 12, 5, 20, 30)
n = 1100
for i in range(n):
dates.append(d)
dates.append(d + sec)
dates.append(d + sec + half_sec)
dates.append(d + sec + sec + half_sec)
d += 3 * sec
# duplicate some values in the list
duplicate_positions = np.random.randint(0, len(dates) - 1, 20)
for p in duplicate_positions:
dates[p + 1] = dates[p]
df = DataFrame(np.random.randn(len(dates), 4),
index=dates,
columns=list('ABCD'))
pos = n * 3
timestamp = df.index[pos]
self.assertIn(timestamp, df.index)
# it works!
df.ix[timestamp]
self.assertTrue(len(df.ix[[timestamp]]) > 0)
finally:
_index._SIZE_CUTOFF = old_cutoff
def test_indexing_unordered(self):
# GH 2437
rng = date_range(start='2011-01-01', end='2011-01-15')
ts = Series(randn(len(rng)), index=rng)
ts2 = concat([ts[0:4],ts[-4:],ts[4:-4]])
for t in ts.index:
s = str(t)
expected = ts[t]
result = ts2[t]
self.assertTrue(expected == result)
# GH 3448 (ranges)
def compare(slobj):
result = ts2[slobj].copy()
result = result.sort_index()
expected = ts[slobj]
assert_series_equal(result,expected)
compare(slice('2011-01-01','2011-01-15'))
compare(slice('2010-12-30','2011-01-15'))
compare(slice('2011-01-01','2011-01-16'))
# partial ranges
compare(slice('2011-01-01','2011-01-6'))
compare(slice('2011-01-06','2011-01-8'))
compare(slice('2011-01-06','2011-01-12'))
# single values
result = ts2['2011'].sort_index()
expected = ts['2011']
assert_series_equal(result,expected)
# diff freq
rng = date_range(datetime(2005, 1, 1), periods=20, freq='M')
ts = Series(np.arange(len(rng)), index=rng)
ts = ts.take(np.random.permutation(20))
result = ts['2005']
for t in result.index:
self.assertTrue(t.year == 2005)
def test_indexing(self):
idx = date_range("2001-1-1", periods=20, freq='M')
ts = Series(np.random.rand(len(idx)),index=idx)
# getting
# GH 3070, make sure semantics work on Series/Frame
expected = ts['2001']
df = DataFrame(dict(A = ts))
result = df['2001']['A']
assert_series_equal(expected,result)
# setting
ts['2001'] = 1
expected = ts['2001']
df.loc['2001','A'] = 1
result = df['2001']['A']
assert_series_equal(expected,result)
# GH3546 (not including times on the last day)
idx = date_range(start='2013-05-31 00:00', end='2013-05-31 23:00', freq='H')
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013-05']
assert_series_equal(expected,ts)
idx = date_range(start='2013-05-31 00:00', end='2013-05-31 23:59', freq='S')
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013-05']
assert_series_equal(expected,ts)
idx = [ Timestamp('2013-05-31 00:00'), Timestamp(datetime(2013,5,31,23,59,59,999999))]
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013']
assert_series_equal(expected,ts)
# GH 3925, indexing with a seconds resolution string / datetime object
df = DataFrame(randn(5,5),columns=['open','high','low','close','volume'],index=date_range('2012-01-02 18:01:00',periods=5,tz='US/Central',freq='s'))
expected = df.loc[[df.index[2]]]
result = df['2012-01-02 18:01:02']
assert_frame_equal(result,expected)
# this is a single date, so will raise
self.assertRaises(KeyError, df.__getitem__, df.index[2],)
def test_recreate_from_data(self):
if _np_version_under1p7:
freqs = ['M', 'Q', 'A', 'D', 'B', 'T', 'S', 'L', 'U', 'H']
else:
freqs = ['M', 'Q', 'A', 'D', 'B', 'T', 'S', 'L', 'U', 'H', 'N', 'C']
for f in freqs:
org = DatetimeIndex(start='2001/02/01 09:00', freq=f, periods=1)
idx = DatetimeIndex(org, freq=f)
self.assertTrue(idx.equals(org))
        # unable to create tz-aware 'A' and 'C' freq
if _np_version_under1p7:
freqs = ['M', 'Q', 'D', 'B', 'T', 'S', 'L', 'U', 'H']
else:
freqs = ['M', 'Q', 'D', 'B', 'T', 'S', 'L', 'U', 'H', 'N']
for f in freqs:
org = DatetimeIndex(start='2001/02/01 09:00', freq=f, tz='US/Pacific', periods=1)
idx = DatetimeIndex(org, freq=f, tz='US/Pacific')
self.assertTrue(idx.equals(org))
def assert_range_equal(left, right):
assert(left.equals(right))
assert(left.freq == right.freq)
assert(left.tz == right.tz)
class TestTimeSeries(tm.TestCase):
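    """Assorted time-series regression tests: construction, indexing, reindexing and
    fillna limits, to_datetime conversions, at_time/between_time selection, and
    to_period conversion."""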
_multiprocess_can_split_ = True
def test_is_(self):
dti = DatetimeIndex(start='1/1/2005', end='12/1/2005', freq='M')
self.assertTrue(dti.is_(dti))
self.assertTrue(dti.is_(dti.view()))
self.assertFalse(dti.is_(dti.copy()))
def test_dti_slicing(self):
dti = DatetimeIndex(start='1/1/2005', end='12/1/2005', freq='M')
dti2 = dti[[1, 3, 5]]
v1 = dti2[0]
v2 = dti2[1]
v3 = dti2[2]
self.assertEqual(v1, Timestamp('2/28/2005'))
self.assertEqual(v2, Timestamp('4/30/2005'))
self.assertEqual(v3, Timestamp('6/30/2005'))
# don't carry freq through irregular slicing
self.assertIsNone(dti2.freq)
def test_pass_datetimeindex_to_index(self):
# Bugs in #1396
rng = date_range('1/1/2000', '3/1/2000')
idx = Index(rng, dtype=object)
expected = Index(rng.to_pydatetime(), dtype=object)
self.assert_numpy_array_equal(idx.values, expected.values)
def test_contiguous_boolean_preserve_freq(self):
rng = date_range('1/1/2000', '3/1/2000', freq='B')
mask = np.zeros(len(rng), dtype=bool)
mask[10:20] = True
masked = rng[mask]
expected = rng[10:20]
self.assertIsNotNone(expected.freq)
assert_range_equal(masked, expected)
mask[22] = True
masked = rng[mask]
self.assertIsNone(masked.freq)
def test_getitem_median_slice_bug(self):
index = date_range('20090415', '20090519', freq='2B')
s = Series(np.random.randn(13), index=index)
indexer = [slice(6, 7, None)]
result = s[indexer]
expected = s[indexer[0]]
assert_series_equal(result, expected)
def test_series_box_timestamp(self):
rng = date_range('20090415', '20090519', freq='B')
s = Series(rng)
tm.assert_isinstance(s[5], Timestamp)
rng = date_range('20090415', '20090519', freq='B')
s = Series(rng, index=rng)
tm.assert_isinstance(s[5], Timestamp)
tm.assert_isinstance(s.iget_value(5), Timestamp)
def test_date_range_ambiguous_arguments(self):
# #2538
start = datetime(2011, 1, 1, 5, 3, 40)
end = datetime(2011, 1, 1, 8, 9, 40)
self.assertRaises(ValueError, date_range, start, end,
freq='s', periods=10)
def test_timestamp_to_datetime(self):
_skip_if_no_pytz()
rng = date_range('20090415', '20090519',
tz='US/Eastern')
stamp = rng[0]
dtval = stamp.to_pydatetime()
self.assertEqual(stamp, dtval)
self.assertEqual(stamp.tzinfo, dtval.tzinfo)
def test_index_convert_to_datetime_array(self):
_skip_if_no_pytz()
def _check_rng(rng):
converted = rng.to_pydatetime()
tm.assert_isinstance(converted, np.ndarray)
for x, stamp in zip(converted, rng):
tm.assert_isinstance(x, datetime)
self.assertEqual(x, stamp.to_pydatetime())
self.assertEqual(x.tzinfo, stamp.tzinfo)
rng = date_range('20090415', '20090519')
rng_eastern = date_range('20090415', '20090519', tz='US/Eastern')
rng_utc = date_range('20090415', '20090519', tz='utc')
_check_rng(rng)
_check_rng(rng_eastern)
_check_rng(rng_utc)
def test_ctor_str_intraday(self):
rng = DatetimeIndex(['1-1-2000 00:00:01'])
self.assertEqual(rng[0].second, 1)
def test_series_ctor_plus_datetimeindex(self):
rng = date_range('20090415', '20090519', freq='B')
data = dict((k, 1) for k in rng)
result = Series(data, index=rng)
self.assertIs(result.index, rng)
def test_series_pad_backfill_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
result = s[:2].reindex(index, method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected[-3:] = np.nan
assert_series_equal(result, expected)
result = s[-2:].reindex(index, method='backfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected[:3] = np.nan
assert_series_equal(result, expected)
def test_series_fillna_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
result = s[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected[-3:] = np.nan
assert_series_equal(result, expected)
result = s[-2:].reindex(index)
result = result.fillna(method='bfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected[:3] = np.nan
assert_series_equal(result, expected)
def test_frame_pad_backfill_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
result = df[:2].reindex(index, method='pad', limit=5)
expected = df[:2].reindex(index).fillna(method='pad')
expected.values[-3:] = np.nan
tm.assert_frame_equal(result, expected)
result = df[-2:].reindex(index, method='backfill', limit=5)
expected = df[-2:].reindex(index).fillna(method='backfill')
expected.values[:3] = np.nan
tm.assert_frame_equal(result, expected)
def test_frame_fillna_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
result = df[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = df[:2].reindex(index).fillna(method='pad')
expected.values[-3:] = np.nan
tm.assert_frame_equal(result, expected)
result = df[-2:].reindex(index)
result = result.fillna(method='backfill', limit=5)
expected = df[-2:].reindex(index).fillna(method='backfill')
expected.values[:3] = np.nan
tm.assert_frame_equal(result, expected)
def test_frame_setitem_timestamp(self):
# 2155
columns = DatetimeIndex(start='1/1/2012', end='2/1/2012',
freq=datetools.bday)
index = lrange(10)
data = DataFrame(columns=columns, index=index)
t = datetime(2012, 11, 1)
ts = Timestamp(t)
data[ts] = np.nan # works
def test_sparse_series_fillna_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
ss = s[:2].reindex(index).to_sparse()
result = ss.fillna(method='pad', limit=5)
expected = ss.fillna(method='pad', limit=5)
expected = expected.to_dense()
expected[-3:] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
ss = s[-2:].reindex(index).to_sparse()
result = ss.fillna(method='backfill', limit=5)
expected = ss.fillna(method='backfill')
expected = expected.to_dense()
expected[:3] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
def test_sparse_series_pad_backfill_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
s = s.to_sparse()
result = s[:2].reindex(index, method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected[-3:] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
result = s[-2:].reindex(index, method='backfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected[:3] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
def test_sparse_frame_pad_backfill_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
sdf = df.to_sparse()
result = sdf[:2].reindex(index, method='pad', limit=5)
expected = sdf[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected.values[-3:] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
result = sdf[-2:].reindex(index, method='backfill', limit=5)
expected = sdf[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected.values[:3] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
def test_sparse_frame_fillna_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
sdf = df.to_sparse()
result = sdf[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = sdf[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected.values[-3:] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
result = sdf[-2:].reindex(index)
result = result.fillna(method='backfill', limit=5)
expected = sdf[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected.values[:3] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
def test_pad_require_monotonicity(self):
rng = date_range('1/1/2000', '3/1/2000', freq='B')
rng2 = rng[::2][::-1]
self.assertRaises(ValueError, rng2.get_indexer, rng,
method='pad')
def test_frame_ctor_datetime64_column(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
dates = np.asarray(rng)
df = DataFrame({'A': np.random.randn(len(rng)), 'B': dates})
self.assertTrue(np.issubdtype(df['B'].dtype, np.dtype('M8[ns]')))
def test_frame_add_datetime64_column(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
df = DataFrame(index=np.arange(len(rng)))
df['A'] = rng
self.assertTrue(np.issubdtype(df['A'].dtype, np.dtype('M8[ns]')))
def test_frame_datetime64_pre1900_repr(self):
df = DataFrame({'year': date_range('1/1/1700', periods=50,
freq='A-DEC')})
# it works!
repr(df)
def test_frame_add_datetime64_col_other_units(self):
n = 100
units = ['h', 'm', 's', 'ms', 'D', 'M', 'Y']
ns_dtype = np.dtype('M8[ns]')
for unit in units:
dtype = np.dtype('M8[%s]' % unit)
vals = np.arange(n, dtype=np.int64).view(dtype)
df = DataFrame({'ints': np.arange(n)}, index=np.arange(n))
df[unit] = vals
ex_vals = to_datetime(vals.astype('O'))
self.assertEqual(df[unit].dtype, ns_dtype)
self.assertTrue((df[unit].values == ex_vals).all())
# Test insertion into existing datetime64 column
df = DataFrame({'ints': np.arange(n)}, index=np.arange(n))
df['dates'] = np.arange(n, dtype=np.int64).view(ns_dtype)
for unit in units:
dtype = np.dtype('M8[%s]' % unit)
vals = np.arange(n, dtype=np.int64).view(dtype)
tmp = df.copy()
tmp['dates'] = vals
ex_vals = to_datetime(vals.astype('O'))
self.assertTrue((tmp['dates'].values == ex_vals).all())
def test_to_datetime_unit(self):
epoch = 1370745748
s = Series([ epoch + t for t in range(20) ])
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ])
assert_series_equal(result,expected)
s = Series([ epoch + t for t in range(20) ]).astype(float)
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ])
assert_series_equal(result,expected)
s = Series([ epoch + t for t in range(20) ] + [iNaT])
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT])
assert_series_equal(result,expected)
s = Series([ epoch + t for t in range(20) ] + [iNaT]).astype(float)
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT])
assert_series_equal(result,expected)
s = concat([Series([ epoch + t for t in range(20) ]).astype(float),Series([np.nan])],ignore_index=True)
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT])
assert_series_equal(result,expected)
def test_series_ctor_datetime64(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
dates = np.asarray(rng)
series = Series(dates)
self.assertTrue(np.issubdtype(series.dtype, np.dtype('M8[ns]')))
def test_index_cast_datetime64_other_units(self):
arr = np.arange(0, 100, 10, dtype=np.int64).view('M8[D]')
idx = Index(arr)
self.assertTrue((idx.values == tslib.cast_to_nanoseconds(arr)).all())
def test_index_astype_datetime64(self):
idx = Index([datetime(2012, 1, 1)], dtype=object)
if not _np_version_under1p7:
raise nose.SkipTest("test only valid in numpy < 1.7")
casted = idx.astype(np.dtype('M8[D]'))
expected = DatetimeIndex(idx.values)
tm.assert_isinstance(casted, DatetimeIndex)
self.assertTrue(casted.equals(expected))
def test_reindex_series_add_nat(self):
rng = date_range('1/1/2000 00:00:00', periods=10, freq='10s')
series = Series(rng)
result = series.reindex(lrange(15))
self.assertTrue(np.issubdtype(result.dtype, np.dtype('M8[ns]')))
mask = result.isnull()
self.assertTrue(mask[-5:].all())
self.assertFalse(mask[:-5].any())
def test_reindex_frame_add_nat(self):
rng = date_range('1/1/2000 00:00:00', periods=10, freq='10s')
df = DataFrame({'A': np.random.randn(len(rng)), 'B': rng})
result = df.reindex(lrange(15))
self.assertTrue(np.issubdtype(result['B'].dtype, np.dtype('M8[ns]')))
mask = com.isnull(result)['B']
self.assertTrue(mask[-5:].all())
self.assertFalse(mask[:-5].any())
def test_series_repr_nat(self):
series = Series([0, 1000, 2000, iNaT], dtype='M8[ns]')
result = repr(series)
expected = ('0 1970-01-01 00:00:00\n'
'1 1970-01-01 00:00:00.000001\n'
'2 1970-01-01 00:00:00.000002\n'
'3 NaT\n'
'dtype: datetime64[ns]')
self.assertEqual(result, expected)
def test_fillna_nat(self):
series = Series([0, 1, 2, iNaT], dtype='M8[ns]')
filled = series.fillna(method='pad')
filled2 = series.fillna(value=series.values[2])
expected = series.copy()
expected.values[3] = expected.values[2]
assert_series_equal(filled, expected)
assert_series_equal(filled2, expected)
df = DataFrame({'A': series})
filled = df.fillna(method='pad')
filled2 = df.fillna(value=series.values[2])
expected = DataFrame({'A': expected})
assert_frame_equal(filled, expected)
assert_frame_equal(filled2, expected)
series = Series([iNaT, 0, 1, 2], dtype='M8[ns]')
filled = series.fillna(method='bfill')
filled2 = series.fillna(value=series[1])
expected = series.copy()
expected[0] = expected[1]
assert_series_equal(filled, expected)
assert_series_equal(filled2, expected)
df = DataFrame({'A': series})
filled = df.fillna(method='bfill')
filled2 = df.fillna(value=series[1])
expected = DataFrame({'A': expected})
assert_frame_equal(filled, expected)
assert_frame_equal(filled2, expected)
def test_string_na_nat_conversion(self):
# GH #999, #858
from pandas.compat import parse_date
strings = np.array(['1/1/2000', '1/2/2000', np.nan,
'1/4/2000, 12:34:56'], dtype=object)
expected = np.empty(4, dtype='M8[ns]')
for i, val in enumerate(strings):
if com.isnull(val):
expected[i] = iNaT
else:
expected[i] = parse_date(val)
result = tslib.array_to_datetime(strings)
assert_almost_equal(result, expected)
result2 = to_datetime(strings)
tm.assert_isinstance(result2, DatetimeIndex)
assert_almost_equal(result, result2)
malformed = np.array(['1/100/2000', np.nan], dtype=object)
result = to_datetime(malformed)
assert_almost_equal(result, malformed)
self.assertRaises(ValueError, to_datetime, malformed,
errors='raise')
idx = ['a', 'b', 'c', 'd', 'e']
series = Series(['1/1/2000', np.nan, '1/3/2000', np.nan,
'1/5/2000'], index=idx, name='foo')
dseries = Series([to_datetime('1/1/2000'), np.nan,
to_datetime('1/3/2000'), np.nan,
to_datetime('1/5/2000')], index=idx, name='foo')
result = to_datetime(series)
dresult = to_datetime(dseries)
expected = Series(np.empty(5, dtype='M8[ns]'), index=idx)
for i in range(5):
x = series[i]
if isnull(x):
expected[i] = iNaT
else:
expected[i] = to_datetime(x)
assert_series_equal(result, expected)
self.assertEqual(result.name, 'foo')
assert_series_equal(dresult, expected)
self.assertEqual(dresult.name, 'foo')
def test_to_datetime_iso8601(self):
result = to_datetime(["2012-01-01 00:00:00"])
exp = Timestamp("2012-01-01 00:00:00")
self.assertEqual(result[0], exp)
result = to_datetime(['20121001']) # bad iso 8601
exp = Timestamp('2012-10-01')
self.assertEqual(result[0], exp)
def test_to_datetime_default(self):
rs = to_datetime('2001')
xp = datetime(2001, 1, 1)
        self.assertEqual(rs, xp)
#### dayfirst is essentially broken
#### to_datetime('01-13-2012', dayfirst=True)
#### self.assertRaises(ValueError, to_datetime('01-13-2012', dayfirst=True))
def test_to_datetime_on_datetime64_series(self):
# #2699
s = Series(date_range('1/1/2000', periods=10))
result = to_datetime(s)
self.assertEqual(result[0], s[0])
def test_to_datetime_with_apply(self):
# this is only locale tested with US/None locales
_skip_if_has_locale()
# GH 5195
# with a format and coerce a single item to_datetime fails
td = Series(['May 04', 'Jun 02', 'Dec 11'], index=[1,2,3])
expected = pd.to_datetime(td, format='%b %y')
result = td.apply(pd.to_datetime, format='%b %y')
assert_series_equal(result, expected)
td = pd.Series(['May 04', 'Jun 02', ''], index=[1,2,3])
self.assertRaises(ValueError, lambda : pd.to_datetime(td,format='%b %y'))
self.assertRaises(ValueError, lambda : td.apply(pd.to_datetime, format='%b %y'))
expected = pd.to_datetime(td, format='%b %y', coerce=True)
result = td.apply(lambda x: pd.to_datetime(x, format='%b %y', coerce=True))
assert_series_equal(result, expected)
def test_nat_vector_field_access(self):
idx = DatetimeIndex(['1/1/2000', None, None, '1/4/2000'])
fields = ['year', 'quarter', 'month', 'day', 'hour',
'minute', 'second', 'microsecond', 'nanosecond',
'week', 'dayofyear']
for field in fields:
result = getattr(idx, field)
expected = [getattr(x, field) if x is not NaT else -1
for x in idx]
self.assert_numpy_array_equal(result, expected)
def test_nat_scalar_field_access(self):
fields = ['year', 'quarter', 'month', 'day', 'hour',
'minute', 'second', 'microsecond', 'nanosecond',
'week', 'dayofyear']
for field in fields:
result = getattr(NaT, field)
self.assertEqual(result, -1)
self.assertEqual(NaT.weekday(), -1)
def test_to_datetime_types(self):
# empty string
result = to_datetime('')
self.assertIs(result, NaT)
result = to_datetime(['', ''])
self.assertTrue(isnull(result).all())
# ints
result = Timestamp(0)
expected = to_datetime(0)
self.assertEqual(result, expected)
# GH 3888 (strings)
expected = to_datetime(['2012'])[0]
result = to_datetime('2012')
self.assertEqual(result, expected)
### array = ['2012','20120101','20120101 12:01:01']
array = ['20120101','20120101 12:01:01']
expected = list(to_datetime(array))
result = lmap(Timestamp,array)
tm.assert_almost_equal(result,expected)
### currently fails ###
### result = Timestamp('2012')
### expected = to_datetime('2012')
### self.assertEqual(result, expected)
def test_to_datetime_unprocessable_input(self):
# GH 4928
self.assert_numpy_array_equal(
to_datetime([1, '1']),
np.array([1, '1'], dtype='O')
)
self.assertRaises(TypeError, to_datetime, [1, '1'], errors='raise')
def test_to_datetime_other_datetime64_units(self):
# 5/25/2012
scalar = np.int64(1337904000000000).view('M8[us]')
as_obj = scalar.astype('O')
index = DatetimeIndex([scalar])
self.assertEqual(index[0], scalar.astype('O'))
value = Timestamp(scalar)
self.assertEqual(value, as_obj)
def test_to_datetime_list_of_integers(self):
rng = date_range('1/1/2000', periods=20)
rng = DatetimeIndex(rng.values)
ints = list(rng.asi8)
result = DatetimeIndex(ints)
self.assertTrue(rng.equals(result))
def test_to_datetime_dt64s(self):
in_bound_dts = [
np.datetime64('2000-01-01'),
np.datetime64('2000-01-02'),
]
for dt in in_bound_dts:
self.assertEqual(
pd.to_datetime(dt),
Timestamp(dt)
)
oob_dts = [
np.datetime64('1000-01-01'),
np.datetime64('5000-01-02'),
]
for dt in oob_dts:
self.assertRaises(ValueError, pd.to_datetime, dt, errors='raise')
self.assertRaises(ValueError, tslib.Timestamp, dt)
self.assertIs(pd.to_datetime(dt, coerce=True), NaT)
def test_to_datetime_array_of_dt64s(self):
dts = [
np.datetime64('2000-01-01'),
np.datetime64('2000-01-02'),
]
# Assuming all datetimes are in bounds, to_datetime() returns
# an array that is equal to Timestamp() parsing
self.assert_numpy_array_equal(
pd.to_datetime(dts, box=False),
np.array([Timestamp(x).asm8 for x in dts])
)
# A list of datetimes where the last one is out of bounds
dts_with_oob = dts + [np.datetime64('9999-01-01')]
self.assertRaises(
ValueError,
pd.to_datetime,
dts_with_oob,
coerce=False,
errors='raise'
)
self.assert_numpy_array_equal(
pd.to_datetime(dts_with_oob, box=False, coerce=True),
np.array(
[
Timestamp(dts_with_oob[0]).asm8,
Timestamp(dts_with_oob[1]).asm8,
iNaT,
],
dtype='M8'
)
)
# With coerce=False and errors='ignore', out of bounds datetime64s
# are converted to their .item(), which depending on the version of
# numpy is either a python datetime.datetime or datetime.date
self.assert_numpy_array_equal(
pd.to_datetime(dts_with_oob, box=False, coerce=False),
np.array(
[dt.item() for dt in dts_with_oob],
dtype='O'
)
)
def test_index_to_datetime(self):
idx = Index(['1/1/2000', '1/2/2000', '1/3/2000'])
result = idx.to_datetime()
expected = DatetimeIndex(datetools.to_datetime(idx.values))
self.assertTrue(result.equals(expected))
today = datetime.today()
idx = Index([today], dtype=object)
result = idx.to_datetime()
expected = DatetimeIndex([today])
self.assertTrue(result.equals(expected))
def test_to_datetime_freq(self):
xp = bdate_range('2000-1-1', periods=10, tz='UTC')
rs = xp.to_datetime()
self.assertEqual(xp.freq, rs.freq)
self.assertEqual(xp.tzinfo, rs.tzinfo)
def test_range_misspecified(self):
# GH #1095
self.assertRaises(ValueError, date_range, '1/1/2000')
self.assertRaises(ValueError, date_range, end='1/1/2000')
self.assertRaises(ValueError, date_range, periods=10)
self.assertRaises(ValueError, date_range, '1/1/2000', freq='H')
self.assertRaises(ValueError, date_range, end='1/1/2000', freq='H')
self.assertRaises(ValueError, date_range, periods=10, freq='H')
def test_reasonable_keyerror(self):
# GH #1062
index = DatetimeIndex(['1/3/2000'])
try:
index.get_loc('1/1/2000')
except KeyError as e:
self.assertIn('2000', str(e))
def test_reindex_with_datetimes(self):
rng = date_range('1/1/2000', periods=20)
ts = Series(np.random.randn(20), index=rng)
result = ts.reindex(list(ts.index[5:10]))
expected = ts[5:10]
tm.assert_series_equal(result, expected)
result = ts[list(ts.index[5:10])]
tm.assert_series_equal(result, expected)
def test_promote_datetime_date(self):
rng = date_range('1/1/2000', periods=20)
ts = Series(np.random.randn(20), index=rng)
ts_slice = ts[5:]
ts2 = ts_slice.copy()
ts2.index = [x.date() for x in ts2.index]
result = ts + ts2
result2 = ts2 + ts
expected = ts + ts[5:]
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
# test asfreq
result = ts2.asfreq('4H', method='ffill')
expected = ts[5:].asfreq('4H', method='ffill')
assert_series_equal(result, expected)
result = rng.get_indexer(ts2.index)
expected = rng.get_indexer(ts_slice.index)
self.assert_numpy_array_equal(result, expected)
def test_asfreq_normalize(self):
rng = date_range('1/1/2000 09:30', periods=20)
norm = date_range('1/1/2000', periods=20)
vals = np.random.randn(20)
ts = Series(vals, index=rng)
result = ts.asfreq('D', normalize=True)
norm = date_range('1/1/2000', periods=20)
expected = Series(vals, index=norm)
assert_series_equal(result, expected)
vals = np.random.randn(20, 3)
ts = DataFrame(vals, index=rng)
result = ts.asfreq('D', normalize=True)
expected = DataFrame(vals, index=norm)
assert_frame_equal(result, expected)
def test_date_range_gen_error(self):
rng = date_range('1/1/2000 00:00', '1/1/2000 00:18', freq='5min')
self.assertEqual(len(rng), 4)
def test_first_subset(self):
ts = _simple_ts('1/1/2000', '1/1/2010', freq='12h')
result = ts.first('10d')
self.assertEqual(len(result), 20)
ts = _simple_ts('1/1/2000', '1/1/2010')
result = ts.first('10d')
self.assertEqual(len(result), 10)
result = ts.first('3M')
expected = ts[:'3/31/2000']
assert_series_equal(result, expected)
result = ts.first('21D')
expected = ts[:21]
assert_series_equal(result, expected)
result = ts[:0].first('3M')
assert_series_equal(result, ts[:0])
def test_last_subset(self):
ts = _simple_ts('1/1/2000', '1/1/2010', freq='12h')
result = ts.last('10d')
self.assertEqual(len(result), 20)
ts = _simple_ts('1/1/2000', '1/1/2010')
result = ts.last('10d')
self.assertEqual(len(result), 10)
result = ts.last('21D')
expected = ts['12/12/2009':]
assert_series_equal(result, expected)
result = ts.last('21D')
expected = ts[-21:]
assert_series_equal(result, expected)
result = ts[:0].last('3M')
assert_series_equal(result, ts[:0])
def test_add_offset(self):
rng = date_range('1/1/2000', '2/1/2000')
result = rng + offsets.Hour(2)
expected = date_range('1/1/2000 02:00', '2/1/2000 02:00')
self.assertTrue(result.equals(expected))
def test_format_pre_1900_dates(self):
rng = date_range('1/1/1850', '1/1/1950', freq='A-DEC')
rng.format()
ts = Series(1, index=rng)
repr(ts)
def test_repeat(self):
rng = date_range('1/1/2000', '1/1/2001')
result = rng.repeat(5)
self.assertIsNone(result.freq)
self.assertEqual(len(result), 5 * len(rng))
def test_at_time(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = Series(np.random.randn(len(rng)), index=rng)
rs = ts.at_time(rng[1])
self.assertTrue((rs.index.hour == rng[1].hour).all())
self.assertTrue((rs.index.minute == rng[1].minute).all())
self.assertTrue((rs.index.second == rng[1].second).all())
result = ts.at_time('9:30')
expected = ts.at_time(time(9, 30))
assert_series_equal(result, expected)
df = DataFrame(np.random.randn(len(rng), 3), index=rng)
result = ts[time(9, 30)]
result_df = df.ix[time(9, 30)]
expected = ts[(rng.hour == 9) & (rng.minute == 30)]
exp_df = df[(rng.hour == 9) & (rng.minute == 30)]
# expected.index = date_range('1/1/2000', '1/4/2000')
assert_series_equal(result, expected)
tm.assert_frame_equal(result_df, exp_df)
chunk = df.ix['1/4/2000':]
result = chunk.ix[time(9, 30)]
expected = result_df[-1:]
tm.assert_frame_equal(result, expected)
# midnight, everything
rng = date_range('1/1/2000', '1/31/2000')
ts = Series(np.random.randn(len(rng)), index=rng)
result = ts.at_time(time(0, 0))
assert_series_equal(result, ts)
# time doesn't exist
rng = date_range('1/1/2012', freq='23Min', periods=384)
ts = Series(np.random.randn(len(rng)), rng)
rs = ts.at_time('16:00')
self.assertEqual(len(rs), 0)
def test_at_time_frame(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
rs = ts.at_time(rng[1])
self.assertTrue((rs.index.hour == rng[1].hour).all())
self.assertTrue((rs.index.minute == rng[1].minute).all())
self.assertTrue((rs.index.second == rng[1].second).all())
result = ts.at_time('9:30')
expected = ts.at_time(time(9, 30))
assert_frame_equal(result, expected)
result = ts.ix[time(9, 30)]
expected = ts.ix[(rng.hour == 9) & (rng.minute == 30)]
assert_frame_equal(result, expected)
# midnight, everything
rng = date_range('1/1/2000', '1/31/2000')
ts = DataFrame(np.random.randn(len(rng), 3), index=rng)
result = ts.at_time(time(0, 0))
assert_frame_equal(result, ts)
# time doesn't exist
rng = date_range('1/1/2012', freq='23Min', periods=384)
ts = DataFrame(np.random.randn(len(rng), 2), rng)
rs = ts.at_time('16:00')
self.assertEqual(len(rs), 0)
def test_between_time(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = Series(np.random.randn(len(rng)), index=rng)
stime = time(0, 0)
etime = time(1, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = 13 * 4 + 1
if not inc_start:
exp_len -= 5
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue(t >= stime)
else:
self.assertTrue(t > stime)
if inc_end:
self.assertTrue(t <= etime)
else:
self.assertTrue(t < etime)
result = ts.between_time('00:00', '01:00')
expected = ts.between_time(stime, etime)
assert_series_equal(result, expected)
# across midnight
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = Series(np.random.randn(len(rng)), index=rng)
stime = time(22, 0)
etime = time(9, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = (12 * 11 + 1) * 4 + 1
if not inc_start:
exp_len -= 4
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue((t >= stime) or (t <= etime))
else:
self.assertTrue((t > stime) or (t <= etime))
if inc_end:
self.assertTrue((t <= etime) or (t >= stime))
else:
self.assertTrue((t < etime) or (t >= stime))
def test_between_time_frame(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
stime = time(0, 0)
etime = time(1, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = 13 * 4 + 1
if not inc_start:
exp_len -= 5
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue(t >= stime)
else:
self.assertTrue(t > stime)
if inc_end:
self.assertTrue(t <= etime)
else:
self.assertTrue(t < etime)
result = ts.between_time('00:00', '01:00')
expected = ts.between_time(stime, etime)
assert_frame_equal(result, expected)
# across midnight
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
stime = time(22, 0)
etime = time(9, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = (12 * 11 + 1) * 4 + 1
if not inc_start:
exp_len -= 4
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue((t >= stime) or (t <= etime))
else:
self.assertTrue((t > stime) or (t <= etime))
if inc_end:
self.assertTrue((t <= etime) or (t >= stime))
else:
self.assertTrue((t < etime) or (t >= stime))
def test_dti_constructor_preserve_dti_freq(self):
rng = date_range('1/1/2000', '1/2/2000', freq='5min')
rng2 = DatetimeIndex(rng)
self.assertEqual(rng.freq, rng2.freq)
def test_normalize(self):
rng = date_range('1/1/2000 9:30', periods=10, freq='D')
result = rng.normalize()
expected = date_range('1/1/2000', periods=10, freq='D')
self.assertTrue(result.equals(expected))
rng_ns = pd.DatetimeIndex(np.array([1380585623454345752, 1380585612343234312]).astype("datetime64[ns]"))
rng_ns_normalized = rng_ns.normalize()
expected = pd.DatetimeIndex(np.array([1380585600000000000, 1380585600000000000]).astype("datetime64[ns]"))
self.assertTrue(rng_ns_normalized.equals(expected))
self.assertTrue(result.is_normalized)
self.assertFalse(rng.is_normalized)
def test_to_period(self):
from pandas.tseries.period import period_range
ts = _simple_ts('1/1/2000', '1/1/2001')
pts = ts.to_period()
exp = ts.copy()
exp.index = period_range('1/1/2000', '1/1/2001')
assert_series_equal(pts, exp)
pts = ts.to_period('M')
self.assertTrue(pts.index.equals(exp.index.asfreq('M')))
def create_dt64_based_index(self):
data = [Timestamp('2007-01-01 10:11:12.123456Z'),
Timestamp('2007-01-01 10:11:13.789123Z')]
index = DatetimeIndex(data)
return index
def test_to_period_millisecond(self):
index = self.create_dt64_based_index()
period = index.to_period(freq='L')
self.assertEqual(2, len(period))
self.assertEqual(period[0], Period('2007-01-01 10:11:12.123Z', 'L'))
self.assertEqual(period[1], Period('2007-01-01 10:11:13.789Z', 'L'))
def test_to_period_microsecond(self):
index = self.create_dt64_based_index()
period = index.to_period(freq='U')
self.assertEqual(2, len(period))
self.assertEqual(period[0], Period('2007-01-01 10:11:12.123456Z', 'U'))
self.assertEqual(period[1], Period('2007-01-01 10:11:13.789123Z', 'U'))
def test_to_period_tz(self):
_skip_if_no_pytz()
from dateutil.tz import tzlocal
from pytz import utc as UTC
xp = date_range('1/1/2000', '4/1/2000').to_period()
ts = date_range('1/1/2000', '4/1/2000', tz='US/Eastern')
result = ts.to_period()[0]
expected = ts[0].to_period()
self.assertEqual(result, expected)
self.assertTrue(ts.to_period().equals(xp))
ts = date_range('1/1/2000', '4/1/2000', tz=UTC)
result = ts.to_period()[0]
expected = ts[0].to_period()
self.assertEqual(result, expected)
self.assertTrue(ts.to_period().equals(xp))
ts = date_range('1/1/2000', '4/1/2000', tz=tzlocal())
result = ts.to_period()[0]
expected = ts[0].to_period()
self.assertEqual(result, expected)
self.assertTrue(ts.to_period().equals(xp))
def test_frame_to_period(self):
K = 5
from pandas.tseries.period import period_range
dr = date_range('1/1/2000', '1/1/2001')
pr = period_range('1/1/2000', '1/1/2001')
df = DataFrame(randn(len(dr), K), index=dr)
df['mix'] = 'a'
pts = df.to_period()
exp = df.copy()
exp.index = pr
assert_frame_equal(pts, exp)
pts = df.to_period('M')
self.assertTrue(pts.index.equals(exp.index.asfreq('M')))
df = df.T
pts = df.to_period(axis=1)
exp = df.copy()
exp.columns = pr
assert_frame_equal(pts, exp)
pts = df.to_period('M', axis=1)
self.assertTrue(pts.columns.equals(exp.columns.asfreq('M')))
self.assertRaises(ValueError, df.to_period, axis=2)
def test_timestamp_fields(self):
# extra fields from DatetimeIndex like quarter and week
idx = tm.makeDateIndex(100)
fields = ['dayofweek', 'dayofyear', 'week', 'weekofyear', 'quarter', 'is_month_start', 'is_month_end', 'is_quarter_start', 'is_quarter_end', 'is_year_start', 'is_year_end']
for f in fields:
expected = getattr(idx, f)[-1]
result = getattr(Timestamp(idx[-1]), f)
self.assertEqual(result, expected)
self.assertEqual(idx.freq, Timestamp(idx[-1], idx.freq).freq)
self.assertEqual(idx.freqstr, Timestamp(idx[-1], idx.freq).freqstr)
def test_woy_boundary(self):
# make sure weeks at year boundaries are correct
d = datetime(2013,12,31)
result = Timestamp(d).week
expected = 1 # ISO standard
self.assertEqual(result, expected)
d = datetime(2008,12,28)
result = Timestamp(d).week
expected = 52 # ISO standard
self.assertEqual(result, expected)
d = datetime(2009,12,31)
result = Timestamp(d).week
expected = 53 # ISO standard
self.assertEqual(result, expected)
d = datetime(2010,1,1)
result = Timestamp(d).week
expected = 53 # ISO standard
self.assertEqual(result, expected)
d = datetime(2010,1,3)
result = Timestamp(d).week
expected = 53 # ISO standard
self.assertEqual(result, expected)
result = np.array([Timestamp(datetime(*args)).week for args in
[(2000,1,1),(2000,1,2),(2005,1,1),(2005,1,2)]])
self.assertTrue((result == [52, 52, 53, 53]).all())
def test_timestamp_date_out_of_range(self):
self.assertRaises(ValueError, Timestamp, '1676-01-01')
self.assertRaises(ValueError, Timestamp, '2263-01-01')
# 1475
self.assertRaises(ValueError, DatetimeIndex, ['1400-01-01'])
self.assertRaises(ValueError, DatetimeIndex, [datetime(1400, 1, 1)])
def test_timestamp_repr(self):
# pre-1900
stamp = Timestamp('1850-01-01', tz='US/Eastern')
repr(stamp)
iso8601 = '1850-01-01 01:23:45.012345'
stamp = Timestamp(iso8601, tz='US/Eastern')
result = repr(stamp)
self.assertIn(iso8601, result)
def test_timestamp_from_ordinal(self):
# GH 3042
dt = datetime(2011, 4, 16, 0, 0)
ts = Timestamp.fromordinal(dt.toordinal())
self.assertEqual(ts.to_pydatetime(), dt)
# with a tzinfo
stamp = Timestamp('2011-4-16', tz='US/Eastern')
dt_tz = stamp.to_pydatetime()
ts = Timestamp.fromordinal(dt_tz.toordinal(),tz='US/Eastern')
self.assertEqual(ts.to_pydatetime(), dt_tz)
def test_datetimeindex_integers_shift(self):
rng = date_range('1/1/2000', periods=20)
result = rng + 5
expected = rng.shift(5)
self.assertTrue(result.equals(expected))
result = rng - 5
expected = rng.shift(-5)
self.assertTrue(result.equals(expected))
def test_astype_object(self):
# NumPy 1.6.1 weak ns support
rng = date_range('1/1/2000', periods=20)
casted = rng.astype('O')
exp_values = list(rng)
self.assert_numpy_array_equal(casted, exp_values)
def test_catch_infinite_loop(self):
offset = datetools.DateOffset(minute=5)
# blow up, don't loop forever
self.assertRaises(Exception, date_range, datetime(2011, 11, 11),
datetime(2011, 11, 12), freq=offset)
def test_append_concat(self):
rng = date_range('5/8/2012 1:45', periods=10, freq='5T')
ts = Series(np.random.randn(len(rng)), rng)
df = DataFrame(np.random.randn(len(rng), 4), index=rng)
result = ts.append(ts)
result_df = df.append(df)
ex_index = DatetimeIndex(np.tile(rng.values, 2))
self.assertTrue(result.index.equals(ex_index))
self.assertTrue(result_df.index.equals(ex_index))
appended = rng.append(rng)
self.assertTrue(appended.equals(ex_index))
appended = rng.append([rng, rng])
ex_index = DatetimeIndex(np.tile(rng.values, 3))
self.assertTrue(appended.equals(ex_index))
# different index names
rng1 = rng.copy()
rng2 = rng.copy()
rng1.name = 'foo'
rng2.name = 'bar'
self.assertEqual(rng1.append(rng1).name, 'foo')
self.assertIsNone(rng1.append(rng2).name)
def test_append_concat_tz(self):
#GH 2938
_skip_if_no_pytz()
rng = date_range('5/8/2012 1:45', periods=10, freq='5T',
tz='US/Eastern')
rng2 = date_range('5/8/2012 2:35', periods=10, freq='5T',
tz='US/Eastern')
rng3 = date_range('5/8/2012 1:45', periods=20, freq='5T',
tz='US/Eastern')
ts = Series(np.random.randn(len(rng)), rng)
df = DataFrame(np.random.randn(len(rng), 4), index=rng)
ts2 = Series(np.random.randn(len(rng2)), rng2)
df2 = DataFrame(np.random.randn(len(rng2), 4), index=rng2)
result = ts.append(ts2)
result_df = df.append(df2)
self.assertTrue(result.index.equals(rng3))
self.assertTrue(result_df.index.equals(rng3))
appended = rng.append(rng2)
self.assertTrue(appended.equals(rng3))
def test_set_dataframe_column_ns_dtype(self):
x = DataFrame([datetime.now(), datetime.now()])
self.assertEqual(x[0].dtype, np.dtype('M8[ns]'))
def test_groupby_count_dateparseerror(self):
dr = date_range(start='1/1/2012', freq='5min', periods=10)
# BAD Example, datetimes first
s = Series(np.arange(10), index=[dr, lrange(10)])
grouped = s.groupby(lambda x: x[1] % 2 == 0)
result = grouped.count()
s = Series(np.arange(10), index=[lrange(10), dr])
grouped = s.groupby(lambda x: x[0] % 2 == 0)
expected = grouped.count()
assert_series_equal(result, expected)
def test_datetimeindex_repr_short(self):
dr = date_range(start='1/1/2012', periods=1)
repr(dr)
dr = date_range(start='1/1/2012', periods=2)
repr(dr)
dr = date_range(start='1/1/2012', periods=3)
repr(dr)
def test_constructor_int64_nocopy(self):
# #1624
arr = np.arange(1000, dtype=np.int64)
index = DatetimeIndex(arr)
arr[50:100] = -1
self.assertTrue((index.asi8[50:100] == -1).all())
arr = np.arange(1000, dtype=np.int64)
index = DatetimeIndex(arr, copy=True)
arr[50:100] = -1
self.assertTrue((index.asi8[50:100] != -1).all())
def test_series_interpolate_method_values(self):
# #1646
ts = _simple_ts('1/1/2000', '1/20/2000')
ts[::2] = np.nan
result = ts.interpolate(method='values')
exp = ts.interpolate()
assert_series_equal(result, exp)
def test_frame_datetime64_handling_groupby(self):
# it works!
df = DataFrame([(3, np.datetime64('2012-07-03')),
(3, np.datetime64('2012-07-04'))],
columns=['a', 'date'])
result = df.groupby('a').first()
self.assertEqual(result['date'][3], Timestamp('2012-07-03'))
def test_series_interpolate_intraday(self):
# #1698
index = pd.date_range('1/1/2012', periods=4, freq='12D')
ts = pd.Series([0, 12, 24, 36], index)
new_index = index.append(index + pd.DateOffset(days=1)).order()
exp = ts.reindex(new_index).interpolate(method='time')
index = pd.date_range('1/1/2012', periods=4, freq='12H')
ts = pd.Series([0, 12, 24, 36], index)
new_index = index.append(index + pd.DateOffset(hours=1)).order()
result = ts.reindex(new_index).interpolate(method='time')
self.assert_numpy_array_equal(result.values, exp.values)
def test_frame_dict_constructor_datetime64_1680(self):
dr = date_range('1/1/2012', periods=10)
s = Series(dr, index=dr)
# it works!
DataFrame({'a': 'foo', 'b': s}, index=dr)
DataFrame({'a': 'foo', 'b': s.values}, index=dr)
def test_frame_datetime64_mixed_index_ctor_1681(self):
dr = date_range('2011/1/1', '2012/1/1', freq='W-FRI')
ts = Series(dr)
# it works!
d = DataFrame({'A': 'foo', 'B': ts}, index=dr)
self.assertTrue(d['B'].isnull().all())
def test_frame_timeseries_to_records(self):
index = date_range('1/1/2000', periods=10)
df = DataFrame(np.random.randn(10, 3), index=index,
columns=['a', 'b', 'c'])
result = df.to_records()
        self.assertEqual(result['index'].dtype, np.dtype('M8[ns]'))
result = df.to_records(index=False)
def test_frame_datetime64_duplicated(self):
dates = date_range('2010-07-01', end='2010-08-05')
tst = DataFrame({'symbol': 'AAA', 'date': dates})
result = tst.duplicated(['date', 'symbol'])
self.assertTrue((-result).all())
tst = DataFrame({'date': dates})
result = tst.duplicated()
self.assertTrue((-result).all())
def test_timestamp_compare_with_early_datetime(self):
# e.g. datetime.min
stamp = Timestamp('2012-01-01')
self.assertFalse(stamp == datetime.min)
self.assertFalse(stamp == datetime(1600, 1, 1))
self.assertFalse(stamp == datetime(2700, 1, 1))
self.assertNotEqual(stamp, datetime.min)
self.assertNotEqual(stamp, datetime(1600, 1, 1))
self.assertNotEqual(stamp, datetime(2700, 1, 1))
self.assertTrue(stamp > datetime(1600, 1, 1))
self.assertTrue(stamp >= datetime(1600, 1, 1))
self.assertTrue(stamp < datetime(2700, 1, 1))
self.assertTrue(stamp <= datetime(2700, 1, 1))
def test_to_html_timestamp(self):
rng = date_range('2000-01-01', periods=10)
df = DataFrame(np.random.randn(10, 4), index=rng)
result = df.to_html()
self.assertIn('2000-01-01', result)
def test_to_csv_numpy_16_bug(self):
frame = DataFrame({'a': date_range('1/1/2000', periods=10)})
buf = StringIO()
frame.to_csv(buf)
result = buf.getvalue()
self.assertIn('2000-01-01', result)
def test_series_map_box_timestamps(self):
# #2689, #2627
s = Series(date_range('1/1/2000', periods=10))
def f(x):
return (x.hour, x.day, x.month)
# it works!
s.map(f)
s.apply(f)
DataFrame(s).applymap(f)
def test_concat_datetime_datetime64_frame(self):
# #2624
rows = []
rows.append([datetime(2010, 1, 1), 1])
rows.append([datetime(2010, 1, 2), 'hi'])
df2_obj = DataFrame.from_records(rows, columns=['date', 'test'])
ind = date_range(start="2000/1/1", freq="D", periods=10)
df1 = DataFrame({'date': ind, 'test':lrange(10)})
# it works!
pd.concat([df1, df2_obj])
def test_period_resample(self):
# GH3609
s = Series(range(100),index=date_range('20130101', freq='s', periods=100), dtype='float')
s[10:30] = np.nan
expected = Series([34.5, 79.5], index=[Period('2013-01-01 00:00', 'T'), Period('2013-01-01 00:01', 'T')])
result = s.to_period().resample('T', kind='period')
assert_series_equal(result, expected)
result2 = s.resample('T', kind='period')
assert_series_equal(result2, expected)
def test_period_resample_with_local_timezone(self):
# GH5430
_skip_if_no_pytz()
import pytz
local_timezone = pytz.timezone('America/Los_Angeles')
start = datetime(year=2013, month=11, day=1, hour=0, minute=0, tzinfo=pytz.utc)
# 1 day later
end = datetime(year=2013, month=11, day=2, hour=0, minute=0, tzinfo=pytz.utc)
index = pd.date_range(start, end, freq='H')
series = pd.Series(1, index=index)
series = series.tz_convert(local_timezone)
result = series.resample('D', kind='period')
# Create the expected series
expected_index = (pd.period_range(start=start, end=end, freq='D') - 1) # Index is moved back a day with the timezone conversion from UTC to Pacific
expected = pd.Series(1, index=expected_index)
assert_series_equal(result, expected)
def test_pickle(self):
#GH4606
from pandas.compat import cPickle
import pickle
for pick in [pickle, cPickle]:
p = pick.loads(pick.dumps(NaT))
self.assertTrue(p is NaT)
idx = pd.to_datetime(['2013-01-01', NaT, '2014-01-06'])
idx_p = pick.loads(pick.dumps(idx))
self.assertTrue(idx_p[0] == idx[0])
self.assertTrue(idx_p[1] is NaT)
self.assertTrue(idx_p[2] == idx[2])
def _simple_ts(start, end, freq='D'):
rng = date_range(start, end, freq=freq)
return Series(np.random.randn(len(rng)), index=rng)
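# Example of the helper above (illustrative values): _simple_ts('1/1/2000', '1/5/2000')
# returns a 5-element Series of random draws indexed by a daily DatetimeIndex; the
# interpolation tests above use it as a quick fixture.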
class TestDatetimeIndex(tm.TestCase):
_multiprocess_can_split_ = True
def test_hash_error(self):
index = date_range('20010101', periods=10)
with tm.assertRaisesRegexp(TypeError,
"unhashable type: %r" %
type(index).__name__):
hash(index)
def test_stringified_slice_with_tz(self):
#GH2658
import datetime
start=datetime.datetime.now()
idx=DatetimeIndex(start=start,freq="1d",periods=10)
df=DataFrame(lrange(10),index=idx)
df["2013-01-14 23:44:34.437768-05:00":] # no exception here
def test_append_join_nondatetimeindex(self):
rng = date_range('1/1/2000', periods=10)
idx = Index(['a', 'b', 'c', 'd'])
result = rng.append(idx)
tm.assert_isinstance(result[0], Timestamp)
# it works
rng.join(idx, how='outer')
def test_astype(self):
rng = date_range('1/1/2000', periods=10)
result = rng.astype('i8')
self.assert_numpy_array_equal(result, rng.asi8)
def test_to_period_nofreq(self):
idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-04'])
self.assertRaises(ValueError, idx.to_period)
idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-03'],
freq='infer')
idx.to_period()
def test_000constructor_resolution(self):
# 2252
t1 = Timestamp((1352934390 * 1000000000) + 1000000 + 1000 + 1)
idx = DatetimeIndex([t1])
self.assertEqual(idx.nanosecond[0], t1.nanosecond)
def test_constructor_coverage(self):
rng = date_range('1/1/2000', periods=10.5)
exp = date_range('1/1/2000', periods=10)
self.assertTrue(rng.equals(exp))
self.assertRaises(ValueError, DatetimeIndex, start='1/1/2000',
periods='foo', freq='D')
self.assertRaises(ValueError, DatetimeIndex, start='1/1/2000',
end='1/10/2000')
self.assertRaises(ValueError, DatetimeIndex, '1/1/2000')
# generator expression
gen = (datetime(2000, 1, 1) + timedelta(i) for i in range(10))
result = DatetimeIndex(gen)
expected = DatetimeIndex([datetime(2000, 1, 1) + timedelta(i)
for i in range(10)])
self.assertTrue(result.equals(expected))
# NumPy string array
strings = np.array(['2000-01-01', '2000-01-02', '2000-01-03'])
result = DatetimeIndex(strings)
expected = DatetimeIndex(strings.astype('O'))
self.assertTrue(result.equals(expected))
from_ints = DatetimeIndex(expected.asi8)
self.assertTrue(from_ints.equals(expected))
# non-conforming
self.assertRaises(ValueError, DatetimeIndex,
['2000-01-01', '2000-01-02', '2000-01-04'],
freq='D')
self.assertRaises(ValueError, DatetimeIndex,
start='2011-01-01', freq='b')
self.assertRaises(ValueError, DatetimeIndex,
end='2011-01-01', freq='B')
self.assertRaises(ValueError, DatetimeIndex, periods=10, freq='D')
def test_constructor_name(self):
idx = DatetimeIndex(start='2000-01-01', periods=1, freq='A',
name='TEST')
self.assertEqual(idx.name, 'TEST')
def test_comparisons_coverage(self):
rng = date_range('1/1/2000', periods=10)
# raise TypeError for now
self.assertRaises(TypeError, rng.__lt__, rng[3].value)
result = rng == list(rng)
exp = rng == rng
self.assert_numpy_array_equal(result, exp)
def test_map(self):
rng = date_range('1/1/2000', periods=10)
f = lambda x: x.strftime('%Y%m%d')
result = rng.map(f)
exp = [f(x) for x in rng]
self.assert_numpy_array_equal(result, exp)
def test_add_union(self):
rng = date_range('1/1/2000', periods=5)
rng2 = date_range('1/6/2000', periods=5)
result = rng + rng2
expected = rng.union(rng2)
self.assertTrue(result.equals(expected))
def test_misc_coverage(self):
rng = date_range('1/1/2000', periods=5)
result = rng.groupby(rng.day)
tm.assert_isinstance(list(result.values())[0][0], Timestamp)
idx = DatetimeIndex(['2000-01-03', '2000-01-01', '2000-01-02'])
self.assertTrue(idx.equals(list(idx)))
non_datetime = Index(list('abc'))
self.assertFalse(idx.equals(list(non_datetime)))
def test_union_coverage(self):
idx = DatetimeIndex(['2000-01-03', '2000-01-01', '2000-01-02'])
ordered = DatetimeIndex(idx.order(), freq='infer')
result = ordered.union(idx)
self.assertTrue(result.equals(ordered))
result = ordered[:0].union(ordered)
self.assertTrue(result.equals(ordered))
self.assertEqual(result.freq, ordered.freq)
def test_union_bug_1730(self):
rng_a = date_range('1/1/2012', periods=4, freq='3H')
rng_b = date_range('1/1/2012', periods=4, freq='4H')
result = rng_a.union(rng_b)
exp = DatetimeIndex(sorted(set(list(rng_a)) | set(list(rng_b))))
self.assertTrue(result.equals(exp))
def test_union_bug_1745(self):
left = DatetimeIndex(['2012-05-11 15:19:49.695000'])
right = DatetimeIndex(['2012-05-29 13:04:21.322000',
'2012-05-11 15:27:24.873000',
'2012-05-11 15:31:05.350000'])
result = left.union(right)
exp = DatetimeIndex(sorted(set(list(left)) | set(list(right))))
self.assertTrue(result.equals(exp))
def test_union_bug_4564(self):
from pandas import DateOffset
left = date_range("2013-01-01", "2013-02-01")
right = left + DateOffset(minutes=15)
result = left.union(right)
exp = DatetimeIndex(sorted(set(list(left)) | set(list(right))))
self.assertTrue(result.equals(exp))
def test_intersection_bug_1708(self):
from pandas import DateOffset
index_1 = date_range('1/1/2012', periods=4, freq='12H')
index_2 = index_1 + DateOffset(hours=1)
result = index_1 & index_2
self.assertEqual(len(result), 0)
# def test_add_timedelta64(self):
# rng = date_range('1/1/2000', periods=5)
# delta = rng.values[3] - rng.values[1]
# result = rng + delta
# expected = rng + timedelta(2)
# self.assertTrue(result.equals(expected))
def test_get_duplicates(self):
idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-02',
'2000-01-03', '2000-01-03', '2000-01-04'])
result = idx.get_duplicates()
ex = DatetimeIndex(['2000-01-02', '2000-01-03'])
self.assertTrue(result.equals(ex))
def test_argmin_argmax(self):
idx = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-02'])
self.assertEqual(idx.argmin(), 1)
self.assertEqual(idx.argmax(), 0)
def test_order(self):
idx = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-02'])
ordered = idx.order()
self.assertTrue(ordered.is_monotonic)
ordered = idx.order(ascending=False)
self.assertTrue(ordered[::-1].is_monotonic)
ordered, dexer = idx.order(return_indexer=True)
self.assertTrue(ordered.is_monotonic)
self.assert_numpy_array_equal(dexer, [1, 2, 0])
ordered, dexer = idx.order(return_indexer=True, ascending=False)
self.assertTrue(ordered[::-1].is_monotonic)
self.assert_numpy_array_equal(dexer, [0, 2, 1])
def test_insert(self):
idx = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-02'])
result = idx.insert(2, datetime(2000, 1, 5))
exp = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-05',
'2000-01-02'])
self.assertTrue(result.equals(exp))
# insertion of non-datetime should coerce to object index
result = idx.insert(1, 'inserted')
expected = Index([datetime(2000, 1, 4), 'inserted', datetime(2000, 1, 1),
datetime(2000, 1, 2)])
self.assertNotIsInstance(result, DatetimeIndex)
tm.assert_index_equal(result, expected)
idx = date_range('1/1/2000', periods=3, freq='M')
result = idx.insert(3, datetime(2000, 4, 30))
self.assertEqual(result.freqstr, 'M')
def test_map_bug_1677(self):
index = DatetimeIndex(['2012-04-25 09:30:00.393000'])
f = index.asof
result = index.map(f)
expected = np.array([f(index[0])])
self.assert_numpy_array_equal(result, expected)
def test_groupby_function_tuple_1677(self):
df = DataFrame(np.random.rand(100),
index=date_range("1/1/2000", periods=100))
monthly_group = df.groupby(lambda x: (x.year, x.month))
result = monthly_group.mean()
tm.assert_isinstance(result.index[0], tuple)
def test_append_numpy_bug_1681(self):
# another datetime64 bug
dr = date_range('2011/1/1', '2012/1/1', freq='W-FRI')
a = DataFrame()
c = DataFrame({'A': 'foo', 'B': dr}, index=dr)
result = a.append(c)
self.assertTrue((result['B'] == dr).all())
def test_isin(self):
index = tm.makeDateIndex(4)
result = index.isin(index)
self.assertTrue(result.all())
result = index.isin(list(index))
self.assertTrue(result.all())
assert_almost_equal(index.isin([index[2], 5]),
[False, False, True, False])
def test_union(self):
i1 = Int64Index(np.arange(0, 20, 2))
i2 = Int64Index(np.arange(10, 30, 2))
result = i1.union(i2)
expected = Int64Index(np.arange(0, 30, 2))
self.assert_numpy_array_equal(result, expected)
def test_union_with_DatetimeIndex(self):
i1 = Int64Index(np.arange(0, 20, 2))
i2 = DatetimeIndex(start='2012-01-03 00:00:00', periods=10, freq='D')
i1.union(i2) # Works
i2.union(i1) # Fails with "AttributeError: can't set attribute"
def test_time(self):
rng = pd.date_range('1/1/2000', freq='12min', periods=10)
result = pd.Index(rng).time
expected = [t.time() for t in rng]
self.assertTrue((result == expected).all())
def test_date(self):
rng = pd.date_range('1/1/2000', freq='12H', periods=10)
result = pd.Index(rng).date
expected = [t.date() for t in rng]
self.assertTrue((result == expected).all())
def test_does_not_convert_mixed_integer(self):
df = tm.makeCustomDataframe(10, 10, data_gen_f=lambda *args, **kwargs:
randn(), r_idx_type='i', c_idx_type='dt')
cols = df.columns.join(df.index, how='outer')
joined = cols.join(df.columns)
self.assertEqual(cols.dtype, np.dtype('O'))
self.assertEqual(cols.dtype, joined.dtype)
assert_array_equal(cols.values, joined.values)
def test_slice_keeps_name(self):
# GH4226
st = pd.Timestamp('2013-07-01 00:00:00', tz='America/Los_Angeles')
et = pd.Timestamp('2013-07-02 00:00:00', tz='America/Los_Angeles')
dr = pd.date_range(st, et, freq='H', name='timebucket')
self.assertEqual(dr[1:].name, dr.name)
def test_join_self(self):
index = date_range('1/1/2000', periods=10)
kinds = 'outer', 'inner', 'left', 'right'
for kind in kinds:
joined = index.join(index, how=kind)
self.assertIs(index, joined)
def assert_index_parameters(self, index):
assert index.freq == '40960N'
assert index.inferred_freq == '40960N'
def test_ns_index(self):
if _np_version_under1p7:
raise nose.SkipTest
nsamples = 400
ns = int(1e9 / 24414)
dtstart = np.datetime64('2012-09-20T00:00:00')
dt = dtstart + np.arange(nsamples) * np.timedelta64(ns, 'ns')
freq = ns * pd.datetools.Nano()
index = pd.DatetimeIndex(dt, freq=freq, name='time')
self.assert_index_parameters(index)
new_index = pd.DatetimeIndex(start=index[0], end=index[-1], freq=index.freq)
self.assert_index_parameters(new_index)
def test_join_with_period_index(self):
df = tm.makeCustomDataframe(10, 10, data_gen_f=lambda *args:
np.random.randint(2), c_idx_type='p',
r_idx_type='dt')
s = df.iloc[:5, 0]
joins = 'left', 'right', 'inner', 'outer'
for join in joins:
with tm.assertRaisesRegexp(ValueError, 'can only call with other '
'PeriodIndex-ed objects'):
df.columns.join(s.index, how=join)
def test_factorize(self):
idx1 = DatetimeIndex(['2014-01', '2014-01', '2014-02',
'2014-02', '2014-03', '2014-03'])
exp_arr = np.array([0, 0, 1, 1, 2, 2])
exp_idx = DatetimeIndex(['2014-01', '2014-02', '2014-03'])
arr, idx = idx1.factorize()
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
arr, idx = idx1.factorize(sort=True)
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
# tz must be preserved
idx1 = idx1.tz_localize('Asia/Tokyo')
exp_idx = exp_idx.tz_localize('Asia/Tokyo')
arr, idx = idx1.factorize()
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
idx2 = pd.DatetimeIndex(['2014-03', '2014-03', '2014-02', '2014-01',
'2014-03', '2014-01'])
exp_arr = np.array([2, 2, 1, 0, 2, 0])
exp_idx = DatetimeIndex(['2014-01', '2014-02', '2014-03'])
arr, idx = idx2.factorize(sort=True)
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
exp_arr = np.array([0, 0, 1, 2, 0, 2])
exp_idx = DatetimeIndex(['2014-03', '2014-02', '2014-01'])
arr, idx = idx2.factorize()
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(exp_idx))
# freq must be preserved
idx3 = date_range('2000-01', periods=4, freq='M', tz='Asia/Tokyo')
exp_arr = np.array([0, 1, 2, 3])
arr, idx = idx3.factorize()
self.assert_numpy_array_equal(arr, exp_arr)
self.assertTrue(idx.equals(idx3))
class TestDatetime64(tm.TestCase):
"""
Also test support for datetime64[ns] in Series / DataFrame
"""
def setUp(self):
dti = DatetimeIndex(start=datetime(2005, 1, 1),
end=datetime(2005, 1, 10), freq='Min')
self.series = Series(rand(len(dti)), dti)
def test_datetimeindex_accessors(self):
dti = DatetimeIndex(
freq='D', start=datetime(1998, 1, 1), periods=365)
self.assertEqual(dti.year[0], 1998)
self.assertEqual(dti.month[0], 1)
self.assertEqual(dti.day[0], 1)
self.assertEqual(dti.hour[0], 0)
self.assertEqual(dti.minute[0], 0)
self.assertEqual(dti.second[0], 0)
self.assertEqual(dti.microsecond[0], 0)
self.assertEqual(dti.dayofweek[0], 3)
self.assertEqual(dti.dayofyear[0], 1)
self.assertEqual(dti.dayofyear[120], 121)
self.assertEqual(dti.weekofyear[0], 1)
self.assertEqual(dti.weekofyear[120], 18)
self.assertEqual(dti.quarter[0], 1)
self.assertEqual(dti.quarter[120], 2)
self.assertEqual(dti.is_month_start[0], True)
self.assertEqual(dti.is_month_start[1], False)
self.assertEqual(dti.is_month_start[31], True)
self.assertEqual(dti.is_quarter_start[0], True)
self.assertEqual(dti.is_quarter_start[90], True)
self.assertEqual(dti.is_year_start[0], True)
self.assertEqual(dti.is_year_start[364], False)
self.assertEqual(dti.is_month_end[0], False)
self.assertEqual(dti.is_month_end[30], True)
self.assertEqual(dti.is_month_end[31], False)
self.assertEqual(dti.is_month_end[364], True)
self.assertEqual(dti.is_quarter_end[0], False)
self.assertEqual(dti.is_quarter_end[30], False)
self.assertEqual(dti.is_quarter_end[89], True)
self.assertEqual(dti.is_quarter_end[364], True)
self.assertEqual(dti.is_year_end[0], False)
self.assertEqual(dti.is_year_end[364], True)
self.assertEqual(len(dti.year), 365)
self.assertEqual(len(dti.month), 365)
self.assertEqual(len(dti.day), 365)
self.assertEqual(len(dti.hour), 365)
self.assertEqual(len(dti.minute), 365)
self.assertEqual(len(dti.second), 365)
self.assertEqual(len(dti.microsecond), 365)
self.assertEqual(len(dti.dayofweek), 365)
self.assertEqual(len(dti.dayofyear), 365)
self.assertEqual(len(dti.weekofyear), 365)
self.assertEqual(len(dti.quarter), 365)
self.assertEqual(len(dti.is_month_start), 365)
self.assertEqual(len(dti.is_month_end), 365)
self.assertEqual(len(dti.is_quarter_start), 365)
self.assertEqual(len(dti.is_quarter_end), 365)
self.assertEqual(len(dti.is_year_start), 365)
self.assertEqual(len(dti.is_year_end), 365)
dti = DatetimeIndex(
freq='BQ-FEB', start=datetime(1998, 1, 1), periods=4)
self.assertEqual(sum(dti.is_quarter_start), 0)
self.assertEqual(sum(dti.is_quarter_end), 4)
self.assertEqual(sum(dti.is_year_start), 0)
self.assertEqual(sum(dti.is_year_end), 1)
# Ensure is_start/end accessors throw ValueError for CustomBusinessDay, CBD requires np >= 1.7
if not _np_version_under1p7:
bday_egypt = offsets.CustomBusinessDay(weekmask='Sun Mon Tue Wed Thu')
dti = date_range(datetime(2013, 4, 30), periods=5, freq=bday_egypt)
self.assertRaises(ValueError, lambda: dti.is_month_start)
dti = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-03'])
self.assertEqual(dti.is_month_start[0], 1)
tests = [
(Timestamp('2013-06-01', offset='M').is_month_start, 1),
(Timestamp('2013-06-01', offset='BM').is_month_start, 0),
(Timestamp('2013-06-03', offset='M').is_month_start, 0),
(Timestamp('2013-06-03', offset='BM').is_month_start, 1),
(Timestamp('2013-02-28', offset='Q-FEB').is_month_end, 1),
(Timestamp('2013-02-28', offset='Q-FEB').is_quarter_end, 1),
(Timestamp('2013-02-28', offset='Q-FEB').is_year_end, 1),
(Timestamp('2013-03-01', offset='Q-FEB').is_month_start, 1),
(Timestamp('2013-03-01', offset='Q-FEB').is_quarter_start, 1),
(Timestamp('2013-03-01', offset='Q-FEB').is_year_start, 1),
(Timestamp('2013-03-31', offset='QS-FEB').is_month_end, 1),
(Timestamp('2013-03-31', offset='QS-FEB').is_quarter_end, 0),
(Timestamp('2013-03-31', offset='QS-FEB').is_year_end, 0),
(Timestamp('2013-02-01', offset='QS-FEB').is_month_start, 1),
(Timestamp('2013-02-01', offset='QS-FEB').is_quarter_start, 1),
(Timestamp('2013-02-01', offset='QS-FEB').is_year_start, 1),
(Timestamp('2013-06-30', offset='BQ').is_month_end, 0),
(Timestamp('2013-06-30', offset='BQ').is_quarter_end, 0),
(Timestamp('2013-06-30', offset='BQ').is_year_end, 0),
(Timestamp('2013-06-28', offset='BQ').is_month_end, 1),
(Timestamp('2013-06-28', offset='BQ').is_quarter_end, 1),
(Timestamp('2013-06-28', offset='BQ').is_year_end, 0),
(Timestamp('2013-06-30', offset='BQS-APR').is_month_end, 0),
(Timestamp('2013-06-30', offset='BQS-APR').is_quarter_end, 0),
(Timestamp('2013-06-30', offset='BQS-APR').is_year_end, 0),
(Timestamp('2013-06-28', offset='BQS-APR').is_month_end, 1),
(Timestamp('2013-06-28', offset='BQS-APR').is_quarter_end, 1),
(Timestamp('2013-03-29', offset='BQS-APR').is_year_end, 1),
(Timestamp('2013-11-01', offset='AS-NOV').is_year_start, 1),
(Timestamp('2013-10-31', offset='AS-NOV').is_year_end, 1)]
for ts, value in tests:
self.assertEqual(ts, value)
def test_nanosecond_field(self):
dti = DatetimeIndex(np.arange(10))
self.assert_numpy_array_equal(dti.nanosecond, np.arange(10))
def test_datetimeindex_diff(self):
dti1 = DatetimeIndex(freq='Q-JAN', start=datetime(1997, 12, 31),
periods=100)
dti2 = DatetimeIndex(freq='Q-JAN', start=datetime(1997, 12, 31),
periods=98)
self.assertEqual(len(dti1.diff(dti2)), 2)
def test_fancy_getitem(self):
dti = DatetimeIndex(freq='WOM-1FRI', start=datetime(2005, 1, 1),
end=datetime(2010, 1, 1))
s = Series(np.arange(len(dti)), index=dti)
self.assertEqual(s[48], 48)
self.assertEqual(s['1/2/2009'], 48)
self.assertEqual(s['2009-1-2'], 48)
self.assertEqual(s[datetime(2009, 1, 2)], 48)
self.assertEqual(s[lib.Timestamp(datetime(2009, 1, 2))], 48)
self.assertRaises(KeyError, s.__getitem__, '2009-1-3')
assert_series_equal(s['3/6/2009':'2009-06-05'],
s[datetime(2009, 3, 6):datetime(2009, 6, 5)])
def test_fancy_setitem(self):
dti = DatetimeIndex(freq='WOM-1FRI', start=datetime(2005, 1, 1),
end=datetime(2010, 1, 1))
s = Series(np.arange(len(dti)), index=dti)
s[48] = -1
self.assertEqual(s[48], -1)
s['1/2/2009'] = -2
self.assertEqual(s[48], -2)
s['1/2/2009':'2009-06-05'] = -3
self.assertTrue((s[48:54] == -3).all())
def test_datetimeindex_constructor(self):
arr = ['1/1/2005', '1/2/2005', 'Jn 3, 2005', '2005-01-04']
self.assertRaises(Exception, DatetimeIndex, arr)
arr = ['1/1/2005', '1/2/2005', '1/3/2005', '2005-01-04']
idx1 = DatetimeIndex(arr)
arr = [datetime(2005, 1, 1), '1/2/2005', '1/3/2005', '2005-01-04']
idx2 = DatetimeIndex(arr)
arr = [lib.Timestamp(datetime(2005, 1, 1)), '1/2/2005', '1/3/2005',
'2005-01-04']
idx3 = DatetimeIndex(arr)
arr = np.array(['1/1/2005', '1/2/2005', '1/3/2005',
'2005-01-04'], dtype='O')
idx4 = DatetimeIndex(arr)
arr = to_datetime(['1/1/2005', '1/2/2005', '1/3/2005', '2005-01-04'])
idx5 = DatetimeIndex(arr)
arr = to_datetime(
['1/1/2005', '1/2/2005', 'Jan 3, 2005', '2005-01-04'])
idx6 = DatetimeIndex(arr)
idx7 = DatetimeIndex(['12/05/2007', '25/01/2008'], dayfirst=True)
idx8 = DatetimeIndex(['2007/05/12', '2008/01/25'], dayfirst=False,
yearfirst=True)
self.assertTrue(idx7.equals(idx8))
for other in [idx2, idx3, idx4, idx5, idx6]:
self.assertTrue((idx1.values == other.values).all())
sdate = datetime(1999, 12, 25)
edate = datetime(2000, 1, 1)
idx = DatetimeIndex(start=sdate, freq='1B', periods=20)
self.assertEqual(len(idx), 20)
self.assertEqual(idx[0], sdate + 0 * dt.bday)
self.assertEqual(idx.freq, 'B')
idx = DatetimeIndex(end=edate, freq=('D', 5), periods=20)
self.assertEqual(len(idx), 20)
self.assertEqual(idx[-1], edate)
self.assertEqual(idx.freq, '5D')
idx1 = DatetimeIndex(start=sdate, end=edate, freq='W-SUN')
idx2 = DatetimeIndex(start=sdate, end=edate,
                             freq=dt.Week(weekday=6))
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from Borehole_Example.src_ProdKernel_vvCV_MD1P.vv_CV_unbalanced_FixB_MD1P_borehole import *
from Borehole_Example.src_ProdKernel_vvCV_MD1P.vv_CV_unbalanced_MD1P_borehole import *
from Borehole_Example.src_ProdKernel_vvCV_MD1P.stein_operators_borehole import *
from Borehole_Example.src_ProdKernel_vvCV_MD1P.product_base_kernels_borehole import *
def my_func_LF(X):
    assert X.dim() == 2, "Design points matrix X should be a 2D tensor; each row is an instance whose dimension matches the Borehole function inputs."
n = X.size()[0]
d = X.size()[1]
o = torch.zeros(n)
for i in range(n):
r_w = X[i,0]
r = X[i,1]
T_u = X[i,2]
T_l = X[i,3]
H_u = X[i,4]
H_l = X[i,5]
L = X[i,6]
K_w = X[i,7]
o[i] = 5*T_u*(H_u - H_l)/(torch.log(r/r_w) * (1.5 + (2 *L*T_u)/(torch.log(r/r_w) * (r_w ** 2) * K_w) + T_u / T_l))
if o.dim() == 1:
o = o.unsqueeze(dim=1)
assert o.size() == torch.Size([n, 1])
return o
def my_func_HF(X):
    assert X.dim() == 2, "Design points matrix X should be a 2D tensor; each row is an instance whose dimension matches the Borehole function inputs."
n = X.size()[0]
d = X.size()[1]
o = torch.zeros(n)
for i in range(n):
r_w = X[i, 0]
r = X[i, 1]
T_u = X[i, 2]
T_l = X[i, 3]
H_u = X[i, 4]
H_l = X[i, 5]
L = X[i, 6]
K_w = X[i, 7]
o[i] = 2 * math.pi * T_u * (H_u - H_l) / (torch.log(r/r_w) * (1.0 + (2*L*T_u )/( torch.log(r/r_w) * (r_w**2) * K_w) + T_u/T_l) )
if o.dim() == 1:
o = o.unsqueeze(dim=1)
assert o.size() == torch.Size([n, 1])
return o
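def _demo_borehole_outputs(n_points=5, seed=0):
    # Illustrative helper added for exposition only (not part of the original
    # experiment script): evaluates both fidelity levels on a small random design
    # matrix and returns the two (n_points, 1) output tensors. Assumes `torch` is
    # in scope via the wildcard imports at the top of this file, as it already is
    # for the two functions above.
    torch.manual_seed(seed)
    X_demo = torch.rand(n_points, 8) + 0.05  # 8 borehole inputs; keeps r_w strictly positive
    y_lo = my_func_LF(X_demo)
    y_hi = my_func_HF(X_demo)
    assert y_lo.size() == torch.Size([n_points, 1]) and y_hi.size() == torch.Size([n_points, 1])
    return y_lo, y_hi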
def EXP_borehole_unbalanced(no_replica=20, no_epochs=800, no_points_HF=50, no_points_LF=100,
                            my_batch_size_tune=5, my_lr_tune=0.2, my_tune_epochs=10, my_tune_verbose=True,
                            my_regularizer_const_weights_optimCV=1e-5, my_batch_size_optimCV=10,
                            my_lr_optimCV=0.004, my_optimCV_verbose=True):
NO_tasks= 2
large_saved_MC_ests = torch.zeros(no_replica, NO_tasks)
large_save_est_scalar_f1 = torch.zeros(no_replica, no_epochs)
large_save_est_scalar_f2 = torch.zeros(no_replica, no_epochs)
large_save_est_vecfunc = torch.zeros(no_replica, no_epochs, NO_tasks)
large_save_est_vecfunc_fixB = torch.zeros(no_replica, no_epochs, NO_tasks)
for i in range(no_replica):
# Setting means and vars
mu_r_w = torch.ones(1) * 0.1
mu_r = torch.ones(1) * 100.
mu_T_u = torch.ones(1) * ((63070 + 115600) / 2)
mu_T_l = torch.ones(1) * ((63.1 + 116) / 2)
mu_H_u = torch.ones(1) * ((990 + 1110) / 2)
mu_H_l = torch.ones(1) * ((700 + 820) / 2)
mu_L = torch.ones(1) * ((1120 + 1680) / 2)
mu_K_w = torch.ones(1) * ((9855 + 12045) / 2)
var_r_w = torch.ones(1) * 0.0161812 ** 2
var_r = torch.ones(1) * 0.01
var_T_u = torch.ones(1) * 20.
var_T_l = torch.ones(1) * 1.
var_H_u = torch.ones(1) * 1.
var_H_l = torch.ones(1) * 1.
var_L = torch.ones(1) * 10.
var_K_w = torch.ones(1) * 30.
my_mus = torch.Tensor([mu_r_w, mu_r, mu_T_u, mu_T_l, mu_H_u, mu_H_l, mu_L, mu_K_w])
my_mus = my_mus.unsqueeze(dim=1)
my_mus.size()
my_vars = torch.Tensor([var_r_w, var_r, var_T_u, var_T_l, var_H_u, var_H_l, var_L, var_K_w])
my_vars = my_vars.unsqueeze(dim=1)
my_vars.size()
print("REP {} out of {}-----------".format(i + 1, no_replica))
# Training samples
m_HF = no_points_HF
m_LF = no_points_LF
torch.manual_seed(2 * i )
r_ws_X1 = mu_r_w + torch.sqrt(var_r_w) * torch.randn(m_HF, 1)
rs_X1 = mu_r + torch.sqrt(var_r) * torch.randn(m_HF, 1)
T_us_X1 = mu_T_u + torch.sqrt(var_T_u) * torch.randn(m_HF, 1)
T_ls_X1 = mu_T_l + torch.sqrt(var_T_l) * torch.randn(m_HF, 1)
H_us_X1 = mu_H_u + torch.sqrt(var_H_u) * torch.randn(m_HF, 1)
H_ls_X1 = mu_H_l + torch.sqrt(var_H_l) * torch.randn(m_HF, 1)
Ls_X1 = mu_L + torch.sqrt(var_L) * torch.randn(m_HF, 1)
K_ws_X1 = mu_K_w + torch.sqrt(var_K_w) * torch.randn(m_HF, 1)
X1 = torch.stack((r_ws_X1, rs_X1, T_us_X1, T_ls_X1, H_us_X1, H_ls_X1, Ls_X1, K_ws_X1), dim=1).squeeze()
X1.size()
Y1 = my_func_LF(X1)
Y1.size()
torch.manual_seed(2 * i+1)
r_ws_X2 = mu_r_w + torch.sqrt(var_r_w) * torch.randn(m_LF, 1)
rs_X2 = mu_r + torch.sqrt(var_r) * torch.randn(m_LF, 1)
T_us_X2 = mu_T_u + torch.sqrt(var_T_u) * torch.randn(m_LF, 1)
T_ls_X2 = mu_T_l + torch.sqrt(var_T_l) * torch.randn(m_LF, 1)
H_us_X2 = mu_H_u + torch.sqrt(var_H_u) * torch.randn(m_LF, 1)
H_ls_X2 = mu_H_l + torch.sqrt(var_H_l) * torch.randn(m_LF, 1)
Ls_X2 = mu_L + torch.sqrt(var_L) * torch.randn(m_LF, 1)
K_ws_X2 = mu_K_w + torch.sqrt(var_K_w) * torch.randn(m_LF, 1)
X2 = torch.stack((r_ws_X2, rs_X2, T_us_X2, T_ls_X2, H_us_X2, H_ls_X2, Ls_X2, K_ws_X2), dim=1).squeeze()
X2.size()
Y2 = my_func_HF(X2)
Y2.size()
# Compute scores
score_X1 = product_Normal_score(my_mus, my_vars, X1)
score_X1.size()
score_X2 = product_Normal_score(my_mus, my_vars, X2)
score_X2.size()
xall = (X1, X2)
yall = (Y1, Y2)
score_all = (score_X1, score_X2)
# Monte Carlo estimates
large_saved_MC_ests[i] = torch.Tensor([Y1.mean(dim=0), Y2.mean(dim=0)])
# vv-CV-unbalanced: MD1P with B fixed
print("REP {} out of {} --- vv-CV-unbalanced: MD1P with B fixed -----------".format(i + 1, no_replica))
my_SCV_vectorvaluedfunc_unbalanced_fixB = VV_CV_vectorvaluedfuncs_model_unbalanced_fixB_borehole(vv_cv_objective=penalized_ls_objective_vectorvaluedfunc_unbalanced_fixB_borehole, prior_kernel=stein_base_kernel_borehole, base_kernel=prod_rbf_kernel_Borehore, Xs_tuple=xall, Ys_tuple=yall, scores_tuple=score_all)
torch.manual_seed(0)
my_SCV_vectorvaluedfunc_unbalanced_fixB.do_tune_kernelparams_negmllk(batch_size_tune=my_batch_size_tune, flag_if_use_medianheuristic=False, beta_cstkernel=1, lr=my_lr_tune, epochs=my_tune_epochs, verbose=my_tune_verbose)
        # Manually set a fixed B matrix
my_SCV_vectorvaluedfunc_unbalanced_fixB.B = 0.005 * torch.Tensor([[0.1, 0.01], [0.01, 0.1]])
        my_SCV_vectorvaluedfunc_unbalanced_fixB.do_optimize_vv_CV(regularizer_const=my_regularizer_const_weights_optimCV, batch_size=int(my_batch_size_optimCV/NO_tasks), lr=my_lr_optimCV, epochs=no_epochs, verbose=my_optimCV_verbose)
large_save_est_vecfunc_fixB[i] = my_SCV_vectorvaluedfunc_unbalanced_fixB.saved_BQ_est.squeeze().detach().clone()
# vv-CV-unbalanced: MD1P with learning B
print("REP {} out of {} --- vv-CV-unbalanced: MD1P with learning B -----------".format(i + 1, no_replica))
torch.manual_seed(0)
my_SCV_vectorvaluedfunc = VV_CV_vectorvaluedfuncs_model_unbalanced_borehole(vv_cv_objective=penalized_ls_objective_vectorvaluedfunc_unbalanced_borehole, prior_kernel=stein_base_kernel_borehole,base_kernel=prod_rbf_kernel_Borehore, Xs_tuple=xall, Ys_tuple=yall, scores_tuple=score_all)
my_SCV_vectorvaluedfunc.do_tune_kernelparams_negmllk(batch_size_tune=my_batch_size_tune, flag_if_use_medianheuristic=False, beta_cstkernel=1, lr=my_lr_tune, epochs=my_tune_epochs, verbose=my_tune_verbose)
torch.manual_seed(0)
        my_SCV_vectorvaluedfunc.do_optimize_vv_CV(regularizer_const=my_regularizer_const_weights_optimCV, regularizer_const_FB=1, batch_size=int(my_batch_size_optimCV/NO_tasks), lr=my_lr_optimCV, epochs=no_epochs, verbose=my_optimCV_verbose)
large_save_est_vecfunc[i] = my_SCV_vectorvaluedfunc.saved_BQ_est.squeeze().detach().clone()
return no_replica, no_epochs, large_saved_MC_ests , large_save_est_scalar_f1, large_save_est_scalar_f2 , large_save_est_vecfunc_fixB , large_save_est_vecfunc
class Borehole_unbalanced_exps(object):
def __init__(self, set_of_ss_HF, set_of_ss_LF, no_replica, no_epochs, set_of_batch_size_tune, set_of_lr_tune, set_of_tune_epochs, tune_verbose, set_of_regularizer_const_weights_optimCV, set_of_batch_size_optimCV, set_of_lr_optimCV, optimCV_verbose ):
"""
:param set_of_ss_HF: list, e.g. [50, 50, 50]
:param set_of_ss_LF: list, e.g. [50, 100,150]
:param no_replica: int
:param no_epochs: int
:param set_of_batch_size_tune: list
:param set_of_lr_tune: list
:param set_of_tune_epochs: list
:param tune_verbose: bool
:param set_of_regularizer_const_weights_optimCV: list
:param set_of_batch_size_optimCV: list
:param set_of_lr_optimCV: list
:param optimCV_verbose: bool
"""
assert len(set_of_ss_LF) == len(set_of_ss_HF), "set_of_ss_LF and set_of_ss_HF should have equal size."
self.set_of_ss_HF = set_of_ss_HF
self.set_of_ss_LF = set_of_ss_LF
self.no_replica = no_replica
self.no_epochs = no_epochs
self.set_of_batch_size_tune = set_of_batch_size_tune
self.set_of_lr_tune = set_of_lr_tune
self.set_of_tune_epochs = set_of_tune_epochs
self.tune_verbose = tune_verbose
self.set_of_regularizer_const_weights_optimCV = set_of_regularizer_const_weights_optimCV
self.set_of_batch_size_optimCV = set_of_batch_size_optimCV
self.set_of_lr_optimCV = set_of_lr_optimCV
self.optimCV_verbose = optimCV_verbose
        self.no_exps = len(set_of_ss_LF)  # number of sample-size settings, e.g. (50,50), (50,100), (50,150) gives three experiments
self.no_tasks = 2 # this is fixed as we only have 2 functions for the Borehole example
self.large_saved_MC_ests_tensor = torch.zeros( self.no_exps, self.no_replica, self.no_tasks)
self.large_save_est_scalar_f1_tensor = torch.zeros(self.no_exps, self.no_replica, self.no_epochs)
self.large_save_est_scalar_f2_tensor = torch.zeros(self.no_exps, self.no_replica, self.no_epochs)
self.large_save_est_vecfunc_tensor = torch.zeros(self.no_exps, self.no_replica, self.no_epochs, self.no_tasks)
self.large_save_est_vecfunc_fixB_tensor = torch.zeros(self.no_exps, self.no_replica, self.no_epochs, self.no_tasks)
def run_borehole(self, if_plt=True):
for i in range(self.no_exps):
cur_ss_HF = self.set_of_ss_HF[i]
cur_ss_LF = self.set_of_ss_LF[i]
cur_bs_tune = self.set_of_batch_size_tune[i]
cur_lr_tune = self.set_of_lr_tune[i]
cur_epochs_tune = self.set_of_tune_epochs[i]
cur_regularizer_const_weights_optimCV= self.set_of_regularizer_const_weights_optimCV[i]
cur_bs_optimCV = self.set_of_batch_size_optimCV[i]
cur_lr_optimCV = self.set_of_lr_optimCV[i]
no_replica, no_epochs, \
large_saved_MC_ests, \
large_save_est_scalar_f1, large_save_est_scalar_f2, \
large_save_est_vecfunc_fixB, \
large_save_est_vecfunc = EXP_borehole_unbalanced(no_replica=self.no_replica, no_epochs=self.no_epochs,\
no_points_HF = cur_ss_HF, \
no_points_LF=cur_ss_LF,\
my_batch_size_tune=cur_bs_tune,\
my_lr_tune=cur_lr_tune, my_tune_epochs=cur_epochs_tune,\
my_tune_verbose=self.tune_verbose,\
my_regularizer_const_weights_optimCV=cur_regularizer_const_weights_optimCV,\
my_batch_size_optimCV=cur_bs_optimCV,\
my_lr_optimCV=cur_lr_optimCV)
self.large_saved_MC_ests_tensor[i,] = large_saved_MC_ests
self.large_save_est_scalar_f1_tensor[i,] = large_save_est_scalar_f1
self.large_save_est_scalar_f2_tensor[i,] = large_save_est_scalar_f2
self.large_save_est_vecfunc_tensor[i,] = large_save_est_vecfunc
self.large_save_est_vecfunc_fixB_tensor[i,] = large_save_est_vecfunc_fixB
if if_plt==True:
tv_LF = 57.9472 # when mu_r=100 and sample size is 500000
tv_HF = 72.8904 #
fig, ax = plt.subplots()
sns.set_style("darkgrid")
clrs = sns.color_palette("Paired")
start_pos = 0
plt.xlabel('Number of Epochs')
plt.ylabel('Abs. Err.')
plt.hlines((large_saved_MC_ests[:, 1] - tv_HF).abs().mean().repeat(1, no_epochs), start_pos + 1, no_epochs, colors='g',label='MC-HF')
vv_HF_mean_fixB = (large_save_est_vecfunc_fixB[:, :, 1] - tv_HF).abs().mean(dim=0).detach().numpy()
vv_HF_std_fixB = (large_save_est_vecfunc_fixB[:, :, 1] - tv_HF).abs().std(dim=0).detach().numpy() / np.sqrt(no_replica)
ax.plot(np.array(list(range(no_epochs))[start_pos:]) + 1, vv_HF_mean_fixB[start_pos:], c=clrs[7], marker='x', label='vv-CV-FixB-HF')
ax.fill_between(np.array(list(range(no_epochs))[start_pos:]) + 1, vv_HF_mean_fixB[start_pos:] - vv_HF_std_fixB[start_pos:], vv_HF_mean_fixB[start_pos:] + vv_HF_std_fixB[start_pos:], alpha=0.3, facecolor=clrs[7])
vv_HF_mean = (large_save_est_vecfunc[:, :, 1] - tv_HF).abs().mean(dim=0).detach().numpy()
vv_HF_std = (large_save_est_vecfunc[:, :, 1] - tv_HF).abs().std(dim=0).detach().numpy() / np.sqrt(no_replica)
ax.plot(np.array(list(range(no_epochs))[start_pos:]) + 1, vv_HF_mean[start_pos:], c=clrs[9], marker='.', label='vv-CV-HF')
ax.fill_between(np.array(list(range(no_epochs))[start_pos:]) + 1, vv_HF_mean[start_pos:] - vv_HF_std[start_pos:], vv_HF_mean[start_pos:] + vv_HF_std[start_pos:], alpha=0.3, facecolor=clrs[9])
ax.legend()
plt.show()
# Run~
The_borehole_unbalanced_exps = Borehole_unbalanced_exps(set_of_ss_HF=[20, 20, 20], set_of_ss_LF=[20, 40, 60], \
no_replica=100, no_epochs=600, \
set_of_batch_size_tune=np.repeat(5,3), set_of_lr_tune=np.repeat(0.05,3),set_of_tune_epochs=np.repeat(20,3), tune_verbose=True,\
set_of_regularizer_const_weights_optimCV=np.repeat(1e-5, 3), set_of_batch_size_optimCV=np.repeat(10,3), set_of_lr_optimCV=[0.06, 0.04, 0.02], optimCV_verbose=True )
The_borehole_unbalanced_exps.run_borehole(if_plt=True)
# Results
tv_LF = 57.9472 # when mu_r=100 and sample size is 500000
tv_HF = 72.8904 #
# Plot
# Form a pd.dataframe
no_replica = The_borehole_unbalanced_exps.no_replica
set_of_HF = The_borehole_unbalanced_exps.set_of_ss_HF
set_of_LF = The_borehole_unbalanced_exps.set_of_ss_LF
no_ss = len(set_of_HF)
# Note: in the saved tensors, index 0 of the last dimension is the low-fidelity output and index 1 the high-fidelity one
for i in range(no_ss):
# vv-CV with fixed B
VV_fixB_cvest_funcidx_methodidx_f1 = list(zip(np.abs(The_borehole_unbalanced_exps.large_save_est_vecfunc_fixB_tensor[i, :, -1, 0].detach().numpy() - tv_LF), np.repeat('Low-fidelity model', no_replica), np.repeat('Fixed B', no_replica), np.repeat(r"$m_L={}$".format(set_of_LF[i]), no_replica)))
cur_vv_CV_fixB_est_f1_df = pd.DataFrame(data=VV_fixB_cvest_funcidx_methodidx_f1, columns=['cv_est', 'func_idx', 'method_idx', 'sample_size'])
if i == 0:
vv_CV_fixB_est_f1_df = cur_vv_CV_fixB_est_f1_df
if i >= 1:
vv_CV_fixB_est_f1_df = vv_CV_fixB_est_f1_df.append(cur_vv_CV_fixB_est_f1_df)
VV_fixB_cvest_funcidx_methodidx_f2 = list(zip(np.abs(The_borehole_unbalanced_exps.large_save_est_vecfunc_fixB_tensor[i, :, -1, 1].detach().numpy() - tv_HF), np.repeat('High-fidelity model', no_replica), np.repeat('Fixed B', no_replica), np.repeat(r"$m_L={}$".format(set_of_LF[i]), no_replica)))
cur_vv_CV_fixB_est_f2_df = pd.DataFrame(data=VV_fixB_cvest_funcidx_methodidx_f2, columns=['cv_est', 'func_idx', 'method_idx', 'sample_size'])
if i == 0:
vv_CV_fixB_est_f2_df = cur_vv_CV_fixB_est_f2_df
if i >= 1:
vv_CV_fixB_est_f2_df = vv_CV_fixB_est_f2_df.append(cur_vv_CV_fixB_est_f2_df)
# vv-CV with learning B
VV_cvest_funcidx_methodidx_f1 = list(zip(np.abs(The_borehole_unbalanced_exps.large_save_est_vecfunc_tensor[i, :, -1,0].detach().numpy() - tv_LF), np.repeat('Low-fidelity model',no_replica), np.repeat('Estimated B', no_replica), np.repeat(r"$m_L={}$".format(set_of_LF[i]), no_replica)))
    cur_vv_CV_est_f1_df = pd.DataFrame(data=VV_cvest_funcidx_methodidx_f1, columns=['cv_est', 'func_idx', 'method_idx', 'sample_size'])
# coding: utf-8
# MealsCount Algorithm (v2)
import os
import sys
import pandas as pd
import numpy as np
import json
import time
import math
from datetime import datetime
import abc
from . import backend_utils as bu
from . import config_parser as cp
class mcAlgorithm(metaclass=abc.ABCMeta):
"""
Base class for the MealsCount Algorithm.
"""
def __init__(self):
pass
@abc.abstractmethod
def version(self):
pass
@abc.abstractmethod
def run(self, data, cfg, bundle_groups=False):
pass
@abc.abstractmethod
def get_school_groups(self, data, format="json"):
pass
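# Illustrative skeleton only (not the project's real strategy): a concrete grouping
# algorithm just needs to override the three abstract methods declared above before
# it can be handed to the CEPSchoolGroupGenerator defined below.
class _ExampleAlgorithm(mcAlgorithm):
    def version(self):
        return "example-0.0"
    def run(self, data, cfg, bundle_groups=False):
        # A real implementation would compute the groupings here and cache them.
        self._last_data = data
        return True
    def get_school_groups(self, data, format="json"):
        # A real implementation would return the cached grouping results.
        return {"school_groups": [], "format": format}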
class CEPSchoolGroupGenerator:
"""
Class to encapsulate data and operations for grouping schools.
"""
__strategy = None
def __init__(self, cfg, strategy=None):
if not (strategy):
raise ValueError("ERROR: Invalid strategy")
self.__strategy = strategy
self.__config = cfg
def get_groups(self, school_data, format="json"):
results = None
if not (self.__strategy):
raise ValueError("ERROR: Invalid strategy")
try:
algo = self.__strategy
if algo.run(school_data, self.__config):
results = algo.get_school_groups(school_data, format)
else:
s = "ERROR: Failed to generate school groups"
print(s)
return results
except Exception as e:
raise e
def get_group_bundles(self, school_data, format="json"):
results = None
if not (self.__strategy):
raise ValueError("ERROR: Invalid strategy")
try:
algo = self.__strategy
if algo.run(school_data, self.__config, bundle_groups=True):
results = algo.get_school_groups(school_data, format)
else:
s = "ERROR: Failed to generate school groups"
print(s)
return results
except Exception as e:
raise e
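# Usage sketch (hypothetical; the concrete strategy and the config object come from
# elsewhere in the project, e.g. config_parser, whose constructor is not shown here):
#   generator = CEPSchoolGroupGenerator(cfg, strategy=_ExampleAlgorithm())
#   groups = generator.get_groups(school_df, format="json")
#   bundles = generator.get_group_bundles(school_df, format="json")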
#
# Utility function to truncate a float (f) to the specified number (n) of
# decimals without rounding
#
def truncate(f, n):
try:
return math.floor(f * 10 ** n) / 10 ** n
except ValueError:
return "-"
#
# Function to wrangle the school district input data into the form needed to
# generate groupings of schools based on ISP
#
def prepare_data(df):
# convert fields to numeric as appropriate
NUMERIC_COLS = ['total_enrolled', 'frpm', 'foster', 'homeless', 'migrant', 'direct_cert']
df[NUMERIC_COLS] = df[NUMERIC_COLS].apply(pd.to_numeric)
# remove aggregated records
df = df[df['school_name'] != 'total']
# sum cols for homeless, migrant and foster students
df = df.assign(non_direct_cert=(df['foster'] + df['homeless'] + df['migrant']))
# compute total eligible and isp
total_eligible = (df['foster'] + df['homeless'] + df['migrant'] + df['direct_cert'])
isp = (total_eligible / df['total_enrolled']) * 100
df = df.assign(total_eligible=total_eligible)
df = df.assign(isp=isp)
df.loc[:, 'isp'] = df['isp'].astype(np.double);
KEEP_COLS = ['school_code', 'total_enrolled', 'direct_cert', 'non_direct_cert', 'total_eligible', 'isp']
# remove cols not needed for further analysis
drop_cols = [s for s in df.columns.tolist() if s not in set(KEEP_COLS)]
df.drop(drop_cols, axis=1, inplace=True)
# remove invalid samples
df = df.loc[df['total_eligible'] <= df['total_enrolled']]
# sort by isp
df.sort_values('isp', ascending=False, inplace=True)
df.reset_index(inplace=True)
df.drop('index', axis=1, inplace=True)
# compute cumulative isp
cum_isp = (df['total_eligible'].cumsum() / df['total_enrolled'].cumsum()).astype(np.double) * 100
df = df.assign(cum_isp=cum_isp)
return df
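def _example_input_frame():
    # Illustrative fixture only (the values are made up): the minimal columns that
    # prepare_data() above expects. prepare_data(_example_input_frame()) returns the
    # rows sorted by descending ISP with a 'cum_isp' column appended.
    return pd.DataFrame({
        'school_name': ['school_a', 'school_b'],
        'school_code': ['0001', '0002'],
        'total_enrolled': [100, 200],
        'frpm': [40, 90],
        'foster': [1, 2],
        'homeless': [2, 3],
        'migrant': [0, 1],
        'direct_cert': [30, 80]})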
#
# Function to generate summary data for the specified group of schools
#
def summarize_group(group_df, cfg):
# compute total eligible and total enrolled students across all schools in the group
summary = group_df[['total_enrolled', 'direct_cert', 'non_direct_cert', 'total_eligible']].aggregate(['sum'])
# compute the group's ISP
summary = summary.assign(grp_isp=(summary['total_eligible'] / summary['total_enrolled']) * 100)
# count the number of schools in the group
summary = summary.assign(size=group_df.shape[0])
# compute the % of meals covered at the free and paid rate for the group's ISP
grp_isp = summary.loc['sum', 'grp_isp']
free_rate = (grp_isp * 1.6) if grp_isp >= (cfg.min_cep_thold_pct() * 100) else 0.0
free_rate = 100. if free_rate > 100. else free_rate
summary = summary.assign(free_rate=free_rate)
paid_rate = (100.0 - free_rate)
summary = summary.assign(paid_rate=paid_rate)
return summary
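# Worked example of the rate formula above: a group ISP of 50% (assuming it clears
# the configured minimum CEP threshold) gives free_rate = 50 * 1.6 = 80 and
# paid_rate = 20; a group ISP of 62.5% or more is capped at free_rate = 100, and a
# group ISP below the minimum threshold gets free_rate = 0.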
#
# Function to select schools to add to the destination group (group_df) from among
# the schools not already in it (df), based on the impact each school would have on
# the destination group's ISP. target_isp is the ISP the destination group must
# continue to meet or exceed.
#
def select_by_isp_impact(df, group_df, target_isp):
#print("Selecting by ",target_isp)
schools_to_add = pd.DataFrame();
dst_grp_total_enrolled = group_df.loc[:, 'total_enrolled'].sum()
dst_grp_total_eligible = group_df.loc[:, 'total_eligible'].sum()
new_total_enrolled = df.loc[:, 'total_enrolled'] + dst_grp_total_enrolled
new_isp = (((df.loc[:, 'total_eligible'] + dst_grp_total_eligible) / new_total_enrolled) * 100).astype(np.double)
isp_impact = pd.DataFrame({'new_isp': new_isp})
isp_impact.sort_values('new_isp', ascending=False, inplace=True)
# select all schools whose ISP impact is small enough to not bring down the new ISP
# to under the target ISP
idx = isp_impact[isp_impact['new_isp'] >= target_isp].index
if len(idx) > 0:
# add them to the existing group temporarily
tmp_group_df = pd.concat([group_df, df.loc[idx, :]], axis=0)
# recompute cumulative isp
cum_isp = (tmp_group_df['total_eligible'].cumsum() / tmp_group_df['total_enrolled'].cumsum()).astype(
np.double) * 100
tmp_group_df.loc[:, 'cum_isp'] = cum_isp
# retain only those that make the cut
bins = [0., target_isp, 100.]
tmp_groups = tmp_group_df.groupby(pd.cut(tmp_group_df['cum_isp'], bins))
ivals = tmp_groups.size().index.tolist()
tmp_df = tmp_groups.get_group(ivals[-1]).apply(list).apply(pd.Series)
        # determine which subset of schools to actually add
potential_additions = idx
group_selections = tmp_df.index.tolist()
actual_additions = []
for x in potential_additions:
if x in group_selections:
actual_additions.append(x)
# generate schools to add
if (len(actual_additions)):
schools_to_add = df.loc[actual_additions, :]
return schools_to_add
#
# Function to take in school data and group them based on the ISP_WIDTH
#
def groupby_isp_width(df, cfg, target_isp_width=None):
min_cep_thold = (cfg.min_cep_thold_pct() * 100)
# use default ISP width if not specified as input
isp_width = cfg.isp_width() if target_isp_width is None else target_isp_width
# recalculate cumulative-isp
cum_isp = (df['total_eligible'].cumsum() / df['total_enrolled'].cumsum()).astype(np.double) * 100
df = df.assign(cum_isp=cum_isp)
top_isp = df.iloc[0]['isp']
# if the top ISP is less than that needed for CEP eligibility
# we have nothing more to do
if top_isp < min_cep_thold:
return None
# determine the next cut-off point
isp_thold = (top_isp - isp_width) if (top_isp - isp_width) >= min_cep_thold else min_cep_thold
#print("grouping by",isp_thold,top_isp)
# group schools at the cut-off point
    # note that this will generate exactly 2 groups: one spanning an ISP range of
    # width ISP_WIDTH and the other containing the rest of the schools
groups = df.groupby(pd.cut(df['cum_isp'], [0., isp_thold, top_isp]))
return groups
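# The pd.cut idiom above produces at most two bins, (0, isp_thold] and
# (isp_thold, top_isp]. A standalone illustration of the same idiom:
#   s = pd.Series([30., 45., 55., 62.])
#   s.groupby(pd.cut(s, [0., 40., 62.])).size()   # bins (0, 40] and (40, 62]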
#
# Function that implements a strategy to group schools with ISPs lower than that needed for
# 100% CEP funding.
#
def group_schools_lo_isp(df, cfg, isp_width=None):
school_groups = []
school_group_summaries = []
try:
top_isp = df.iloc[0]['isp']
except IndexError:
top_isp = 0.0
# exit the loop if the highest ISP from among the remaining schools (which are sorted by ISP)
# is lower than that needed for CEP eligibility; we have nothing more to do
while top_isp >= (cfg.min_cep_thold_pct() * 100):
#print("using",top_isp)
# get the next isp_width group that still qualifies for CEP
groups = groupby_isp_width(df, cfg, isp_width)
        if groups is not None:
ivals = pd.DataFrame(groups.size()).index.tolist()
# get the last group: this is the group of isp_width
group_df = groups.get_group(ivals[-1])
summary_df = summarize_group(group_df, cfg)
# trim the school data to remove this group
df.drop(group_df.index.tolist(), axis=0, inplace=True)
# from among remaining schools see if any qualify based on isp impact
schools_to_add = select_by_isp_impact(df, group_df, (cfg.max_cep_thold_pct() * 100))
if schools_to_add.shape[0] > 0:
group_df = pd.concat([group_df, schools_to_add], axis=0)
df.drop(schools_to_add.index.tolist(), axis=0, inplace=True)
school_groups.append(group_df)
summary_df = summarize_group(group_df, cfg)
school_group_summaries.append(summary_df)
# get the top isp for the remaining schools
try:
top_isp = df.iloc[0]['isp']
except IndexError:
top_isp = 0.0
# at this point all remaining schools are ineligible for CEP
# pass them along as a group of their own
cum_isp = (df['total_eligible'].cumsum() / df['total_enrolled'].cumsum()).astype(np.double) * 100
df = df.assign(cum_isp=cum_isp)
school_groups.append(df)
summary_df = summarize_group(df, cfg)
school_group_summaries.append(summary_df)
return school_groups, school_group_summaries
#
# Function that implements a strategy to group schools with ISPs higher than (or equal to)
# that needed for 100% CEP funding.
#
def group_schools_hi_isp(df, cfg):
school_groups = []
school_group_summaries = []
# group the data by cumulative ISP such that all schools with
# max CEP threshold and higher are part of a single group; the
# rest of the schools are in a second group
bins = [0., cfg.max_cep_thold_pct() * 100, 100.]
groups = df.groupby(pd.cut(df['cum_isp'], bins))
ivals = groups.size().index.tolist()
#import pdb; pdb.set_trace()
try:
group_df = groups.get_group(ivals[-1]).apply(list).apply(pd.Series)
except KeyError:
# This means there are no hi isp groups
        group_df = pd.DataFrame()
from pathlib import Path
import zipfile
import pandas as pd
from abc import ABC
from dataclasses import dataclass, field, InitVar
from micromind.io.image import (imread_color, imread_tiff, imread_czi,
imwrite, imwrite_tiff)
PNG = '.png'
JPG = '.jpg'
CSV = '.csv'
TIF = '.tif'
ZIP = '.zip'
LSM = '.lsm'
CZI = '.czi'
@dataclass
class DriveEntity(ABC):
path: InitVar[str]
_path: Path = field(init=False)
def __post_init__(self, path):
self._path = Path(path)
def __getattr__(self, attr):
return getattr(self._path, attr)
def __truediv__(self, key):
return self._path / key
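# The two dunder methods above make a DriveEntity behave like a pathlib.Path:
# attribute access is forwarded to the wrapped Path and `entity / 'name'` returns a
# plain Path, so e.g. `d.suffix` and `d / 'image.png'` both work on a Directory.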
@dataclass
class Directory(DriveEntity):
def __post_init__(self, path):
super().__post_init__(path)
if not self.exists():
_err = f'The directory {self._path} does not exist!'
raise ValueError(_err)
if not self.is_dir():
_err = f'The path {self._path} is not pointing to a directory!'
raise ValueError(_err)
def write(self, filename, filedata):
filepath = self / filename
extension = filepath.suffix
filepath = str(filepath)
if extension == PNG:
imwrite(filepath, filedata)
if extension == TIF or extension == LSM:
imwrite_tiff(filepath, filedata)
def read(self, filename):
filepath = self / filename
if not filepath.exists():
_err = f'The file {filepath} does not exist!'
raise ValueError(_err)
if not filepath.is_file():
_err = f'The path {filepath} is not pointing to a file!'
raise ValueError(_err)
extension = filepath.suffix
filepath = str(filepath)
if extension == PNG or extension == JPG:
return imread_color(filepath)
if extension == CSV:
            return pd.read_csv(filepath)
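# Usage sketch for the Directory wrapper above (paths and file names are hypothetical):
#   out_dir = Directory('/tmp/experiment_output')
#   img = out_dir.read('sample.png')      # dispatches on the file suffix
#   out_dir.write('copy.png', img)
#   table = out_dir.read('results.csv')   # returns a pandas DataFrame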
from __future__ import division
from builtins import str
from builtins import range
from builtins import object
__copyright__ = "Copyright 2015 Contributing Entities"
__license__ = """
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import collections
import os
import sys
import numpy as np
import pandas as pd
from .Error import DemandInputError
from .Logger import FastTripsLogger
from .Route import Route
from .TAZ import TAZ
from .Trip import Trip
from .Util import Util
class Passenger(object):
"""
Passenger class.
One instance represents all of the households and persons that could potentially make transit trips.
Stores household information in :py:attr:`Passenger.households_df` and person information in
:py:attr:`Passenger.persons_df`, which are both :py:class:`pandas.DataFrame` instances.
"""
#: File with households
INPUT_HOUSEHOLDS_FILE = "household.txt"
#: Households column: Household ID
HOUSEHOLDS_COLUMN_HOUSEHOLD_ID = 'hh_id'
#: File with persons
INPUT_PERSONS_FILE = "person.txt"
#: Persons column: Household ID
PERSONS_COLUMN_HOUSEHOLD_ID = HOUSEHOLDS_COLUMN_HOUSEHOLD_ID
#: Persons column: Person ID (string)
PERSONS_COLUMN_PERSON_ID = 'person_id'
# ========== Added by fasttrips =======================================================
#: Persons column: Person ID number
PERSONS_COLUMN_PERSON_ID_NUM = 'person_id_num'
#: File with trip list
INPUT_TRIP_LIST_FILE = "trip_list.txt"
#: Trip list column: Person ID
TRIP_LIST_COLUMN_PERSON_ID = PERSONS_COLUMN_PERSON_ID
#: Trip list column: Person Trip ID
TRIP_LIST_COLUMN_PERSON_TRIP_ID = "person_trip_id"
#: Trip list column: Origin TAZ ID
TRIP_LIST_COLUMN_ORIGIN_TAZ_ID = "o_taz"
#: Trip list column: Destination TAZ ID
TRIP_LIST_COLUMN_DESTINATION_TAZ_ID = "d_taz"
#: Trip list column: Mode
TRIP_LIST_COLUMN_MODE = "mode"
#: Trip list column: Departure Time. DateTime.
TRIP_LIST_COLUMN_DEPARTURE_TIME = 'departure_time'
#: Trip list column: Arrival Time. DateTime.
TRIP_LIST_COLUMN_ARRIVAL_TIME = 'arrival_time'
#: Trip list column: Time Target (either 'arrival' or 'departure')
TRIP_LIST_COLUMN_TIME_TARGET = 'time_target'
# ========== Added by fasttrips =======================================================
#: Trip list column: Unique numeric ID for this passenger/trip
TRIP_LIST_COLUMN_TRIP_LIST_ID_NUM = "trip_list_id_num"
#: Trip list column: Origin TAZ Numeric ID
TRIP_LIST_COLUMN_ORIGIN_TAZ_ID_NUM = "o_taz_num"
#: Trip list column: Destination Numeric TAZ ID
TRIP_LIST_COLUMN_DESTINATION_TAZ_ID_NUM = "d_taz_num"
#: Trip list column: Departure Time. Float, minutes after midnight.
TRIP_LIST_COLUMN_DEPARTURE_TIME_MIN = 'departure_time_min'
#: Trip list column: Departure Time. Float, minutes after midnight.
TRIP_LIST_COLUMN_ARRIVAL_TIME_MIN = 'arrival_time_min'
#: Trip list column: Transit Mode
TRIP_LIST_COLUMN_TRANSIT_MODE = "transit_mode"
#: Trip list column: Access Mode
TRIP_LIST_COLUMN_ACCESS_MODE = "access_mode"
#: Trip list column: Egress Mode
TRIP_LIST_COLUMN_EGRESS_MODE = "egress_mode"
#: Trip list column: Outbound (bool), true iff time target is arrival
TRIP_LIST_COLUMN_OUTBOUND = "outbound"
#: Option for :py:attr:`Passenger.TRIP_LIST_COLUMN_TIME_TARGET` (arrival time)
TIME_TARGET_ARRIVAL = "arrival"
#: Option for :py:attr:`Passenger.TRIP_LIST_COLUMN_TIME_TARGET` (departure time)
TIME_TARGET_DEPARTURE = "departure"
#: Generic transit. Specify this for mode when you mean walk, any transit modes, walk
#: TODO: get rid of this? Maybe user should always specify.
MODE_GENERIC_TRANSIT = "transit"
#: Generic transit - Numeric mode number
MODE_GENERIC_TRANSIT_NUM = 1000
#: Minumum Value of Time: 1 dollar shouldn't be worth 180 minutes
MIN_VALUE_OF_TIME = 60.0/180.0
#: Trip list column: User class. String.
TRIP_LIST_COLUMN_USER_CLASS = "user_class"
#: Trip list column: Purpose. String.
TRIP_LIST_COLUMN_PURPOSE = "purpose"
#: Trip list column: Value of time. Float.
TRIP_LIST_COLUMN_VOT = "vot"
#: Trip list column: Trace. Boolean.
TRIP_LIST_COLUMN_TRACE = "trace"
#: Column names from pathfinding
PF_COL_PF_ITERATION = 'pf_iteration' #: 0.01*pathfinding_iteration + iteration during which this path was found
PF_COL_PAX_A_TIME = 'pf_A_time' #: time path-finder thinks passenger arrived at A
PF_COL_PAX_B_TIME = 'pf_B_time' #: time path-finder thinks passenger arrived at B
PF_COL_LINK_TIME = 'pf_linktime' #: time path-finder thinks passenger spent on link
PF_COL_LINK_FARE = 'pf_linkfare' #: fare path-finder thinks passenger spent on link
PF_COL_LINK_COST = 'pf_linkcost' #: cost (generalized) path-finder thinks passenger spent on link
    PF_COL_LINK_DIST                = 'pf_linkdist'  #: distance path-finder thinks passenger traveled on link
PF_COL_WAIT_TIME = 'pf_waittime' #: time path-finder thinks passenger waited for vehicle on trip links
PF_COL_PATH_NUM = 'pathnum' #: path number, starting from 0
PF_COL_LINK_NUM = 'linknum' #: link number, starting from access
PF_COL_LINK_MODE = 'linkmode' #: link mode (Access, Trip, Egress, etc)
PF_COL_MODE = TRIP_LIST_COLUMN_MODE #: supply mode
PF_COL_ROUTE_ID = Trip.TRIPS_COLUMN_ROUTE_ID #: link route ID
PF_COL_TRIP_ID = Trip.TRIPS_COLUMN_TRIP_ID #: link trip ID
PF_COL_DESCRIPTION = 'description' #: path text description
#: todo replace/rename ??
PF_COL_PAX_A_TIME_MIN = 'pf_A_time_min'
#: pathfinding results
PF_PATHS_CSV = r"enumerated_paths.csv"
PF_LINKS_CSV = r"enumerated_links.csv"
#: results - PathSets
PATHSET_PATHS_CSV = r"pathset_paths.csv"
PATHSET_LINKS_CSV = r"pathset_links.csv"
def __init__(self, input_dir, output_dir, today, stops, routes, capacity_constraint):
"""
Constructor from dictionary mapping attribute to value.
"""
# if no demand dir, nothing to do
        if input_dir is None:
self.trip_list_df = pd.DataFrame()
return
FastTripsLogger.info("-------- Reading demand --------")
FastTripsLogger.info("Capacity constraint? %x" % capacity_constraint )
self.trip_list_df = pd.read_csv(os.path.join(input_dir, Passenger.INPUT_TRIP_LIST_FILE),
skipinitialspace=True, ##LMZ
dtype={Passenger.TRIP_LIST_COLUMN_PERSON_ID :'S',
Passenger.TRIP_LIST_COLUMN_PERSON_TRIP_ID :'S',
Passenger.TRIP_LIST_COLUMN_ORIGIN_TAZ_ID :'S',
Passenger.TRIP_LIST_COLUMN_DESTINATION_TAZ_ID:'S',
Passenger.TRIP_LIST_COLUMN_DEPARTURE_TIME :'S',
Passenger.TRIP_LIST_COLUMN_ARRIVAL_TIME :'S',
Passenger.TRIP_LIST_COLUMN_PURPOSE :'S'})
trip_list_cols = list(self.trip_list_df.columns.values)
assert(Passenger.TRIP_LIST_COLUMN_PERSON_ID in trip_list_cols)
assert(Passenger.TRIP_LIST_COLUMN_PERSON_TRIP_ID in trip_list_cols)
assert(Passenger.TRIP_LIST_COLUMN_ORIGIN_TAZ_ID in trip_list_cols)
assert(Passenger.TRIP_LIST_COLUMN_DESTINATION_TAZ_ID in trip_list_cols)
assert(Passenger.TRIP_LIST_COLUMN_DEPARTURE_TIME in trip_list_cols)
assert(Passenger.TRIP_LIST_COLUMN_ARRIVAL_TIME in trip_list_cols)
assert(Passenger.TRIP_LIST_COLUMN_TIME_TARGET in trip_list_cols)
assert(Passenger.TRIP_LIST_COLUMN_VOT in trip_list_cols)
FastTripsLogger.debug("=========== TRIP LIST ===========\n" + str(self.trip_list_df.head()))
FastTripsLogger.debug("\n"+str(self.trip_list_df.index.dtype)+"\n"+str(self.trip_list_df.dtypes))
FastTripsLogger.info("Read %7d %15s from %25s" %
(len(self.trip_list_df), "person trips", Passenger.INPUT_TRIP_LIST_FILE))
# Error on missing person ids or person_trip_ids
missing_person_ids = self.trip_list_df[pd.isnull(self.trip_list_df[Passenger.TRIP_LIST_COLUMN_PERSON_ID])|
pd.isnull(self.trip_list_df[Passenger.TRIP_LIST_COLUMN_PERSON_TRIP_ID])]
if len(missing_person_ids)>0:
error_msg = "Missing person_id or person_trip_id fields:\n%s\n" % str(missing_person_ids)
error_msg += "Use 0 for person_id for trips without corresponding person."
FastTripsLogger.fatal(error_msg)
raise DemandInputError(Passenger.INPUT_TRIP_LIST_FILE, error_msg)
# Drop (warn) on missing origins or destinations
missing_ods = self.trip_list_df[ pd.isnull(self.trip_list_df[Passenger.TRIP_LIST_COLUMN_ORIGIN_TAZ_ID])|
pd.isnull(self.trip_list_df[Passenger.TRIP_LIST_COLUMN_DESTINATION_TAZ_ID]) ]
if len(missing_ods)>0:
FastTripsLogger.warn("Missing origin or destination for the following trips. Dropping.\n%s" % str(missing_ods))
self.trip_list_df = self.trip_list_df.loc[ pd.notnull(self.trip_list_df[Passenger.TRIP_LIST_COLUMN_ORIGIN_TAZ_ID ])&
pd.notnull(self.trip_list_df[Passenger.TRIP_LIST_COLUMN_DESTINATION_TAZ_ID]) ].reset_index(drop=True)
FastTripsLogger.warn("=> Have %d person trips" % len(self.trip_list_df))
non_zero_person_ids = len(self.trip_list_df.loc[self.trip_list_df[Passenger.TRIP_LIST_COLUMN_PERSON_ID]!="0"])
if non_zero_person_ids > 0 and os.path.exists(os.path.join(input_dir, Passenger.INPUT_PERSONS_FILE)):
self.persons_df = pd.read_csv(os.path.join(input_dir, Passenger.INPUT_PERSONS_FILE),
skipinitialspace=True,
dtype={Passenger.PERSONS_COLUMN_PERSON_ID:'S'})
self.persons_id_df = Util.add_numeric_column(self.persons_df[[Passenger.PERSONS_COLUMN_PERSON_ID]],
id_colname=Passenger.PERSONS_COLUMN_PERSON_ID,
numeric_newcolname=Passenger.PERSONS_COLUMN_PERSON_ID_NUM)
self.persons_df = pd.merge(left=self.persons_df, right=self.persons_id_df,
how="left")
persons_cols = list(self.persons_df.columns.values)
FastTripsLogger.debug("=========== PERSONS ===========\n" + str(self.persons_df.head()))
FastTripsLogger.debug("\n"+str(self.persons_df.index.dtype)+"\n"+str(self.persons_df.dtypes))
FastTripsLogger.info("Read %7d %15s from %25s" %
(len(self.persons_df), "persons", Passenger.INPUT_PERSONS_FILE))
self.households_df = pd.read_csv(os.path.join(input_dir, Passenger.INPUT_HOUSEHOLDS_FILE), skipinitialspace=True)
household_cols = list(self.households_df.columns.values)
FastTripsLogger.debug("=========== HOUSEHOLDS ===========\n" + str(self.households_df.head()))
FastTripsLogger.debug("\n"+str(self.households_df.index.dtype)+"\n"+str(self.households_df.dtypes))
FastTripsLogger.info("Read %7d %15s from %25s" %
(len(self.households_df), "households", Passenger.INPUT_HOUSEHOLDS_FILE))
else:
self.persons_df = pd.DataFrame()
self.households_df = pd.DataFrame()
# make sure that each tuple TRIP_LIST_COLUMN_PERSON_ID, TRIP_LIST_COLUMN_PERSON_TRIP_ID is unique
self.trip_list_df["ID_dupes"] = self.trip_list_df.duplicated(subset=[Passenger.TRIP_LIST_COLUMN_PERSON_ID,
Passenger.TRIP_LIST_COLUMN_PERSON_TRIP_ID],
keep=False)
if self.trip_list_df["ID_dupes"].sum() > 0:
error_msg = "Duplicate IDs (%s, %s) found:\n%s" % \
(Passenger.TRIP_LIST_COLUMN_PERSON_ID,
Passenger.TRIP_LIST_COLUMN_PERSON_TRIP_ID,
self.trip_list_df.loc[self.trip_list_df["ID_dupes"]==True].to_string())
FastTripsLogger.fatal(error_msg)
raise DemandInputError(Passenger.INPUT_TRIP_LIST_FILE, error_msg)
# Create unique numeric index
self.trip_list_df[Passenger.TRIP_LIST_COLUMN_TRIP_LIST_ID_NUM] = self.trip_list_df.index + 1
# datetime version
self.trip_list_df[Passenger.TRIP_LIST_COLUMN_ARRIVAL_TIME] = \
self.trip_list_df[Passenger.TRIP_LIST_COLUMN_ARRIVAL_TIME].map(lambda x: Util.read_time(x))
self.trip_list_df[Passenger.TRIP_LIST_COLUMN_DEPARTURE_TIME] = \
self.trip_list_df[Passenger.TRIP_LIST_COLUMN_DEPARTURE_TIME].map(lambda x: Util.read_time(x))
# float version
self.trip_list_df[Passenger.TRIP_LIST_COLUMN_ARRIVAL_TIME_MIN] = \
self.trip_list_df[Passenger.TRIP_LIST_COLUMN_ARRIVAL_TIME].map(lambda x: \
60*x.time().hour + x.time().minute + (x.time().second/60.0) )
self.trip_list_df[Passenger.TRIP_LIST_COLUMN_DEPARTURE_TIME_MIN] = \
self.trip_list_df[Passenger.TRIP_LIST_COLUMN_DEPARTURE_TIME].map(lambda x: \
60*x.time().hour + x.time().minute + (x.time().second/60.0) )
# TODO: validate fields?
        # value of time must be greater than a threshold or any fare becomes prohibitively expensive
low_vot = self.trip_list_df.loc[ self.trip_list_df[Passenger.TRIP_LIST_COLUMN_VOT] < Passenger.MIN_VALUE_OF_TIME ]
if len(low_vot) > 0:
FastTripsLogger.warn("These trips have value of time lower than the minimum threshhhold (%f): raising to minimum.\n%s" %
(Passenger.MIN_VALUE_OF_TIME, str(low_vot) ))
self.trip_list_df.loc[ self.trip_list_df[Passenger.TRIP_LIST_COLUMN_VOT] < Passenger.MIN_VALUE_OF_TIME,
Passenger.TRIP_LIST_COLUMN_VOT] = Passenger.MIN_VALUE_OF_TIME
if len(self.persons_df) > 0:
# Join trips to persons
self.trip_list_df = pd.merge(left=self.trip_list_df, right=self.persons_df,
how='left',
on=Passenger.TRIP_LIST_COLUMN_PERSON_ID)
# are any null?
no_person_ids = self.trip_list_df.loc[ pd.isnull(self.trip_list_df[Passenger.PERSONS_COLUMN_PERSON_ID_NUM])&
(self.trip_list_df[Passenger.PERSONS_COLUMN_PERSON_ID]!="0")]
if len(no_person_ids) > 0:
error_msg = "Even though a person list is given, failed to find person information for %d trips" % len(no_person_ids)
FastTripsLogger.fatal(error_msg)
FastTripsLogger.fatal("\n%s\n" % no_person_ids.to_string())
raise DemandInputError(Passenger.INPUT_TRIP_LIST_FILE, error_msg)
# And then to households
self.trip_list_df = pd.merge(left=self.trip_list_df, right=self.households_df,
how='left',
on=Passenger.PERSONS_COLUMN_HOUSEHOLD_ID)
else:
# Give each passenger a unique person ID num
self.trip_list_df[Passenger.PERSONS_COLUMN_PERSON_ID_NUM] = self.trip_list_df.index + 1
# add TAZ numeric ids (stored in the stop mapping)
self.trip_list_df = stops.add_numeric_stop_id(self.trip_list_df,
id_colname =Passenger.TRIP_LIST_COLUMN_ORIGIN_TAZ_ID,
numeric_newcolname=Passenger.TRIP_LIST_COLUMN_ORIGIN_TAZ_ID_NUM,
warn =True,
warn_msg ="TAZ numbers configured as origins in demand file are not found in the network")
self.trip_list_df = stops.add_numeric_stop_id(self.trip_list_df,
id_colname =Passenger.TRIP_LIST_COLUMN_DESTINATION_TAZ_ID,
numeric_newcolname=Passenger.TRIP_LIST_COLUMN_DESTINATION_TAZ_ID_NUM,
warn =True,
warn_msg ="TAZ numbers configured as destinations in demand file are not found in the network")
# trips with invalid TAZs have been dropped
FastTripsLogger.debug("Have %d person trips" % len(self.trip_list_df))
# figure out modes:
if Passenger.TRIP_LIST_COLUMN_MODE not in trip_list_cols:
# default to generic walk-transit-walk
self.trip_list_df[Passenger.TRIP_LIST_COLUMN_MODE] = Passenger.MODE_GENERIC_TRANSIT
self.trip_list_df['mode_dash_count'] = 0
else:
# count the dashes in the mode
self.trip_list_df['mode_dash_count'] = self.trip_list_df[Passenger.TRIP_LIST_COLUMN_MODE]\
.map(lambda x: x.count('-'))
# The only modes allowed are access-transit-egress or MODE_GENERIC_TRANSIT
bad_mode_df = self.trip_list_df.loc[((self.trip_list_df['mode_dash_count']!=2)&
((self.trip_list_df['mode_dash_count']!=0)|
(self.trip_list_df[Passenger.TRIP_LIST_COLUMN_MODE]!=Passenger.MODE_GENERIC_TRANSIT)))]
if len(bad_mode_df) > 0:
FastTripsLogger.fatal("Could not understand column '%s' in the following: \n%s" %
(Passenger.TRIP_LIST_COLUMN_MODE,
bad_mode_df[[Passenger.TRIP_LIST_COLUMN_MODE,'mode_dash_count']].to_string()))
sys.exit(2)
# Take care of the transit generic
self.trip_list_df.loc[self.trip_list_df['mode_dash_count']==0,
Passenger.TRIP_LIST_COLUMN_TRANSIT_MODE] = Passenger.MODE_GENERIC_TRANSIT
self.trip_list_df.loc[self.trip_list_df['mode_dash_count']==0,
Passenger.TRIP_LIST_COLUMN_ACCESS_MODE ] = "%s" % TAZ.ACCESS_EGRESS_MODES[0]
self.trip_list_df.loc[self.trip_list_df['mode_dash_count']==0,
Passenger.TRIP_LIST_COLUMN_EGRESS_MODE ] = "%s" % TAZ.ACCESS_EGRESS_MODES[0]
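        # Illustrative example (added for clarity; the mode string below is an
        # assumption, not from the original demand data): a three-part mode such
        # as "walk-local_bus-walk" is split on its dashes into
        #   access_mode  = "walk"       (text before the first dash)
        #   transit_mode = "local_bus"  (text between the first and last dash)
        #   egress_mode  = "walk"       (text after the last dash)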
# Take care of the access-transit-egress
self.trip_list_df.loc[self.trip_list_df['mode_dash_count']==2,
Passenger.TRIP_LIST_COLUMN_ACCESS_MODE] = self.trip_list_df[Passenger.TRIP_LIST_COLUMN_MODE]\
.map(lambda x: "%s" % x[:x.find('-')])
self.trip_list_df.loc[self.trip_list_df['mode_dash_count']==2,
Passenger.TRIP_LIST_COLUMN_TRANSIT_MODE] = self.trip_list_df[Passenger.TRIP_LIST_COLUMN_MODE]\
.map(lambda x: x[x.find('-')+1:x.rfind('-')])
self.trip_list_df.loc[self.trip_list_df['mode_dash_count']==2,
Passenger.TRIP_LIST_COLUMN_EGRESS_MODE] = self.trip_list_df[Passenger.TRIP_LIST_COLUMN_MODE]\
.map(lambda x: "%s" % x[x.rfind('-')+1:])
# We're done with mode_dash_count, thanks for your service
        self.trip_list_df.drop('mode_dash_count', axis=1, inplace=True)
# validate time_target
invalid_time_target = self.trip_list_df.loc[ self.trip_list_df[Passenger.TRIP_LIST_COLUMN_TIME_TARGET].isin(
[Passenger.TIME_TARGET_ARRIVAL, Passenger.TIME_TARGET_DEPARTURE])==False ]
if len(invalid_time_target) > 0:
error_msg = "Invalid value in column %s:\n%s" % (Passenger.TRIP_LIST_COLUMN_TIME_TARGET, str(invalid_time_target))
FastTripsLogger.fatal(error_msg)
raise DemandInputError(Passenger.INPUT_TRIP_LIST_FILE, error_msg)
# set outbound
self.trip_list_df[Passenger.TRIP_LIST_COLUMN_OUTBOUND] = (self.trip_list_df[Passenger.TRIP_LIST_COLUMN_TIME_TARGET] == Passenger.TIME_TARGET_ARRIVAL)
# Set the user class for each trip
from .PathSet import PathSet
PathSet.set_user_class(self.trip_list_df, Passenger.TRIP_LIST_COLUMN_USER_CLASS)
# Verify that PathSet has all the configuration for these user classes + transit modes + access modes + egress modes
# => Figure out unique user class + mode combinations
self.modes_df = self.trip_list_df[[Passenger.TRIP_LIST_COLUMN_USER_CLASS,
Passenger.TRIP_LIST_COLUMN_PURPOSE,
Passenger.TRIP_LIST_COLUMN_TRANSIT_MODE,
Passenger.TRIP_LIST_COLUMN_ACCESS_MODE,
Passenger.TRIP_LIST_COLUMN_EGRESS_MODE]].set_index([Passenger.TRIP_LIST_COLUMN_USER_CLASS, Passenger.TRIP_LIST_COLUMN_PURPOSE])
# stack - so before we have three columns: transit_mode, access_mode, egress_mode
# after, we have two columns: demand_mode_type and the value, demand_mode
self.modes_df = self.modes_df.stack().to_frame()
self.modes_df.index.names = [Passenger.TRIP_LIST_COLUMN_USER_CLASS, Passenger.TRIP_LIST_COLUMN_PURPOSE, PathSet.WEIGHTS_COLUMN_DEMAND_MODE_TYPE]
self.modes_df.columns = [PathSet.WEIGHTS_COLUMN_DEMAND_MODE]
self.modes_df.reset_index(inplace=True)
self.modes_df.drop_duplicates(inplace=True)
# fix demand_mode_type since transit_mode is just transit, etc
self.modes_df[PathSet.WEIGHTS_COLUMN_DEMAND_MODE_TYPE] = self.modes_df[PathSet.WEIGHTS_COLUMN_DEMAND_MODE_TYPE].apply(lambda x: x[:-5])
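        # Illustrative sketch of the resulting modes_df (the values shown are
        # assumptions, not from the original demand data). After stacking and
        # trimming the "_mode" suffix, each row pairs a user class/purpose with
        # one demand mode type and its demand mode, e.g.:
        #   user_class  purpose  demand_mode_type  demand_mode
        #   all         work     access            walk
        #   all         work     transit           local_bus
        #   all         work     egress            walk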
FastTripsLogger.debug("Demand mode types by class & purpose: \n%s" % str(self.modes_df))
# Make sure we have all the weights required for these user_class/mode combinations
self.trip_list_df = PathSet.verify_weight_config(self.modes_df, output_dir, routes, capacity_constraint, self.trip_list_df)
# add column trace
from .Assignment import Assignment
if len(Assignment.TRACE_IDS) > 0:
trace_df = pd.DataFrame.from_records(data=Assignment.TRACE_IDS,
columns=[Passenger.TRIP_LIST_COLUMN_PERSON_ID,
Passenger.TRIP_LIST_COLUMN_PERSON_TRIP_ID]).astype(object)
trace_df[Passenger.TRIP_LIST_COLUMN_TRACE] = True
# combine
self.trip_list_df = pd.merge(left=self.trip_list_df,
right=trace_df,
how="left",
on=[Passenger.TRIP_LIST_COLUMN_PERSON_ID,
Passenger.TRIP_LIST_COLUMN_PERSON_TRIP_ID])
# make nulls into False
self.trip_list_df.loc[pd.isnull(
self.trip_list_df[Passenger.TRIP_LIST_COLUMN_TRACE]), Passenger.TRIP_LIST_COLUMN_TRACE] = False
else:
self.trip_list_df[Passenger.TRIP_LIST_COLUMN_TRACE] = False
FastTripsLogger.info("Have %d person trips" % len(self.trip_list_df))
FastTripsLogger.debug("Final trip_list_df\n"+str(self.trip_list_df.index.dtype)+"\n"+str(self.trip_list_df.dtypes))
FastTripsLogger.debug("\n"+self.trip_list_df.head().to_string())
#: Maps trip_list_id to :py:class:`PathSet` instance. Use trip_list_id instead of (person_id, person_trip_id) for simplicity and to iterate sequentially
#: in setup_passenger_pathsets()
self.id_to_pathset = collections.OrderedDict()
def add_pathset(self, trip_list_id, pathset):
"""
Stores this path set for the trip_list_id.
"""
self.id_to_pathset[trip_list_id] = pathset
def get_pathset(self, trip_list_id):
"""
Retrieves a stored path set for the given trip_list_id
"""
return self.id_to_pathset[trip_list_id]
def get_person_id(self, trip_list_id):
to_ret = self.trip_list_df.loc[self.trip_list_df[Passenger.TRIP_LIST_COLUMN_TRIP_LIST_ID_NUM]==trip_list_id,
[Passenger.TRIP_LIST_COLUMN_PERSON_ID,
Passenger.TRIP_LIST_COLUMN_PERSON_TRIP_ID]]
return(to_ret.iloc[0,0], to_ret.iloc[0,1])
def read_passenger_pathsets(self, pathset_dir, stops, modes_df, include_asgn=True):
"""
Reads the dataframes described in :py:meth:`Passenger.setup_passenger_pathsets` and returns them.
:param pathset_dir: Location of csv files to read
:type pathset_dir: string
:param include_asgn: If true, read from files called :py:attr:`Passenger.PF_PATHS_CSV` and :py:attr:`Passenger.PF_LINKS_CSV`.
Otherwise read from files called :py:attr:`Passenger.PATHSET_PATHS_CSV` and :py:attr:`Passenger.PATHSET_LINKS_CSV` which include assignment results.
:return: See :py:meth:`Assignment.setup_passengers`
for documentation on the passenger paths :py:class:`pandas.DataFrame`
:rtype: a tuple of (:py:class:`pandas.DataFrame`, :py:class:`pandas.DataFrame`)
"""
# read existing paths
paths_file = os.path.join(pathset_dir, Passenger.PATHSET_PATHS_CSV if include_asgn else Passenger.PF_PATHS_CSV)
pathset_paths_df = pd.read_csv(paths_file,
skipinitialspace=True,
dtype={Passenger.TRIP_LIST_COLUMN_PERSON_ID :'S',
Passenger.TRIP_LIST_COLUMN_PERSON_TRIP_ID:'S'})
FastTripsLogger.info("Read %s" % paths_file)
FastTripsLogger.debug("pathset_paths_df.dtypes=\n%s" % str(pathset_paths_df.dtypes))
from .Assignment import Assignment
date_cols = [Passenger.PF_COL_PAX_A_TIME, Passenger.PF_COL_PAX_B_TIME]
if include_asgn:
date_cols.extend([Assignment.SIM_COL_PAX_BOARD_TIME,
Assignment.SIM_COL_PAX_ALIGHT_TIME,
Assignment.SIM_COL_PAX_A_TIME,
Assignment.SIM_COL_PAX_B_TIME])
links_dtypes = {Passenger.TRIP_LIST_COLUMN_PERSON_ID :'S',
Passenger.TRIP_LIST_COLUMN_PERSON_TRIP_ID:'S',
Trip.TRIPS_COLUMN_TRIP_ID :'S',
"A_id" :'S',
"B_id" :'S',
Passenger.PF_COL_ROUTE_ID :'S',
Passenger.PF_COL_TRIP_ID :'S'}
# read datetimes as string initially
for date_col in date_cols:
links_dtypes[date_col] = 'S'
links_file = os.path.join(pathset_dir, Passenger.PATHSET_LINKS_CSV if include_asgn else Passenger.PF_LINKS_CSV)
pathset_links_df = pd.read_csv(links_file, skipinitialspace=True, dtype=links_dtypes)
# convert time strings to datetimes
for date_col in date_cols:
if date_col in pathset_links_df.columns.values:
pathset_links_df[date_col] = pathset_links_df[date_col].map(lambda x: Util.read_time(x))
# convert time duration columns to time durations
link_cols = list(pathset_links_df.columns.values)
if Passenger.PF_COL_LINK_TIME in link_cols:
pathset_links_df[Passenger.PF_COL_LINK_TIME] = | pd.to_timedelta(pathset_links_df[Passenger.PF_COL_LINK_TIME]) | pandas.to_timedelta |
#!/usr/bin/python3
from pysimbotlib.core import PySimbotApp, Simbot, Robot, Util
from kivy.logger import Logger
from kivy.config import Config
# # Force the program to show user's log only for "info" level or more. The info log will be disabled.
# Config.set('kivy', 'log_level', 'debug')
Config.set('graphics', 'maxfps', 10)
import pandas as pd
import numpy as np
import random
from typing import Tuple, Dict
def dist_to_label(dist: float) -> str:
if dist < 10:
return 'C'
elif 10 <= dist < 25:
return 'N'
elif 25 <= dist < 50:
return 'M'
elif 50 <= dist < 75:
return 'F'
else:
return 'L'
def angle_to_label(angle: float) -> str:
if -10 <= angle <= 10:
return 'C'
elif 10 < angle < 45:
return 'R'
elif 45 <= angle < 90:
return 'Z'
elif 90 <= angle <= 180 or -180 <= angle <= -90:
return 'B'
elif -90 < angle <= -45:
return 'A'
elif -45 < angle < -10:
return 'L'
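# Illustrative examples of the discretization above (added for clarity; the
# numeric inputs are arbitrary assumptions):
#   dist_to_label(7.5)    -> 'C'   (dist < 10)
#   dist_to_label(60.0)   -> 'F'   (50 <= dist < 75)
#   angle_to_label(30.0)  -> 'R'   (10 < angle < 45)
#   angle_to_label(-120)  -> 'B'   (angle in [-180, -90])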
def convert_data(data: pd.DataFrame) -> None:
data.iloc[:, 0:8] = data.iloc[:, 0:8].applymap(dist_to_label)
data.iloc[:, 8:9] = data.iloc[:, 8:9].applymap(angle_to_label)
def turnmove_to_class(row) -> str:
turn = row['turn']
move = row['move']
if turn == 5 and move == 0:
return 'R'
elif turn == -5 and move == 0:
return 'L'
elif turn == 0 and move == 5:
return 'F'
elif turn == 0 and move == -5:
return 'B'
elif turn == 5 and move == 5:
return 'FR'
elif turn == -5 and move == 5:
return 'FL'
else:
raise NotImplementedError()
class NaiveBayes:
# Learning phase to build the CPT
def __init__(self, filename: str):
data = pd.read_csv(filename, sep=',')
convert_data(data)
data['action'] = data.apply(turnmove_to_class, axis=1)
self.action_class = ['R','L','F','B','FR','FL']
self.dist_class = ['C','N','M','F','L']
self.angle_class = ['C','R','Z','B','A','L']
temp_data_mid = dict()
temp_data2 = dict()
temp_prop_large = dict()
sep_data = dict()
num_train_set = data.count()[0]
print(f"num_train_set: {num_train_set}")
# Separate Data into 7 action class
for sep in self.action_class:
sep_data[sep] = data[data['action'] == sep]
for action in self.action_class:
each_data = sep_data[action]
amount_data = max(1,sep_data[action].count()[0])
# print(f"\nClass {action} : {amount_data}")
for i in data.columns[0:9]:
prob_data = 0.0
                if i == 'angle':
                    for j in self.angle_class:
                        prob_data = each_data[each_data[i]==j].count()[0]/amount_data
                        temp_data2[j] = prob_data
                else:
                    for j in self.dist_class:
                        prob_data = each_data[each_data[i]==j].count()[0]/amount_data
                        temp_data2[j] = prob_data
# print(f"i: {i}")
temp_data_mid[i]=temp_data2 ; temp_data2 = dict()
temp_prop_large[action] = temp_data_mid ; temp_data_mid = dict()
self.frequency_dict = temp_prop_large; temp_prop_large = dict()
print(f"frequency_dict: \n{self.frequency_dict}")
# Calculate prob of action class
self.action_prop = dict()
for sep in self.action_class:
self.action_prop[sep] = data[data['action'] == sep].count()[0]/num_train_set
print(f"self.action_prop: {self.action_prop}")
# put your code here
#
#
# find action that gives the highest conditional probability
def classify(self, input_data: pd.DataFrame) -> str:
#
#
# put your code here
prob_all_class = dict()
# print(f"input_data: \n{input_data}")
self.sensor_class = ['ir0','ir1','ir2','ir3','ir4','ir5','ir6','ir7','angle']
for action in self.action_class:
# print(action)
multiple_data = float()
for i in range(len(input_data.columns)):
# print(f'i: {i}')
value_prob = max(0.01, self.frequency_dict[action][self.sensor_class[i]][input_data.iloc[0][i]])
# print(f"value_prob: {value_prob}")
                # Multiply the conditional probabilities of all sensor readings
if multiple_data == 0.0:
multiple_data = value_prob
else:
multiple_data *= value_prob
multiple_data *= self.action_prop[action]
prob_all_class.update({
action : multiple_data
})
        # Find the action with the maximum posterior probability
action = max((v,k) for k,v in prob_all_class.items())[1]
# print(f"input_data: \n{input_data.iloc[0][8]} \n{input_data}")
print(f"action: {action} \t angle: {input_data.iloc[0][8]}")
# action = 'F'
return action
class NBRobot(Robot):
def __init__(self, **kwarg):
super(NBRobot, self).__init__(**kwarg)
# Learning Phase
file_name = "_bb"
self.nb = NaiveBayes('history' + file_name + ".csv")
def update(self):
# read sensor value
ir_values = self.distance() # [0, 100]
angle = self.smell() # [-180, 180]
# create 2D array of size 1 x 9
sensor = np.zeros((1, 9))
# set the values of the input sensor
for i, ir in enumerate(ir_values):
sensor[0][i] = ir
## use this line if the training data is from Jet
sensor[0][8] = angle
input_data = | pd.DataFrame(sensor) | pandas.DataFrame |
import numpy as np
import seaborn as sns
import pandas as pd
import math
import matplotlib.pyplot as plt
plt.rcParams.update({
"text.usetex": True,
"font.family": "sans-serif",
"font.sans-serif": ["Helvetica"],
"text.latex.preamble": [r'\usepackage{amsfonts}'],
'font.size': 15,
"xtick.labelsize": 15, # large tick labels
"ytick.labelsize": 15, # large tick labels
'figure.figsize': [9, 6]} # default: 6.4 and 4.8
)
def barplot_err(x, y, yerr=None, legend_loc=0, data=None, ax=None, **kwargs):
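    """
    Bar plot with precomputed error bars. (Docstring added for clarity; it
    summarizes the code below and is not part of the original source.)
    Each row of `data` with a non-null `yerr` is replicated at y - yerr, y and
    y + yerr so that seaborn's ``ci='sd'`` renders an error bar of roughly
    +/- yerr around the bar height y; rows with a null `yerr` get no bar.
    """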
_data = []
for _i in data.index:
_row = data.loc[_i]
if _row[yerr] is not None:
_data_i = pd.concat([data.loc[_i:_i]] * 3, ignore_index=True, sort=False)
_data_i[y] = [_row[y] - _row[yerr], _row[y], _row[y] + _row[yerr]]
else:
_data_i = pd.concat([data.loc[_i:_i]], ignore_index=True, sort=False)
_data_i[y] = _row[y]
_data.append(_data_i)
_data = pd.concat(_data, ignore_index=True, sort=False)
_ax = sns.barplot(x=x, y=y, data=_data, ci='sd', ax=ax, **kwargs)
_ax.legend(loc=legend_loc, fontsize=12)
# _ax.set_yscale("log")
return _ax
def plotLossFunction(results_folder_path, save_pdf=False):
plt.figure("Loss function")
filename2 = results_folder_path + "training_history.csv"
training_history = np.loadtxt(filename2, delimiter=",", skiprows=1)
steps = training_history[:, 0].astype(int)
loss_trj = training_history[:, 1]
cpu_time = training_history[-1, 2]
print("Training time %d seconds" % cpu_time)
# plot the loss function
plt.plot(steps, loss_trj, color='g', linewidth=2)
plt.xlabel('Steps')
plt.ylabel('Loss')
if save_pdf:
plt.savefig(results_folder_path + "loss_function.pdf", bbox_inches='tight', transparent="False", pad_inches=0)
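# Illustrative usage (the folder path is an assumption, not from the original
# source); expects a training_history.csv inside the results folder:
#   plotLossFunction("results/run1/", save_pdf=True)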
#
# def plotFunctionTrajectory(results_folder_path, func_names):
# plt.figure("Function Trajectory")
# filename = results_folder_path + "function_value_data.csv"
# filename2 = results_folder_path + "training_history.csv"
# training_history = np.loadtxt(filename2, delimiter=",", skiprows=1)
# steps = training_history[:, 0].astype(int)
# loss_trj = training_history[:, 1]
# cpu_time = training_history[-1, 2]
# del training_history
# print("Training time %d seconds" % cpu_time)
#
# function_value_data = pd.read_csv(filename, delimiter=",",
# names=func_names)
#
# function_value_data.insert(loc=0, column="Steps", value=steps)
# function_value_data.set_index("Steps")
# sns.set(rc={'figure.figsize': (10, 7)})
# for f in func_names:
# ax = sns.lineplot(x="Steps", y=f, data=function_value_data)
# ax.set(ylabel="function values")
# plt.legend(labels=func_names)
def plot_validation_charts_function(results_folder_path, func_names, exact_values, save_pdf=False):
filename = results_folder_path + "function_value_data.csv"
function_value_data = pd.read_csv(filename, delimiter=",",
names=func_names)
dnn_func_values = function_value_data.tail(1).to_numpy()[0, :]
del function_value_data
# we first compare the mean function estimates
filename = results_folder_path + "SimulationValidation.txt"
function_value_data = pd.read_csv(filename, delimiter=",",
names=func_names, skiprows=3)
ssa_func_values_mean = function_value_data.values[0, :]
ssa_func_values_std = function_value_data.values[1, :]
dict1 = {"Function": func_names, "Estimate": ssa_func_values_mean,
"Error": 1.96 * ssa_func_values_std, "Estimator": "SSA"}
dict2 = {"Function": func_names, "Estimate": dnn_func_values,
"Error": None, "Estimator": "DeepCME"}
df1 = pd.DataFrame(dict1)
df2 = pd.DataFrame(dict2)
if exact_values is not None:
dict3 = {"Function": func_names, "Estimate": np.array(exact_values),
"Error": None, "Estimator": "Exact"}
df3 = pd.DataFrame(dict3)
df = pd.concat([df2, df1, df3])
else:
filename = results_folder_path + "SimulationValidation_exact.txt"
function_value_data = pd.read_csv(filename, delimiter=",",
names=func_names, skiprows=3)
sim_est_func_values_mean2 = function_value_data.values[0, :]
sim_est_values_std2 = function_value_data.values[1, :]
dict3 = {"Function": func_names, "Estimate": sim_est_func_values_mean2,
"Error": 1.96 * sim_est_values_std2, "Estimator": "mNRM ($10^4$ samples)"}
df3 = pd.DataFrame(dict3)
df = pd.concat([df2, df1, df3])
df.set_index(np.arange(0, 3 * len(func_names)), inplace=True)
plt.figure("Estimated function values")
barplot_err(x="Function", y="Estimate", legend_loc=3, yerr="Error", hue="Estimator",
capsize=.2, data=df)
if save_pdf:
plt.savefig(results_folder_path + "func_estimates.pdf", bbox_inches='tight', transparent="False", pad_inches=0)
def plot_validation_charts_function_separate(results_folder_path, func_names, exact_values, save_pdf=False):
filename = results_folder_path + "function_value_data.csv"
function_value_data = pd.read_csv(filename, delimiter=",",
names=func_names)
dnn_func_values = function_value_data.tail(1).to_numpy()[0, :]
print(dnn_func_values)
del function_value_data
# we first compare the mean function estimates
filename = results_folder_path + "SimulationValidation.txt"
function_value_data = pd.read_csv(filename, delimiter=",",
names=func_names, skiprows=3)
sim_est_func_values_mean = function_value_data.values[0, :]
sim_est_values_std = function_value_data.values[1, :]
dict1 = {"Function": func_names, "Estimate": sim_est_func_values_mean,
"Error": 1.96 * sim_est_values_std, "Estimator": "mNRM ($10^3$ samples)"}
dict2 = {"Function": func_names, "Estimate": dnn_func_values,
"Error": None, "Estimator": "DeepCME"}
df1 = pd.DataFrame(dict1)
df2 = pd.DataFrame(dict2)
if exact_values is not None:
dict3 = {"Function": func_names, "Estimate": np.array(exact_values),
"Error": None, "Estimator": "Exact"}
df3 = pd.DataFrame(dict3)
df = pd.concat([df2, df1, df3])
else:
filename = results_folder_path + "SimulationValidation_exact.txt"
function_value_data = pd.read_csv(filename, delimiter=",",
names=func_names, skiprows=3)
sim_est_func_values_mean2 = function_value_data.values[0, :]
sim_est_values_std2 = function_value_data.values[1, :]
dict3 = {"Function": func_names, "Estimate": sim_est_func_values_mean2,
"Error": 1.96 * sim_est_values_std2, "Estimator": "mNRM ($10^4$ samples)"}
df3 = pd.DataFrame(dict3)
df = pd.concat([df2, df1, df3])
df.set_index(np.arange(0, 3 * len(func_names)), inplace=True)
fig, axs = plt.subplots(1, len(func_names), num="Estimated function values")
for i in range(len(func_names)):
barplot_err(x="Function", y="Estimate", legend_loc=3, yerr="Error", hue="Estimator",
capsize=.2, data=df.loc[df["Function"] == func_names[i]], ax=axs[i])
if i == 0:
axs[i].set_ylabel("Function Estimate")
else:
axs[i].set_ylabel("")
axs[i].set_title(func_names[i])
axs[i].set_xticklabels("")
axs[i].set_xlabel("")
if save_pdf:
plt.savefig(results_folder_path + "func_estimates.pdf", bbox_inches='tight', transparent="False", pad_inches=0)
def plot_validation_charts_sensitivity(results_folder_path, func_names, parameter_list, parameter_labels,
exact_sens_estimates,
save_pdf=False):
filename1 = results_folder_path + "BPA_Sens_Values.txt"
filename2 = results_folder_path + "DNN_Sens_Values.txt"
filename3 = results_folder_path + "BPA_Sens_Values_exact.txt"
if exact_sens_estimates is not None:
sens_exact2 = pd.DataFrame(exact_sens_estimates, columns=func_names)
func_names.insert(0, "Parameter")
else:
func_names.insert(0, "Parameter")
sens_exact2 = pd.read_csv(filename3, delimiter=",", names=func_names, skiprows=3)
sens_bpa2 = pd.read_csv(filename1, delimiter=",", names=func_names, skiprows=3)
sens_dnn2 = pd.read_csv(filename2, delimiter=",", names=func_names, skiprows=1)
if exact_sens_estimates is not None:
sens_exact2.insert(0, "Parameter", sens_dnn2["Parameter"])
if parameter_list:
_extended = []
for param in parameter_list:
_extended.append(param + "(std.)")
for param in _extended:
parameter_list.append(param)
sens_bpa = sens_bpa2[sens_bpa2["Parameter"].isin(parameter_list)]
sens_dnn = sens_dnn2[sens_dnn2["Parameter"].isin(parameter_list)]
sens_exact = sens_exact2[sens_exact2["Parameter"].isin(parameter_list)]
else:
sens_bpa = sens_bpa2
sens_dnn = sens_dnn2
sens_exact = sens_exact2
addl_col1 = ["BPA ($10^3$ samples)" for _ in range(len(sens_bpa.index))]
addl_col2 = ["DeepCME" for _ in range(len(sens_dnn.index))]
if exact_sens_estimates is not None:
addl_col3 = ["Exact" for _ in range(len(sens_exact.index))]
else:
addl_col3 = ["BPA ($10^4$ samples)" for _ in range(len(sens_exact.index))]
sens_bpa.insert(3, "Estimator", addl_col1)
sens_dnn.insert(3, "Estimator", addl_col2)
sens_exact.insert(3, "Estimator", addl_col3)
sens_bpa_mean = sens_bpa.head(int(sens_bpa.shape[0] / 2))
sens_bpa_mean_std = sens_bpa.tail(int(sens_bpa.shape[0] / 2))
if exact_sens_estimates is None:
sens_exact_mean = sens_exact.head(int(sens_exact.shape[0] / 2))
sens_exact_mean_std = sens_exact.tail(int(sens_exact.shape[0] / 2))
fig, axs = plt.subplots(1, len(func_names) - 1, num='Estimated Parameter Sensitivities for output functions')
for i in range(len(func_names) - 1):
# sns.barplot(data=df, x="Parameter", y=func_names[i + 1], hue="Estimator")
f1 = sens_bpa_mean[["Parameter", func_names[i + 1], "Estimator"]]
stds = sens_bpa_mean_std[func_names[i + 1]].to_numpy()
f1.insert(2, "Error", 1.96 * stds)
f2 = sens_dnn[["Parameter", func_names[i + 1], "Estimator"]]
f2.insert(2, "Error", None)
if exact_sens_estimates is not None:
f3 = sens_exact[["Parameter", func_names[i + 1], "Estimator"]]
f3.insert(2, "Error", None)
df = pd.concat([f2, f1, f3])
else:
f3 = sens_exact_mean[["Parameter", func_names[i + 1], "Estimator"]]
stds = sens_exact_mean_std[func_names[i + 1]].to_numpy()
f3.insert(2, "Error", 1.96 * stds)
df = pd.concat([f2, f1, f3])
df.set_index(np.arange(df.shape[0]), inplace=True)
# sns.set(rc={'figure.figsize': (9, 6)})
barplot_err(x="Parameter", y=func_names[i + 1], yerr="Error", legend_loc=3, hue="Estimator",
capsize=.2, data=df, ax=axs[i])
if i == 0:
axs[i].set_ylabel("Parameter Sensitivity Estimate")
else:
axs[i].set_ylabel("")
axs[i].set_title(func_names[i + 1])
if parameter_list:
axs[i].set_xticklabels(parameter_labels, fontsize=20)
if save_pdf:
plt.savefig(results_folder_path + "Param_Sens.pdf", bbox_inches='tight', transparent="False", pad_inches=0)
def plotAllLossFunctions(result_folder_path, species_list, save_pdf=False):
plt.rcParams.update({'figure.figsize': [6.4, 4.8]})
plt.figure("Loss functions")
loss_profile_dict = {}
result_folder_path_root = result_folder_path.rstrip('0123456789/')
for species in species_list:
filename = result_folder_path_root + str(species) + "/training_history.csv"
training_history = np.loadtxt(filename, delimiter=",", skiprows=1)
steps = training_history[:, 0].astype(int)
if training_history[0, 1] != 0:
loss_trj = training_history[:, 1] / training_history[0, 1]
else:
loss_trj = training_history[:, 1]
loss_profile_dict[species] = [steps, loss_trj]
# plot the loss functions
sns.set_style("ticks")
sns.set_context("paper", font_scale=1.4)
for species in species_list:
ax = sns.lineplot(x=loss_profile_dict[species][0], y=loss_profile_dict[species][1], linewidth=2,
label="\# species = " + str(species))
ax.legend(loc=0, fontsize=20)
ax.grid(b=True, which='major', linewidth=1.0)
ax.grid(b=True, which='minor', linewidth=0.5)
# ax.set(ylabel='Loss')
# ax.set(xlabel="steps")
# ax.set(ylabel="")
ax.set(xlabel="")
if save_pdf:
print("File saved: " + result_folder_path_root + str(species_list[0]) + "/all_loss_functions.pdf")
plt.savefig(result_folder_path_root + str(species_list[0]) + "/all_loss_functions.pdf", bbox_inches='tight',
transparent="False", pad_inches=0)
def plotAllCPUTimes(result_folder_path, species_list, save_pdf=False):
plt.rcParams.update({'figure.figsize': [9, 6]})
plt.figure("CPU Times")
num_species = []
method = []
cpu_time = []
result_folder_path_root = result_folder_path.rstrip('0123456789/')
for species in species_list:
temp = np.loadtxt(result_folder_path_root + str(species) + "/training_history.csv", delimiter=",", skiprows=1)
cpu_time.append(temp[-1, 2])
method.append("DeepCME")
num_species.append(species)
temp = np.loadtxt(result_folder_path_root + str(species) + "/SimulationValidation.txt", delimiter=":",
skiprows=1, max_rows=1, dtype=str)
sim_time = float(temp[1])
temp = np.loadtxt(result_folder_path_root + str(species) + "/BPA_Sens_Values.txt", delimiter=":",
skiprows=1, max_rows=1, dtype=str)
sim_time += float(temp[1])
method.append("Simulation")
cpu_time.append(sim_time)
num_species.append(species)
d = {"\# species": num_species, "CPU Time (seconds)": cpu_time, "Method": method}
df = pd.DataFrame.from_dict(d)
# sns.set_theme()
sns.set_style("ticks")
sns.set_context("paper", font_scale=1.4)
ax = sns.barplot(x="\# species", y="CPU Time (seconds)", hue="Method", data=df)
ax.set_yscale('log')
plt.grid()
ax.legend(loc=0, fontsize=20)
ax.grid(b=True, which='major', linewidth=1.0)
ax.grid(b=True, which='minor', linewidth=0.5)
ax.set(ylabel="")
ax.set(xlabel="")
ax.get_xaxis().set_ticks([])
# ax.get_yaxis().set_ticks([])
if save_pdf:
print("File saved: " + result_folder_path_root + str(species_list[0]) + "/all_cpu_times.pdf")
plt.savefig(result_folder_path_root + str(species_list[0]) + "/all_cpu_times.pdf", bbox_inches='tight',
transparent="False", pad_inches=0)
def plot_validation_charts_function_separate_comparison(results_folder_path, results_folder_path2, func_names,
exact_values, save_pdf=False):
filename = results_folder_path + "function_value_data.csv"
function_value_data = pd.read_csv(filename, delimiter=",", names=func_names)
dnn_func_values = function_value_data.tail(1).to_numpy()[0, :]
filename2 = results_folder_path2 + "function_value_data.csv"
function_value_data2 = pd.read_csv(filename2, delimiter=",", names=func_names)
dnn_func_values2 = function_value_data2.tail(1).to_numpy()[0, :]
print(dnn_func_values)
print(dnn_func_values2)
del function_value_data
del function_value_data2
dict1 = {"Function": func_names, "Estimate": dnn_func_values2, "Error": None, "Estimator": "DeepCME ($N_H$ = 4)"}
dict2 = {"Function": func_names, "Estimate": dnn_func_values, "Error": None, "Estimator": "DeepCME ($N_H$ = 8)"}
df1 = pd.DataFrame(dict1)
df2 = pd.DataFrame(dict2)
if exact_values is not None:
dict3 = {"Function": func_names, "Estimate": np.array(exact_values),
"Error": None, "Estimator": "Exact"}
df3 = pd.DataFrame(dict3)
df = pd.concat([df1, df2, df3])
else:
filename = results_folder_path + "SimulationValidation_exact.txt"
function_value_data = pd.read_csv(filename, delimiter=",",
names=func_names, skiprows=3)
sim_est_func_values_mean2 = function_value_data.values[0, :]
sim_est_values_std2 = function_value_data.values[1, :]
dict3 = {"Function": func_names, "Estimate": sim_est_func_values_mean2,
"Error": 1.96 * sim_est_values_std2, "Estimator": "mNRM ($10^4$ samples)"}
df3 = pd.DataFrame(dict3)
df = pd.concat([df1, df2, df3])
df.set_index(np.arange(0, 3 * len(func_names)), inplace=True)
fig, axs = plt.subplots(1, len(func_names), num="Estimated function values")
for i in range(len(func_names)):
barplot_err(x="Function", y="Estimate", legend_loc=3, yerr="Error", hue="Estimator",
capsize=.2, data=df.loc[df["Function"] == func_names[i]], ax=axs[i])
if i == 0:
axs[i].set_ylabel("Function Estimate")
else:
axs[i].set_ylabel("")
axs[i].set_title(func_names[i])
axs[i].set_xticklabels("")
axs[i].set_xlabel("")
if save_pdf:
plt.savefig(results_folder_path + "func_estimates.pdf", bbox_inches='tight', transparent="False", pad_inches=0)
def plot_validation_charts_sensitivity_comparison(results_folder_path, results_folder_path2, func_names, parameter_list,
parameter_labels,
exact_sens_estimates,
save_pdf=False):
filename1 = results_folder_path + "DNN_Sens_Values.txt"
filename2 = results_folder_path2 + "DNN_Sens_Values.txt"
filename3 = results_folder_path + "BPA_Sens_Values_exact.txt"
if exact_sens_estimates is not None:
sens_exact2 = pd.DataFrame(exact_sens_estimates, columns=func_names)
func_names.insert(0, "Parameter")
else:
func_names.insert(0, "Parameter")
sens_exact2 = | pd.read_csv(filename3, delimiter=",", names=func_names, skiprows=3) | pandas.read_csv |
"""Tools for the spatial analysis of neighborhood change."""
import esda
import numpy as np
import pandas as pd
import geopandas as gpd
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import silhouette_samples
from libpysal.weights import attach_islands
from libpysal.weights.contiguity import Queen, Rook, Voronoi
from libpysal.weights.distance import KNN, DistanceBand
from libpysal.weights import lag_categorical
from .._data import _Map
from .cluster import (
affinity_propagation,
azp,
gaussian_mixture,
hdbscan,
kmeans,
max_p,
skater,
spectral,
spenc,
ward,
ward_spatial,
)
np.seterr(divide='ignore', invalid='ignore')
Ws = {
"queen": Queen,
"rook": Rook,
"voronoi": Voronoi,
"knn": KNN,
"distanceband": DistanceBand,
}
class ModelResults:
"""Stores data about cluster and cluster_spatial models.
Attributes
----------
X: array-like
data used to compute model
columns: list-like
columns used in model
W: libpysal.weights.W
libpysal spatial weights matrix used in model
labels: array-like
labels of each column
instance: instance of model class used to generate neighborhood labels.
        fitted model instance, e.g. sklearn.cluster.AgglomerativeClustering object
or other model class used to estimate class labels
nearest_labels: dataframe
container for dataframe of nearest_labels
silhouettes: dataframe
container for dataframe of silhouette scores
path_silhouettes: dataframe
container for dataframe of path_silhouette scores
boundary_silhouettes: dataframe
container for dataframe of boundary_silhouette scores
model_type: string
says whether the model is spatial or aspatial (contains a W object)
"""
def __init__(self, X, columns, labels, instance, W):
"""Initialize a new ModelResults instance.
Parameters
----------
X: array-like
data of the cluster
columns: list-like
columns used to compute model
W: libpysal.weights.W
libpysal spatial weights matrix used in model
labels: array-like
labels of each column
        instance: AgglomerativeClustering object, or other model-specific object type
            fitted model instance used to estimate the class labels
nearest_labels: dataframe
container for dataframe of nearest_labels
silhouettes: dataframe
container for dataframe of silhouette scores
path_silhouettes: dataframe
container for dataframe of path_silhouette scores
boundary_silhouettes: dataframe
container for dataframe of boundary_silhouette scores
model_type: string
says whether the model is spatial or aspatial (contains a W object)
"""
self.columns = columns
self.X = X
self.W = W
self.instance = instance
self.labels = labels
self.nearest_labels = None
self.silhouettes = None
self.path_silhouettes = None
self.boundary_silhouettes = None
if self.W is None:
self.model_type = "aspatial"
else:
self.model_type = "spatial"
# Standalone funcs to calc these if you don't want to graph them
def sil_scores(self, **kwargs):
"""
Calculate silhouette scores for the current model.
Returns
-------
silhouette scores stored in a dataframe accessible from `comm.models.['model_name'].silhouettes`
"""
self.silhouettes = pd.DataFrame()
self.silhouettes["silhouettes"] = silhouette_samples(
self.X.values, self.labels, **kwargs
)
self.silhouettes.index = self.X.index
return self.silhouettes
def nearest_label(self, **kwargs):
"""
Calculate nearest_labels for the current model.
Returns
-------
nearest_labels stored in a dataframe accessible from:
`comm.models.['model_name'].nearest_labels`
"""
self.nearest_labels = pd.DataFrame()
self.nearest_labels["nearest_label"] = esda.nearest_label(
self.X.values, self.labels, **kwargs
)
self.nearest_labels.index = self.X.index
return self.nearest_labels
def boundary_sil(self, **kwargs):
"""
Calculate boundary silhouette scores for the current model.
Returns
-------
boundary silhouette scores stored in a dataframe accessible from:
`comm.models.['model_name'].boundary_silhouettes`
"""
assert self.model_type == "spatial", (
"Model is aspatial (lacks a W object), but has been passed to a spatial diagnostic."
" Try aspatial diagnostics like nearest_label() or sil_scores()"
)
self.boundary_silhouettes = pd.DataFrame()
self.boundary_silhouettes["boundary_silhouettes"] = esda.boundary_silhouette(
self.X.values, self.labels, self.W, **kwargs
)
self.boundary_silhouettes.index = self.X.index
return self.boundary_silhouettes
def path_sil(self, **kwargs):
"""
Calculate path silhouette scores for the current model.
Returns
-------
path silhouette scores stored in a dataframe accessible from:
`comm.models.['model_name'].path_silhouettes`
"""
assert self.model_type == "spatial", (
"Model is aspatial(lacks a W object), but has been passed to a spatial diagnostic."
" Try aspatial diagnostics like nearest_label() or sil_scores()"
)
self.path_silhouettes = pd.DataFrame()
self.path_silhouettes["path_silhouettes"] = esda.path_silhouette(
self.X.values, self.labels, self.W, **kwargs
)
self.path_silhouettes.index = self.X.index
return self.path_silhouettes
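# Illustrative usage of the ModelResults diagnostics above (a sketch; `results`
# is assumed to be a fitted ModelResults instance produced by one of the
# clustering functions below):
#   sils = results.sil_scores()
#   nearest = results.nearest_label()
#   if results.model_type == "spatial":
#       bsil = results.boundary_sil()
#       psil = results.path_sil()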
def cluster(
gdf,
n_clusters=6,
method=None,
best_model=False,
columns=None,
verbose=False,
time_var="year",
id_var="geoid",
scaler="std",
pooling="fixed",
**kwargs,
):
"""Create a geodemographic typology by running a cluster analysis on the study area's neighborhood attributes.
Parameters
----------
gdf : geopandas.GeoDataFrame, required
long-form GeoDataFrame containing neighborhood attributes
n_clusters : int, required
        the number of clusters to model. The default is 6.
method : str in ['kmeans', 'ward', 'affinity_propagation', 'spectral','gaussian_mixture', 'hdbscan'], required
the clustering algorithm used to identify neighborhood types
best_model : bool, optional
if using a gaussian mixture model, use BIC to choose the best
n_clusters. (the default is False).
columns : list-like, required
subset of columns on which to apply the clustering
verbose : bool, optional
whether to print warning messages (the default is False).
time_var : str, optional
which column on the dataframe defines time and or sequencing of the
long-form data. Default is "year"
id_var : str, optional
which column on the long-form dataframe identifies the stable units
over time. In a wide-form dataset, this would be the unique index
scaler : None or scaler from sklearn.preprocessing, optional
a scikit-learn preprocessing class that will be used to rescale the
data. Defaults to sklearn.preprocessing.StandardScaler
pooling : ["fixed", "pooled", "unique"], optional (default='fixed')
How to treat temporal data when applying scaling. Options include:
* fixed : scaling is fixed to each time period
* pooled : data are pooled across all time periods
* unique : if scaling, apply the scaler to each time period, then generate
clusters unique to each time period.
Returns
-------
gdf : geopandas.GeoDataFrame
GeoDataFrame with a column of neighborhood cluster labels
appended as a new column. If cluster method exists as a column on the DataFrame
then the column will be incremented.
model : named tuple
A tuple with attributes X, columns, labels, instance, W, which store the
input matrix, column labels, fitted model instance, and spatial weights matrix
model_name : str
name of model to be stored in a Community
"""
specification = {
"ward": ward,
"kmeans": kmeans,
"affinity_propagation": affinity_propagation,
"gaussian_mixture": gaussian_mixture,
"spectral": spectral,
"hdbscan": hdbscan,
}
if scaler == "std":
scaler = StandardScaler()
if method not in specification.keys():
raise ValueError(
"`method` must of one of ['kmeans', 'ward', 'affinity_propagation', 'spectral', 'gaussian_mixture', 'hdbscan']"
)
# if we already have a column named after the clustering method, then increment it.
if method in gdf.columns.tolist():
model_name = method + str(len(gdf.columns[gdf.columns.str.startswith(method)]))
else:
model_name = method
if not columns:
raise ValueError("You must provide a subset of columns as input")
times = gdf[time_var].unique()
gdf = gdf.set_index([time_var, id_var])
# this is the dataset we'll operate on
data = gdf.copy()[columns]
data = data.dropna(how="any", subset=columns)
if scaler:
if pooling in ["fixed", "unique"]:
# if fixed (or unique), scale within each time period
for time in times:
data.loc[time] = scaler.fit_transform(data.loc[time].values)
elif pooling == "pooled":
# if pooled, scale the whole series at once
data.loc[:, columns] = scaler.fit_transform(data.values)
# the rescalar can create nans if a column has no variance, so fill with 0
data = data.fillna(0)
if pooling != "unique":
# run the cluster model then join the labels back to the original data
model = specification[method](
data,
n_clusters=n_clusters,
best_model=best_model,
verbose=verbose,
**kwargs,
)
labels = model.labels_.astype(str)
data = data.reset_index()
clusters = pd.DataFrame(
{model_name: labels, time_var: data[time_var], id_var: data[id_var]}
)
clusters.set_index([time_var, id_var], inplace=True)
clusters = clusters[~clusters.index.duplicated(keep="first")]
gdf = gdf.join(clusters, how="left")
gdf = gdf.reset_index()
results = ModelResults(
X=data.set_index([id_var, time_var]),
columns=columns,
labels=model.labels_,
instance=model,
W=None,
)
return gdf, results, model_name
elif pooling == "unique":
models = _Map()
gdf[model_name] = np.nan
data = data.reset_index()
for time in times:
df = data[data[time_var] == time]
model = specification[method](
df[columns],
n_clusters=n_clusters,
best_model=best_model,
verbose=verbose,
**kwargs,
)
labels = model.labels_.astype(str)
clusters = pd.DataFrame(
{model_name: labels, time_var: time, id_var: df[id_var]}
)
clusters.set_index([time_var, id_var], inplace=True)
gdf.update(clusters)
results = ModelResults(
X=df.set_index([id_var, time_var]),
columns=columns,
labels=model.labels_,
instance=model,
W=None,
)
models[time] = results
gdf = gdf.reset_index()
return gdf, models, model_name
def cluster_spatial(
gdf,
n_clusters=6,
spatial_weights="rook",
method=None,
columns=None,
threshold_variable="count",
threshold=10,
time_var="year",
id_var="geoid",
scaler="std",
weights_kwargs=None,
**kwargs,
):
"""Create a *spatial* geodemographic typology by running a cluster
analysis on the metro area's neighborhood attributes and including a
contiguity constraint.
Parameters
----------
gdf : geopandas.GeoDataFrame
long-form geodataframe holding neighborhood attribute and geometry data.
n_clusters : int
        the number of clusters to model. The default is 6.
spatial_weights : ['queen', 'rook'] or libpysal.weights.W object
spatial weights matrix specification`. By default, geosnap will calculate Rook
weights, but you can also pass a libpysal.weights.W object for more control
over the specification.
method : str in ['ward_spatial', 'spenc', 'skater', 'azp', 'max_p']
the clustering algorithm used to identify neighborhood types
columns : array-like
subset of columns on which to apply the clustering
threshold_variable : str
for max-p, which variable should define `p`. The default is "count",
which will grow regions until the threshold number of polygons have
been aggregated
threshold : numeric
threshold to use for max-p clustering (the default is 10).
time_var : str
which column on the dataframe defines time and or sequencing of the
long-form data. Default is "year"
id_var : str
which column on the long-form dataframe identifies the stable units
over time. In a wide-form dataset, this would be the unique index
weights_kwargs : dict
If passing a libpysal.weights.W instance to spatial_weights, these additional
keyword arguments that will be passed to the weights constructor
scaler : None or scaler class from sklearn.preprocessing
a scikit-learn preprocessing class that will be used to rescale the
data. Defaults to sklearn.preprocessing.StandardScaler
Returns
-------
gdf : geopandas.GeoDataFrame
GeoDataFrame with a column of neighborhood cluster labels
appended as a new column. If cluster method exists as a column on the DataFrame
then the column will be incremented.
models : dict of named tuples
tab-completable dictionary of named tuples keyed on the Community's time variable
(e.g. year). The tuples store model results and have attributes X, columns, labels,
instance, W, which store the input matrix, column labels, fitted model instance,
and spatial weights matrix
model_name : str
name of model to be stored in a Community
"""
specification = {
"azp": azp,
"spenc": spenc,
"ward_spatial": ward_spatial,
"skater": skater,
"max_p": max_p,
}
if method not in specification.keys():
raise ValueError(
"`method` must be one of ['ward_spatial', 'spenc', 'skater', 'azp', 'max_p']"
)
if method in gdf.columns.tolist():
model_name = method + str(len(gdf.columns[gdf.columns.str.startswith(method)]))
else:
model_name = method
if not columns:
raise ValueError("You must provide a subset of columns as input")
if not method:
raise ValueError("You must choose a clustering algorithm to use")
if scaler == "std":
scaler = StandardScaler()
times = gdf[time_var].unique()
gdf = gdf.set_index([time_var, id_var])
# this is the dataset we'll operate on
data = gdf.copy()[columns + ["geometry"]]
contiguity_weights = {"queen": Queen, "rook": Rook}
if spatial_weights in contiguity_weights.keys():
W = contiguity_weights[spatial_weights]
else:
W = spatial_weights
models = _Map()
ws = {}
clusters = []
gdf[model_name] = np.nan
# loop over each time period, standardize the data and build a weights matrix
for time in times:
df = data.loc[time].dropna(how="any", subset=columns).reset_index()
df[time_var] = time
if scaler:
df[columns] = scaler.fit_transform(df[columns].values)
if weights_kwargs:
w0 = W.from_dataframe(df, **weights_kwargs)
else:
w0 = W.from_dataframe(df)
w1 = KNN.from_dataframe(df, k=1)
ws = [w0, w1]
if threshold_variable and threshold_variable != "count":
data[threshold_variable] = gdf[threshold_variable]
            threshold_var = data[threshold_variable].values
ws[0] = attach_islands(ws[0], ws[1])
elif threshold_variable == "count":
threshold_var = np.ones(len(data.loc[time]))
ws[0] = attach_islands(ws[0], ws[1])
else:
threshold_var = None
model = specification[method](
df[columns],
w=ws[0],
n_clusters=n_clusters,
threshold_variable=threshold_var,
threshold=threshold,
**kwargs,
)
labels = model.labels_.astype(str)
clusters = pd.DataFrame(
{model_name: labels, time_var: df[time_var], id_var: df[id_var]}
)
clusters = clusters.drop_duplicates(subset=[id_var])
clusters.set_index([time_var, id_var], inplace=True)
gdf.update(clusters)
results = ModelResults(
X=df.set_index([id_var, time_var]).drop("geometry", axis=1),
columns=columns,
labels=model.labels_,
instance=model,
W=ws[0],
)
models[time] = results
gdf = gdf.reset_index()
return gdf, models, model_name
def predict_labels(
comm,
index_col='geoid',
time_col='year',
model_name=None,
w_type='queen',
w_options=None,
base_year=None,
new_colname=None,
time_steps=1,
increment=None,
seed=None
):
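    """Simulate future cluster labels with a spatial Markov transition model.
    (Docstring added as a summary of the code below; it is not part of the
    original source.) Starting from the labels observed in `base_year`, each
    unit's next label is drawn from the transition probabilities conditioned on
    its current label and the modal label of its spatial neighbors (weights of
    type `w_type`), repeated for `time_steps` periods spaced by `increment`.
    """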
np.random.seed(seed)
if not new_colname:
new_colname = "predicted"
if not w_options:
w_options = {}
assert (
comm.harmonized
), "Predictions based on transition models require harmonized data"
assert (
model_name and model_name in comm.gdf.columns
), "You must provide the name of a cluster model present on the Community gdf"
assert base_year, "Missing `base_year`. You must provide an initial time point with labels to begin simulation"
gdf = comm.gdf.copy()
gdf = gdf.dropna(subset=[model_name]).reset_index(drop=True)
t = comm.transition(model_name, w_type=w_type)
if time_steps == 1:
gdf = gdf[gdf[time_col] == base_year].reset_index(drop=True)
w = Ws[w_type].from_dataframe(gdf, **w_options)
lags = lag_categorical(w, gdf[model_name].values)
lags = lags.astype(int)
labels = {}
for i, cluster in gdf[model_name].astype(int).iteritems():
probs = np.nan_to_num(t.P)[lags[i]][cluster]
probs /= (
probs.sum()
) # correct for tolerance, see https://stackoverflow.com/questions/25985120/numpy-1-9-0-valueerror-probabilities-do-not-sum-to-1
try:
# in case obs have a modal neighbor never before seen in the model
# (so all transition probs are 0)
# fall back to the aspatial transition matrix
labels[i] = np.random.choice(t.classes, p=probs)
except:
labels[i] = np.random.choice(t.classes, p=t.p[cluster])
labels = pd.Series(labels, name=new_colname)
out = gdf[[index_col, "geometry"]]
predicted = pd.concat([labels, out], axis=1)
return gpd.GeoDataFrame(predicted)
else:
assert (
increment
), "You must set the `increment` argument to simulate multiple time steps"
predictions = []
gdf = gdf[gdf[time_col] == base_year]
gdf = gdf[[index_col, model_name, time_col, "geometry"]]
current_time = base_year + increment
gdf = gdf.dropna(subset=[model_name]).reset_index(drop=True)
w = Ws[w_type].from_dataframe(gdf, **w_options)
predictions.append(gdf)
for step in range(time_steps):
# use the last known set of labels to get the spatial context for each geog unit
gdf = predictions[step - 1].copy()
lags = lag_categorical(w, gdf[model_name].values)
lags = lags.astype(int)
labels = {}
for i, cluster in gdf[model_name].astype(int).iteritems():
# use labels and spatial context to get the transition probabilities for each unit
probs = np.nan_to_num(t.P)[lags[i]][cluster]
probs /= (
probs.sum()
) # correct for tolerance, see https://stackoverflow.com/questions/25985120/numpy-1-9-0-valueerror-probabilities-do-not-sum-to-1
try:
# draw from the conditional probabilities for each unit
# in case obs have a modal neighbor never before seen in the model
# (so all transition probs are 0)
# fall back to the aspatial transition matrix
labels[i] = np.random.choice(t.classes, p=probs)
except:
labels[i] = np.random.choice(t.classes, p=t.p[cluster])
labels = pd.Series(labels, name=model_name)
out = gdf[[index_col, "geometry"]]
out[time_col] = current_time
predicted = | pd.concat([labels, out], axis=1) | pandas.concat |
import unittest
import pandas as pd
import numpy as np
from pandas.testing import assert_frame_equal, assert_series_equal
from functions import duplicate_rows_per_column, trim_df, fix_sex, fix_na
class TestFunctions(unittest.TestCase):
def test_duplicate_rows(self):
df = pd.DataFrame(
{"country": ["FR", "CH", "US", "JP"],
"aggr": [np.nan, 3.0, np.nan, 2.0]})
dup = duplicate_rows_per_column(df, col="aggr")
want_df = pd.DataFrame(
{"country": ["FR", "CH", "US", "JP", "CH", "CH", "JP"],
"aggr":[np.nan] * 7})
assert_frame_equal(dup, want_df)
def test_duplicate_rows_one_aggr(self):
df = pd.DataFrame(
{"country": ["FR", "CH", "US"],
"aggr": [np.nan, 1.0, np.nan]})
dup = duplicate_rows_per_column(df, col="aggr")
want_df = pd.DataFrame(
{"country": ["FR", "CH", "US"],
"aggr":[np.nan] * 3})
assert_frame_equal(dup, want_df)
def test_duplicate_rows_empty(self):
df = pd.DataFrame(
{"country": ["FR", "CH", "US"],
"aggr":[np.nan] * 3})
dup = duplicate_rows_per_column(df, col="aggr")
want_df = pd.DataFrame(
{"country": ["FR", "CH", "US"],
"aggr":[np.nan] * 3})
assert_frame_equal(dup, want_df)
def test_trim_df(self):
df = pd.DataFrame({"country": ["FR ", " CH", " US "]})
dup = trim_df(df)
want_df = pd.DataFrame({"country": ["FR", "CH", "US"]})
assert_frame_equal(dup, want_df)
def test_fix_sex(self):
df = pd.DataFrame({"sex": ["NA", "", "M", "F", "male", "FEMALE"]})
dup = fix_sex(df.sex)
want_df = pd.DataFrame({"sex": ["NA", "", "male", "female", "male", "female"]})
assert_series_equal(dup, want_df.sex)
def test_fix_na(self):
df = pd.DataFrame({"country": ["NA", "na", "", "foo", "n/A", "N/A"]})
dup = fix_na(df.country)
want_df = | pd.DataFrame({"country": ["NA", "NA", "", "foo", "NA", "NA"]}) | pandas.DataFrame |
"""
This script plots the ARI and Runtime values obtained from graspyclust_experiments.py, autogmm_experiments.py, and mclust_experiments.r
It saves the figures as subset_abc.png and subset_def.png
"""
#%%
import numpy as np
from scipy.stats import mode
from scipy.stats import wilcoxon
from sklearn.metrics import adjusted_rand_score
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
#%%
print("Reading data...")
path = "/results/"
# read the data
mclust_s = pd.read_csv(path + "mclust_synthetic.csv")
mclust_s = mclust_s.loc[:, ["ARI", "Time"]]
mclust_s["Dataset"] = mclust_s.shape[0] * ["Synthetic"]
mclust_s["Algorithm"] = mclust_s.shape[0] * ["mclust"]
mclust_bc = pd.read_csv(path + "mclust_bc.csv")
mclust_bc = mclust_bc.loc[:, ["ARI", "Time"]]
mclust_bc["Dataset"] = mclust_bc.shape[0] * ["Breast Cancer"]
mclust_bc["Algorithm"] = mclust_bc.shape[0] * ["mclust"]
mclust_dro = | pd.read_csv(path + "mclust_drosophila.csv") | pandas.read_csv |
# Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional information regarding
# copyright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
import numpy as np
import pandas
from pandas.core.common import is_bool_indexer
from pandas.core.indexing import check_bool_indexer
from pandas.core.dtypes.common import (
is_list_like,
is_numeric_dtype,
is_datetime_or_timedelta_dtype,
is_scalar,
)
from pandas.core.base import DataError
import warnings
from modin.backends.base.query_compiler import BaseQueryCompiler
from modin.error_message import ErrorMessage
from modin.utils import try_cast_to_pandas, wrap_udf_function
from modin.data_management.functions import (
FoldFunction,
MapFunction,
MapReduceFunction,
ReductionFunction,
BinaryFunction,
GroupbyReduceFunction,
)
def _get_axis(axis):
if axis == 0:
return lambda self: self._modin_frame.index
else:
return lambda self: self._modin_frame.columns
def _set_axis(axis):
if axis == 0:
def set_axis(self, idx):
self._modin_frame.index = idx
else:
def set_axis(self, cols):
self._modin_frame.columns = cols
return set_axis
def _str_map(func_name):
def str_op_builder(df, *args, **kwargs):
str_s = df.squeeze(axis=1).str
return getattr(pandas.Series.str, func_name)(str_s, *args, **kwargs).to_frame()
return str_op_builder
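# Usage sketch: the builder returned by _str_map is handed to
# MapFunction.register in the "String map partitions operations" section
# further below, for example
#   str_lower = MapFunction.register(_str_map("lower"), dtypes="copy")
# so each partition applies pandas.Series.str.lower to its single column.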
def _dt_prop_map(property_name):
"""
Create a function that call property of property `dt` of the series.
Parameters
----------
property_name
The property of `dt`, which will be applied.
Returns
-------
A callable function to be applied in the partitions
Notes
-----
This applies non-callable properties of `Series.dt`.
"""
def dt_op_builder(df, *args, **kwargs):
prop_val = getattr(df.squeeze(axis=1).dt, property_name)
if isinstance(prop_val, pandas.Series):
return prop_val.to_frame()
elif isinstance(prop_val, pandas.DataFrame):
return prop_val
else:
return pandas.DataFrame([prop_val])
return dt_op_builder
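# For instance, the registration
#   dt_year = MapFunction.register(_dt_prop_map("year"))
# in the "Dt map partitions operations" section below applies Series.dt.year
# per partition and wraps the result back into a one-column frame.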
def _dt_func_map(func_name):
"""
Create a function that call method of property `dt` of the series.
Parameters
----------
func_name
The method of `dt`, which will be applied.
Returns
-------
A callable function to be applied in the partitions
Notes
-----
This applies callable methods of `Series.dt`.
"""
def dt_op_builder(df, *args, **kwargs):
dt_s = df.squeeze(axis=1).dt
return pandas.DataFrame(
getattr(pandas.Series.dt, func_name)(dt_s, *args, **kwargs)
)
return dt_op_builder
def copy_df_for_func(func):
"""
Create a function that copies the dataframe, likely because `func` is inplace.
Parameters
----------
func : callable
The function, usually updates a dataframe inplace.
Returns
-------
callable
A callable function to be applied in the partitions
"""
def caller(df, *args, **kwargs):
df = df.copy()
func(df, *args, **kwargs)
return df
return caller
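# Sketch of why this wrapper exists: pandas.DataFrame.update mutates its
# receiver in place and returns None, so it is wrapped before registration
# below, e.g.
#   df_update = BinaryFunction.register(
#       copy_df_for_func(pandas.DataFrame.update), join_type="left"
#   )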
class PandasQueryCompiler(BaseQueryCompiler):
"""This class implements the logic necessary for operating on partitions
with a Pandas backend. This logic is specific to Pandas."""
def __init__(self, modin_frame):
self._modin_frame = modin_frame
def default_to_pandas(self, pandas_op, *args, **kwargs):
"""Default to pandas behavior.
Parameters
----------
pandas_op : callable
The operation to apply, must be compatible pandas DataFrame call
args
The arguments for the `pandas_op`
kwargs
The keyword arguments for the `pandas_op`
Returns
-------
PandasQueryCompiler
The result of the `pandas_op`, converted back to PandasQueryCompiler
Note
----
This operation takes a distributed object and converts it directly to pandas.
"""
ErrorMessage.default_to_pandas(str(pandas_op))
args = (a.to_pandas() if isinstance(a, type(self)) else a for a in args)
kwargs = {
            k: v.to_pandas() if isinstance(v, type(self)) else v
for k, v in kwargs.items()
}
result = pandas_op(self.to_pandas(), *args, **kwargs)
if isinstance(result, pandas.Series):
if result.name is None:
result.name = "__reduced__"
result = result.to_frame()
if isinstance(result, pandas.DataFrame):
return self.from_pandas(result, type(self._modin_frame))
else:
return result
def to_pandas(self):
return self._modin_frame.to_pandas()
@classmethod
def from_pandas(cls, df, data_cls):
return cls(data_cls.from_pandas(df))
@classmethod
def from_arrow(cls, at, data_cls):
return cls(data_cls.from_arrow(at))
index = property(_get_axis(0), _set_axis(0))
columns = property(_get_axis(1), _set_axis(1))
@property
def dtypes(self):
return self._modin_frame.dtypes
# END Index, columns, and dtypes objects
# Metadata modification methods
def add_prefix(self, prefix, axis=1):
return self.__constructor__(self._modin_frame.add_prefix(prefix, axis))
def add_suffix(self, suffix, axis=1):
return self.__constructor__(self._modin_frame.add_suffix(suffix, axis))
# END Metadata modification methods
# Copy
# For copy, we don't want a situation where we modify the metadata of the
# copies if we end up modifying something here. We copy all of the metadata
# to prevent that.
def copy(self):
return self.__constructor__(self._modin_frame.copy())
# END Copy
# Append/Concat/Join (Not Merge)
# The append/concat/join operations should ideally never trigger remote
# compute. These operations should only ever be manipulations of the
# metadata of the resulting object. It should just be a simple matter of
# appending the other object's blocks and adding np.nan columns for the new
# columns, if needed. If new columns are added, some compute may be
# required, though it can be delayed.
#
# Currently this computation is not delayed, and it may make a copy of the
# DataFrame in memory. This can be problematic and should be fixed in the
# future. TODO (devin-petersohn): Delay reindexing
def concat(self, axis, other, **kwargs):
"""Concatenates two objects together.
Args:
            axis: The axis to concatenate along (0 for index/rows, 1 for columns).
            other: The other QueryCompiler object(s) to concatenate with.
Returns:
Concatenated objects.
"""
if not isinstance(other, list):
other = [other]
assert all(
isinstance(o, type(self)) for o in other
), "Different Manager objects are being used. This is not allowed"
sort = kwargs.get("sort", None)
if sort is None:
sort = False
join = kwargs.get("join", "outer")
ignore_index = kwargs.get("ignore_index", False)
other_modin_frame = [o._modin_frame for o in other]
new_modin_frame = self._modin_frame._concat(axis, other_modin_frame, join, sort)
result = self.__constructor__(new_modin_frame)
if ignore_index:
if axis == 0:
return result.reset_index(drop=True)
else:
result.columns = pandas.RangeIndex(len(result.columns))
return result
return result
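    # Hypothetical usage sketch (variable names are illustrative only):
    # given two compilers `qc` and `other_qc`,
    #   stacked = qc.concat(0, other_qc, join="outer", sort=False,
    #                       ignore_index=True)
    # returns a compiler holding both row sets relabelled with a RangeIndex.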
# END Append/Concat/Join
# Data Management Methods
def free(self):
"""In the future, this will hopefully trigger a cleanup of this object."""
# TODO create a way to clean up this object.
return
# END Data Management Methods
# To NumPy
def to_numpy(self, **kwargs):
"""
Converts Modin DataFrame to NumPy array.
Returns
-------
NumPy array of the QueryCompiler.
"""
arr = self._modin_frame.to_numpy(**kwargs)
ErrorMessage.catch_bugs_and_request_email(
len(arr) != len(self.index) or len(arr[0]) != len(self.columns)
)
return arr
# END To NumPy
# Binary operations (e.g. add, sub)
# These operations require two DataFrames and will change the shape of the
# data if the index objects don't match. An outer join + op is performed,
# such that columns/rows that don't have an index on the other DataFrame
# result in NaN values.
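    # As a concrete illustration of the note above: adding two compilers whose
    # column labels only partially overlap behaves like pandas' df1.add(df2)
    # with no fill_value, so labels present in only one operand come back as NaN.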
add = BinaryFunction.register(pandas.DataFrame.add)
combine = BinaryFunction.register(pandas.DataFrame.combine)
combine_first = BinaryFunction.register(pandas.DataFrame.combine_first)
eq = BinaryFunction.register(pandas.DataFrame.eq)
floordiv = BinaryFunction.register(pandas.DataFrame.floordiv)
ge = BinaryFunction.register(pandas.DataFrame.ge)
gt = BinaryFunction.register(pandas.DataFrame.gt)
le = BinaryFunction.register(pandas.DataFrame.le)
lt = BinaryFunction.register(pandas.DataFrame.lt)
mod = BinaryFunction.register(pandas.DataFrame.mod)
mul = BinaryFunction.register(pandas.DataFrame.mul)
ne = BinaryFunction.register(pandas.DataFrame.ne)
pow = BinaryFunction.register(pandas.DataFrame.pow)
rfloordiv = BinaryFunction.register(pandas.DataFrame.rfloordiv)
rmod = BinaryFunction.register(pandas.DataFrame.rmod)
rpow = BinaryFunction.register(pandas.DataFrame.rpow)
rsub = BinaryFunction.register(pandas.DataFrame.rsub)
rtruediv = BinaryFunction.register(pandas.DataFrame.rtruediv)
sub = BinaryFunction.register(pandas.DataFrame.sub)
truediv = BinaryFunction.register(pandas.DataFrame.truediv)
__and__ = BinaryFunction.register(pandas.DataFrame.__and__)
__or__ = BinaryFunction.register(pandas.DataFrame.__or__)
__rand__ = BinaryFunction.register(pandas.DataFrame.__rand__)
__ror__ = BinaryFunction.register(pandas.DataFrame.__ror__)
__rxor__ = BinaryFunction.register(pandas.DataFrame.__rxor__)
__xor__ = BinaryFunction.register(pandas.DataFrame.__xor__)
df_update = BinaryFunction.register(
copy_df_for_func(pandas.DataFrame.update), join_type="left"
)
series_update = BinaryFunction.register(
copy_df_for_func(
lambda x, y: pandas.Series.update(x.squeeze(axis=1), y.squeeze(axis=1))
),
join_type="left",
)
def where(self, cond, other, **kwargs):
"""Gets values from this manager where cond is true else from other.
Args:
cond: Condition on which to evaluate values.
Returns:
New QueryCompiler with updated data and index.
"""
assert isinstance(
cond, type(self)
), "Must have the same QueryCompiler subclass to perform this operation"
if isinstance(other, type(self)):
# Note: Currently we are doing this with two maps across the entire
# data. This can be done with a single map, but it will take a
# modification in the `BlockPartition` class.
# If this were in one pass it would be ~2x faster.
# TODO (devin-petersohn) rewrite this to take one pass.
def where_builder_first_pass(cond, other, **kwargs):
return cond.where(cond, other, **kwargs)
first_pass = cond._modin_frame._binary_op(
where_builder_first_pass, other._modin_frame, join_type="left"
)
def where_builder_second_pass(df, new_other, **kwargs):
return df.where(new_other.eq(True), new_other, **kwargs)
new_modin_frame = self._modin_frame._binary_op(
where_builder_second_pass, first_pass, join_type="left"
)
# This will be a Series of scalars to be applied based on the condition
# dataframe.
else:
def where_builder_series(df, cond):
return df.where(cond, other, **kwargs)
new_modin_frame = self._modin_frame._binary_op(
where_builder_series, cond._modin_frame, join_type="left"
)
return self.__constructor__(new_modin_frame)
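    # Usage sketch (hypothetical names): qc.where(cond_qc, other_qc) keeps the
    # values of qc wherever cond_qc is True and falls back to other_qc
    # elsewhere, mirroring pandas.DataFrame.where.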
def merge(self, right, **kwargs):
"""
Merge DataFrame or named Series objects with a database-style join.
Parameters
----------
right : PandasQueryCompiler
The query compiler of the right DataFrame to merge with.
Returns
-------
PandasQueryCompiler
A new query compiler that contains result of the merge.
Notes
-----
See pd.merge or pd.DataFrame.merge for more info on kwargs.
"""
how = kwargs.get("how", "inner")
on = kwargs.get("on", None)
left_on = kwargs.get("left_on", None)
right_on = kwargs.get("right_on", None)
left_index = kwargs.get("left_index", False)
right_index = kwargs.get("right_index", False)
sort = kwargs.get("sort", False)
if how in ["left", "inner"] and left_index is False and right_index is False:
right = right.to_pandas()
kwargs["sort"] = False
def map_func(left, right=right, kwargs=kwargs):
return pandas.merge(left, right, **kwargs)
new_self = self.__constructor__(
self._modin_frame._apply_full_axis(1, map_func)
)
is_reset_index = True
if left_on and right_on:
left_on = left_on if is_list_like(left_on) else [left_on]
right_on = right_on if is_list_like(right_on) else [right_on]
is_reset_index = (
False
if any(o in new_self.index.names for o in left_on)
and any(o in right.index.names for o in right_on)
else True
)
if sort:
new_self = (
new_self.sort_rows_by_column_values(left_on.append(right_on))
if is_reset_index
else new_self.sort_index(axis=0, level=left_on.append(right_on))
)
if on:
on = on if is_list_like(on) else [on]
is_reset_index = not any(
o in new_self.index.names and o in right.index.names for o in on
)
if sort:
new_self = (
new_self.sort_rows_by_column_values(on)
if is_reset_index
else new_self.sort_index(axis=0, level=on)
)
return new_self.reset_index(drop=True) if is_reset_index else new_self
else:
return self.default_to_pandas(pandas.DataFrame.merge, right, **kwargs)
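    # Illustrative call (argument values assumed, not taken from the codebase):
    #   merged = left_qc.merge(right_qc, how="left", on="key", sort=False)
    # For the left/inner fast path above, the right frame is materialised with
    # to_pandas() and merged against each row partition of the left frame.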
def join(self, right, **kwargs):
"""
Join columns of another DataFrame.
Parameters
----------
right : BaseQueryCompiler
The query compiler of the right DataFrame to join with.
Returns
-------
BaseQueryCompiler
A new query compiler that contains result of the join.
Notes
-----
See pd.DataFrame.join for more info on kwargs.
"""
on = kwargs.get("on", None)
how = kwargs.get("how", "left")
sort = kwargs.get("sort", False)
if how in ["left", "inner"]:
right = right.to_pandas()
def map_func(left, right=right, kwargs=kwargs):
return pandas.DataFrame.join(left, right, **kwargs)
new_self = self.__constructor__(
self._modin_frame._apply_full_axis(1, map_func)
)
return new_self.sort_rows_by_column_values(on) if sort else new_self
else:
return self.default_to_pandas(pandas.DataFrame.join, right, **kwargs)
# END Inter-Data operations
# Reindex/reset_index (may shuffle data)
def reindex(self, axis, labels, **kwargs):
"""Fits a new index for this Manager.
Args:
            axis: The axis to reindex along (0 for index, 1 for columns).
            labels: The new labels to conform the axis to.
Returns:
A new QueryCompiler with updated data and new index.
"""
new_index = self.index if axis else labels
new_columns = labels if axis else self.columns
new_modin_frame = self._modin_frame._apply_full_axis(
axis,
lambda df: df.reindex(labels=labels, axis=axis, **kwargs),
new_index=new_index,
new_columns=new_columns,
)
return self.__constructor__(new_modin_frame)
def reset_index(self, **kwargs):
"""Removes all levels from index and sets a default level_0 index.
Returns:
A new QueryCompiler with updated data and reset index.
"""
drop = kwargs.get("drop", False)
level = kwargs.get("level", None)
# TODO Implement level
if level is not None or self.has_multiindex():
return self.default_to_pandas(pandas.DataFrame.reset_index, **kwargs)
if not drop:
new_column_name = (
self.index.name
if self.index.name is not None
else "index"
if "index" not in self.columns
else "level_0"
)
new_self = self.insert(0, new_column_name, self.index)
else:
new_self = self.copy()
new_self.index = pandas.RangeIndex(len(new_self.index))
return new_self
# END Reindex/reset_index
# Transpose
# For transpose, we aren't going to immediately copy everything. Since the
# actual transpose operation is very fast, we will just do it before any
# operation that gets called on the transposed data. See _prepare_method
# for how the transpose is applied.
#
# Our invariants assume that the blocks are transposed, but not the
# data inside. Sometimes we have to reverse this transposition of blocks
# for simplicity of implementation.
def transpose(self, *args, **kwargs):
"""Transposes this QueryCompiler.
Returns:
Transposed new QueryCompiler.
"""
# Switch the index and columns and transpose the data within the blocks.
return self.__constructor__(self._modin_frame.transpose())
def columnarize(self):
"""
Transposes this QueryCompiler if it has a single row but multiple columns.
This method should be called for QueryCompilers representing a Series object,
i.e. self.is_series_like() should be True.
Returns
-------
PandasQueryCompiler
Transposed new QueryCompiler or self.
"""
if len(self.columns) != 1 or (
len(self.index) == 1 and self.index[0] == "__reduced__"
):
return self.transpose()
return self
def is_series_like(self):
"""Return True if QueryCompiler has a single column or row"""
return len(self.columns) == 1 or len(self.index) == 1
# END Transpose
# MapReduce operations
def _is_monotonic(self, func_type=None):
funcs = {
"increasing": lambda df: df.is_monotonic_increasing,
"decreasing": lambda df: df.is_monotonic_decreasing,
}
monotonic_fn = funcs.get(func_type, funcs["increasing"])
def is_monotonic_map(df):
df = df.squeeze(axis=1)
return [monotonic_fn(df), df.iloc[0], df.iloc[len(df) - 1]]
def is_monotonic_reduce(df):
df = df.squeeze(axis=1)
common_case = df[0].all()
left_edges = df[1]
right_edges = df[2]
edges_list = []
for i in range(len(left_edges)):
edges_list.extend([left_edges.iloc[i], right_edges.iloc[i]])
edge_case = monotonic_fn(pandas.Series(edges_list))
return [common_case and edge_case]
return MapReduceFunction.register(
is_monotonic_map, is_monotonic_reduce, axis=0
)(self)
def is_monotonic_decreasing(self):
return self._is_monotonic(func_type="decreasing")
is_monotonic = _is_monotonic
count = MapReduceFunction.register(pandas.DataFrame.count, pandas.DataFrame.sum)
max = MapReduceFunction.register(pandas.DataFrame.max, pandas.DataFrame.max)
min = MapReduceFunction.register(pandas.DataFrame.min, pandas.DataFrame.min)
sum = MapReduceFunction.register(pandas.DataFrame.sum, pandas.DataFrame.sum)
prod = MapReduceFunction.register(pandas.DataFrame.prod, pandas.DataFrame.prod)
any = MapReduceFunction.register(pandas.DataFrame.any, pandas.DataFrame.any)
all = MapReduceFunction.register(pandas.DataFrame.all, pandas.DataFrame.all)
memory_usage = MapReduceFunction.register(
pandas.DataFrame.memory_usage,
lambda x, *args, **kwargs: pandas.DataFrame.sum(x),
axis=0,
)
mean = MapReduceFunction.register(
lambda df, **kwargs: df.apply(
lambda x: (x.sum(skipna=kwargs.get("skipna", True)), x.count()),
axis=kwargs.get("axis", 0),
result_type="reduce",
).set_axis(df.axes[kwargs.get("axis", 0) ^ 1], axis=0),
lambda df, **kwargs: df.apply(
lambda x: x.apply(lambda d: d[0]).sum(skipna=kwargs.get("skipna", True))
/ x.apply(lambda d: d[1]).sum(skipna=kwargs.get("skipna", True)),
axis=kwargs.get("axis", 0),
).set_axis(df.axes[kwargs.get("axis", 0) ^ 1], axis=0),
)
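    # Worked sketch of the map/reduce split above: a column stored as the row
    # chunks [1, 2] and [3, 4, 5] maps to the (sum, count) pairs (3, 2) and
    # (12, 3); the reduce step then yields (3 + 12) / (2 + 3) = 3.0, matching
    # pandas.Series([1, 2, 3, 4, 5]).mean().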
def value_counts(self, **kwargs):
"""
Return a QueryCompiler of Series containing counts of unique values.
Returns
-------
PandasQueryCompiler
"""
if kwargs.get("bins", None) is not None:
new_modin_frame = self._modin_frame._apply_full_axis(
0, lambda df: df.squeeze(axis=1).value_counts(**kwargs)
)
return self.__constructor__(new_modin_frame)
def map_func(df, *args, **kwargs):
return df.squeeze(axis=1).value_counts(**kwargs)
def reduce_func(df, *args, **kwargs):
normalize = kwargs.get("normalize", False)
sort = kwargs.get("sort", True)
ascending = kwargs.get("ascending", False)
dropna = kwargs.get("dropna", True)
try:
result = df.squeeze(axis=1).groupby(df.index, sort=False).sum()
# This will happen with Arrow buffer read-only errors. We don't want to copy
# all the time, so this will try to fast-path the code first.
except (ValueError):
result = df.copy().squeeze(axis=1).groupby(df.index, sort=False).sum()
if not dropna and np.nan in df.index:
result = result.append(
pandas.Series(
[df.squeeze(axis=1).loc[[np.nan]].sum()], index=[np.nan]
)
)
if normalize:
result = result / df.squeeze(axis=1).sum()
result = result.sort_values(ascending=ascending) if sort else result
# We want to sort both values and indices of the result object.
# This function will sort indices for equal values.
def sort_index_for_equal_values(result, ascending):
"""
Sort indices for equal values of result object.
Parameters
----------
result : pandas.Series or pandas.DataFrame with one column
The object whose indices for equal values is needed to sort.
ascending : boolean
Sort in ascending (if it is True) or descending (if it is False) order.
Returns
-------
pandas.DataFrame
A new DataFrame with sorted indices.
"""
is_range = False
is_end = False
i = 0
new_index = np.empty(len(result), dtype=type(result.index))
while i < len(result):
j = i
if i < len(result) - 1:
while result[result.index[i]] == result[result.index[i + 1]]:
i += 1
if is_range is False:
is_range = True
if i == len(result) - 1:
is_end = True
break
if is_range:
k = j
for val in sorted(
result.index[j : i + 1], reverse=not ascending
):
new_index[k] = val
k += 1
if is_end:
break
is_range = False
else:
new_index[j] = result.index[j]
i += 1
return pandas.DataFrame(result, index=new_index)
return sort_index_for_equal_values(result, ascending)
return MapReduceFunction.register(map_func, reduce_func, preserve_index=False)(
self, **kwargs
)
# END MapReduce operations
# Reduction operations
idxmax = ReductionFunction.register(pandas.DataFrame.idxmax)
idxmin = ReductionFunction.register(pandas.DataFrame.idxmin)
median = ReductionFunction.register(pandas.DataFrame.median)
nunique = ReductionFunction.register(pandas.DataFrame.nunique)
skew = ReductionFunction.register(pandas.DataFrame.skew)
kurt = ReductionFunction.register(pandas.DataFrame.kurt)
sem = ReductionFunction.register(pandas.DataFrame.sem)
std = ReductionFunction.register(pandas.DataFrame.std)
var = ReductionFunction.register(pandas.DataFrame.var)
sum_min_count = ReductionFunction.register(pandas.DataFrame.sum)
prod_min_count = ReductionFunction.register(pandas.DataFrame.prod)
quantile_for_single_value = ReductionFunction.register(pandas.DataFrame.quantile)
mad = ReductionFunction.register(pandas.DataFrame.mad)
to_datetime = ReductionFunction.register(
lambda df, *args, **kwargs: pandas.to_datetime(
df.squeeze(axis=1), *args, **kwargs
),
axis=1,
)
# END Reduction operations
def _resample_func(
self, resample_args, func_name, new_columns=None, df_op=None, *args, **kwargs
):
def map_func(df, resample_args=resample_args):
if df_op is not None:
df = df_op(df)
resampled_val = df.resample(*resample_args)
op = getattr(pandas.core.resample.Resampler, func_name)
if callable(op):
try:
# This will happen with Arrow buffer read-only errors. We don't want to copy
# all the time, so this will try to fast-path the code first.
val = op(resampled_val, *args, **kwargs)
except (ValueError):
resampled_val = df.copy().resample(*resample_args)
val = op(resampled_val, *args, **kwargs)
else:
val = getattr(resampled_val, func_name)
if isinstance(val, pandas.Series):
return val.to_frame()
else:
return val
new_modin_frame = self._modin_frame._apply_full_axis(
axis=0, func=map_func, new_columns=new_columns
)
return self.__constructor__(new_modin_frame)
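    # Sketch, assuming resample_args simply carries the positional arguments of
    # pandas' .resample(): resample_count(("2T",)) resamples each full-column
    # partition with df.resample("2T") and then applies Resampler.count to it.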
def resample_get_group(self, resample_args, name, obj):
return self._resample_func(resample_args, "get_group", name=name, obj=obj)
def resample_app_ser(self, resample_args, func, *args, **kwargs):
return self._resample_func(
resample_args,
"apply",
df_op=lambda df: df.squeeze(axis=1),
func=func,
*args,
**kwargs,
)
def resample_app_df(self, resample_args, func, *args, **kwargs):
return self._resample_func(resample_args, "apply", func=func, *args, **kwargs)
def resample_agg_ser(self, resample_args, func, *args, **kwargs):
return self._resample_func(
resample_args,
"aggregate",
df_op=lambda df: df.squeeze(axis=1),
func=func,
*args,
**kwargs,
)
def resample_agg_df(self, resample_args, func, *args, **kwargs):
return self._resample_func(
resample_args, "aggregate", func=func, *args, **kwargs
)
def resample_transform(self, resample_args, arg, *args, **kwargs):
return self._resample_func(resample_args, "transform", arg=arg, *args, **kwargs)
def resample_pipe(self, resample_args, func, *args, **kwargs):
return self._resample_func(resample_args, "pipe", func=func, *args, **kwargs)
def resample_ffill(self, resample_args, limit):
return self._resample_func(resample_args, "ffill", limit=limit)
def resample_backfill(self, resample_args, limit):
return self._resample_func(resample_args, "backfill", limit=limit)
def resample_bfill(self, resample_args, limit):
return self._resample_func(resample_args, "bfill", limit=limit)
def resample_pad(self, resample_args, limit):
return self._resample_func(resample_args, "pad", limit=limit)
def resample_nearest(self, resample_args, limit):
return self._resample_func(resample_args, "nearest", limit=limit)
def resample_fillna(self, resample_args, method, limit):
return self._resample_func(resample_args, "fillna", method=method, limit=limit)
def resample_asfreq(self, resample_args, fill_value):
return self._resample_func(resample_args, "asfreq", fill_value=fill_value)
def resample_interpolate(
self,
resample_args,
method,
axis,
limit,
inplace,
limit_direction,
limit_area,
downcast,
**kwargs,
):
return self._resample_func(
resample_args,
"interpolate",
axis=axis,
limit=limit,
inplace=inplace,
limit_direction=limit_direction,
limit_area=limit_area,
downcast=downcast,
**kwargs,
)
def resample_count(self, resample_args):
return self._resample_func(resample_args, "count")
def resample_nunique(self, resample_args, _method, *args, **kwargs):
return self._resample_func(
resample_args, "nunique", _method=_method, *args, **kwargs
)
def resample_first(self, resample_args, _method, *args, **kwargs):
return self._resample_func(
resample_args, "first", _method=_method, *args, **kwargs
)
def resample_last(self, resample_args, _method, *args, **kwargs):
return self._resample_func(
resample_args, "last", _method=_method, *args, **kwargs
)
def resample_max(self, resample_args, _method, *args, **kwargs):
return self._resample_func(
resample_args, "max", _method=_method, *args, **kwargs
)
def resample_mean(self, resample_args, _method, *args, **kwargs):
return self._resample_func(
            resample_args, "mean", _method=_method, *args, **kwargs
)
def resample_median(self, resample_args, _method, *args, **kwargs):
return self._resample_func(
resample_args, "median", _method=_method, *args, **kwargs
)
def resample_min(self, resample_args, _method, *args, **kwargs):
return self._resample_func(
resample_args, "min", _method=_method, *args, **kwargs
)
def resample_ohlc_ser(self, resample_args, _method, *args, **kwargs):
return self._resample_func(
resample_args,
"ohlc",
df_op=lambda df: df.squeeze(axis=1),
_method=_method,
*args,
**kwargs,
)
def resample_ohlc_df(self, resample_args, _method, *args, **kwargs):
return self._resample_func(
resample_args, "ohlc", _method=_method, *args, **kwargs
)
def resample_prod(self, resample_args, _method, min_count, *args, **kwargs):
return self._resample_func(
resample_args, "prod", _method=_method, min_count=min_count, *args, **kwargs
)
def resample_size(self, resample_args):
return self._resample_func(resample_args, "size", new_columns=["__reduced__"])
def resample_sem(self, resample_args, _method, *args, **kwargs):
return self._resample_func(
resample_args, "sem", _method=_method, *args, **kwargs
)
def resample_std(self, resample_args, ddof, *args, **kwargs):
return self._resample_func(resample_args, "std", ddof=ddof, *args, **kwargs)
def resample_sum(self, resample_args, _method, min_count, *args, **kwargs):
return self._resample_func(
resample_args, "sum", _method=_method, min_count=min_count, *args, **kwargs
)
def resample_var(self, resample_args, ddof, *args, **kwargs):
return self._resample_func(resample_args, "var", ddof=ddof, *args, **kwargs)
def resample_quantile(self, resample_args, q, **kwargs):
return self._resample_func(resample_args, "quantile", q=q, **kwargs)
window_mean = FoldFunction.register(
lambda df, rolling_args, *args, **kwargs: pandas.DataFrame(
df.rolling(*rolling_args).mean(*args, **kwargs)
)
)
window_sum = FoldFunction.register(
lambda df, rolling_args, *args, **kwargs: pandas.DataFrame(
df.rolling(*rolling_args).sum(*args, **kwargs)
)
)
window_var = FoldFunction.register(
lambda df, rolling_args, ddof, *args, **kwargs: pandas.DataFrame(
df.rolling(*rolling_args).var(ddof=ddof, *args, **kwargs)
)
)
window_std = FoldFunction.register(
lambda df, rolling_args, ddof, *args, **kwargs: pandas.DataFrame(
df.rolling(*rolling_args).std(ddof=ddof, *args, **kwargs)
)
)
rolling_count = FoldFunction.register(
lambda df, rolling_args: pandas.DataFrame(df.rolling(*rolling_args).count())
)
rolling_sum = FoldFunction.register(
lambda df, rolling_args, *args, **kwargs: pandas.DataFrame(
df.rolling(*rolling_args).sum(*args, **kwargs)
)
)
rolling_mean = FoldFunction.register(
lambda df, rolling_args, *args, **kwargs: pandas.DataFrame(
df.rolling(*rolling_args).mean(*args, **kwargs)
)
)
rolling_median = FoldFunction.register(
lambda df, rolling_args, **kwargs: pandas.DataFrame(
df.rolling(*rolling_args).median(**kwargs)
)
)
rolling_var = FoldFunction.register(
lambda df, rolling_args, ddof, *args, **kwargs: pandas.DataFrame(
df.rolling(*rolling_args).var(ddof=ddof, *args, **kwargs)
)
)
rolling_std = FoldFunction.register(
lambda df, rolling_args, ddof, *args, **kwargs: pandas.DataFrame(
df.rolling(*rolling_args).std(ddof=ddof, *args, **kwargs)
)
)
rolling_min = FoldFunction.register(
lambda df, rolling_args, *args, **kwargs: pandas.DataFrame(
df.rolling(*rolling_args).min(*args, **kwargs)
)
)
rolling_max = FoldFunction.register(
lambda df, rolling_args, *args, **kwargs: pandas.DataFrame(
df.rolling(*rolling_args).max(*args, **kwargs)
)
)
rolling_skew = FoldFunction.register(
lambda df, rolling_args, **kwargs: pandas.DataFrame(
df.rolling(*rolling_args).skew(**kwargs)
)
)
rolling_kurt = FoldFunction.register(
lambda df, rolling_args, **kwargs: pandas.DataFrame(
df.rolling(*rolling_args).kurt(**kwargs)
)
)
rolling_apply = FoldFunction.register(
lambda df, rolling_args, func, raw, engine, engine_kwargs, args, kwargs: pandas.DataFrame(
df.rolling(*rolling_args).apply(
func=func,
raw=raw,
engine=engine,
engine_kwargs=engine_kwargs,
args=args,
kwargs=kwargs,
)
)
)
rolling_quantile = FoldFunction.register(
lambda df, rolling_args, quantile, interpolation, **kwargs: pandas.DataFrame(
df.rolling(*rolling_args).quantile(
quantile=quantile, interpolation=interpolation, **kwargs
)
)
)
def rolling_corr(self, rolling_args, other, pairwise, *args, **kwargs):
if len(self.columns) > 1:
return self.default_to_pandas(
lambda df: pandas.DataFrame.rolling(df, *rolling_args).corr(
other=other, pairwise=pairwise, *args, **kwargs
)
)
else:
return FoldFunction.register(
lambda df: pandas.DataFrame(
df.rolling(*rolling_args).corr(
other=other, pairwise=pairwise, *args, **kwargs
)
)
)(self)
def rolling_cov(self, rolling_args, other, pairwise, ddof, **kwargs):
if len(self.columns) > 1:
return self.default_to_pandas(
lambda df: pandas.DataFrame.rolling(df, *rolling_args).cov(
other=other, pairwise=pairwise, ddof=ddof, **kwargs
)
)
else:
return FoldFunction.register(
lambda df: pandas.DataFrame(
df.rolling(*rolling_args).cov(
other=other, pairwise=pairwise, ddof=ddof, **kwargs
)
)
)(self)
def rolling_aggregate(self, rolling_args, func, *args, **kwargs):
new_modin_frame = self._modin_frame._apply_full_axis(
0,
lambda df: pandas.DataFrame(
df.rolling(*rolling_args).aggregate(func=func, *args, **kwargs)
),
new_index=self.index,
)
return self.__constructor__(new_modin_frame)
def unstack(self, level, fill_value):
if not isinstance(self.index, pandas.MultiIndex) or (
isinstance(self.index, pandas.MultiIndex)
and is_list_like(level)
and len(level) == self.index.nlevels
):
axis = 1
new_columns = ["__reduced__"]
need_reindex = True
else:
axis = 0
new_columns = None
need_reindex = False
def map_func(df):
return pandas.DataFrame(df.unstack(level=level, fill_value=fill_value))
def is_tree_like_or_1d(calc_index, valid_index):
if not isinstance(calc_index, pandas.MultiIndex):
return True
actual_len = 1
for lvl in calc_index.levels:
actual_len *= len(lvl)
return len(self.index) * len(self.columns) == actual_len * len(valid_index)
is_tree_like_or_1d_index = is_tree_like_or_1d(self.index, self.columns)
is_tree_like_or_1d_cols = is_tree_like_or_1d(self.columns, self.index)
is_all_multi_list = False
if (
isinstance(self.index, pandas.MultiIndex)
and isinstance(self.columns, pandas.MultiIndex)
and is_list_like(level)
and len(level) == self.index.nlevels
and is_tree_like_or_1d_index
and is_tree_like_or_1d_cols
):
is_all_multi_list = True
real_cols_bkp = self.columns
obj = self.copy()
obj.columns = np.arange(len(obj.columns))
else:
obj = self
new_modin_frame = obj._modin_frame._apply_full_axis(
axis, map_func, new_columns=new_columns
)
result = self.__constructor__(new_modin_frame)
def compute_index(index, columns, consider_index=True, consider_columns=True):
def get_unique_level_values(index):
return [
index.get_level_values(lvl).unique()
for lvl in np.arange(index.nlevels)
]
new_index = (
get_unique_level_values(index)
if consider_index
else index
if isinstance(index, list)
else [index]
)
new_columns = (
get_unique_level_values(columns) if consider_columns else [columns]
)
return pandas.MultiIndex.from_product([*new_columns, *new_index])
if is_all_multi_list and is_tree_like_or_1d_index and is_tree_like_or_1d_cols:
result = result.sort_index()
index_level_values = [lvl for lvl in obj.index.levels]
result.index = compute_index(
index_level_values, real_cols_bkp, consider_index=False
)
return result
if need_reindex:
if is_tree_like_or_1d_index and is_tree_like_or_1d_cols:
is_recompute_index = isinstance(self.index, pandas.MultiIndex)
is_recompute_columns = not is_recompute_index and isinstance(
self.columns, pandas.MultiIndex
)
new_index = compute_index(
self.index, self.columns, is_recompute_index, is_recompute_columns
)
elif is_tree_like_or_1d_index != is_tree_like_or_1d_cols:
if isinstance(self.columns, pandas.MultiIndex) or not isinstance(
self.index, pandas.MultiIndex
):
return result
else:
index = (
self.index.sortlevel()[0]
if is_tree_like_or_1d_index
and not is_tree_like_or_1d_cols
and isinstance(self.index, pandas.MultiIndex)
else self.index
)
index = pandas.MultiIndex.from_tuples(
list(index) * len(self.columns)
)
columns = self.columns.repeat(len(self.index))
index_levels = [
index.get_level_values(i) for i in range(index.nlevels)
]
new_index = pandas.MultiIndex.from_arrays(
[columns] + index_levels,
names=self.columns.names + self.index.names,
)
else:
return result
result = result.reindex(0, new_index)
return result
def stack(self, level, dropna):
if not isinstance(self.columns, pandas.MultiIndex) or (
isinstance(self.columns, pandas.MultiIndex)
and is_list_like(level)
and len(level) == self.columns.nlevels
):
new_columns = ["__reduced__"]
else:
new_columns = None
new_modin_frame = self._modin_frame._apply_full_axis(
1,
lambda df: pandas.DataFrame(df.stack(level=level, dropna=dropna)),
new_columns=new_columns,
)
return self.__constructor__(new_modin_frame)
# Map partitions operations
# These operations are operations that apply a function to every partition.
abs = MapFunction.register(pandas.DataFrame.abs, dtypes="copy")
applymap = MapFunction.register(pandas.DataFrame.applymap)
conj = MapFunction.register(
lambda df, *args, **kwargs: pandas.DataFrame(np.conj(df))
)
invert = MapFunction.register(pandas.DataFrame.__invert__)
isin = MapFunction.register(pandas.DataFrame.isin, dtypes=np.bool)
isna = MapFunction.register(pandas.DataFrame.isna, dtypes=np.bool)
negative = MapFunction.register(pandas.DataFrame.__neg__)
notna = MapFunction.register(pandas.DataFrame.notna, dtypes=np.bool)
round = MapFunction.register(pandas.DataFrame.round)
replace = MapFunction.register(pandas.DataFrame.replace)
series_view = MapFunction.register(
lambda df, *args, **kwargs: pandas.DataFrame(
df.squeeze(axis=1).view(*args, **kwargs)
)
)
to_numeric = MapFunction.register(
lambda df, *args, **kwargs: pandas.DataFrame(
pandas.to_numeric(df.squeeze(axis=1), *args, **kwargs)
)
)
def repeat(self, repeats):
def map_fn(df):
return pandas.DataFrame(df.squeeze(axis=1).repeat(repeats))
if isinstance(repeats, int) or (is_list_like(repeats) and len(repeats) == 1):
return MapFunction.register(map_fn, validate_index=True)(self)
else:
return self.__constructor__(self._modin_frame._apply_full_axis(0, map_fn))
# END Map partitions operations
# String map partitions operations
str_capitalize = MapFunction.register(_str_map("capitalize"), dtypes="copy")
str_center = MapFunction.register(_str_map("center"), dtypes="copy")
str_contains = MapFunction.register(_str_map("contains"), dtypes=np.bool)
str_count = MapFunction.register(_str_map("count"), dtypes=int)
str_endswith = MapFunction.register(_str_map("endswith"), dtypes=np.bool)
str_find = MapFunction.register(_str_map("find"), dtypes="copy")
str_findall = MapFunction.register(_str_map("findall"), dtypes="copy")
str_get = MapFunction.register(_str_map("get"), dtypes="copy")
str_index = MapFunction.register(_str_map("index"), dtypes="copy")
str_isalnum = MapFunction.register(_str_map("isalnum"), dtypes=np.bool)
str_isalpha = MapFunction.register(_str_map("isalpha"), dtypes=np.bool)
str_isdecimal = MapFunction.register(_str_map("isdecimal"), dtypes=np.bool)
str_isdigit = MapFunction.register(_str_map("isdigit"), dtypes=np.bool)
str_islower = MapFunction.register(_str_map("islower"), dtypes=np.bool)
str_isnumeric = MapFunction.register(_str_map("isnumeric"), dtypes=np.bool)
str_isspace = MapFunction.register(_str_map("isspace"), dtypes=np.bool)
str_istitle = MapFunction.register(_str_map("istitle"), dtypes=np.bool)
str_isupper = MapFunction.register(_str_map("isupper"), dtypes=np.bool)
str_join = MapFunction.register(_str_map("join"), dtypes="copy")
str_len = MapFunction.register(_str_map("len"), dtypes=int)
str_ljust = MapFunction.register(_str_map("ljust"), dtypes="copy")
str_lower = MapFunction.register(_str_map("lower"), dtypes="copy")
str_lstrip = MapFunction.register(_str_map("lstrip"), dtypes="copy")
str_match = MapFunction.register(_str_map("match"), dtypes="copy")
str_normalize = MapFunction.register(_str_map("normalize"), dtypes="copy")
str_pad = MapFunction.register(_str_map("pad"), dtypes="copy")
str_partition = MapFunction.register(_str_map("partition"), dtypes="copy")
str_repeat = MapFunction.register(_str_map("repeat"), dtypes="copy")
str_replace = MapFunction.register(_str_map("replace"), dtypes="copy")
str_rfind = MapFunction.register(_str_map("rfind"), dtypes="copy")
str_rindex = MapFunction.register(_str_map("rindex"), dtypes="copy")
str_rjust = MapFunction.register(_str_map("rjust"), dtypes="copy")
str_rpartition = MapFunction.register(_str_map("rpartition"), dtypes="copy")
str_rsplit = MapFunction.register(_str_map("rsplit"), dtypes="copy")
str_rstrip = MapFunction.register(_str_map("rstrip"), dtypes="copy")
str_slice = MapFunction.register(_str_map("slice"), dtypes="copy")
str_slice_replace = MapFunction.register(_str_map("slice_replace"), dtypes="copy")
str_split = MapFunction.register(_str_map("split"), dtypes="copy")
str_startswith = MapFunction.register(_str_map("startswith"), dtypes=np.bool)
str_strip = MapFunction.register(_str_map("strip"), dtypes="copy")
str_swapcase = MapFunction.register(_str_map("swapcase"), dtypes="copy")
str_title = MapFunction.register(_str_map("title"), dtypes="copy")
str_translate = MapFunction.register(_str_map("translate"), dtypes="copy")
str_upper = MapFunction.register(_str_map("upper"), dtypes="copy")
str_wrap = MapFunction.register(_str_map("wrap"), dtypes="copy")
str_zfill = MapFunction.register(_str_map("zfill"), dtypes="copy")
# END String map partitions operations
def unique(self):
"""Return unique values of Series object.
Returns
-------
        PandasQueryCompiler
            A new query compiler holding the unique values; the frontend
            converts it into a NumPy array.
"""
new_modin_frame = self._modin_frame._apply_full_axis(
0,
lambda x: x.squeeze(axis=1).unique(),
new_columns=self.columns,
)
return self.__constructor__(new_modin_frame)
def searchsorted(self, **kwargs):
"""
        Return a QueryCompiler holding the indices at which the given value(s)
        should be inserted to maintain the order of the passed Series.
Returns
-------
PandasQueryCompiler
"""
def map_func(part, *args, **kwargs):
elements_number = len(part.index)
assert elements_number > 0, "Wrong mapping behaviour of MapReduce"
# unify value type
value = kwargs.pop("value")
value = np.array([value]) if is_scalar(value) else value
if elements_number == 1:
part = part[part.columns[0]]
else:
part = part.squeeze()
part_index_start = part.index.start
part_index_stop = part.index.stop
result = part.searchsorted(value=value, *args, **kwargs)
processed_results = {}
value_number = 0
for value_result in result:
value_result += part_index_start
if value_result > part_index_start and value_result < part_index_stop:
processed_results[f"value{value_number}"] = {
"relative_location": "current_partition",
"index": value_result,
}
elif value_result <= part_index_start:
                    processed_results[f"value{value_number}"] = {
                        "relative_location": "previous_partitions",
"index": part_index_start,
}
else:
processed_results[f"value{value_number}"] = {
"relative_location": "next_partitions",
"index": part_index_stop,
}
value_number += 1
return pandas.DataFrame(processed_results)
def reduce_func(map_results, *args, **kwargs):
def get_value_index(value_result):
value_result_grouped = value_result.groupby(level=0)
rel_location = value_result_grouped.get_group("relative_location")
ind = value_result_grouped.get_group("index")
# executes if result is inside of the mapped part
if "current_partition" in rel_location.values:
assert (
rel_location[rel_location == "current_partition"].count() == 1
), "Each value should have single result"
return ind[rel_location.values == "current_partition"]
# executes if result is between mapped parts
elif rel_location.nunique(dropna=False) > 1:
                    return ind[rel_location.values == "previous_partitions"][0]
# executes if result is outside of the mapped part
else:
if "next_partitions" in rel_location.values:
return ind[-1]
else:
return ind[0]
map_results_parsed = map_results.apply(
lambda ser: get_value_index(ser)
).squeeze()
if isinstance(map_results_parsed, pandas.Series):
map_results_parsed = map_results_parsed.to_list()
return pandas.Series(map_results_parsed)
return MapReduceFunction.register(map_func, reduce_func, preserve_index=False)(
self, **kwargs
)
# Dt map partitions operations
dt_date = MapFunction.register(_dt_prop_map("date"))
dt_time = MapFunction.register(_dt_prop_map("time"))
dt_timetz = MapFunction.register(_dt_prop_map("timetz"))
dt_year = MapFunction.register(_dt_prop_map("year"))
dt_month = MapFunction.register(_dt_prop_map("month"))
dt_day = MapFunction.register(_dt_prop_map("day"))
dt_hour = MapFunction.register(_dt_prop_map("hour"))
dt_minute = MapFunction.register(_dt_prop_map("minute"))
dt_second = MapFunction.register(_dt_prop_map("second"))
dt_microsecond = MapFunction.register(_dt_prop_map("microsecond"))
dt_nanosecond = MapFunction.register(_dt_prop_map("nanosecond"))
dt_week = MapFunction.register(_dt_prop_map("week"))
dt_weekofyear = MapFunction.register(_dt_prop_map("weekofyear"))
dt_dayofweek = MapFunction.register(_dt_prop_map("dayofweek"))
dt_weekday = MapFunction.register(_dt_prop_map("weekday"))
dt_dayofyear = MapFunction.register(_dt_prop_map("dayofyear"))
dt_quarter = MapFunction.register(_dt_prop_map("quarter"))
dt_is_month_start = MapFunction.register(_dt_prop_map("is_month_start"))
dt_is_month_end = MapFunction.register(_dt_prop_map("is_month_end"))
dt_is_quarter_start = MapFunction.register(_dt_prop_map("is_quarter_start"))
dt_is_quarter_end = MapFunction.register(_dt_prop_map("is_quarter_end"))
dt_is_year_start = MapFunction.register(_dt_prop_map("is_year_start"))
dt_is_year_end = MapFunction.register(_dt_prop_map("is_year_end"))
dt_is_leap_year = MapFunction.register(_dt_prop_map("is_leap_year"))
dt_daysinmonth = MapFunction.register(_dt_prop_map("daysinmonth"))
dt_days_in_month = MapFunction.register(_dt_prop_map("days_in_month"))
dt_tz = MapReduceFunction.register(
_dt_prop_map("tz"), lambda df: pandas.DataFrame(df.iloc[0]), axis=0
)
dt_freq = MapReduceFunction.register(
_dt_prop_map("freq"), lambda df: pandas.DataFrame(df.iloc[0]), axis=0
)
dt_to_period = MapFunction.register(_dt_func_map("to_period"))
dt_to_pydatetime = MapFunction.register(_dt_func_map("to_pydatetime"))
dt_tz_localize = MapFunction.register(_dt_func_map("tz_localize"))
dt_tz_convert = MapFunction.register(_dt_func_map("tz_convert"))
dt_normalize = MapFunction.register(_dt_func_map("normalize"))
dt_strftime = MapFunction.register(_dt_func_map("strftime"))
dt_round = MapFunction.register(_dt_func_map("round"))
dt_floor = MapFunction.register(_dt_func_map("floor"))
dt_ceil = MapFunction.register(_dt_func_map("ceil"))
dt_month_name = MapFunction.register(_dt_func_map("month_name"))
dt_day_name = MapFunction.register(_dt_func_map("day_name"))
dt_to_pytimedelta = MapFunction.register(_dt_func_map("to_pytimedelta"))
dt_total_seconds = MapFunction.register(_dt_func_map("total_seconds"))
dt_seconds = MapFunction.register(_dt_prop_map("seconds"))
dt_days = MapFunction.register(_dt_prop_map("days"))
dt_microseconds = MapFunction.register(_dt_prop_map("microseconds"))
dt_nanoseconds = MapFunction.register(_dt_prop_map("nanoseconds"))
dt_components = MapFunction.register(
_dt_prop_map("components"), validate_columns=True
)
dt_qyear = MapFunction.register(_dt_prop_map("qyear"))
dt_start_time = MapFunction.register(_dt_prop_map("start_time"))
dt_end_time = MapFunction.register(_dt_prop_map("end_time"))
dt_to_timestamp = MapFunction.register(_dt_func_map("to_timestamp"))
# END Dt map partitions operations
def astype(self, col_dtypes, **kwargs):
"""Converts columns dtypes to given dtypes.
Args:
col_dtypes: Dictionary of {col: dtype,...} where col is the column
name and dtype is a numpy dtype.
Returns:
DataFrame with updated dtypes.
"""
return self.__constructor__(self._modin_frame.astype(col_dtypes))
# Column/Row partitions reduce operations
def first_valid_index(self):
"""Returns index of first non-NaN/NULL value.
Return:
            The index label of the first non-NaN/NULL value.
"""
def first_valid_index_builder(df):
return df.set_axis(
pandas.RangeIndex(len(df.index)), axis="index", inplace=False
).apply(lambda df: df.first_valid_index())
# We get the minimum from each column, then take the min of that to get
# first_valid_index. The `to_pandas()` here is just for a single value and
# `squeeze` will convert it to a scalar.
first_result = (
self.__constructor__(
self._modin_frame._fold_reduce(0, first_valid_index_builder)
)
.min(axis=1)
.to_pandas()
.squeeze()
)
return self.index[first_result]
def last_valid_index(self):
"""Returns index of last non-NaN/NULL value.
Return:
            The index label of the last non-NaN/NULL value.
"""
def last_valid_index_builder(df):
return df.set_axis(
pandas.RangeIndex(len(df.index)), axis="index", inplace=False
).apply(lambda df: df.last_valid_index())
# We get the maximum from each column, then take the max of that to get
# last_valid_index. The `to_pandas()` here is just for a single value and
# `squeeze` will convert it to a scalar.
first_result = (
self.__constructor__(
self._modin_frame._fold_reduce(0, last_valid_index_builder)
)
.max(axis=1)
.to_pandas()
.squeeze()
)
return self.index[first_result]
# END Column/Row partitions reduce operations
# Column/Row partitions reduce operations over select indices
#
# These operations result in a reduced dimensionality of data.
# This will return a new QueryCompiler object which the front end will handle.
def describe(self, **kwargs):
"""Generates descriptive statistics.
Returns:
DataFrame object containing the descriptive statistics of the DataFrame.
"""
# Use pandas to calculate the correct columns
empty_df = (
pandas.DataFrame(columns=self.columns)
.astype(self.dtypes)
.describe(**kwargs)
)
def describe_builder(df, internal_indices=[]):
return df.iloc[:, internal_indices].describe(**kwargs)
return self.__constructor__(
self._modin_frame._apply_full_axis_select_indices(
0,
describe_builder,
empty_df.columns,
new_index=empty_df.index,
new_columns=empty_df.columns,
)
)
# END Column/Row partitions reduce operations over select indices
# Map across rows/columns
# These operations require some global knowledge of the full column/row
# that is being operated on. This means that we have to put all of that
# data in the same place.
cummax = FoldFunction.register(pandas.DataFrame.cummax)
cummin = FoldFunction.register(pandas.DataFrame.cummin)
cumsum = FoldFunction.register(pandas.DataFrame.cumsum)
cumprod = FoldFunction.register(pandas.DataFrame.cumprod)
diff = FoldFunction.register(pandas.DataFrame.diff)
def clip(self, lower, upper, **kwargs):
kwargs["upper"] = upper
kwargs["lower"] = lower
axis = kwargs.get("axis", 0)
if is_list_like(lower) or is_list_like(upper):
new_modin_frame = self._modin_frame._fold(
axis, lambda df: df.clip(**kwargs)
)
else:
new_modin_frame = self._modin_frame._map(lambda df: df.clip(**kwargs))
return self.__constructor__(new_modin_frame)
def dot(self, other, squeeze_self=None, squeeze_other=None):
"""
Computes the matrix multiplication of self and other.
Parameters
----------
other : PandasQueryCompiler or NumPy array
The other query compiler or NumPy array to matrix multiply with self.
squeeze_self : boolean
The flag to squeeze self.
squeeze_other : boolean
The flag to squeeze other (this flag is applied if other is query compiler).
Returns
-------
PandasQueryCompiler
A new query compiler that contains result of the matrix multiply.
"""
if isinstance(other, PandasQueryCompiler):
other = (
other.to_pandas().squeeze(axis=1)
if squeeze_other
else other.to_pandas()
)
def map_func(df, other=other, squeeze_self=squeeze_self):
result = df.squeeze(axis=1).dot(other) if squeeze_self else df.dot(other)
if | is_list_like(result) | pandas.core.dtypes.common.is_list_like |
# -*- coding: utf-8 -*-
from datetime import timedelta
import operator
from string import ascii_lowercase
import warnings
import numpy as np
import pytest
from pandas.compat import lrange
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
Categorical, DataFrame, MultiIndex, Series, Timestamp, date_range, isna,
notna, to_datetime, to_timedelta)
import pandas.core.algorithms as algorithms
import pandas.core.nanops as nanops
import pandas.util.testing as tm
def assert_stat_op_calc(opname, alternative, frame, has_skipna=True,
check_dtype=True, check_dates=False,
check_less_precise=False, skipna_alternative=None):
"""
Check that operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
alternative : function
Function that opname is tested against; i.e. "frame.opname()" should
equal "alternative(frame)".
frame : DataFrame
The object that the tests are executed on
has_skipna : bool, default True
        Whether the method "opname" has the kwarg "skipna"
check_dtype : bool, default True
Whether the dtypes of the result of "frame.opname()" and
"alternative(frame)" should be checked.
check_dates : bool, default false
Whether opname should be tested on a Datetime Series
check_less_precise : bool, default False
Whether results should only be compared approximately;
passed on to tm.assert_series_equal
skipna_alternative : function, default None
NaN-safe version of alternative
"""
f = getattr(frame, opname)
if check_dates:
df = DataFrame({'b': date_range('1/1/2001', periods=2)})
result = getattr(df, opname)()
assert isinstance(result, Series)
df['a'] = lrange(len(df))
result = getattr(df, opname)()
assert isinstance(result, Series)
assert len(result)
if has_skipna:
def wrapper(x):
return alternative(x.values)
skipna_wrapper = tm._make_skipna_wrapper(alternative,
skipna_alternative)
result0 = f(axis=0, skipna=False)
result1 = f(axis=1, skipna=False)
tm.assert_series_equal(result0, frame.apply(wrapper),
check_dtype=check_dtype,
check_less_precise=check_less_precise)
# HACK: win32
tm.assert_series_equal(result1, frame.apply(wrapper, axis=1),
check_dtype=False,
check_less_precise=check_less_precise)
else:
skipna_wrapper = alternative
result0 = f(axis=0)
result1 = f(axis=1)
tm.assert_series_equal(result0, frame.apply(skipna_wrapper),
check_dtype=check_dtype,
check_less_precise=check_less_precise)
if opname in ['sum', 'prod']:
expected = frame.apply(skipna_wrapper, axis=1)
tm.assert_series_equal(result1, expected, check_dtype=False,
check_less_precise=check_less_precise)
# check dtypes
if check_dtype:
lcd_dtype = frame.values.dtype
assert lcd_dtype == result0.dtype
assert lcd_dtype == result1.dtype
# bad axis
with pytest.raises(ValueError, match='No axis named 2'):
f(axis=2)
# all NA case
if has_skipna:
all_na = frame * np.NaN
r0 = getattr(all_na, opname)(axis=0)
r1 = getattr(all_na, opname)(axis=1)
if opname in ['sum', 'prod']:
unit = 1 if opname == 'prod' else 0 # result for empty sum/prod
expected = pd.Series(unit, index=r0.index, dtype=r0.dtype)
tm.assert_series_equal(r0, expected)
expected = pd.Series(unit, index=r1.index, dtype=r1.dtype)
tm.assert_series_equal(r1, expected)
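# A hypothetical invocation (not taken from this file) would look like:
#   assert_stat_op_calc('sum', np.sum, float_frame, has_skipna=True)
# i.e. frame.sum() is checked column-wise and row-wise against np.sum applied
# to the underlying values, with and without skipna.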
def assert_stat_op_api(opname, float_frame, float_string_frame,
has_numeric_only=False):
"""
Check that API for operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
float_frame : DataFrame
DataFrame with columns of type float
float_string_frame : DataFrame
DataFrame with both float and string columns
has_numeric_only : bool, default False
Whether the method "opname" has the kwarg "numeric_only"
"""
# make sure works on mixed-type frame
getattr(float_string_frame, opname)(axis=0)
getattr(float_string_frame, opname)(axis=1)
if has_numeric_only:
getattr(float_string_frame, opname)(axis=0, numeric_only=True)
getattr(float_string_frame, opname)(axis=1, numeric_only=True)
getattr(float_frame, opname)(axis=0, numeric_only=False)
getattr(float_frame, opname)(axis=1, numeric_only=False)
def assert_bool_op_calc(opname, alternative, frame, has_skipna=True):
"""
Check that bool operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
alternative : function
Function that opname is tested against; i.e. "frame.opname()" should
equal "alternative(frame)".
frame : DataFrame
The object that the tests are executed on
has_skipna : bool, default True
        Whether the method "opname" has the kwarg "skipna"
"""
f = getattr(frame, opname)
if has_skipna:
def skipna_wrapper(x):
nona = x.dropna().values
return alternative(nona)
def wrapper(x):
return alternative(x.values)
result0 = f(axis=0, skipna=False)
result1 = f(axis=1, skipna=False)
tm.assert_series_equal(result0, frame.apply(wrapper))
tm.assert_series_equal(result1, frame.apply(wrapper, axis=1),
check_dtype=False) # HACK: win32
else:
skipna_wrapper = alternative
wrapper = alternative
result0 = f(axis=0)
result1 = f(axis=1)
tm.assert_series_equal(result0, frame.apply(skipna_wrapper))
tm.assert_series_equal(result1, frame.apply(skipna_wrapper, axis=1),
check_dtype=False)
# bad axis
with pytest.raises(ValueError, match='No axis named 2'):
f(axis=2)
# all NA case
if has_skipna:
all_na = frame * np.NaN
r0 = getattr(all_na, opname)(axis=0)
r1 = getattr(all_na, opname)(axis=1)
if opname == 'any':
assert not r0.any()
assert not r1.any()
else:
assert r0.all()
assert r1.all()
def assert_bool_op_api(opname, bool_frame_with_na, float_string_frame,
has_bool_only=False):
"""
Check that API for boolean operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
    bool_frame_with_na : DataFrame
        DataFrame with boolean columns and some NA entries
float_string_frame : DataFrame
DataFrame with both float and string columns
has_bool_only : bool, default False
Whether the method "opname" has the kwarg "bool_only"
"""
# make sure op works on mixed-type frame
mixed = float_string_frame
mixed['_bool_'] = np.random.randn(len(mixed)) > 0.5
getattr(mixed, opname)(axis=0)
getattr(mixed, opname)(axis=1)
if has_bool_only:
getattr(mixed, opname)(axis=0, bool_only=True)
getattr(mixed, opname)(axis=1, bool_only=True)
getattr(bool_frame_with_na, opname)(axis=0, bool_only=False)
getattr(bool_frame_with_na, opname)(axis=1, bool_only=False)
class TestDataFrameAnalytics(object):
# ---------------------------------------------------------------------
# Correlation and covariance
@td.skip_if_no_scipy
def test_corr_pearson(self, float_frame):
float_frame['A'][:5] = np.nan
float_frame['B'][5:10] = np.nan
self._check_method(float_frame, 'pearson')
@td.skip_if_no_scipy
def test_corr_kendall(self, float_frame):
float_frame['A'][:5] = np.nan
float_frame['B'][5:10] = np.nan
self._check_method(float_frame, 'kendall')
@td.skip_if_no_scipy
def test_corr_spearman(self, float_frame):
float_frame['A'][:5] = np.nan
float_frame['B'][5:10] = np.nan
self._check_method(float_frame, 'spearman')
def _check_method(self, frame, method='pearson'):
correls = frame.corr(method=method)
expected = frame['A'].corr(frame['C'], method=method)
tm.assert_almost_equal(correls['A']['C'], expected)
@td.skip_if_no_scipy
def test_corr_non_numeric(self, float_frame, float_string_frame):
float_frame['A'][:5] = np.nan
float_frame['B'][5:10] = np.nan
# exclude non-numeric types
result = float_string_frame.corr()
expected = float_string_frame.loc[:, ['A', 'B', 'C', 'D']].corr()
tm.assert_frame_equal(result, expected)
@td.skip_if_no_scipy
@pytest.mark.parametrize('meth', ['pearson', 'kendall', 'spearman'])
def test_corr_nooverlap(self, meth):
# nothing in common
df = DataFrame({'A': [1, 1.5, 1, np.nan, np.nan, np.nan],
'B': [np.nan, np.nan, np.nan, 1, 1.5, 1],
'C': [np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan]})
rs = df.corr(meth)
assert isna(rs.loc['A', 'B'])
assert isna(rs.loc['B', 'A'])
assert rs.loc['A', 'A'] == 1
assert rs.loc['B', 'B'] == 1
assert isna(rs.loc['C', 'C'])
@td.skip_if_no_scipy
@pytest.mark.parametrize('meth', ['pearson', 'spearman'])
def test_corr_constant(self, meth):
# constant --> all NA
df = DataFrame({'A': [1, 1, 1, np.nan, np.nan, np.nan],
'B': [np.nan, np.nan, np.nan, 1, 1, 1]})
rs = df.corr(meth)
assert isna(rs.values).all()
def test_corr_int(self):
# dtypes other than float64 #1761
df3 = DataFrame({"a": [1, 2, 3, 4], "b": [1, 2, 3, 4]})
df3.cov()
df3.corr()
@td.skip_if_no_scipy
def test_corr_int_and_boolean(self):
# when dtypes of pandas series are different
# then ndarray will have dtype=object,
# so it need to be properly handled
df = DataFrame({"a": [True, False], "b": [1, 0]})
expected = DataFrame(np.ones((2, 2)), index=[
'a', 'b'], columns=['a', 'b'])
for meth in ['pearson', 'kendall', 'spearman']:
with warnings.catch_warnings(record=True):
warnings.simplefilter("ignore", RuntimeWarning)
result = df.corr(meth)
tm.assert_frame_equal(result, expected)
def test_corr_cov_independent_index_column(self):
# GH 14617
df = pd.DataFrame(np.random.randn(4 * 10).reshape(10, 4),
columns=list("abcd"))
for method in ['cov', 'corr']:
result = getattr(df, method)()
assert result.index is not result.columns
assert result.index.equals(result.columns)
def test_corr_invalid_method(self):
# GH 22298
df = pd.DataFrame(np.random.normal(size=(10, 2)))
msg = ("method must be either 'pearson', "
"'spearman', 'kendall', or a callable, ")
with pytest.raises(ValueError, match=msg):
df.corr(method="____")
def test_cov(self, float_frame, float_string_frame):
# min_periods no NAs (corner case)
expected = float_frame.cov()
result = float_frame.cov(min_periods=len(float_frame))
tm.assert_frame_equal(expected, result)
result = float_frame.cov(min_periods=len(float_frame) + 1)
assert isna(result.values).all()
# with NAs
frame = float_frame.copy()
frame['A'][:5] = np.nan
frame['B'][5:10] = np.nan
        result = frame.cov(min_periods=len(frame) - 8)
        expected = frame.cov()
        expected.loc['A', 'B'] = np.nan
        expected.loc['B', 'A'] = np.nan
        tm.assert_frame_equal(result, expected)
# regular
float_frame['A'][:5] = np.nan
float_frame['B'][:10] = np.nan
cov = float_frame.cov()
tm.assert_almost_equal(cov['A']['C'],
float_frame['A'].cov(float_frame['C']))
# exclude non-numeric types
result = float_string_frame.cov()
expected = float_string_frame.loc[:, ['A', 'B', 'C', 'D']].cov()
tm.assert_frame_equal(result, expected)
# Single column frame
df = DataFrame(np.linspace(0.0, 1.0, 10))
result = df.cov()
expected = DataFrame(np.cov(df.values.T).reshape((1, 1)),
index=df.columns, columns=df.columns)
tm.assert_frame_equal(result, expected)
df.loc[0] = np.nan
result = df.cov()
expected = DataFrame(np.cov(df.values[1:].T).reshape((1, 1)),
index=df.columns, columns=df.columns)
tm.assert_frame_equal(result, expected)
def test_corrwith(self, datetime_frame):
a = datetime_frame
noise = Series(np.random.randn(len(a)), index=a.index)
b = datetime_frame.add(noise, axis=0)
# make sure order does not matter
b = b.reindex(columns=b.columns[::-1], index=b.index[::-1][10:])
del b['B']
colcorr = a.corrwith(b, axis=0)
tm.assert_almost_equal(colcorr['A'], a['A'].corr(b['A']))
rowcorr = a.corrwith(b, axis=1)
tm.assert_series_equal(rowcorr, a.T.corrwith(b.T, axis=0))
dropped = a.corrwith(b, axis=0, drop=True)
tm.assert_almost_equal(dropped['A'], a['A'].corr(b['A']))
assert 'B' not in dropped
dropped = a.corrwith(b, axis=1, drop=True)
assert a.index[-1] not in dropped.index
# non time-series data
index = ['a', 'b', 'c', 'd', 'e']
columns = ['one', 'two', 'three', 'four']
df1 = DataFrame(np.random.randn(5, 4), index=index, columns=columns)
df2 = DataFrame(np.random.randn(4, 4),
index=index[:4], columns=columns)
correls = df1.corrwith(df2, axis=1)
for row in index[:4]:
tm.assert_almost_equal(correls[row],
df1.loc[row].corr(df2.loc[row]))
def test_corrwith_with_objects(self):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame()
cols = ['A', 'B', 'C', 'D']
df1['obj'] = 'foo'
df2['obj'] = 'bar'
result = df1.corrwith(df2)
expected = df1.loc[:, cols].corrwith(df2.loc[:, cols])
tm.assert_series_equal(result, expected)
result = df1.corrwith(df2, axis=1)
expected = df1.loc[:, cols].corrwith(df2.loc[:, cols], axis=1)
tm.assert_series_equal(result, expected)
def test_corrwith_series(self, datetime_frame):
result = datetime_frame.corrwith(datetime_frame['A'])
expected = datetime_frame.apply(datetime_frame['A'].corr)
tm.assert_series_equal(result, expected)
def test_corrwith_matches_corrcoef(self):
df1 = DataFrame(np.arange(10000), columns=['a'])
df2 = DataFrame(np.arange(10000) ** 2, columns=['a'])
c1 = df1.corrwith(df2)['a']
c2 = np.corrcoef(df1['a'], df2['a'])[0][1]
tm.assert_almost_equal(c1, c2)
assert c1 < 1
def test_corrwith_mixed_dtypes(self):
# GH 18570
df = pd.DataFrame({'a': [1, 4, 3, 2], 'b': [4, 6, 7, 3],
'c': ['a', 'b', 'c', 'd']})
s = pd.Series([0, 6, 7, 3])
result = df.corrwith(s)
corrs = [df['a'].corr(s), df['b'].corr(s)]
expected = pd.Series(data=corrs, index=['a', 'b'])
tm.assert_series_equal(result, expected)
def test_corrwith_index_intersection(self):
df1 = pd.DataFrame(np.random.random(size=(10, 2)),
columns=["a", "b"])
df2 = pd.DataFrame(np.random.random(size=(10, 3)),
columns=["a", "b", "c"])
result = df1.corrwith(df2, drop=True).index.sort_values()
expected = df1.columns.intersection(df2.columns).sort_values()
tm.assert_index_equal(result, expected)
def test_corrwith_index_union(self):
df1 = pd.DataFrame(np.random.random(size=(10, 2)),
columns=["a", "b"])
df2 = pd.DataFrame(np.random.random(size=(10, 3)),
columns=["a", "b", "c"])
result = df1.corrwith(df2, drop=False).index.sort_values()
expected = df1.columns.union(df2.columns).sort_values()
tm.assert_index_equal(result, expected)
def test_corrwith_dup_cols(self):
# GH 21925
df1 = pd.DataFrame(np.vstack([np.arange(10)] * 3).T)
df2 = df1.copy()
df2 = pd.concat((df2, df2[0]), axis=1)
result = df1.corrwith(df2)
expected = pd.Series(np.ones(4), index=[0, 0, 1, 2])
tm.assert_series_equal(result, expected)
@td.skip_if_no_scipy
def test_corrwith_spearman(self):
# GH 21925
df = pd.DataFrame(np.random.random(size=(100, 3)))
result = df.corrwith(df**2, method="spearman")
expected = Series(np.ones(len(result)))
tm.assert_series_equal(result, expected)
@td.skip_if_no_scipy
def test_corrwith_kendall(self):
# GH 21925
df = pd.DataFrame(np.random.random(size=(100, 3)))
result = df.corrwith(df**2, method="kendall")
expected = Series(np.ones(len(result)))
tm.assert_series_equal(result, expected)
# ---------------------------------------------------------------------
# Describe
def test_bool_describe_in_mixed_frame(self):
df = DataFrame({
'string_data': ['a', 'b', 'c', 'd', 'e'],
'bool_data': [True, True, False, False, False],
'int_data': [10, 20, 30, 40, 50],
})
# Integer data are included in .describe() output,
# Boolean and string data are not.
result = df.describe()
expected = DataFrame({'int_data': [5, 30, df.int_data.std(),
10, 20, 30, 40, 50]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
tm.assert_frame_equal(result, expected)
# Top value is a boolean value that is False
result = df.describe(include=['bool'])
expected = DataFrame({'bool_data': [5, 2, False, 3]},
index=['count', 'unique', 'top', 'freq'])
tm.assert_frame_equal(result, expected)
def test_describe_bool_frame(self):
# GH 13891
df = pd.DataFrame({
'bool_data_1': [False, False, True, True],
'bool_data_2': [False, True, True, True]
})
result = df.describe()
expected = DataFrame({'bool_data_1': [4, 2, True, 2],
'bool_data_2': [4, 2, True, 3]},
index=['count', 'unique', 'top', 'freq'])
tm.assert_frame_equal(result, expected)
df = pd.DataFrame({
'bool_data': [False, False, True, True, False],
'int_data': [0, 1, 2, 3, 4]
})
result = df.describe()
expected = DataFrame({'int_data': [5, 2, df.int_data.std(), 0, 1,
2, 3, 4]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
tm.assert_frame_equal(result, expected)
df = pd.DataFrame({
'bool_data': [False, False, True, True],
'str_data': ['a', 'b', 'c', 'a']
})
result = df.describe()
expected = DataFrame({'bool_data': [4, 2, True, 2],
'str_data': [4, 3, 'a', 2]},
index=['count', 'unique', 'top', 'freq'])
tm.assert_frame_equal(result, expected)
def test_describe_categorical(self):
df = DataFrame({'value': np.random.randint(0, 10000, 100)})
labels = ["{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500)]
cat_labels = Categorical(labels, labels)
df = df.sort_values(by=['value'], ascending=True)
df['value_group'] = pd.cut(df.value, range(0, 10500, 500),
right=False, labels=cat_labels)
cat = df
# Categoricals should not show up together with numerical columns
result = cat.describe()
assert len(result.columns) == 1
# In a frame, describe() for the cat should be the same as for string
# arrays (count, unique, top, freq)
cat = Categorical(["a", "b", "b", "b"], categories=['a', 'b', 'c'],
ordered=True)
s = Series(cat)
result = s.describe()
expected = Series([4, 2, "b", 3],
index=['count', 'unique', 'top', 'freq'])
tm.assert_series_equal(result, expected)
cat = Series(Categorical(["a", "b", "c", "c"]))
df3 = DataFrame({"cat": cat, "s": ["a", "b", "c", "c"]})
result = df3.describe()
tm.assert_numpy_array_equal(result["cat"].values, result["s"].values)
def test_describe_categorical_columns(self):
# GH 11558
columns = pd.CategoricalIndex(['int1', 'int2', 'obj'],
ordered=True, name='XXX')
df = DataFrame({'int1': [10, 20, 30, 40, 50],
'int2': [10, 20, 30, 40, 50],
'obj': ['A', 0, None, 'X', 1]},
columns=columns)
result = df.describe()
exp_columns = pd.CategoricalIndex(['int1', 'int2'],
categories=['int1', 'int2', 'obj'],
ordered=True, name='XXX')
expected = DataFrame({'int1': [5, 30, df.int1.std(),
10, 20, 30, 40, 50],
'int2': [5, 30, df.int2.std(),
10, 20, 30, 40, 50]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'],
columns=exp_columns)
tm.assert_frame_equal(result, expected)
tm.assert_categorical_equal(result.columns.values,
expected.columns.values)
def test_describe_datetime_columns(self):
columns = pd.DatetimeIndex(['2011-01-01', '2011-02-01', '2011-03-01'],
freq='MS', tz='US/Eastern', name='XXX')
df = DataFrame({0: [10, 20, 30, 40, 50],
1: [10, 20, 30, 40, 50],
2: ['A', 0, None, 'X', 1]})
df.columns = columns
result = df.describe()
exp_columns = pd.DatetimeIndex(['2011-01-01', '2011-02-01'],
freq='MS', tz='US/Eastern', name='XXX')
expected = DataFrame({0: [5, 30, df.iloc[:, 0].std(),
10, 20, 30, 40, 50],
1: [5, 30, df.iloc[:, 1].std(),
10, 20, 30, 40, 50]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
expected.columns = exp_columns
tm.assert_frame_equal(result, expected)
assert result.columns.freq == 'MS'
assert result.columns.tz == expected.columns.tz
def test_describe_timedelta_values(self):
# GH 6145
t1 = pd.timedelta_range('1 days', freq='D', periods=5)
t2 = pd.timedelta_range('1 hours', freq='H', periods=5)
df = pd.DataFrame({'t1': t1, 't2': t2})
expected = DataFrame({'t1': [5, pd.Timedelta('3 days'),
df.iloc[:, 0].std(),
pd.Timedelta('1 days'),
pd.Timedelta('2 days'),
pd.Timedelta('3 days'),
pd.Timedelta('4 days'),
pd.Timedelta('5 days')],
't2': [5, pd.Timedelta('3 hours'),
df.iloc[:, 1].std(),
pd.Timedelta('1 hours'),
pd.Timedelta('2 hours'),
pd.Timedelta('3 hours'),
pd.Timedelta('4 hours'),
pd.Timedelta('5 hours')]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
result = df.describe()
tm.assert_frame_equal(result, expected)
exp_repr = (" t1 t2\n"
"count 5 5\n"
"mean 3 days 00:00:00 0 days 03:00:00\n"
"std 1 days 13:56:50.394919 0 days 01:34:52.099788\n"
"min 1 days 00:00:00 0 days 01:00:00\n"
"25% 2 days 00:00:00 0 days 02:00:00\n"
"50% 3 days 00:00:00 0 days 03:00:00\n"
"75% 4 days 00:00:00 0 days 04:00:00\n"
"max 5 days 00:00:00 0 days 05:00:00")
assert repr(result) == exp_repr
def test_describe_tz_values(self, tz_naive_fixture):
# GH 21332
tz = tz_naive_fixture
s1 = Series(range(5))
start = Timestamp(2018, 1, 1)
end = Timestamp(2018, 1, 5)
s2 = Series(date_range(start, end, tz=tz))
df = pd.DataFrame({'s1': s1, 's2': s2})
expected = DataFrame({'s1': [5, np.nan, np.nan, np.nan, np.nan, np.nan,
2, 1.581139, 0, 1, 2, 3, 4],
's2': [5, 5, s2.value_counts().index[0], 1,
start.tz_localize(tz),
end.tz_localize(tz), np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan]},
index=['count', 'unique', 'top', 'freq', 'first',
'last', 'mean', 'std', 'min', '25%', '50%',
'75%', 'max']
)
result = df.describe(include='all')
tm.assert_frame_equal(result, expected)
# ---------------------------------------------------------------------
# Reductions
def test_stat_op_api(self, float_frame, float_string_frame):
assert_stat_op_api('count', float_frame, float_string_frame,
has_numeric_only=True)
assert_stat_op_api('sum', float_frame, float_string_frame,
has_numeric_only=True)
assert_stat_op_api('nunique', float_frame, float_string_frame)
assert_stat_op_api('mean', float_frame, float_string_frame)
assert_stat_op_api('product', float_frame, float_string_frame)
assert_stat_op_api('median', float_frame, float_string_frame)
assert_stat_op_api('min', float_frame, float_string_frame)
assert_stat_op_api('max', float_frame, float_string_frame)
assert_stat_op_api('mad', float_frame, float_string_frame)
assert_stat_op_api('var', float_frame, float_string_frame)
assert_stat_op_api('std', float_frame, float_string_frame)
assert_stat_op_api('sem', float_frame, float_string_frame)
assert_stat_op_api('median', float_frame, float_string_frame)
try:
from scipy.stats import skew, kurtosis # noqa:F401
assert_stat_op_api('skew', float_frame, float_string_frame)
assert_stat_op_api('kurt', float_frame, float_string_frame)
except ImportError:
pass
def test_stat_op_calc(self, float_frame_with_na, mixed_float_frame):
def count(s):
return notna(s).sum()
def nunique(s):
return len(algorithms.unique1d(s.dropna()))
def mad(x):
return np.abs(x - x.mean()).mean()
def var(x):
return np.var(x, ddof=1)
def std(x):
return np.std(x, ddof=1)
def sem(x):
return np.std(x, ddof=1) / np.sqrt(len(x))
def skewness(x):
from scipy.stats import skew # noqa:F811
if len(x) < 3:
return np.nan
return skew(x, bias=False)
def kurt(x):
from scipy.stats import kurtosis # noqa:F811
if len(x) < 4:
return np.nan
return kurtosis(x, bias=False)
assert_stat_op_calc('nunique', nunique, float_frame_with_na,
has_skipna=False, check_dtype=False,
check_dates=True)
# mixed types (with upcasting happening)
assert_stat_op_calc('sum', np.sum, mixed_float_frame.astype('float32'),
check_dtype=False, check_less_precise=True)
assert_stat_op_calc('sum', np.sum, float_frame_with_na,
skipna_alternative=np.nansum)
assert_stat_op_calc('mean', np.mean, float_frame_with_na,
check_dates=True)
assert_stat_op_calc('product', np.prod, float_frame_with_na)
assert_stat_op_calc('mad', mad, float_frame_with_na)
assert_stat_op_calc('var', var, float_frame_with_na)
assert_stat_op_calc('std', std, float_frame_with_na)
assert_stat_op_calc('sem', sem, float_frame_with_na)
assert_stat_op_calc('count', count, float_frame_with_na,
has_skipna=False, check_dtype=False,
check_dates=True)
try:
            from scipy.stats import skew, kurtosis  # noqa:F401
assert_stat_op_calc('skew', skewness, float_frame_with_na)
assert_stat_op_calc('kurt', kurt, float_frame_with_na)
except ImportError:
pass
# TODO: Ensure warning isn't emitted in the first place
@pytest.mark.filterwarnings("ignore:All-NaN:RuntimeWarning")
def test_median(self, float_frame_with_na, int_frame):
def wrapper(x):
if isna(x).any():
return np.nan
return np.median(x)
assert_stat_op_calc('median', wrapper, float_frame_with_na,
check_dates=True)
assert_stat_op_calc('median', wrapper, int_frame, check_dtype=False,
check_dates=True)
@pytest.mark.parametrize('method', ['sum', 'mean', 'prod', 'var',
'std', 'skew', 'min', 'max'])
def test_stat_operators_attempt_obj_array(self, method):
# GH#676
data = {
'a': [-0.00049987540199591344, -0.0016467257772919831,
0.00067695870775883013],
'b': [-0, -0, 0.0],
'c': [0.00031111847529610595, 0.0014902627951905339,
-0.00094099200035979691]
}
df1 = DataFrame(data, index=['foo', 'bar', 'baz'], dtype='O')
df2 = DataFrame({0: [np.nan, 2], 1: [np.nan, 3],
2: [np.nan, 4]}, dtype=object)
for df in [df1, df2]:
assert df.values.dtype == np.object_
result = getattr(df, method)(1)
expected = getattr(df.astype('f8'), method)(1)
if method in ['sum', 'prod']:
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('op', ['mean', 'std', 'var',
'skew', 'kurt', 'sem'])
def test_mixed_ops(self, op):
# GH#16116
df = DataFrame({'int': [1, 2, 3, 4],
'float': [1., 2., 3., 4.],
'str': ['a', 'b', 'c', 'd']})
result = getattr(df, op)()
assert len(result) == 2
with pd.option_context('use_bottleneck', False):
result = getattr(df, op)()
assert len(result) == 2
def test_reduce_mixed_frame(self):
# GH 6806
df = DataFrame({
'bool_data': [True, True, False, False, False],
'int_data': [10, 20, 30, 40, 50],
'string_data': ['a', 'b', 'c', 'd', 'e'],
})
df.reindex(columns=['bool_data', 'int_data', 'string_data'])
test = df.sum(axis=0)
tm.assert_numpy_array_equal(test.values,
np.array([2, 150, 'abcde'], dtype=object))
tm.assert_series_equal(test, df.T.sum(axis=1))
def test_nunique(self):
df = DataFrame({'A': [1, 1, 1],
'B': [1, 2, 3],
'C': [1, np.nan, 3]})
tm.assert_series_equal(df.nunique(), Series({'A': 1, 'B': 3, 'C': 2}))
tm.assert_series_equal(df.nunique(dropna=False),
Series({'A': 1, 'B': 3, 'C': 3}))
tm.assert_series_equal(df.nunique(axis=1), Series({0: 1, 1: 2, 2: 2}))
tm.assert_series_equal(df.nunique(axis=1, dropna=False),
Series({0: 1, 1: 3, 2: 2}))
@pytest.mark.parametrize('tz', [None, 'UTC'])
def test_mean_mixed_datetime_numeric(self, tz):
# https://github.com/pandas-dev/pandas/issues/24752
df = pd.DataFrame({"A": [1, 1],
"B": [pd.Timestamp('2000', tz=tz)] * 2})
result = df.mean()
expected = pd.Series([1.0], index=['A'])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('tz', [None, 'UTC'])
    def test_mean_excludes_datetimes(self, tz):
# https://github.com/pandas-dev/pandas/issues/24752
# Our long-term desired behavior is unclear, but the behavior in
# 0.24.0rc1 was buggy.
df = pd.DataFrame({"A": [pd.Timestamp('2000', tz=tz)] * 2})
result = df.mean()
expected = pd.Series()
tm.assert_series_equal(result, expected)
def test_var_std(self, datetime_frame):
result = datetime_frame.std(ddof=4)
expected = datetime_frame.apply(lambda x: x.std(ddof=4))
tm.assert_almost_equal(result, expected)
result = datetime_frame.var(ddof=4)
expected = datetime_frame.apply(lambda x: x.var(ddof=4))
tm.assert_almost_equal(result, expected)
arr = np.repeat(np.random.random((1, 1000)), 1000, 0)
result = nanops.nanvar(arr, axis=0)
assert not (result < 0).any()
with pd.option_context('use_bottleneck', False):
result = nanops.nanvar(arr, axis=0)
assert not (result < 0).any()
@pytest.mark.parametrize(
"meth", ['sem', 'var', 'std'])
def test_numeric_only_flag(self, meth):
# GH 9201
df1 = DataFrame(np.random.randn(5, 3), columns=['foo', 'bar', 'baz'])
# set one entry to a number in str format
df1.loc[0, 'foo'] = '100'
df2 = DataFrame(np.random.randn(5, 3), columns=['foo', 'bar', 'baz'])
# set one entry to a non-number str
df2.loc[0, 'foo'] = 'a'
result = getattr(df1, meth)(axis=1, numeric_only=True)
expected = getattr(df1[['bar', 'baz']], meth)(axis=1)
tm.assert_series_equal(expected, result)
result = getattr(df2, meth)(axis=1, numeric_only=True)
expected = getattr(df2[['bar', 'baz']], meth)(axis=1)
tm.assert_series_equal(expected, result)
# df1 has all numbers, df2 has a letter inside
msg = r"unsupported operand type\(s\) for -: 'float' and 'str'"
with pytest.raises(TypeError, match=msg):
getattr(df1, meth)(axis=1, numeric_only=False)
msg = "could not convert string to float: 'a'"
with pytest.raises(TypeError, match=msg):
getattr(df2, meth)(axis=1, numeric_only=False)
def test_sem(self, datetime_frame):
result = datetime_frame.sem(ddof=4)
expected = datetime_frame.apply(
lambda x: x.std(ddof=4) / np.sqrt(len(x)))
tm.assert_almost_equal(result, expected)
arr = np.repeat(np.random.random((1, 1000)), 1000, 0)
result = nanops.nansem(arr, axis=0)
assert not (result < 0).any()
with pd.option_context('use_bottleneck', False):
result = nanops.nansem(arr, axis=0)
assert not (result < 0).any()
@td.skip_if_no_scipy
def test_kurt(self):
index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]],
codes=[[0, 0, 0, 0, 0, 0],
[0, 1, 2, 0, 1, 2],
[0, 1, 0, 1, 0, 1]])
df = DataFrame(np.random.randn(6, 3), index=index)
kurt = df.kurt()
kurt2 = df.kurt(level=0).xs('bar')
tm.assert_series_equal(kurt, kurt2, check_names=False)
assert kurt.name is None
assert kurt2.name == 'bar'
@pytest.mark.parametrize("dropna, expected", [
(True, {'A': [12],
'B': [10.0],
'C': [1.0],
'D': ['a'],
'E': Categorical(['a'], categories=['a']),
'F': to_datetime(['2000-1-2']),
'G': to_timedelta(['1 days'])}),
(False, {'A': [12],
'B': [10.0],
'C': [np.nan],
'D': np.array([np.nan], dtype=object),
'E': Categorical([np.nan], categories=['a']),
'F': [pd.NaT],
'G': to_timedelta([pd.NaT])}),
(True, {'H': [8, 9, np.nan, np.nan],
'I': [8, 9, np.nan, np.nan],
'J': [1, np.nan, np.nan, np.nan],
'K': Categorical(['a', np.nan, np.nan, np.nan],
categories=['a']),
'L': to_datetime(['2000-1-2', 'NaT', 'NaT', 'NaT']),
'M': to_timedelta(['1 days', 'nan', 'nan', 'nan']),
'N': [0, 1, 2, 3]}),
(False, {'H': [8, 9, np.nan, np.nan],
'I': [8, 9, np.nan, np.nan],
'J': [1, np.nan, np.nan, np.nan],
'K': Categorical([np.nan, 'a', np.nan, np.nan],
categories=['a']),
'L': to_datetime(['NaT', '2000-1-2', 'NaT', 'NaT']),
'M': to_timedelta(['nan', '1 days', 'nan', 'nan']),
'N': [0, 1, 2, 3]})
])
def test_mode_dropna(self, dropna, expected):
df = DataFrame({"A": [12, 12, 19, 11],
"B": [10, 10, np.nan, 3],
"C": [1, np.nan, np.nan, np.nan],
"D": [np.nan, np.nan, 'a', np.nan],
"E": Categorical([np.nan, np.nan, 'a', np.nan]),
"F": to_datetime(['NaT', '2000-1-2', 'NaT', 'NaT']),
"G": to_timedelta(['1 days', 'nan', 'nan', 'nan']),
"H": [8, 8, 9, 9],
"I": [9, 9, 8, 8],
"J": [1, 1, np.nan, np.nan],
"K": Categorical(['a', np.nan, 'a', np.nan]),
"L": to_datetime(['2000-1-2', '2000-1-2',
'NaT', 'NaT']),
"M": to_timedelta(['1 days', 'nan',
'1 days', 'nan']),
"N": np.arange(4, dtype='int64')})
result = df[sorted(list(expected.keys()))].mode(dropna=dropna)
expected = DataFrame(expected)
tm.assert_frame_equal(result, expected)
def test_mode_sortwarning(self):
# Check for the warning that is raised when the mode
# results cannot be sorted
df = DataFrame({"A": [np.nan, np.nan, 'a', 'a']})
expected = DataFrame({'A': ['a', np.nan]})
with tm.assert_produces_warning(UserWarning, check_stacklevel=False):
result = df.mode(dropna=False)
result = result.sort_values(by='A').reset_index(drop=True)
tm.assert_frame_equal(result, expected)
def test_operators_timedelta64(self):
df = DataFrame(dict(A=date_range('2012-1-1', periods=3, freq='D'),
B=date_range('2012-1-2', periods=3, freq='D'),
C=Timestamp('20120101') -
timedelta(minutes=5, seconds=5)))
diffs = DataFrame(dict(A=df['A'] - df['C'],
B=df['A'] - df['B']))
# min
result = diffs.min()
assert result[0] == diffs.loc[0, 'A']
assert result[1] == diffs.loc[0, 'B']
result = diffs.min(axis=1)
assert (result == diffs.loc[0, 'B']).all()
# max
result = diffs.max()
assert result[0] == diffs.loc[2, 'A']
assert result[1] == diffs.loc[2, 'B']
result = diffs.max(axis=1)
assert (result == diffs['A']).all()
# abs
result = diffs.abs()
result2 = abs(diffs)
expected = DataFrame(dict(A=df['A'] - df['C'],
B=df['B'] - df['A']))
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
# mixed frame
mixed = diffs.copy()
mixed['C'] = 'foo'
mixed['D'] = 1
mixed['E'] = 1.
mixed['F'] = Timestamp('20130101')
# results in an object array
result = mixed.min()
expected = Series([pd.Timedelta(timedelta(seconds=5 * 60 + 5)),
pd.Timedelta(timedelta(days=-1)),
'foo', 1, 1.0,
Timestamp('20130101')],
index=mixed.columns)
tm.assert_series_equal(result, expected)
# excludes numeric
result = mixed.min(axis=1)
expected = Series([1, 1, 1.], index=[0, 1, 2])
tm.assert_series_equal(result, expected)
# works when only those columns are selected
result = mixed[['A', 'B']].min(1)
expected = Series([timedelta(days=-1)] * 3)
tm.assert_series_equal(result, expected)
result = mixed[['A', 'B']].min()
expected = Series([timedelta(seconds=5 * 60 + 5),
timedelta(days=-1)], index=['A', 'B'])
tm.assert_series_equal(result, expected)
# GH 3106
df = DataFrame({'time': date_range('20130102', periods=5),
'time2': date_range('20130105', periods=5)})
df['off1'] = df['time2'] - df['time']
assert df['off1'].dtype == 'timedelta64[ns]'
df['off2'] = df['time'] - df['time2']
df._consolidate_inplace()
assert df['off1'].dtype == 'timedelta64[ns]'
assert df['off2'].dtype == 'timedelta64[ns]'
def test_sum_corner(self):
empty_frame = DataFrame()
axis0 = empty_frame.sum(0)
axis1 = empty_frame.sum(1)
assert isinstance(axis0, Series)
assert isinstance(axis1, Series)
assert len(axis0) == 0
assert len(axis1) == 0
@pytest.mark.parametrize('method, unit', [
('sum', 0),
('prod', 1),
])
def test_sum_prod_nanops(self, method, unit):
idx = ['a', 'b', 'c']
df = pd.DataFrame({"a": [unit, unit],
"b": [unit, np.nan],
"c": [np.nan, np.nan]})
# The default
        result = getattr(df, method)()
        expected = pd.Series([unit, unit, unit], index=idx, dtype='float64')
        tm.assert_series_equal(result, expected)
# min_count=1
result = getattr(df, method)(min_count=1)
expected = pd.Series([unit, unit, np.nan], index=idx)
tm.assert_series_equal(result, expected)
# min_count=0
result = getattr(df, method)(min_count=0)
expected = pd.Series([unit, unit, unit], index=idx, dtype='float64')
tm.assert_series_equal(result, expected)
result = getattr(df.iloc[1:], method)(min_count=1)
expected = pd.Series([unit, np.nan, np.nan], index=idx)
tm.assert_series_equal(result, expected)
# min_count > 1
df = pd.DataFrame({"A": [unit] * 10, "B": [unit] * 5 + [np.nan] * 5})
result = getattr(df, method)(min_count=5)
expected = pd.Series(result, index=['A', 'B'])
tm.assert_series_equal(result, expected)
result = getattr(df, method)(min_count=6)
expected = pd.Series(result, index=['A', 'B'])
tm.assert_series_equal(result, expected)
def test_sum_nanops_timedelta(self):
# prod isn't defined on timedeltas
idx = ['a', 'b', 'c']
df = pd.DataFrame({"a": [0, 0],
"b": [0, np.nan],
"c": [np.nan, np.nan]})
df2 = df.apply(pd.to_timedelta)
# 0 by default
result = df2.sum()
expected = pd.Series([0, 0, 0], dtype='m8[ns]', index=idx)
tm.assert_series_equal(result, expected)
# min_count=0
result = df2.sum(min_count=0)
tm.assert_series_equal(result, expected)
# min_count=1
result = df2.sum(min_count=1)
expected = pd.Series([0, 0, np.nan], dtype='m8[ns]', index=idx)
tm.assert_series_equal(result, expected)
def test_sum_object(self, float_frame):
values = float_frame.values.astype(int)
frame = DataFrame(values, index=float_frame.index,
columns=float_frame.columns)
deltas = frame * timedelta(1)
deltas.sum()
def test_sum_bool(self, float_frame):
# ensure this works, bug report
bools = np.isnan(float_frame)
bools.sum(1)
bools.sum(0)
def test_mean_corner(self, float_frame, float_string_frame):
# unit test when have object data
the_mean = float_string_frame.mean(axis=0)
the_sum = float_string_frame.sum(axis=0, numeric_only=True)
tm.assert_index_equal(the_sum.index, the_mean.index)
assert len(the_mean.index) < len(float_string_frame.columns)
# xs sum mixed type, just want to know it works...
the_mean = float_string_frame.mean(axis=1)
the_sum = float_string_frame.sum(axis=1, numeric_only=True)
tm.assert_index_equal(the_sum.index, the_mean.index)
# take mean of boolean column
float_frame['bool'] = float_frame['A'] > 0
means = float_frame.mean(0)
assert means['bool'] == float_frame['bool'].values.mean()
def test_stats_mixed_type(self, float_string_frame):
# don't blow up
float_string_frame.std(1)
float_string_frame.var(1)
float_string_frame.mean(1)
float_string_frame.skew(1)
def test_sum_bools(self):
df = DataFrame(index=lrange(1), columns=lrange(10))
bools = isna(df)
assert bools.sum(axis=1)[0] == 10
# ---------------------------------------------------------------------
# Cumulative Reductions - cumsum, cummax, ...
def test_cumsum_corner(self):
dm = DataFrame(np.arange(20).reshape(4, 5),
index=lrange(4), columns=lrange(5))
# ?(wesm)
result = dm.cumsum() # noqa
def test_cumsum(self, datetime_frame):
datetime_frame.loc[5:10, 0] = np.nan
datetime_frame.loc[10:15, 1] = np.nan
datetime_frame.loc[15:, 2] = np.nan
# axis = 0
cumsum = datetime_frame.cumsum()
expected = datetime_frame.apply(Series.cumsum)
tm.assert_frame_equal(cumsum, expected)
# axis = 1
cumsum = datetime_frame.cumsum(axis=1)
expected = datetime_frame.apply(Series.cumsum, axis=1)
tm.assert_frame_equal(cumsum, expected)
# works
df = DataFrame({'A': np.arange(20)}, index=np.arange(20))
result = df.cumsum() # noqa
# fix issue
cumsum_xs = datetime_frame.cumsum(axis=1)
assert np.shape(cumsum_xs) == np.shape(datetime_frame)
def test_cumprod(self, datetime_frame):
datetime_frame.loc[5:10, 0] = np.nan
datetime_frame.loc[10:15, 1] = np.nan
datetime_frame.loc[15:, 2] = np.nan
# axis = 0
cumprod = datetime_frame.cumprod()
expected = datetime_frame.apply(Series.cumprod)
tm.assert_frame_equal(cumprod, expected)
# axis = 1
cumprod = datetime_frame.cumprod(axis=1)
expected = datetime_frame.apply(Series.cumprod, axis=1)
tm.assert_frame_equal(cumprod, expected)
# fix issue
cumprod_xs = datetime_frame.cumprod(axis=1)
assert np.shape(cumprod_xs) == np.shape(datetime_frame)
# ints
df = datetime_frame.fillna(0).astype(int)
df.cumprod(0)
df.cumprod(1)
# ints32
df = datetime_frame.fillna(0).astype(np.int32)
df.cumprod(0)
df.cumprod(1)
def test_cummin(self, datetime_frame):
datetime_frame.loc[5:10, 0] = np.nan
datetime_frame.loc[10:15, 1] = np.nan
datetime_frame.loc[15:, 2] = np.nan
# axis = 0
cummin = datetime_frame.cummin()
expected = datetime_frame.apply(Series.cummin)
tm.assert_frame_equal(cummin, expected)
# axis = 1
cummin = datetime_frame.cummin(axis=1)
expected = datetime_frame.apply(Series.cummin, axis=1)
tm.assert_frame_equal(cummin, expected)
# it works
df = DataFrame({'A': np.arange(20)}, index=np.arange(20))
result = df.cummin() # noqa
# fix issue
cummin_xs = datetime_frame.cummin(axis=1)
assert np.shape(cummin_xs) == np.shape(datetime_frame)
def test_cummax(self, datetime_frame):
datetime_frame.loc[5:10, 0] = np.nan
datetime_frame.loc[10:15, 1] = np.nan
datetime_frame.loc[15:, 2] = np.nan
# axis = 0
cummax = datetime_frame.cummax()
expected = datetime_frame.apply(Series.cummax)
tm.assert_frame_equal(cummax, expected)
# axis = 1
cummax = datetime_frame.cummax(axis=1)
expected = datetime_frame.apply(Series.cummax, axis=1)
tm.assert_frame_equal(cummax, expected)
# it works
df = DataFrame({'A': np.arange(20)}, index=np.arange(20))
result = df.cummax() # noqa
# fix issue
cummax_xs = datetime_frame.cummax(axis=1)
assert np.shape(cummax_xs) == np.shape(datetime_frame)
# ---------------------------------------------------------------------
# Miscellanea
def test_count(self):
# corner case
frame = DataFrame()
ct1 = frame.count(1)
assert isinstance(ct1, Series)
ct2 = frame.count(0)
assert isinstance(ct2, Series)
# GH#423
df = DataFrame(index=lrange(10))
result = df.count(1)
expected = Series(0, index=df.index)
tm.assert_series_equal(result, expected)
df = DataFrame(columns=lrange(10))
result = df.count(0)
expected = Series(0, index=df.columns)
tm.assert_series_equal(result, expected)
df = DataFrame()
result = df.count()
expected = Series(0, index=[])
tm.assert_series_equal(result, expected)
def test_count_objects(self, float_string_frame):
dm = DataFrame(float_string_frame._series)
df = DataFrame(float_string_frame._series)
tm.assert_series_equal(dm.count(), df.count())
tm.assert_series_equal(dm.count(1), df.count(1))
def test_pct_change(self):
# GH#11150
pnl = DataFrame([np.arange(0, 40, 10),
np.arange(0, 40, 10),
np.arange(0, 40, 10)]).astype(np.float64)
pnl.iat[1, 0] = np.nan
pnl.iat[1, 1] = np.nan
pnl.iat[2, 3] = 60
for axis in range(2):
expected = pnl.ffill(axis=axis) / pnl.ffill(axis=axis).shift(
axis=axis) - 1
result = pnl.pct_change(axis=axis, fill_method='pad')
tm.assert_frame_equal(result, expected)
# ----------------------------------------------------------------------
# Index of max / min
def test_idxmin(self, float_frame, int_frame):
frame = float_frame
frame.loc[5:10] = np.nan
frame.loc[15:20, -2:] = np.nan
for skipna in [True, False]:
for axis in [0, 1]:
for df in [frame, int_frame]:
result = df.idxmin(axis=axis, skipna=skipna)
expected = df.apply(Series.idxmin, axis=axis,
skipna=skipna)
tm.assert_series_equal(result, expected)
msg = ("No axis named 2 for object type"
" <class 'pandas.core.frame.DataFrame'>")
with pytest.raises(ValueError, match=msg):
frame.idxmin(axis=2)
def test_idxmax(self, float_frame, int_frame):
frame = float_frame
frame.loc[5:10] = np.nan
frame.loc[15:20, -2:] = np.nan
for skipna in [True, False]:
for axis in [0, 1]:
for df in [frame, int_frame]:
result = df.idxmax(axis=axis, skipna=skipna)
expected = df.apply(Series.idxmax, axis=axis,
skipna=skipna)
tm.assert_series_equal(result, expected)
msg = ("No axis named 2 for object type"
" <class 'pandas.core.frame.DataFrame'>")
with pytest.raises(ValueError, match=msg):
frame.idxmax(axis=2)
# ----------------------------------------------------------------------
# Logical reductions
@pytest.mark.parametrize('opname', ['any', 'all'])
def test_any_all(self, opname, bool_frame_with_na, float_string_frame):
assert_bool_op_calc(opname, getattr(np, opname), bool_frame_with_na,
has_skipna=True)
assert_bool_op_api(opname, bool_frame_with_na, float_string_frame,
has_bool_only=True)
def test_any_all_extra(self):
df = DataFrame({
'A': [True, False, False],
'B': [True, True, False],
'C': [True, True, True],
}, index=['a', 'b', 'c'])
result = df[['A', 'B']].any(1)
expected = Series([True, True, False], index=['a', 'b', 'c'])
tm.assert_series_equal(result, expected)
result = df[['A', 'B']].any(1, bool_only=True)
tm.assert_series_equal(result, expected)
result = df.all(1)
expected = Series([True, False, False], index=['a', 'b', 'c'])
tm.assert_series_equal(result, expected)
result = df.all(1, bool_only=True)
tm.assert_series_equal(result, expected)
# Axis is None
result = df.all(axis=None).item()
assert result is False
result = df.any(axis=None).item()
assert result is True
result = df[['C']].all(axis=None).item()
assert result is True
def test_any_datetime(self):
# GH 23070
float_data = [1, np.nan, 3, np.nan]
datetime_data = [pd.Timestamp('1960-02-15'),
pd.Timestamp('1960-02-16'),
pd.NaT,
pd.NaT]
df = DataFrame({
"A": float_data,
"B": datetime_data
})
result = df.any(1)
        expected = Series([True, True, True, False])
        tm.assert_series_equal(result, expected)
"""Console script for zalando_classification."""
import sys
import click
import numpy as np
import tensorflow as tf
import tensorflow.keras.backend as K
import tensorflow_probability as tfp
import pandas as pd
from gpdre import GaussianProcessDensityRatioEstimator
from gpdre.benchmarks import SugiyamaKrauledatMuellerDensityRatioMarginals
from gpdre.datasets import make_classification_dataset
from gpdre.base import MLPDensityRatioEstimator, LogisticRegressionDensityRatioEstimator
from gpdre.external.rulsif import RuLSIFDensityRatioEstimator
from gpdre.external.kliep import KLIEPDensityRatioEstimator
from gpdre.external.kmm import KMMDensityRatioEstimator
from gpdre.initializers import KMeans
from gpflow.models import SVGP
from gpflow.kernels import Matern52
from sklearn.linear_model import LogisticRegression
from pathlib import Path
K.set_floatx("float64")
# shortcuts
tfd = tfp.distributions
# sensible defaults
SUMMARY_DIR = "logs/"
SEED = 8888
dataset_seed = 8888
num_features = 2
num_samples = 1000
num_train = 500
num_test = 500
num_inducing_points = 300
optimizer = "adam"
epochs = 2000
batch_size = 100
buffer_size = 1000
jitter = 1e-6
num_seeds = 10
# properties of the distribution
props = {
"mean": tfd.Distribution.mean,
"mode": tfd.Distribution.mode,
"median": lambda d: d.distribution.quantile(0.5),
# "sample": tfd.Distribution.sample, # single sample
}
def class_posterior(x1, x2):
return 0.5 * (1 + tf.tanh(x1 - tf.nn.relu(-x2)))
def metric(X_train, y_train, X_test, y_test, sample_weight=None,
random_state=None):
model = LogisticRegression(C=1.0, random_state=random_state)
model.fit(X_train, y_train, sample_weight=sample_weight)
return model.score(X_test, y_test)
@click.command()
@click.argument("name")
@click.option("--summary-dir", default=SUMMARY_DIR,
type=click.Path(file_okay=False, dir_okay=True),
help="Summary directory.")
@click.option("-s", "--seed", default=SEED, type=int, help="Random seed")
def main(name, summary_dir, seed):
summary_path = Path(summary_dir).joinpath("sugiyama")
summary_path.mkdir(parents=True, exist_ok=True)
r = SugiyamaKrauledatMuellerDensityRatioMarginals()
rows = []
for seed in range(num_seeds):
# (X_train, y_train), (X_test, y_test) = r.train_test_split(X, y, seed=seed)
(X_train, y_train), (X_test, y_test) = r.make_covariate_shift_dataset(
num_test, num_train, class_posterior_fn=class_posterior, threshold=0.5,
seed=seed)
X, s = make_classification_dataset(X_test, X_train)
# Uniform
acc = metric(X_train, y_train, X_test, y_test, random_state=seed)
rows.append(dict(weight="uniform", acc=acc,
seed=seed, dataset_seed=seed))
# Exact
acc = metric(X_train, y_train, X_test, y_test,
sample_weight=r.ratio(X_train).numpy(), random_state=seed)
rows.append(dict(weight="exact", acc=acc,
seed=seed, dataset_seed=seed))
# RuLSIF
r_rulsif = RuLSIFDensityRatioEstimator(alpha=1e-6)
r_rulsif.fit(X_test, X_train)
sample_weight = np.maximum(1e-6, r_rulsif.ratio(X_train))
acc = metric(X_train, y_train, X_test, y_test,
sample_weight=sample_weight, random_state=seed)
rows.append(dict(weight="rulsif", acc=acc,
seed=seed, dataset_seed=seed))
# KLIEP
# sigmas = [0.1, 0.25, 0.5, 0.75, 1.0]
sigmas = list(np.maximum(0.25 * np.arange(5), 0.1))
r_kliep = KLIEPDensityRatioEstimator(sigmas=sigmas, seed=seed)
r_kliep.fit(X_test, X_train)
sample_weight = np.maximum(1e-6, r_kliep.ratio(X_train))
acc = metric(X_train, y_train, X_test, y_test,
sample_weight=sample_weight, random_state=seed)
rows.append(dict(weight="kliep", acc=acc,
seed=seed, dataset_seed=seed))
# KMM
r_kmm = KMMDensityRatioEstimator(B=1000.0)
r_kmm.fit(X_test, X_train)
sample_weight = np.maximum(1e-6, r_kmm.ratio(X_train))
acc = metric(X_train, y_train, X_test, y_test,
sample_weight=sample_weight, random_state=seed)
rows.append(dict(weight="kmm", acc=acc,
seed=seed, dataset_seed=seed))
# Logistic Regression (Linear)
r_logreg = LogisticRegressionDensityRatioEstimator(seed=seed)
r_logreg.fit(X_test, X_train)
sample_weight = np.maximum(1e-6, r_logreg.ratio(X_train).numpy())
acc = metric(X_train, y_train, X_test, y_test,
sample_weight=sample_weight, random_state=seed)
rows.append(dict(weight="logreg", acc=acc,
seed=seed, dataset_seed=seed))
# Logistic Regression (MLP)
r_mlp = MLPDensityRatioEstimator(num_layers=1, num_units=8,
activation="tanh", seed=seed)
r_mlp.compile(optimizer=optimizer, metrics=["accuracy"])
r_mlp.fit(X_test, X_train, epochs=epochs, batch_size=batch_size)
sample_weight = np.maximum(1e-6, r_mlp.ratio(X_train).numpy())
acc = metric(X_train, y_train, X_test, y_test,
sample_weight=sample_weight, random_state=seed)
rows.append(dict(weight="mlp", acc=acc,
seed=seed, dataset_seed=seed))
# Gaussian Processes
gpdre = GaussianProcessDensityRatioEstimator(
input_dim=num_features,
kernel_cls=Matern52,
num_inducing_points=num_inducing_points,
inducing_index_points_initializer=KMeans(X, seed=seed),
vgp_cls=SVGP,
whiten=True,
jitter=jitter,
seed=seed)
gpdre.compile(optimizer=optimizer)
gpdre.fit(X_test, X_train, epochs=epochs, batch_size=batch_size,
buffer_size=buffer_size)
for prop_name, prop in props.items():
r_prop = gpdre.ratio(X_train, convert_to_tensor_fn=prop)
acc = metric(X_train, y_train, X_test, y_test,
sample_weight=r_prop.numpy(), random_state=seed)
rows.append(dict(weight=prop_name, acc=acc,
seed=seed, dataset_seed=seed))
    data = pd.DataFrame(rows)
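    # --- sketch, not part of the original script --------------------------------
    # Assumptions: the aggregated rows are written to a CSV named after the CLI
    # `name` argument inside `summary_path`; the entry point follows the usual
    # click console-script pattern (`sys` is already imported above).
    data.to_csv(summary_path.joinpath("{}.csv".format(name)), index=False)
    return 0


if __name__ == "__main__":
    sys.exit(main())  # pragma: no cover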
import numpy as np
import os
from collections import OrderedDict
import pandas as pd
import copy
class SAGEIIILoaderV400(object):
def __init__(self):
self.data_folder = ''
self.data_format = self.get_data_format()
self.sage_ii_format = True
def get_data_format(self):
data_format = OrderedDict()
data_format['Event Identification Tag'] = (0,4,1,'int32')
data_format['Year-Day Tag'] = (4,8,1,'int32')
data_format['Instrument Elapsed Time in Orbit'] = (8,12,1,'int32')
data_format['Fill Value Int'] = (12,16,1,'int32')
data_format['Fill Value Float'] = (16,20,1,'float32')
data_format['Mission Identification'] = (20,24,1,'int32')
data_format['Version: Definitive Orbit Processing'] = (24,28,1,'float32')
data_format['Version: Level 0 Processing'] = (28,32,1,'float32')
data_format['Version: Software Processing'] = (32,36,1,'float32')
data_format['Version: Data Product'] = (36,40,1,'float32')
data_format['Version: Spectroscopy'] = (40,44,1,'float32')
data_format['Version: GRAM 95'] = (44,48,1,'float32')
data_format['Version: Meteorlogical'] = (48,52,1,'float32')
data_format['Altitude–Based Grid Spacing (km)'] = (52,56,1,'float32')
data_format['Number of Altitude–Based Array Values']= (56,60,1,'int32')
data_format['Number of Aerosol Channels'] = (60,64,1,'int32')
data_format['Number of Ground Track Values'] = (64,68,1,'int32')
data_format['Number of Aerosol Extinction Altitude Levels']= (68,72,1,'int32')
data_format['Spacecraft–Referenced Event Type'] = (72,76,1,'int32')
data_format['Earth–Referenced Event Type'] = (76,80,1,'int32')
data_format['Event Beta Angle'] = (80,84,1,'float32')
data_format['Event Status Bit Flags'] = (84,88,1,'int32')
data_format['Data Capture Start Date'] = (88,92,1,'int32')
data_format['Data Capture Start Time'] = (92,96,1,'int32')
data_format['Subtangent Start Latitude'] = (96,100,1,'float32')
data_format['Subtangent Start Longitude'] = (100,104,1,'float32')
data_format['Subtangent Start Altitude'] = (104,108,1,'float32')
data_format['Data Capture End Date'] = (108,112,1,'int32')
data_format['Data Capture End Time'] = (112,116,1,'int32')
data_format['Subtangent End Latitude'] = (116,120,1,'float32')
data_format['Subtangent End Longitude'] = (120,124,1,'float32')
data_format['Subtangent End Ray Path Direction'] = (124,128,1,'float32')
data_format['Date'] = (128,172,1,'int32')
data_format['Time'] = (172,216,1,'int32')
data_format['Subtangent Latitude'] = (216, 260, 1, 'float32')
data_format['Subtangent Longitude'] = (260, 304, 1, 'float32')
data_format['Subtangent Altitude'] = (304, 348, 1, 'float32')
data_format['Homogeneity Flags'] = (348, 1148, 1, 'int32')
data_format['Geometric Altitude'] = (1148, 1948, 1, 'float32')
data_format['Geopotential Altitude'] = (1948, 2748, 1, 'float32')
data_format['Temperature'] = (2748, 3548, 1, 'float32')
data_format['Temperature Uncertainty'] = (3548, 4348, 1, 'float32')
data_format['Pressure'] = (4348, 5148, 1, 'float32')
data_format['Pressure Uncertainty'] = (5148, 5948, 1, 'float32')
data_format['Pressure/Temperature Array Source Flags'] = (5948, 6748, 1, 'int32')
data_format['Tropopause Temperature'] = (6748, 6752, 1, 'float32')
data_format['Tropopause Geometric Altitude'] = (6752, 6756, 1, 'float32')
data_format['Composite Ozone Concentration'] = (6756, 7556, 1, 'float32')
data_format['Composite Ozone Concentration Uncertainty'] = (7556, 8356, 1, 'float32')
data_format['Composite Ozone Slant Path Column Density'] = (8356, 9156, 1, 'float32')
data_format['Composite Ozone Slant Path Column Density Uncertainty'] = (9156, 9956, 1, 'float32')
data_format['Composite Ozone QA Bit Flags '] = (9956, 10756, 1, 'int32')
data_format['Mesospheric Ozone Concentration'] = (10756, 11556, 1, 'float32')
data_format['Mesospheric Ozone Concentration Uncertainty'] = (11556, 12356, 1, 'float32')
data_format['Mesospheric Ozone Slant Path Column Density'] = (12356, 13156, 1, 'float32')
data_format['Mesospheric Ozone Slant Path Column Density Uncertainty'] = (13156, 13956, 1, 'float32')
data_format['Mesospheric Ozone QA Bit Flags '] = (13956, 14756, 1, 'int32')
data_format['MLR Ozone Concentration'] = (14756, 15556, 1, 'float32')
data_format['MLR Ozone Concentration Uncertainty'] = (15556, 16356, 1, 'float32')
data_format['MLR Ozone Slant Path Column Density'] = (16356, 17156, 1, 'float32')
data_format['MLR Ozone Slant Path Column Density Uncertainty'] = (17156, 17956, 1, 'float32')
data_format['MLR Ozone QA Bit Flags '] = (17956, 18756, 1, 'int32')
data_format['LSQ Ozone Concentration'] = (18756, 19556, 1, 'float32')
data_format['LSQ Ozone Concentration Uncertainty'] = (19556, 20356, 1, 'float32')
data_format['LSQ Ozone Slant Path Column Density'] = (20356, 21156, 1, 'float32')
data_format['LSQ Ozone Slant Path Column Density Uncertainty'] = (21156, 21956, 1, 'float32')
data_format['LSQ Ozone QA Bit Flags '] = (21956, 22756, 1, 'int32')
data_format['Water Vapor Concentration'] = (22756, 23556, 1, 'float32')
data_format['Water Vapor Concentration Uncertainty'] = (23556, 24356, 1, 'float32')
data_format['Water Vapor QA Bit Flags'] = (24356, 25156, 1, 'int32')
data_format['NO2 Concentration'] = (25156, 25956, 1, 'float32')
data_format['NO2 Concentration Uncertainty'] = (25956, 26756, 1, 'float32')
data_format['NO2 Slant Path Column Density'] = (26756, 27556, 1, 'float32')
data_format['NO2 Slant Path Column Density Uncertainty'] = (27556, 28356, 1, 'float32')
data_format['NO2 QA Bit Flags '] = (28356, 29156, 1, 'int32')
data_format['Retrieved Temperature'] = (29156, 29956, 1, 'float32')
data_format['Retrieved Temperature Uncertainty'] = (29956, 30756, 1, 'float32')
data_format['Retrieved Pressure'] = (30756, 31556, 1, 'float32')
data_format['Retrieved Pressure Uncertainty'] = (31556, 32356, 1, 'float32')
data_format['Retrieved Pressure/Temperature QA Bit Flags'] = (32356, 33156, 1, 'int32')
data_format['Aerosol Wavelengths'] = (33156, 33192, 1, 'float32')
data_format['Half–Bandwidths of Aerosol Channels'] = (33192, 33228, 1, 'float32')
data_format['Stratospheric Optical Depth'] = (33228, 33264, 1, 'float32')
data_format['Stratospheric Optical Depth Uncertainty'] = (33264, 33300, 1, 'float32')
data_format['Stratospheric Optical Depth QA Bit Flags'] = (33300, 33336, 1, 'int32')
data_format['Aerosol Extinction Channel 1'] = (33336, 33696, 1, 'float32')
data_format['Aerosol Extinction Uncertainty Channel 1'] = (33696, 34056, 1, 'float32')
data_format['Aerosol Extinction QA Bit Flags Channel 1'] = (34056, 34416, 1, 'int32')
data_format['Aerosol Extinction Channel 2'] = (34416, 34776, 1, 'float32')
data_format['Aerosol Extinction Uncertainty Channel 2'] = (34776, 35136, 1, 'float32')
data_format['Aerosol Extinction QA Bit Flags Channel 2'] = (35136, 35496, 1, 'int32')
data_format['Aerosol Extinction Channel 3'] = (35496, 35856, 1, 'float32')
data_format['Aerosol Extinction Uncertainty Channel 3'] = (35856, 36216, 1, 'float32')
data_format['Aerosol Extinction QA Bit Flags Channel 3'] = (36216, 36576, 1, 'int32')
data_format['Aerosol Extinction Channel 4'] = (36576, 36936, 1, 'float32')
data_format['Aerosol Extinction Uncertainty Channel 4'] = (36936, 37296, 1, 'float32')
data_format['Aerosol Extinction QA Bit Flags Channel 4'] = (37296, 37656, 1, 'int32')
data_format['Aerosol Extinction Channel 5'] = (37656, 38016, 1, 'float32')
data_format['Aerosol Extinction Uncertainty Channel 5'] = (38016, 38376, 1, 'float32')
data_format['Aerosol Extinction QA Bit Flags Channel 5'] = (38376, 38736, 1, 'int32')
data_format['Aerosol Extinction Channel 6'] = (38736, 39096, 1, 'float32')
data_format['Aerosol Extinction Uncertainty Channel 6'] = (39096, 39456, 1, 'float32')
data_format['Aerosol Extinction QA Bit Flags Channel 6'] = (39456, 39816, 1, 'int32')
data_format['Aerosol Extinction Channel 7'] = (39816, 40176, 1, 'float32')
data_format['Aerosol Extinction Uncertainty Channel 7'] = (40176, 40536, 1, 'float32')
        data_format['Aerosol Extinction QA Bit Flags Channel 7'] = (40536, 40896, 1, 'int32')
data_format['Aerosol Extinction Channel 8'] = (40896, 41256, 1, 'float32')
data_format['Aerosol Extinction Uncertainty Channel 8'] = (41256, 41616, 1, 'float32')
        data_format['Aerosol Extinction QA Bit Flags Channel 8'] = (41616, 41976, 1, 'int32')
data_format['Aerosol Extinction Channel 9'] = (41976, 42336, 1, 'float32')
data_format['Aerosol Extinction Uncertainty Channel 9'] = (42336, 42696, 1, 'float32')
data_format['Aerosol Extinction QA Bit Flags Channel 9'] = (42696, 43056, 1, 'int32')
data_format['Aerosol Spectral Dependence Flag'] = (43056, 43416, 1, 'float32')
data_format['1020nm/Rayleigh Extinction Ratio'] = (43416, 43776, 1, 'float32')
data_format['1020nm/Rayleigh Extinction Ratio Uncertainty'] = (43776, 44136, 1, 'float32')
data_format['1020nm/Rayleigh Extinction Ratio QA Bit Flags']= (44136, 44496, 1, 'int32')
return data_format
def load_file(self, file):
# load the file into the buffer
file_format = self.data_format
with open(file, "rb") as f:
buffer = f.read()
# load the data from the buffer
data = dict()
for key in self.data_format.keys():
dt = np.dtype(file_format[key][3])
dt = dt.newbyteorder('>')
try:
data[key] = copy.copy(np.frombuffer(buffer[file_format[key][0]:file_format[key][1]], dtype=dt))
except:
print(key)
# add some extra fields for convenience
lat = data['Subtangent Latitude']
lat[lat == data['Fill Value Float']] = np.nan
data['Lat'] = np.nanmean(lat)
lon = data['Subtangent Longitude']
lon[lon == data['Fill Value Float']] = np.nan
data['Lon'] = np.nanmean(lon)
# add a modified julian date and astropy object time fields
date = data['Date']
date = np.delete(date, np.where(date == data['Fill Value Int']))
year = [str(d) for d in np.asarray(date/10000, dtype=int)]
month = [str(d).zfill(2) for d in np.asarray(date % 10000 / 100, dtype=int)]
day = [str(d).zfill(2) for d in date % 100]
time = data['Time']
time = np.delete(time, np.where(time == data['Fill Value Int']))
hour = [str(t).zfill(2) for t in np.asarray(time/10000, dtype=int)]
minute = [str(t).zfill(2) for t in np.asarray(time/100, dtype=int) % 100]
second = [str(t).zfill(2) for t in time % 100]
time_str = [y + '-' + m + '-' + d + ' ' + h + ':' + mi + ':' + s for y,m,d,h,mi,s in zip(year,month,day,hour,minute,second)]
try:
# t = Time(time_str,format='iso')
t = pd.to_datetime(time_str)
except:
print('time error')
        # modified Julian date: days elapsed since the MJD epoch (1858-11-17)
        data['mjd'] = np.mean(np.array((t - pd.Timestamp('1858-11-17')) / pd.Timedelta(days=1)))
        return data
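

# A minimal usage sketch, not part of the loader itself. The folder and file name
# below are assumptions -- point them at any SAGE III v4.00 solar event file.
if __name__ == '__main__':
    loader = SAGEIIILoaderV400()
    loader.data_folder = '/path/to/sage_iii/v4.00'  # hypothetical data folder
    event = loader.load_file(os.path.join(loader.data_folder,
                                          'g3a.00310341.00'))  # hypothetical file
    print(event['Lat'], event['Lon'], event['mjd'])
    print(event['Geometric Altitude'][:5])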
import os
import time
import numpy as np
import pandas as pd
import sklearn as sk
from sklearn.preprocessing import label_binarize
from util import util
from util import metrics
from torch.utils.tensorboard import SummaryWriter
class Visualizer:
"""
This class print/save logging information
"""
def __init__(self, param):
"""
Initialize the Visualizer class
"""
self.param = param
self.output_path = os.path.join(param.checkpoints_dir, param.experiment_name)
tb_dir = os.path.join(self.output_path, 'tb_log')
util.mkdir(tb_dir)
if param.isTrain:
# Create a logging file to store training losses
self.train_log_filename = os.path.join(self.output_path, 'train_log.txt')
with open(self.train_log_filename, 'a') as log_file:
now = time.strftime('%c')
log_file.write('----------------------- Training Log ({:s}) -----------------------\n'.format(now))
self.train_summary_filename = os.path.join(self.output_path, 'train_summary.txt')
with open(self.train_summary_filename, 'a') as log_file:
now = time.strftime('%c')
log_file.write('----------------------- Training Summary ({:s}) -----------------------\n'.format(now))
# Create log folder for TensorBoard
tb_train_dir = os.path.join(self.output_path, 'tb_log', 'train')
util.mkdir(tb_train_dir)
util.clear_dir(tb_train_dir)
# Create TensorBoard writer
self.train_writer = SummaryWriter(log_dir=tb_train_dir)
if param.isTest:
# Create a logging file to store testing metrics
self.test_log_filename = os.path.join(self.output_path, 'test_log.txt')
with open(self.test_log_filename, 'a') as log_file:
now = time.strftime('%c')
log_file.write('----------------------- Testing Log ({:s}) -----------------------\n'.format(now))
self.test_summary_filename = os.path.join(self.output_path, 'test_summary.txt')
with open(self.test_summary_filename, 'a') as log_file:
now = time.strftime('%c')
log_file.write('----------------------- Testing Summary ({:s}) -----------------------\n'.format(now))
# Create log folder for TensorBoard
tb_test_dir = os.path.join(self.output_path, 'tb_log', 'test')
util.mkdir(tb_test_dir)
util.clear_dir(tb_test_dir)
# Create TensorBoard writer
self.test_writer = SummaryWriter(log_dir=tb_test_dir)
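    # Typical driving loop (a sketch; `opt` is assumed to be the parsed option
    # object exposing checkpoints_dir, experiment_name, isTrain, isTest,
    # downstream_task and class_num, and the dicts follow the formats used below):
    #
    #   visualizer = Visualizer(opt)
    #   visualizer.print_train_log(epoch, i, losses, metrics, t_load, t_comp,
    #                              opt.batch_size, len(train_dataset))
    #   visualizer.print_train_summary(epoch, losses, outputs, t_epoch, current_lr)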
def print_train_log(self, epoch, iteration, losses_dict, metrics_dict, load_time, comp_time, batch_size, dataset_size, with_time=True):
"""
print train log on console and save the message to the disk
Parameters:
epoch (int) -- current epoch
iteration (int) -- current training iteration during this epoch
losses_dict (OrderedDict) -- training losses stored in the ordered dict
metrics_dict (OrderedDict) -- metrics stored in the ordered dict
load_time (float) -- data loading time per data point (normalized by batch_size)
comp_time (float) -- computational time per data point (normalized by batch_size)
batch_size (int) -- batch size of training
dataset_size (int) -- size of the training dataset
with_time (bool) -- print the running time or not
"""
data_point_covered = min((iteration + 1) * batch_size, dataset_size)
if with_time:
message = '[TRAIN] [Epoch: {:3d} Iter: {:4d} Load_t: {:.3f} Comp_t: {:.3f}] '.format(epoch, data_point_covered, load_time, comp_time)
else:
message = '[TRAIN] [Epoch: {:3d} Iter: {:4d}]\n'.format(epoch, data_point_covered)
for name, loss in losses_dict.items():
message += '{:s}: {:.3f} '.format(name, loss[-1])
for name, metric in metrics_dict.items():
message += '{:s}: {:.3f} '.format(name, metric)
print(message) # print the message
with open(self.train_log_filename, 'a') as log_file:
log_file.write(message + '\n') # save the message
def print_train_summary(self, epoch, losses_dict, output_dict, train_time, current_lr):
"""
print the summary of this training epoch
Parameters:
epoch (int) -- epoch number of this training model
losses_dict (OrderedDict) -- the losses dictionary
output_dict (OrderedDict) -- the downstream output dictionary
train_time (float) -- time used for training this epoch
current_lr (float) -- the learning rate of this epoch
"""
write_message = '{:s}\t'.format(str(epoch))
print_message = '[TRAIN] [Epoch: {:3d}]\n'.format(int(epoch))
for name, loss in losses_dict.items():
write_message += '{:.6f}\t'.format(np.mean(loss))
print_message += name + ': {:.3f} '.format(np.mean(loss))
self.train_writer.add_scalar('loss_'+name, np.mean(loss), epoch)
metrics_dict = self.get_epoch_metrics(output_dict)
for name, metric in metrics_dict.items():
write_message += '{:.6f}\t'.format(metric)
print_message += name + ': {:.3f} '.format(metric)
self.train_writer.add_scalar('metric_'+name, metric, epoch)
train_time_msg = 'Training time used: {:.3f}s'.format(train_time)
print_message += '\n' + train_time_msg
with open(self.train_log_filename, 'a') as log_file:
log_file.write(train_time_msg + '\n')
current_lr_msg = 'Learning rate for this epoch: {:.7f}'.format(current_lr)
print_message += '\n' + current_lr_msg
self.train_writer.add_scalar('lr', current_lr, epoch)
with open(self.train_summary_filename, 'a') as log_file:
log_file.write(write_message + '\n')
print(print_message)
def print_test_log(self, epoch, iteration, losses_dict, metrics_dict, batch_size, dataset_size):
"""
print performance metrics of this iteration on console and save the message to the disk
Parameters:
epoch (int) -- epoch number of this testing model
iteration (int) -- current testing iteration during this epoch
losses_dict (OrderedDict) -- testing losses stored in the ordered dict
metrics_dict (OrderedDict) -- metrics stored in the ordered dict
batch_size (int) -- batch size of testing
dataset_size (int) -- size of the testing dataset
"""
data_point_covered = min((iteration + 1) * batch_size, dataset_size)
message = '[TEST] [Epoch: {:3d} Iter: {:4d}] '.format(int(epoch), data_point_covered)
for name, loss in losses_dict.items():
message += '{:s}: {:.3f} '.format(name, loss[-1])
for name, metric in metrics_dict.items():
message += '{:s}: {:.3f} '.format(name, metric)
print(message)
with open(self.test_log_filename, 'a') as log_file:
log_file.write(message + '\n')
def print_test_summary(self, epoch, losses_dict, output_dict, test_time):
"""
print the summary of this testing epoch
Parameters:
epoch (int) -- epoch number of this testing model
losses_dict (OrderedDict) -- the losses dictionary
output_dict (OrderedDict) -- the downstream output dictionary
test_time (float) -- time used for testing this epoch
"""
write_message = '{:s}\t'.format(str(epoch))
print_message = '[TEST] [Epoch: {:3d}] '.format(int(epoch))
for name, loss in losses_dict.items():
# write_message += '{:.6f}\t'.format(np.mean(loss))
print_message += name + ': {:.3f} '.format(np.mean(loss))
self.test_writer.add_scalar('loss_'+name, np.mean(loss), epoch)
metrics_dict = self.get_epoch_metrics(output_dict)
for name, metric in metrics_dict.items():
write_message += '{:.6f}\t'.format(metric)
print_message += name + ': {:.3f} '.format(metric)
self.test_writer.add_scalar('metric_' + name, metric, epoch)
with open(self.test_summary_filename, 'a') as log_file:
log_file.write(write_message + '\n')
test_time_msg = 'Testing time used: {:.3f}s'.format(test_time)
print_message += '\n' + test_time_msg
print(print_message)
with open(self.test_log_filename, 'a') as log_file:
log_file.write(test_time_msg + '\n')
def get_epoch_metrics(self, output_dict):
"""
Get the downstream task metrics for whole epoch
Parameters:
output_dict (OrderedDict) -- the output dictionary used to compute the downstream task metrics
"""
if self.param.downstream_task == 'classification':
y_true = output_dict['y_true'].cpu().numpy()
y_true_binary = label_binarize(y_true, classes=range(self.param.class_num))
y_pred = output_dict['y_pred'].cpu().numpy()
y_prob = output_dict['y_prob'].cpu().numpy()
if self.param.class_num == 2:
y_prob = y_prob[:, 1]
accuracy = sk.metrics.accuracy_score(y_true, y_pred)
precision = sk.metrics.precision_score(y_true, y_pred, average='macro', zero_division=0)
recall = sk.metrics.recall_score(y_true, y_pred, average='macro', zero_division=0)
f1 = sk.metrics.f1_score(y_true, y_pred, average='macro', zero_division=0)
try:
auc = sk.metrics.roc_auc_score(y_true_binary, y_prob, multi_class='ovo', average='macro')
except ValueError:
auc = -1
print('ValueError: ROC AUC score is not defined in this case.')
return {'accuracy': accuracy, 'precision': precision, 'recall': recall, 'f1': f1, 'auc': auc}
elif self.param.downstream_task == 'regression':
y_true = output_dict['y_true'].cpu().numpy()
y_pred = output_dict['y_pred'].cpu().detach().numpy()
mse = sk.metrics.mean_squared_error(y_true, y_pred)
rmse = sk.metrics.mean_squared_error(y_true, y_pred, squared=False)
mae = sk.metrics.mean_absolute_error(y_true, y_pred)
medae = sk.metrics.median_absolute_error(y_true, y_pred)
r2 = sk.metrics.r2_score(y_true, y_pred)
return {'mse': mse, 'rmse': rmse, 'mae': mae, 'medae': medae, 'r2': r2}
elif self.param.downstream_task == 'survival':
metrics_start_time = time.time()
y_true_E = output_dict['y_true_E'].cpu().numpy()
y_true_T = output_dict['y_true_T'].cpu().numpy()
y_pred_risk = output_dict['risk'].cpu().numpy()
y_pred_survival = output_dict['survival'].cpu().numpy()
time_points = util.get_time_points(self.param.survival_T_max, self.param.time_num)
try:
c_index = metrics.c_index(y_true_T, y_true_E, y_pred_risk)
except ValueError:
c_index = -1
print('ValueError: NaNs detected in input when calculating c-index.')
try:
ibs = metrics.ibs(y_true_T, y_true_E, y_pred_survival, time_points)
except ValueError:
ibs = -1
print('ValueError: NaNs detected in input when calculating integrated brier score.')
metrics_time = time.time() - metrics_start_time
print('Metrics computing time: {:.3f}s'.format(metrics_time))
return {'c-index': c_index, 'ibs': ibs}
elif self.param.downstream_task == 'multitask':
metrics_start_time = time.time()
# Survival
y_true_E = output_dict['y_true_E'].cpu().numpy()
y_true_T = output_dict['y_true_T'].cpu().numpy()
y_pred_risk = output_dict['risk'].cpu().numpy()
y_pred_survival = output_dict['survival'].cpu().numpy()
time_points = util.get_time_points(self.param.survival_T_max, self.param.time_num)
try:
c_index = metrics.c_index(y_true_T, y_true_E, y_pred_risk)
except ValueError:
c_index = -1
print('ValueError: NaNs detected in input when calculating c-index.')
try:
ibs = metrics.ibs(y_true_T, y_true_E, y_pred_survival, time_points)
except ValueError:
ibs = -1
print('ValueError: NaNs detected in input when calculating integrated brier score.')
# Classification
y_true_cla = output_dict['y_true_cla'].cpu().numpy()
y_true_cla_binary = label_binarize(y_true_cla, classes=range(self.param.class_num))
y_pred_cla = output_dict['y_pred_cla'].cpu().numpy()
y_prob_cla = output_dict['y_prob_cla'].cpu().numpy()
if self.param.class_num == 2:
y_prob_cla = y_prob_cla[:, 1]
accuracy = sk.metrics.accuracy_score(y_true_cla, y_pred_cla)
precision = sk.metrics.precision_score(y_true_cla, y_pred_cla, average='macro', zero_division=0)
recall = sk.metrics.recall_score(y_true_cla, y_pred_cla, average='macro', zero_division=0)
f1 = sk.metrics.f1_score(y_true_cla, y_pred_cla, average='macro', zero_division=0)
'''
try:
auc = sk.metrics.roc_auc_score(y_true_cla_binary, y_prob_cla, multi_class='ovo', average='macro')
except ValueError:
auc = -1
print('ValueError: ROC AUC score is not defined in this case.')
'''
# Regression
y_true_reg = output_dict['y_true_reg'].cpu().numpy()
y_pred_reg = output_dict['y_pred_reg'].cpu().detach().numpy()
# mse = sk.metrics.mean_squared_error(y_true_reg, y_pred_reg)
rmse = sk.metrics.mean_squared_error(y_true_reg, y_pred_reg, squared=False)
mae = sk.metrics.mean_absolute_error(y_true_reg, y_pred_reg)
medae = sk.metrics.median_absolute_error(y_true_reg, y_pred_reg)
r2 = sk.metrics.r2_score(y_true_reg, y_pred_reg)
metrics_time = time.time() - metrics_start_time
print('Metrics computing time: {:.3f}s'.format(metrics_time))
return {'c-index': c_index, 'ibs': ibs, 'accuracy': accuracy, 'precision': precision, 'recall': recall, 'f1': f1, 'rmse': rmse, 'mae': mae, 'medae': medae, 'r2': r2}
elif self.param.downstream_task == 'alltask':
metrics_start_time = time.time()
# Survival
y_true_E = output_dict['y_true_E'].cpu().numpy()
y_true_T = output_dict['y_true_T'].cpu().numpy()
y_pred_risk = output_dict['risk'].cpu().numpy()
y_pred_survival = output_dict['survival'].cpu().numpy()
time_points = util.get_time_points(self.param.survival_T_max, self.param.time_num)
try:
c_index = metrics.c_index(y_true_T, y_true_E, y_pred_risk)
except ValueError:
c_index = -1
print('ValueError: NaNs detected in input when calculating c-index.')
try:
ibs = metrics.ibs(y_true_T, y_true_E, y_pred_survival, time_points)
except ValueError:
ibs = -1
print('ValueError: NaNs detected in input when calculating integrated brier score.')
# Classification
accuracy = []
f1 = []
auc = []
for i in range(self.param.task_num - 2):
y_true_cla = output_dict['y_true_cla'][i].cpu().numpy()
y_true_cla_binary = label_binarize(y_true_cla, classes=range(self.param.class_num[i]))
y_pred_cla = output_dict['y_pred_cla'][i].cpu().numpy()
y_prob_cla = output_dict['y_prob_cla'][i].cpu().numpy()
if self.param.class_num[i] == 2:
y_prob_cla = y_prob_cla[:, 1]
accuracy.append(sk.metrics.accuracy_score(y_true_cla, y_pred_cla))
f1.append(sk.metrics.f1_score(y_true_cla, y_pred_cla, average='macro', zero_division=0))
try:
auc.append(sk.metrics.roc_auc_score(y_true_cla_binary, y_prob_cla, multi_class='ovo', average='macro'))
except ValueError:
auc.append(-1)
print('ValueError: ROC AUC score is not defined in this case.')
# Regression
y_true_reg = output_dict['y_true_reg'].cpu().numpy()
y_pred_reg = output_dict['y_pred_reg'].cpu().detach().numpy()
# mse = sk.metrics.mean_squared_error(y_true_reg, y_pred_reg)
rmse = sk.metrics.mean_squared_error(y_true_reg, y_pred_reg, squared=False)
# mae = sk.metrics.mean_absolute_error(y_true_reg, y_pred_reg)
# medae = sk.metrics.median_absolute_error(y_true_reg, y_pred_reg)
r2 = sk.metrics.r2_score(y_true_reg, y_pred_reg)
metrics_time = time.time() - metrics_start_time
print('Metrics computing time: {:.3f}s'.format(metrics_time))
return {'c-index': c_index, 'ibs': ibs, 'accuracy_1': accuracy[0], 'f1_1': f1[0], 'auc_1': auc[0], 'accuracy_2': accuracy[1], 'f1_2': f1[1], 'auc_2': auc[1], 'accuracy_3': accuracy[2], 'f1_3': f1[2], 'auc_3': auc[2], 'accuracy_4': accuracy[3], 'f1_4': f1[3], 'auc_4': auc[3], 'accuracy_5': accuracy[4], 'f1_5': f1[4], 'auc_5': auc[4], 'rmse': rmse, 'r2': r2}
def save_output_dict(self, output_dict):
"""
Save the downstream task output to disk
Parameters:
output_dict (OrderedDict) -- the downstream task output dictionary to be saved
"""
down_path = os.path.join(self.output_path, 'down_output')
util.mkdir(down_path)
if self.param.downstream_task == 'classification':
# Prepare files
index = output_dict['index'].numpy()
y_true = output_dict['y_true'].cpu().numpy()
y_pred = output_dict['y_pred'].cpu().numpy()
y_prob = output_dict['y_prob'].cpu().numpy()
sample_list = self.param.sample_list[index]
# Output files
y_df = pd.DataFrame({'sample': sample_list, 'y_true': y_true, 'y_pred': y_pred}, index=index)  # target API: pandas.DataFrame
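# --- Illustrative sketch (not part of the original source) -------------------
# A minimal, standalone example of the DataFrame-building pattern used just
# above, with made-up values; the '_'-prefixed names are stand-ins, not the
# real pipeline outputs.
import pandas as pd

_sample_list = ['case_01', 'case_02', 'case_03']   # hypothetical sample IDs
_y_true = [0, 1, 1]                                # hypothetical true labels
_y_pred = [0, 1, 0]                                # hypothetical predicted labels
_example_df = pd.DataFrame({'sample': _sample_list, 'y_true': _y_true, 'y_pred': _y_pred})
_example_df.to_csv('classification_output_example.csv', index=False)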
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import os
import operator
import unittest
import cStringIO as StringIO
import nose
from numpy import nan
import numpy as np
import numpy.ma as ma
from pandas import Index, Series, TimeSeries, DataFrame, isnull, notnull
from pandas.core.index import MultiIndex
import pandas.core.datetools as datetools
from pandas.util import py3compat
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
#-------------------------------------------------------------------------------
# Series test cases
JOIN_TYPES = ['inner', 'outer', 'left', 'right']
class CheckNameIntegration(object):
def test_scalarop_preserve_name(self):
result = self.ts * 2
self.assertEquals(result.name, self.ts.name)
def test_copy_name(self):
result = self.ts.copy()
self.assertEquals(result.name, self.ts.name)
# def test_copy_index_name_checking(self):
# # don't want to be able to modify the index stored elsewhere after
# # making a copy
# self.ts.index.name = None
# cp = self.ts.copy()
# cp.index.name = 'foo'
# self.assert_(self.ts.index.name is None)
def test_append_preserve_name(self):
result = self.ts[:5].append(self.ts[5:])
self.assertEquals(result.name, self.ts.name)
def test_binop_maybe_preserve_name(self):
# names match, preserve
result = self.ts * self.ts
self.assertEquals(result.name, self.ts.name)
result = self.ts * self.ts[:-2]
self.assertEquals(result.name, self.ts.name)
# names don't match, don't preserve
cp = self.ts.copy()
cp.name = 'something else'
result = self.ts + cp
self.assert_(result.name is None)
def test_combine_first_name(self):
result = self.ts.combine_first(self.ts[:5])
self.assertEquals(result.name, self.ts.name)
def test_getitem_preserve_name(self):
result = self.ts[self.ts > 0]
self.assertEquals(result.name, self.ts.name)
result = self.ts[[0, 2, 4]]
self.assertEquals(result.name, self.ts.name)
result = self.ts[5:10]
self.assertEquals(result.name, self.ts.name)
def test_multilevel_name_print(self):
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
s = Series(range(0,len(index)), index=index, name='sth')
expected = ["first second",
"foo one 0",
" two 1",
" three 2",
"bar one 3",
" two 4",
"baz two 5",
" three 6",
"qux one 7",
" two 8",
" three 9",
"Name: sth"]
expected = "\n".join(expected)
self.assertEquals(repr(s), expected)
def test_multilevel_preserve_name(self):
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
s = Series(np.random.randn(len(index)), index=index, name='sth')
result = s['foo']
result2 = s.ix['foo']
self.assertEquals(result.name, s.name)
self.assertEquals(result2.name, s.name)
def test_name_printing(self):
# test small series
s = Series([0, 1, 2])
s.name = "test"
self.assert_("Name: test" in repr(s))
s.name = None
self.assert_(not "Name:" in repr(s))
# test big series (diff code path)
s = Series(range(0,1000))
s.name = "test"
self.assert_("Name: test" in repr(s))
s.name = None
self.assert_(not "Name:" in repr(s))
def test_pickle_preserve_name(self):
unpickled = self._pickle_roundtrip(self.ts)
self.assertEquals(unpickled.name, self.ts.name)
def _pickle_roundtrip(self, obj):
obj.save('__tmp__')
unpickled = Series.load('__tmp__')
os.remove('__tmp__')
return unpickled
def test_argsort_preserve_name(self):
result = self.ts.argsort()
self.assertEquals(result.name, self.ts.name)
def test_sort_index_name(self):
result = self.ts.sort_index(ascending=False)
self.assertEquals(result.name, self.ts.name)
def test_to_sparse_pass_name(self):
result = self.ts.to_sparse()
self.assertEquals(result.name, self.ts.name)
class SafeForSparse(object):
pass
class TestSeries(unittest.TestCase, CheckNameIntegration):
def setUp(self):
self.ts = tm.makeTimeSeries()
self.ts.name = 'ts'
self.series = tm.makeStringSeries()
self.series.name = 'series'
self.objSeries = tm.makeObjectSeries()
self.objSeries.name = 'objects'
self.empty = Series([], index=[])
def test_constructor(self):
# Recognize TimeSeries
self.assert_(isinstance(self.ts, TimeSeries))
# Pass in Series
derived = Series(self.ts)
self.assert_(isinstance(derived, TimeSeries))
self.assert_(tm.equalContents(derived.index, self.ts.index))
# Ensure new index is not created
self.assertEquals(id(self.ts.index), id(derived.index))
# Pass in scalar
scalar = Series(0.5)
self.assert_(isinstance(scalar, float))
# Mixed type Series
mixed = Series(['hello', np.NaN], index=[0, 1])
self.assert_(mixed.dtype == np.object_)
self.assert_(mixed[1] is np.NaN)
self.assert_(not isinstance(self.empty, TimeSeries))
self.assert_(not isinstance(Series({}), TimeSeries))
self.assertRaises(Exception, Series, np.random.randn(3, 3),
index=np.arange(3))
def test_constructor_empty(self):
empty = Series()
empty2 = Series([])
assert_series_equal(empty, empty2)
empty = Series(index=range(10))
empty2 = Series(np.nan, index=range(10))
assert_series_equal(empty, empty2)
def test_constructor_maskedarray(self):
data = ma.masked_all((3,), dtype=float)
result = Series(data)
expected = Series([nan, nan, nan])
assert_series_equal(result, expected)
data[0] = 0.0
data[2] = 2.0
index = ['a', 'b', 'c']
result = Series(data, index=index)
expected = Series([0.0, nan, 2.0], index=index)
assert_series_equal(result, expected)
def test_constructor_default_index(self):
s = Series([0, 1, 2])
assert_almost_equal(s.index, np.arange(3))
def test_constructor_corner(self):
df = tm.makeTimeDataFrame()
objs = [df, df]
s = Series(objs, index=[0, 1])
self.assert_(isinstance(s, Series))
def test_constructor_cast(self):
self.assertRaises(ValueError, Series, ['a', 'b', 'c'], dtype=float)
def test_constructor_dict(self):
d = {'a' : 0., 'b' : 1., 'c' : 2.}
result = Series(d, index=['b', 'c', 'd', 'a'])
expected = Series([1, 2, nan, 0], index=['b', 'c', 'd', 'a'])
assert_series_equal(result, expected)
def test_constructor_list_of_tuples(self):
data = [(1, 1), (2, 2), (2, 3)]
s = Series(data)
self.assertEqual(list(s), data)
def test_constructor_tuple_of_tuples(self):
data = ((1, 1), (2, 2), (2, 3))
s = Series(data)
self.assertEqual(tuple(s), data)
def test_fromDict(self):
data = {'a' : 0, 'b' : 1, 'c' : 2, 'd' : 3}
series = Series(data)
self.assert_(tm.is_sorted(series.index))
data = {'a' : 0, 'b' : '1', 'c' : '2', 'd' : datetime.now()}
series = Series(data)
self.assert_(series.dtype == np.object_)
data = {'a' : 0, 'b' : '1', 'c' : '2', 'd' : '3'}
series = Series(data)
self.assert_(series.dtype == np.object_)
data = {'a' : '0', 'b' : '1'}
series = Series(data, dtype=float)
self.assert_(series.dtype == np.float64)
def test_setindex(self):
# wrong type
series = self.series.copy()
self.assertRaises(TypeError, setattr, series, 'index', None)
# wrong length
series = self.series.copy()
self.assertRaises(AssertionError, setattr, series, 'index',
np.arange(len(series) - 1))
# works
series = self.series.copy()
series.index = np.arange(len(series))
self.assert_(isinstance(series.index, Index))
def test_array_finalize(self):
pass
def test_fromValue(self):
nans = Series(np.NaN, index=self.ts.index)
self.assert_(nans.dtype == np.float_)
self.assertEqual(len(nans), len(self.ts))
strings = Series('foo', index=self.ts.index)
self.assert_(strings.dtype == np.object_)
self.assertEqual(len(strings), len(self.ts))
d = datetime.now()
dates = Series(d, index=self.ts.index)
self.assert_(dates.dtype == np.object_)
self.assertEqual(len(dates), len(self.ts))
def test_contains(self):
tm.assert_contains_all(self.ts.index, self.ts)
def test_pickle(self):
unp_series = self._pickle_roundtrip(self.series)
unp_ts = self._pickle_roundtrip(self.ts)
assert_series_equal(unp_series, self.series)
assert_series_equal(unp_ts, self.ts)
def _pickle_roundtrip(self, obj):
obj.save('__tmp__')
unpickled = Series.load('__tmp__')
os.remove('__tmp__')
return unpickled
def test_getitem_get(self):
idx1 = self.series.index[5]
idx2 = self.objSeries.index[5]
self.assertEqual(self.series[idx1], self.series.get(idx1))
self.assertEqual(self.objSeries[idx2], self.objSeries.get(idx2))
self.assertEqual(self.series[idx1], self.series[5])
self.assertEqual(self.objSeries[idx2], self.objSeries[5])
self.assert_(self.series.get(-1) is None)
self.assertEqual(self.series[5], self.series.get(self.series.index[5]))
# missing
d = self.ts.index[0] - datetools.bday
self.assertRaises(KeyError, self.ts.__getitem__, d)
def test_iget(self):
s = Series(np.random.randn(10), index=range(0, 20, 2))
for i in range(len(s)):
result = s.iget(i)
exp = s[s.index[i]]
assert_almost_equal(result, exp)
# pass a slice
result = s.iget(slice(1, 3))
expected = s.ix[2:4]
assert_series_equal(result, expected)
def test_getitem_regression(self):
s = Series(range(5), index=range(5))
result = s[range(5)]
assert_series_equal(result, s)
def test_getitem_slice_bug(self):
s = Series(range(10), range(10))
result = s[-12:]
assert_series_equal(result, s)
result = s[-7:]
assert_series_equal(result, s[3:])
result = s[:-12]
assert_series_equal(result, s[:0])
def test_getitem_int64(self):
idx = np.int64(5)
self.assertEqual(self.ts[idx], self.ts[5])
def test_getitem_fancy(self):
slice1 = self.series[[1,2,3]]
slice2 = self.objSeries[[1,2,3]]
self.assertEqual(self.series.index[2], slice1.index[1])
self.assertEqual(self.objSeries.index[2], slice2.index[1])
self.assertEqual(self.series[2], slice1[1])
self.assertEqual(self.objSeries[2], slice2[1])
def test_getitem_boolean(self):
s = self.series
mask = s > s.median()
# passing list is OK
result = s[list(mask)]
expected = s[mask]
assert_series_equal(result, expected)
self.assert_(np.array_equal(result.index, s.index[mask]))
def test_getitem_generator(self):
gen = (x > 0 for x in self.series)
result = self.series[gen]
result2 = self.series[iter(self.series > 0)]
expected = self.series[self.series > 0]
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
def test_getitem_boolean_object(self):
# using column from DataFrame
s = self.series
mask = s > s.median()
omask = mask.astype(object)
# getitem
result = s[omask]
expected = s[mask]
assert_series_equal(result, expected)
# setitem
cop = s.copy()
cop[omask] = 5
s[mask] = 5
assert_series_equal(cop, s)
# nans raise exception
omask[5:10] = np.nan
self.assertRaises(Exception, s.__getitem__, omask)
self.assertRaises(Exception, s.__setitem__, omask, 5)
def test_getitem_setitem_boolean_corner(self):
ts = self.ts
mask_shifted = ts.shift(1, offset=datetools.bday) > ts.median()
self.assertRaises(Exception, ts.__getitem__, mask_shifted)
self.assertRaises(Exception, ts.__setitem__, mask_shifted, 1)
self.assertRaises(Exception, ts.ix.__getitem__, mask_shifted)
self.assertRaises(Exception, ts.ix.__setitem__, mask_shifted, 1)
def test_getitem_setitem_slice_integers(self):
s = Series(np.random.randn(8), index=[2, 4, 6, 8, 10, 12, 14, 16])
result = s[:4]
expected = s.reindex([2, 4, 6, 8])
assert_series_equal(result, expected)
s[:4] = 0
self.assert_((s[:4] == 0).all())
self.assert_(not (s[4:] == 0).any())
def test_getitem_out_of_bounds(self):
# don't segfault, GH #495
self.assertRaises(IndexError, self.ts.__getitem__, len(self.ts))
def test_getitem_box_float64(self):
value = self.ts[5]
self.assert_(isinstance(value, np.float64))
def test_getitem_ambiguous_keyerror(self):
s = Series(range(10), index=range(0, 20, 2))
self.assertRaises(KeyError, s.__getitem__, 1)
self.assertRaises(KeyError, s.ix.__getitem__, 1)
def test_setitem_ambiguous_keyerror(self):
s = Series(range(10), index=range(0, 20, 2))
self.assertRaises(KeyError, s.__setitem__, 1, 5)
self.assertRaises(KeyError, s.ix.__setitem__, 1, 5)
def test_slice(self):
numSlice = self.series[10:20]
numSliceEnd = self.series[-10:]
objSlice = self.objSeries[10:20]
self.assert_(self.series.index[9] not in numSlice.index)
self.assert_(self.objSeries.index[9] not in objSlice.index)
self.assertEqual(len(numSlice), len(numSlice.index))
self.assertEqual(self.series[numSlice.index[0]],
numSlice[numSlice.index[0]])
self.assertEqual(numSlice.index[1], self.series.index[11])
self.assert_(tm.equalContents(numSliceEnd,
np.array(self.series)[-10:]))
# test return view
sl = self.series[10:20]
sl[:] = 0
self.assert_((self.series[10:20] == 0).all())
def test_slice_can_reorder_not_uniquely_indexed(self):
s = Series(1, index=['a', 'a', 'b', 'b', 'c'])
result = s[::-1] # it works!
def test_setitem(self):
self.ts[self.ts.index[5]] = np.NaN
self.ts[[1,2,17]] = np.NaN
self.ts[6] = np.NaN
self.assert_(np.isnan(self.ts[6]))
self.assert_(np.isnan(self.ts[2]))
self.ts[np.isnan(self.ts)] = 5
self.assert_(not np.isnan(self.ts[2]))
# caught this bug when writing tests
series = Series(tm.makeIntIndex(20).astype(float),
index=tm.makeIntIndex(20))
series[::2] = 0
self.assert_((series[::2] == 0).all())
# set item that's not contained
self.assertRaises(Exception, self.series.__setitem__,
'foobar', 1)
def test_set_value(self):
idx = self.ts.index[10]
res = self.ts.set_value(idx, 0)
self.assert_(res is self.ts)
self.assertEqual(self.ts[idx], 0)
res = self.series.set_value('foobar', 0)
self.assert_(res is not self.series)
self.assert_(res.index[-1] == 'foobar')
self.assertEqual(res['foobar'], 0)
def test_setslice(self):
sl = self.ts[5:20]
self.assertEqual(len(sl), len(sl.index))
self.assertEqual(len(sl.index.indexMap), len(sl.index))
def test_basic_getitem_setitem_corner(self):
# invalid tuples, e.g. self.ts[:, None] vs. self.ts[:, 2]
self.assertRaises(Exception, self.ts.__getitem__,
(slice(None, None), 2))
self.assertRaises(Exception, self.ts.__setitem__,
(slice(None, None), 2), 2)
# weird lists. [slice(0, 5)] will work but not two slices
result = self.ts[[slice(None, 5)]]
expected = self.ts[:5]
assert_series_equal(result, expected)
# OK
self.assertRaises(Exception, self.ts.__getitem__,
[5, slice(None, None)])
self.assertRaises(Exception, self.ts.__setitem__,
[5, slice(None, None)], 2)
def test_basic_getitem_with_labels(self):
indices = self.ts.index[[5, 10, 15]]
result = self.ts[indices]
expected = self.ts.reindex(indices)
assert_series_equal(result, expected)
result = self.ts[indices[0]:indices[2]]
expected = self.ts.ix[indices[0]:indices[2]]
assert_series_equal(result, expected)
# integer indexes, be careful
s = Series(np.random.randn(10), index=range(0, 20, 2))
inds = [0, 2, 5, 7, 8]
arr_inds = np.array([0, 2, 5, 7, 8])
result = s[inds]
expected = s.reindex(inds)
assert_series_equal(result, expected)
result = s[arr_inds]
expected = s.reindex(arr_inds)
assert_series_equal(result, expected)
def test_basic_setitem_with_labels(self):
indices = self.ts.index[[5, 10, 15]]
cp = self.ts.copy()
exp = self.ts.copy()
cp[indices] = 0
exp.ix[indices] = 0
assert_series_equal(cp, exp)
cp = self.ts.copy()
exp = self.ts.copy()
cp[indices[0]:indices[2]] = 0
exp.ix[indices[0]:indices[2]] = 0
assert_series_equal(cp, exp)
# integer indexes, be careful
s = Series(np.random.randn(10), index=range(0, 20, 2))
inds = [0, 4, 6]
arr_inds = np.array([0, 4, 6])
cp = s.copy()
exp = s.copy()
s[inds] = 0
s.ix[inds] = 0
assert_series_equal(cp, exp)
cp = s.copy()
exp = s.copy()
s[arr_inds] = 0
s.ix[arr_inds] = 0
assert_series_equal(cp, exp)
inds_notfound = [0, 4, 5, 6]
arr_inds_notfound = np.array([0, 4, 5, 6])
self.assertRaises(Exception, s.__setitem__, inds_notfound, 0)
self.assertRaises(Exception, s.__setitem__, arr_inds_notfound, 0)
def test_ix_getitem(self):
inds = self.series.index[[3,4,7]]
assert_series_equal(self.series.ix[inds], self.series.reindex(inds))
assert_series_equal(self.series.ix[5::2], self.series[5::2])
# slice with indices
d1, d2 = self.ts.index[[5, 15]]
result = self.ts.ix[d1:d2]
expected = self.ts.truncate(d1, d2)
assert_series_equal(result, expected)
# boolean
mask = self.series > self.series.median()
assert_series_equal(self.series.ix[mask], self.series[mask])
# ask for index value
self.assertEquals(self.ts.ix[d1], self.ts[d1])
self.assertEquals(self.ts.ix[d2], self.ts[d2])
def test_ix_getitem_not_monotonic(self):
d1, d2 = self.ts.index[[5, 15]]
ts2 = self.ts[::2][::-1]
self.assertRaises(KeyError, ts2.ix.__getitem__, slice(d1, d2))
self.assertRaises(KeyError, ts2.ix.__setitem__, slice(d1, d2), 0)
def test_ix_getitem_setitem_integer_slice_keyerrors(self):
s = Series(np.random.randn(10), index=range(0, 20, 2))
# this is OK
cp = s.copy()
cp.ix[4:10] = 0
self.assert_((cp.ix[4:10] == 0).all())
# so is this
cp = s.copy()
cp.ix[3:11] = 0
self.assert_((cp.ix[3:11] == 0).values.all())
result = s.ix[4:10]
result2 = s.ix[3:11]
expected = s.reindex([4, 6, 8, 10])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
# non-monotonic, raise KeyError
s2 = s[::-1]
self.assertRaises(KeyError, s2.ix.__getitem__, slice(3, 11))
self.assertRaises(KeyError, s2.ix.__setitem__, slice(3, 11), 0)
def test_ix_getitem_iterator(self):
idx = iter(self.series.index[:10])
result = self.series.ix[idx]
assert_series_equal(result, self.series[:10])
def test_ix_setitem(self):
inds = self.series.index[[3,4,7]]
result = self.series.copy()
result.ix[inds] = 5
expected = self.series.copy()
expected[[3,4,7]] = 5
assert_series_equal(result, expected)
result.ix[5:10] = 10
expected[5:10] = 10
assert_series_equal(result, expected)
# set slice with indices
d1, d2 = self.series.index[[5, 15]]
result.ix[d1:d2] = 6
expected[5:16] = 6 # because it's inclusive
assert_series_equal(result, expected)
# set index value
self.series.ix[d1] = 4
self.series.ix[d2] = 6
self.assertEquals(self.series[d1], 4)
self.assertEquals(self.series[d2], 6)
def test_ix_setitem_boolean(self):
mask = self.series > self.series.median()
result = self.series.copy()
result.ix[mask] = 0
expected = self.series
expected[mask] = 0
assert_series_equal(result, expected)
def test_ix_setitem_corner(self):
inds = list(self.series.index[[5, 8, 12]])
self.series.ix[inds] = 5
self.assertRaises(Exception, self.series.ix.__setitem__,
inds + ['foo'], 5)
def test_get_set_boolean_different_order(self):
ordered = self.series.order()
# setting
copy = self.series.copy()
copy[ordered > 0] = 0
expected = self.series.copy()
expected[expected > 0] = 0
assert_series_equal(copy, expected)
# getting
sel = self.series[ordered > 0]
exp = self.series[self.series > 0]
assert_series_equal(sel, exp)
def test_repr(self):
str(self.ts)
str(self.series)
str(self.series.astype(int))
str(self.objSeries)
str(Series(tm.randn(1000), index=np.arange(1000)))
str(Series(tm.randn(1000), index=np.arange(1000, 0, step=-1)))
# empty
str(self.empty)
# with NaNs
self.series[5:7] = np.NaN
str(self.series)
# tuple name, e.g. from hierarchical index
self.series.name = ('foo', 'bar', 'baz')
repr(self.series)
biggie = Series(tm.randn(1000), index=np.arange(1000),
name=('foo', 'bar', 'baz'))
repr(biggie)
def test_to_string(self):
from cStringIO import StringIO
buf = StringIO()
s = self.ts.to_string()
retval = self.ts.to_string(buf=buf)
self.assert_(retval is None)
self.assertEqual(buf.getvalue().strip(), s)
# pass float_format
format = '%.4f'.__mod__
result = self.ts.to_string(float_format=format)
result = [x.split()[1] for x in result.split('\n')]
expected = [format(x) for x in self.ts]
self.assertEqual(result, expected)
# empty string
result = self.ts[:0].to_string()
self.assertEqual(result, '')
result = self.ts[:0].to_string(length=0)
self.assertEqual(result, '')
# name and length
cp = self.ts.copy()
cp.name = 'foo'
result = cp.to_string(length=True, name=True)
last_line = result.split('\n')[-1].strip()
self.assertEqual(last_line, "Name: foo, Length: %d" % len(cp))
def test_to_string_mixed(self):
s = Series(['foo', np.nan, -1.23, 4.56])
result = s.to_string()
expected = ('0 foo\n'
'1 NaN\n'
'2 -1.23\n'
'3 4.56')
self.assertEqual(result, expected)
# but don't count NAs as floats
s = Series(['foo', np.nan, 'bar', 'baz'])
result = s.to_string()
expected = ('0 foo\n'
'1 NaN\n'
'2 bar\n'
'3 baz')
self.assertEqual(result, expected)
s = Series(['foo', 5, 'bar', 'baz'])
result = s.to_string()
expected = ('0 foo\n'
'1 5\n'
'2 bar\n'
'3 baz')
self.assertEqual(result, expected)
def test_to_string_float_na_spacing(self):
s = Series([0., 1.5678, 2., -3., 4.])
s[::2] = np.nan
result = s.to_string()
expected = ('0 NaN\n'
'1 1.568\n'
'2 NaN\n'
'3 -3.000\n'
'4 NaN')
self.assertEqual(result, expected)
def test_iter(self):
for i, val in enumerate(self.series):
self.assertEqual(val, self.series[i])
for i, val in enumerate(self.ts):
self.assertEqual(val, self.ts[i])
def test_keys(self):
# HACK: By doing this in two stages, we avoid 2to3 wrapping the call
# to .keys() in a list()
getkeys = self.ts.keys
self.assert_(getkeys() is self.ts.index)
def test_values(self):
self.assert_(np.array_equal(self.ts, self.ts.values))
def test_iteritems(self):
for idx, val in self.series.iteritems():
self.assertEqual(val, self.series[idx])
for idx, val in self.ts.iteritems():
self.assertEqual(val, self.ts[idx])
def test_sum(self):
self._check_stat_op('sum', np.sum)
def test_sum_inf(self):
s = Series(np.random.randn(10))
s2 = s.copy()
s[5:8] = np.inf
s2[5:8] = np.nan
assert_almost_equal(s.sum(), s2.sum())
import pandas.core.nanops as nanops
arr = np.random.randn(100, 100).astype('f4')
arr[:, 2] = np.inf
res = nanops.nansum(arr, axis=1)
expected = nanops._nansum(arr, axis=1)
assert_almost_equal(res, expected)
def test_mean(self):
self._check_stat_op('mean', np.mean)
def test_median(self):
self._check_stat_op('median', np.median)
# test with integers, test failure
int_ts = TimeSeries(np.ones(10, dtype=int), index=range(10))
self.assertAlmostEqual(np.median(int_ts), int_ts.median())
def test_prod(self):
self._check_stat_op('prod', np.prod)
def test_min(self):
self._check_stat_op('min', np.min, check_objects=True)
def test_max(self):
self._check_stat_op('max', np.max, check_objects=True)
def test_std(self):
alt = lambda x: np.std(x, ddof=1)
self._check_stat_op('std', alt)
def test_var(self):
alt = lambda x: np.var(x, ddof=1)
self._check_stat_op('var', alt)
def test_skew(self):
from scipy.stats import skew
alt = lambda x: skew(x, bias=False)
self._check_stat_op('skew', alt)
def test_argsort(self):
self._check_accum_op('argsort')
argsorted = self.ts.argsort()
self.assert_(issubclass(argsorted.dtype.type, np.integer))
def test_cumsum(self):
self._check_accum_op('cumsum')
def test_cumprod(self):
self._check_accum_op('cumprod')
def _check_stat_op(self, name, alternate, check_objects=False):
from pandas import DateRange
import pandas.core.nanops as nanops
def testit():
f = getattr(Series, name)
# add some NaNs
self.series[5:15] = np.NaN
# skipna or no
self.assert_(notnull(f(self.series)))
self.assert_(isnull(f(self.series, skipna=False)))
# check the result is correct
nona = self.series.dropna()
assert_almost_equal(f(nona), alternate(nona))
allna = self.series * nan
self.assert_(np.isnan(f(allna)))
# dtype=object with None, it works!
s = Series([1, 2, 3, None, 5])
f(s)
# check DateRange
if check_objects:
s = Series(DateRange('1/1/2000', periods=10))
res = f(s)
exp = alternate(s)
self.assertEqual(res, exp)
testit()
try:
import bottleneck as bn
nanops._USE_BOTTLENECK = False
testit()
nanops._USE_BOTTLENECK = True
except ImportError:
pass
def _check_accum_op(self, name):
func = getattr(np, name)
self.assert_(np.array_equal(func(self.ts), func(np.array(self.ts))))
# with missing values
ts = self.ts.copy()
ts[::2] = np.NaN
result = func(ts)[1::2]
expected = func(np.array(ts.valid()))
self.assert_(np.array_equal(result, expected))
def test_round(self):
# numpy.round doesn't preserve metadata, probably a numpy bug,
# re: GH #314
result = np.round(self.ts, 2)
expected = Series(np.round(self.ts.values, 2), index=self.ts.index)
assert_series_equal(result, expected)
self.assertEqual(result.name, self.ts.name)
def test_prod_numpy16_bug(self):
s = Series([1., 1., 1.] , index=range(3))
result = s.prod()
self.assert_(not isinstance(result, Series))
def test_quantile(self):
from scipy.stats import scoreatpercentile
q = self.ts.quantile(0.1)
self.assertEqual(q, scoreatpercentile(self.ts.valid(), 10))
q = self.ts.quantile(0.9)
self.assertEqual(q, scoreatpercentile(self.ts.valid(), 90))
def test_describe(self):
_ = self.series.describe()
_ = self.ts.describe()
def test_describe_objects(self):
s = Series(['a', 'b', 'b', np.nan, np.nan, np.nan, 'c', 'd', 'a', 'a'])
result = s.describe()
expected = Series({'count' : 7, 'unique' : 4,
'top' : 'a', 'freq' : 3}, index=result.index)
assert_series_equal(result, expected)
def test_append(self):
appendedSeries = self.series.append(self.ts)
for idx, value in appendedSeries.iteritems():
if idx in self.series.index:
self.assertEqual(value, self.series[idx])
elif idx in self.ts.index:
self.assertEqual(value, self.ts[idx])
else:
self.fail("orphaned index!")
self.assertRaises(Exception, self.ts.append, self.ts)
def test_append_many(self):
pieces = [self.ts[:5], self.ts[5:10], self.ts[10:]]
result = pieces[0].append(pieces[1:])
assert_series_equal(result, self.ts)
def test_all_any(self):
np.random.seed(12345)
ts = tm.makeTimeSeries()
bool_series = ts > 0
self.assert_(not bool_series.all())
self.assert_(bool_series.any())
def test_operators(self):
series = self.ts
other = self.ts[::2]
def _check_op(other, op, pos_only=False):
left = np.abs(series) if pos_only else series
right = np.abs(other) if pos_only else other
cython_or_numpy = op(left, right)
python = left.combine(right, op)
tm.assert_almost_equal(cython_or_numpy, python)
def check(other):
simple_ops = ['add', 'sub', 'mul', 'truediv', 'floordiv',
'gt', 'ge', 'lt', 'le']
for opname in simple_ops:
_check_op(other, getattr(operator, opname))
_check_op(other, operator.pow, pos_only=True)
_check_op(other, lambda x, y: operator.add(y, x))
_check_op(other, lambda x, y: operator.sub(y, x))
_check_op(other, lambda x, y: operator.truediv(y, x))
_check_op(other, lambda x, y: operator.floordiv(y, x))
_check_op(other, lambda x, y: operator.mul(y, x))
_check_op(other, lambda x, y: operator.pow(y, x),
pos_only=True)
check(self.ts * 2)
check(self.ts * 0)
check(self.ts[::2])
check(5)
def check_comparators(other):
_check_op(other, operator.gt)
_check_op(other, operator.ge)
_check_op(other, operator.eq)
_check_op(other, operator.lt)
_check_op(other, operator.le)
check_comparators(5)
check_comparators(self.ts + 1)
def test_operators_empty_int_corner(self):
s1 = Series([], [], dtype=np.int32)
s2 = Series({'x' : 0.})
# it works!
_ = s1 * s2
# NumPy limitation =(
# def test_logical_range_select(self):
# np.random.seed(12345)
# selector = -0.5 <= self.ts <= 0.5
# expected = (self.ts >= -0.5) & (self.ts <= 0.5)
# assert_series_equal(selector, expected)
def test_idxmin(self):
# test idxmin
# _check_stat_op approach cannot be used here because of the isnull check.
# add some NaNs
self.series[5:15] = np.NaN
# skipna or no
self.assertEqual(self.series[self.series.idxmin()], self.series.min())
self.assert_(isnull(self.series.idxmin(skipna=False)))
# no NaNs
nona = self.series.dropna()
self.assertEqual(nona[nona.idxmin()], nona.min())
self.assertEqual(nona.index.values.tolist().index(nona.idxmin()),
nona.values.argmin())
# all NaNs
allna = self.series * nan
self.assert_(isnull(allna.idxmin()))
def test_idxmax(self):
# test idxmax
# _check_stat_op approach cannot be used here because of the isnull check.
# add some NaNs
self.series[5:15] = np.NaN
# skipna or no
self.assertEqual(self.series[self.series.idxmax()], self.series.max())
self.assert_(isnull(self.series.idxmax(skipna=False)))
# no NaNs
nona = self.series.dropna()
self.assertEqual(nona[nona.idxmax()], nona.max())
self.assertEqual(nona.index.values.tolist().index(nona.idxmax()),
nona.values.argmax())
# all NaNs
allna = self.series * nan
self.assert_(isnull(allna.idxmax()))
def test_operators_date(self):
result = self.objSeries + timedelta(1)
result = self.objSeries - timedelta(1)
def test_operators_corner(self):
series = self.ts
empty = Series([], index=Index([]))
result = series + empty
self.assert_(np.isnan(result).all())
result = empty + Series([], index=Index([]))
self.assert_(len(result) == 0)
# TODO: this returned NotImplemented earlier, what to do?
# deltas = Series([timedelta(1)] * 5, index=np.arange(5))
# sub_deltas = deltas[::2]
# deltas5 = deltas * 5
# deltas = deltas + sub_deltas
# float + int
int_ts = self.ts.astype(int)[:-5]
added = self.ts + int_ts
expected = self.ts.values[:-5] + int_ts.values
self.assert_(np.array_equal(added[:-5], expected))
def test_operators_reverse_object(self):
# GH 56
arr = Series(np.random.randn(10), index=np.arange(10),
dtype=object)
def _check_op(arr, op):
result = op(1., arr)
expected = op(1., arr.astype(float))
assert_series_equal(result.astype(float), expected)
_check_op(arr, operator.add)
_check_op(arr, operator.sub)
_check_op(arr, operator.mul)
_check_op(arr, operator.truediv)
_check_op(arr, operator.floordiv)
def test_series_frame_radd_bug(self):
from pandas.util.testing import rands
import operator
# GH 353
vals = Series([rands(5) for _ in xrange(10)])
result = 'foo_' + vals
expected = vals.map(lambda x: 'foo_' + x)
assert_series_equal(result, expected)
frame = DataFrame({'vals' : vals})
result = 'foo_' + frame
expected = DataFrame({'vals' : vals.map(lambda x: 'foo_' + x)})
tm.assert_frame_equal(result, expected)
# really raise this time
self.assertRaises(TypeError, operator.add, datetime.now(), self.ts)
def test_operators_frame(self):
# rpow does not work with DataFrame
df = DataFrame({'A' : self.ts})
tm.assert_almost_equal(self.ts + self.ts, (self.ts + df)['A'])
tm.assert_almost_equal(self.ts ** self.ts, (self.ts ** df)['A'])
def test_operators_combine(self):
def _check_fill(meth, op, a, b, fill_value=0):
exp_index = a.index.union(b.index)
a = a.reindex(exp_index)
b = b.reindex(exp_index)
amask = isnull(a)
bmask = isnull(b)
exp_values = []
for i in range(len(exp_index)):
if amask[i]:
if bmask[i]:
exp_values.append(nan)
continue
exp_values.append(op(fill_value, b[i]))
elif bmask[i]:
if amask[i]:
exp_values.append(nan)
continue
exp_values.append(op(a[i], fill_value))
else:
exp_values.append(op(a[i], b[i]))
result = meth(a, b, fill_value=fill_value)
expected = Series(exp_values, exp_index)
assert_series_equal(result, expected)
a = Series([nan, 1., 2., 3., nan], index=np.arange(5))
b = Series([nan, 1, nan, 3, nan, 4.], index=np.arange(6))
ops = [Series.add, Series.sub, Series.mul, Series.div]
equivs = [operator.add, operator.sub, operator.mul]
if py3compat.PY3:
equivs.append(operator.truediv)
else:
equivs.append(operator.div)
fillvals = [0, 0, 1, 1]
for op, equiv_op, fv in zip(ops, equivs, fillvals):
result = op(a, b)
exp = equiv_op(a, b)
assert_series_equal(result, exp)
_check_fill(op, equiv_op, a, b, fill_value=fv)
def test_combine_first(self):
values = tm.makeIntIndex(20).values.astype(float)
series = Series(values, index=tm.makeIntIndex(20))
series_copy = series * 2
series_copy[::2] = np.NaN
# nothing used from the input
combined = series.combine_first(series_copy)
self.assert_(np.array_equal(combined, series))
# Holes filled from input
combined = series_copy.combine_first(series)
self.assert_(np.isfinite(combined).all())
self.assert_(np.array_equal(combined[::2], series[::2]))
self.assert_(np.array_equal(combined[1::2], series_copy[1::2]))
# mixed types
index = tm.makeStringIndex(20)
floats = Series(tm.randn(20), index=index)
strings = Series(tm.makeStringIndex(10), index=index[::2])
combined = strings.combine_first(floats)
tm.assert_dict_equal(strings, combined, compare_keys=False)
tm.assert_dict_equal(floats[1::2], combined, compare_keys=False)
# corner case
s = Series([1., 2, 3], index=[0, 1, 2])
result = s.combine_first(Series([], index=[]))
assert_series_equal(s, result)
def test_corr(self):
import scipy.stats as stats
# full overlap
self.assertAlmostEqual(self.ts.corr(self.ts), 1)
# partial overlap
self.assertAlmostEqual(self.ts[:15].corr(self.ts[5:]), 1)
# No overlap
self.assert_(np.isnan(self.ts[::2].corr(self.ts[1::2])))
# all NA
cp = self.ts[:10].copy()
cp[:] = np.nan
self.assert_(isnull(cp.corr(cp)))
A = tm.makeTimeSeries()
B = tm.makeTimeSeries()
result = A.corr(B)
expected, _ = stats.pearsonr(A, B)
self.assertAlmostEqual(result, expected)
def test_corr_rank(self):
import scipy
import scipy.stats as stats
# kendall and spearman
A = tm.makeTimeSeries()  # target API: pandas.util.testing.makeTimeSeries
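# --- Illustrative sketch (not part of the original test file) ----------------
# A standalone version of the rank-correlation comparison that test_corr_rank
# presumably performs; the use of scipy.stats and the method names below are
# assumptions based on the imports above.
import numpy as np
import scipy.stats as stats
from pandas import Series

_a = Series(np.random.randn(30))
_b = Series(np.random.randn(30))
# pandas result vs. the scipy reference; the two should agree up to
# floating-point noise.
spearman_pd = _a.corr(_b, method='spearman')
spearman_sp = stats.spearmanr(_a, _b)[0]
kendall_pd = _a.corr(_b, method='kendall')
kendall_sp = stats.kendalltau(_a, _b)[0]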
"""Tools to visualize the JHU CSSE COVID-19 Data and the forecasts made
with it using the model module.
"""
import numpy as np
import pandas as pd
from babel.dates import format_date
from babel.numbers import format_decimal
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import matplotlib.dates as mdates
import seaborn as sns
from modules import processing
# Seaborn styling options
sns.set_style('darkgrid')
sns.set_context('paper')
sns.set_palette('muted')
palette = sns.xkcd_palette(['denim blue','pale red'])
blue, red = sns.xkcd_palette(['denim blue','pale red'])
# For localized formatting
locale = 'de_DE'
# Dates
today = pd.to_datetime('today').normalize()
yesterday = today - pd.Timedelta(1,'D')
footnote = 'Updated on {}. JHU CSSE COVID-19 Data: https://github.com/CSSEGISandData/COVID-19.'.format(format_date(today, locale=locale))
def cases(cases, cases_forecast=pd.DataFrame()):
fig, ax = plt.subplots()
if not cases_forecast.empty:
sns.lineplot(data=cases_forecast, dashes=False, legend=False)
for i in np.arange(len(cases_forecast.columns)):
ax.lines[i].set_linestyle('--')
sns.lineplot(data=cases, dashes=False)
ax.set_title('COVID-19 cases')
ax.set_xlabel(None)
ax.set_ylabel('Cases')
ax.yaxis.set_major_formatter(ticker.FuncFormatter(
lambda y,p: format_decimal(y, locale=locale)))
plt.xticks(rotation=45)
ax.xaxis.set_major_formatter(mdates.DateFormatter('%d %b'))
ax.legend(loc='upper left', bbox_to_anchor=(1.01, 0.95))
plt.figtext(0.5, -0.03, footnote, fontsize=6, ha='center')
plt.show()
return fig, ax
def cases_per_million(cases, population, cases_forecast=pd.DataFrame()):
cases = cases / population * 1000000
cases_forecast = cases_forecast / population * 1000000
fig, ax = plt.subplots()
if not cases_forecast.empty:
sns.lineplot(data=cases_forecast, dashes=False, legend=False)
for i in np.arange(len(cases_forecast.columns)):
ax.lines[i].set_linestyle('--')
sns.lineplot(data=cases, dashes=False)
ax.set_title('COVID-19 cases per million inhabitants')
ax.set_xlabel(None)
ax.set_ylabel('Cases per million inhabitants')
ax.yaxis.set_major_formatter(ticker.FuncFormatter(
lambda y, p : format_decimal(y, locale=locale)))
plt.xticks(rotation=45)
ax.xaxis.set_major_formatter(mdates.DateFormatter('%d %b'))
ax.legend(loc='upper left', bbox_to_anchor=(1.01, 0.95))
plt.figtext(0.5, -0.03, footnote, fontsize=6, ha='center')
plt.show()
return fig, ax
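# --- Illustrative usage sketch (not part of the original module) -------------
# How the cases() helper above might be called; the country names and numbers
# are invented for the example.
example_history = pd.DataFrame(
    {'Germany': [100, 150, 230, 410], 'Italy': [120, 180, 260, 470]},
    index=pd.date_range('2020-03-01', periods=4))
# cases(example_history) would draw the lines and return (fig, ax); the call is
# left commented out so the sketch does not open a plot window on import.
# fig, ax = cases(example_history)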
def deaths(deaths, deaths_forecast=pd.DataFrame()):  # target API: pandas.DataFrame
'''
NAME: <NAME>
SUBJECT: Good programming practices
DATE: 18/01/2022
TOPIC 1: Error handling, testing, and data validation.
'''
import pandas as pd
import matplotlib.pyplot as plt
def abrir_fichero():
'''Opens a .csv file.
Parameters:
-----------
None
Returns:
--------
pandas dataframe
'''
try:
df = pd.read_csv('finanzas2020[1].csv', sep='\t')  # target API: pandas.read_csv
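# --- Illustrative sketch (not part of the original exercise) -----------------
# A self-contained example of the defensive-read pattern the try block above is
# building towards; the helper name and error messages are assumptions.
def read_csv_safely(path, sep='\t'):
    '''Return a DataFrame, or None if the file cannot be read.'''
    try:
        return pd.read_csv(path, sep=sep)
    except (FileNotFoundError, pd.errors.ParserError) as exc:
        print('Could not read {}: {}'.format(path, exc))
        return None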
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.model_selection import train_test_split
from sklearn.externals import joblib
import numpy as np
import pandas as pd
import argparse
import os
import json
parser = argparse.ArgumentParser()
parser.add_argument("--data_dir", type=str, default="../data/")
parser.add_argument("--model_dir", type=str, default="./model/")
parser.add_argument("--output_path", type=str, default="./output/")
parser.add_argument("--target", type=str, default="MEDV")
args = parser.parse_args()
test_dataset = None
if os.path.exists(os.path.join(args.data_dir,'test.csv')):
test_dataset = os.path.join(args.data_dir,'test.csv')
elif os.path.exists(os.path.join(args.data_dir,'val.csv')):
test_dataset = os.path.join(args.data_dir,'val.csv')
elif os.path.exists(os.path.join(args.data_dir,'train.csv')):
test_dataset = os.path.join(args.data_dir,'train.csv')
else:
print("ERROR:test file invalid!")
exit()
test_data = pd.read_csv(test_dataset)  # target API: pandas.read_csv
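# --- Illustrative sketch (not part of the original script) -------------------
# The metric computation this script presumably performs after loading
# test_data, shown on synthetic arrays so the snippet stands alone; the real
# script would use a model loaded with joblib and model.predict(X_test).
import numpy as np
from sklearn.metrics import mean_squared_error, r2_score

_y_test = np.array([24.0, 21.6, 34.7, 33.4])   # made-up MEDV-style targets
_y_hat = np.array([23.1, 22.0, 33.9, 35.0])    # made-up predictions
_example_metrics = {'mse': mean_squared_error(_y_test, _y_hat),
                    'r2': r2_score(_y_test, _y_hat)}
print(_example_metrics)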
# This file is part of Patsy
# Copyright (C) 2012-2013 <NAME> <<EMAIL>>
# See file LICENSE.txt for license information.
# There are a number of unit tests in build.py, but this file contains more
# thorough tests of the overall design matrix building system. (These are
# still not exhaustive end-to-end tests, though -- for that see
# test_highlevel.py.)
from __future__ import print_function
import six
import numpy as np
from nose.tools import assert_raises
from patsy import PatsyError
from patsy.util import (atleast_2d_column_default,
have_pandas, have_pandas_categorical)
from patsy.desc import Term, INTERCEPT
from patsy.build import *
from patsy.categorical import C
from patsy.user_util import balanced, LookupFactor
from patsy.design_info import DesignMatrix
if have_pandas:
import pandas
def assert_full_rank(m):
m = atleast_2d_column_default(m)
if m.shape[1] == 0:
return True
u, s, v = np.linalg.svd(m)
rank = np.sum(s > 1e-10)
assert rank == m.shape[1]
def test_assert_full_rank():
assert_full_rank(np.eye(10))
assert_full_rank([[1, 0], [1, 0], [1, 0], [1, 1]])
assert_raises(AssertionError,
assert_full_rank, [[1, 0], [2, 0]])
assert_raises(AssertionError,
assert_full_rank, [[1, 2], [2, 4]])
assert_raises(AssertionError,
assert_full_rank, [[1, 2, 3], [1, 10, 100]])
# col1 + col2 = col3
assert_raises(AssertionError,
assert_full_rank, [[1, 2, 3], [1, 5, 6], [1, 6, 7]])
def make_termlist(*entries):
terms = []
for entry in entries:
terms.append(Term([LookupFactor(name) for name in entry]))
return terms
def check_design_matrix(mm, expected_rank, termlist, column_names=None):
assert_full_rank(mm)
assert set(mm.design_info.terms) == set(termlist)
if column_names is not None:
assert mm.design_info.column_names == column_names
assert mm.ndim == 2
assert mm.shape[1] == expected_rank
def make_matrix(data, expected_rank, entries, column_names=None):
termlist = make_termlist(*entries)
def iter_maker():
yield data
builders = design_matrix_builders([termlist], iter_maker)
matrices = build_design_matrices(builders, data)
matrix = matrices[0]
assert (builders[0].design_info.term_slices
== matrix.design_info.term_slices)
assert (builders[0].design_info.column_names
== matrix.design_info.column_names)
assert matrix.design_info.builder is builders[0]
check_design_matrix(matrix, expected_rank, termlist,
column_names=column_names)
return matrix
def test_simple():
data = balanced(a=2, b=2)
x1 = data["x1"] = np.linspace(0, 1, len(data["a"]))
x2 = data["x2"] = data["x1"] ** 2
m = make_matrix(data, 2, [["a"]], column_names=["a[a1]", "a[a2]"])
assert np.allclose(m, [[1, 0], [1, 0], [0, 1], [0, 1]])
m = make_matrix(data, 2, [[], ["a"]], column_names=["Intercept", "a[T.a2]"])
assert np.allclose(m, [[1, 0], [1, 0], [1, 1], [1, 1]])
m = make_matrix(data, 4, [["a", "b"]],
column_names=["a[a1]:b[b1]", "a[a2]:b[b1]",
"a[a1]:b[b2]", "a[a2]:b[b2]"])
assert np.allclose(m, [[1, 0, 0, 0],
[0, 0, 1, 0],
[0, 1, 0, 0],
[0, 0, 0, 1]])
m = make_matrix(data, 4, [[], ["a"], ["b"], ["a", "b"]],
column_names=["Intercept", "a[T.a2]",
"b[T.b2]", "a[T.a2]:b[T.b2]"])
assert np.allclose(m, [[1, 0, 0, 0],
[1, 0, 1, 0],
[1, 1, 0, 0],
[1, 1, 1, 1]])
m = make_matrix(data, 4, [[], ["b"], ["a"], ["b", "a"]],
column_names=["Intercept", "b[T.b2]",
"a[T.a2]", "b[T.b2]:a[T.a2]"])
assert np.allclose(m, [[1, 0, 0, 0],
[1, 1, 0, 0],
[1, 0, 1, 0],
[1, 1, 1, 1]])
m = make_matrix(data, 4, [["a"], ["x1"], ["a", "x1"]],
column_names=["a[a1]", "a[a2]", "x1", "a[T.a2]:x1"])
assert np.allclose(m, [[1, 0, x1[0], 0],
[1, 0, x1[1], 0],
[0, 1, x1[2], x1[2]],
[0, 1, x1[3], x1[3]]])
m = make_matrix(data, 3, [["x1"], ["x2"], ["x2", "x1"]],
column_names=["x1", "x2", "x2:x1"])
assert np.allclose(m, np.column_stack((x1, x2, x1 * x2)))
def test_R_bugs():
data = balanced(a=2, b=2, c=2)
data["x"] = np.linspace(0, 1, len(data["a"]))
# For "1 + a:b", R produces a design matrix with too many columns (5
# instead of 4), because it can't tell that there is a redundancy between
# the two terms.
make_matrix(data, 4, [[], ["a", "b"]])
# For "0 + a:x + a:b", R produces a design matrix with too few columns (4
# instead of 6), because it thinks that there is a redundancy which
# doesn't exist.
make_matrix(data, 6, [["a", "x"], ["a", "b"]])
# This can be compared with "0 + a:c + a:b", where the redundancy does
# exist. Confusingly, adding another categorical factor increases the
# baseline dimensionality to 8, and then the redundancy reduces it to 6
# again, so the result is the same as before but for different reasons. (R
# does get this one right, but we might as well test it.)
make_matrix(data, 6, [["a", "c"], ["a", "b"]])
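# --- Illustrative sketch (not part of the original file) ---------------------
# The same two R-comparison cases from test_R_bugs, expressed through patsy's
# high-level dmatrix API; the expected column counts (4 and 6) mirror the
# make_matrix calls above.
def _sketch_r_bug_shapes():
    from patsy import dmatrix
    data = balanced(a=2, b=2, c=2)
    data["x"] = np.linspace(0, 1, len(data["a"]))
    assert dmatrix("1 + a:b", data).shape[1] == 4        # R builds 5 here
    assert dmatrix("0 + a:x + a:b", data).shape[1] == 6  # R builds 4 here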
def test_redundancy_thoroughly():
# To make sure there aren't any lurking bugs analogous to the ones that R
# has (see above), we check that we get the correct matrix rank for every
# possible combination of 2 categorical and 2 numerical factors.
data = balanced(a=2, b=2, repeat=5)
data["x1"] = np.linspace(0, 1, len(data["a"]))
data["x2"] = data["x1"] ** 2
def all_subsets(l):
if not l:
yield tuple()
else:
obj = l[0]
for subset in all_subsets(l[1:]):
yield tuple(sorted(subset))
yield tuple(sorted((obj,) + subset))
all_terms = list(all_subsets(("a", "b", "x1", "x2")))
all_termlist_templates = list(all_subsets(all_terms))
print(len(all_termlist_templates))
# eliminate some of the symmetric versions to speed things up
redundant = [[("b",), ("a",)],
[("x2",), ("x1",)],
[("b", "x2"), ("a", "x1")],
[("a", "b", "x2"), ("a", "b", "x1")],
[("b", "x1", "x2"), ("a", "x1", "x2")]]
count = 0
for termlist_template in all_termlist_templates:
termlist_set = set(termlist_template)
for dispreferred, preferred in redundant:
if dispreferred in termlist_set and preferred not in termlist_set:
break
else:
expanded_terms = set()
for term_template in termlist_template:
numeric = tuple([t for t in term_template if t.startswith("x")])
rest = [t for t in term_template if not t.startswith("x")]
for subset_rest in all_subsets(rest):
expanded_terms.add(frozenset(subset_rest + numeric))
# Because our categorical variables have 2 levels, each expanded
# term corresponds to 1 unique dimension of variation
expected_rank = len(expanded_terms)
if termlist_template in [(), ((),)]:
# No data dependence, should fail
assert_raises(PatsyError,
make_matrix,
data, expected_rank, termlist_template)
else:
make_matrix(data, expected_rank, termlist_template)
count += 1
print(count)
test_redundancy_thoroughly.slow = 1
def test_data_types():
basic_dict = {"a": ["a1", "a2", "a1", "a2"],
"x": [1, 2, 3, 4]}
# On Python 2, this is identical to basic_dict:
basic_dict_bytes = dict(basic_dict)
basic_dict_bytes["a"] = [s.encode("ascii") for s in basic_dict_bytes["a"]]
# On Python 3, this is identical to basic_dict:
basic_dict_unicode = {"a": ["a1", "a2", "a1", "a2"],
"x": [1, 2, 3, 4]}
basic_dict_unicode = dict(basic_dict)
basic_dict_unicode["a"] = [six.text_type(s) for s in basic_dict_unicode["a"]]
structured_array_bytes = np.array(list(zip(basic_dict["a"],
basic_dict["x"])),
dtype=[("a", "S2"), ("x", int)])
structured_array_unicode = np.array(list(zip(basic_dict["a"],
basic_dict["x"])),
dtype=[("a", "U2"), ("x", int)])
recarray_bytes = structured_array_bytes.view(np.recarray)
recarray_unicode = structured_array_unicode.view(np.recarray)
datas = [basic_dict, structured_array_bytes, structured_array_unicode,
recarray_bytes, recarray_unicode]
if have_pandas:
df_bytes = pandas.DataFrame(basic_dict_bytes)
datas.append(df_bytes)
df_unicode = pandas.DataFrame(basic_dict_unicode)
datas.append(df_unicode)
for data in datas:
m = make_matrix(data, 4, [["a"], ["a", "x"]],
column_names=["a[a1]", "a[a2]", "a[a1]:x", "a[a2]:x"])
assert np.allclose(m, [[1, 0, 1, 0],
[0, 1, 0, 2],
[1, 0, 3, 0],
[0, 1, 0, 4]])
def test_build_design_matrices_dtype():
data = {"x": [1, 2, 3]}
def iter_maker():
yield data
builder = design_matrix_builders([make_termlist("x")], iter_maker)[0]
mat = build_design_matrices([builder], data)[0]
assert mat.dtype == np.dtype(np.float64)
mat = build_design_matrices([builder], data, dtype=np.float32)[0]
assert mat.dtype == np.dtype(np.float32)
if hasattr(np, "float128"):
mat = build_design_matrices([builder], data, dtype=np.float128)[0]
assert mat.dtype == np.dtype(np.float128)
def test_return_type():
data = {"x": [1, 2, 3]}
def iter_maker():
yield data
builder = design_matrix_builders([make_termlist("x")], iter_maker)[0]
# Check explicitly passing return_type="matrix" works
mat = build_design_matrices([builder], data, return_type="matrix")[0]
assert isinstance(mat, DesignMatrix)
# Check that nonsense is detected
assert_raises(PatsyError,
build_design_matrices, [builder], data,
return_type="asdfsadf")
def test_NA_action():
initial_data = {"x": [1, 2, 3], "c": ["c1", "c2", "c1"]}
def iter_maker():
yield initial_data
builder = design_matrix_builders([make_termlist("x", "c")], iter_maker)[0]
# By default drops rows containing either NaN or None
mat = build_design_matrices([builder],
{"x": [10.0, np.nan, 20.0],
"c": np.asarray(["c1", "c2", None],
dtype=object)})[0]
assert mat.shape == (1, 3)
assert np.array_equal(mat, [[1.0, 0.0, 10.0]])
# NA_action="a string" also accepted:
mat = build_design_matrices([builder],
{"x": [10.0, np.nan, 20.0],
"c": np.asarray(["c1", "c2", None],
dtype=object)},
NA_action="drop")[0]
assert mat.shape == (1, 3)
assert np.array_equal(mat, [[1.0, 0.0, 10.0]])
# And objects
from patsy.missing import NAAction
# allows NaN's to pass through
NA_action = NAAction(NA_types=[])
mat = build_design_matrices([builder],
{"x": [10.0, np.nan],
"c": np.asarray(["c1", "c2"],
dtype=object)},
NA_action=NA_action)[0]
assert mat.shape == (2, 3)
# According to this (and only this) function, NaN == NaN.
np.testing.assert_array_equal(mat, [[1.0, 0.0, 10.0], [0.0, 1.0, np.nan]])
# NA_action="raise"
assert_raises(PatsyError,
build_design_matrices,
[builder],
{"x": [10.0, np.nan, 20.0],
"c": np.asarray(["c1", "c2", None],
dtype=object)},
NA_action="raise")
def test_NA_drop_preserves_levels():
# Even if all instances of some level are dropped, we still include it in
# the output matrix (as an all-zeros column)
data = {"x": [1.0, np.nan, 3.0], "c": ["c1", "c2", "c3"]}
def iter_maker():
yield data
builder = design_matrix_builders([make_termlist("x", "c")], iter_maker)[0]
assert builder.design_info.column_names == ["c[c1]", "c[c2]", "c[c3]", "x"]
mat, = build_design_matrices([builder], data)
assert mat.shape == (2, 4)
assert np.array_equal(mat, [[1.0, 0.0, 0.0, 1.0],
[0.0, 0.0, 1.0, 3.0]])
def test_return_type_pandas():
if not have_pandas:
return
data = pandas.DataFrame({"x": [1, 2, 3],
"y": [4, 5, 6],
"a": ["a1", "a2", "a1"]},
index=[10, 20, 30])
def iter_maker():
yield data
int_builder, = design_matrix_builders([make_termlist([])], iter_maker)
(y_builder, x_builder) = design_matrix_builders([make_termlist("y"),
make_termlist("x")],
iter_maker)
(x_a_builder,) = design_matrix_builders([make_termlist("x", "a")],
iter_maker)
(x_y_builder,) = design_matrix_builders([make_termlist("x", "y")],
iter_maker)
# Index compatibility is always checked for pandas input, regardless of
# whether we're producing pandas output
assert_raises(PatsyError,
build_design_matrices,
[x_a_builder], {"x": data["x"], "a": data["a"][::-1]})
assert_raises(PatsyError,
build_design_matrices,
[y_builder, x_builder],
{"x": data["x"], "y": data["y"][::-1]})
# And we also check consistency between data.index and value indexes
# Creating a mismatch between these is a bit tricky. We want a data object
# such that isinstance(data, DataFrame), but data["x"].index !=
# data.index.
class CheatingDataFrame(pandas.DataFrame):
def __getitem__(self, key):
if key == "x":
return pandas.DataFrame.__getitem__(self, key)[::-1]
else:
return pandas.DataFrame.__getitem__(self, key)
assert_raises(PatsyError,
build_design_matrices,
[x_builder],
CheatingDataFrame(data))
# A mix of pandas input and unindexed input is fine
(mat,) = build_design_matrices([x_y_builder],
{"x": data["x"], "y": [40, 50, 60]})
assert np.allclose(mat, [[1, 40], [2, 50], [3, 60]])
# with return_type="dataframe", we get out DataFrames with nice indices
# and nice column names and design_info
y_df, x_df = build_design_matrices([y_builder, x_builder], data,
return_type="dataframe")
assert isinstance(y_df, pandas.DataFrame)
assert isinstance(x_df, pandas.DataFrame)
assert np.array_equal(y_df, [[4], [5], [6]])
assert np.array_equal(x_df, [[1], [2], [3]])
assert np.array_equal(y_df.index, [10, 20, 30])
assert np.array_equal(x_df.index, [10, 20, 30])
assert np.array_equal(y_df.columns, ["y"])
assert np.array_equal(x_df.columns, ["x"])
assert y_df.design_info.column_names == ["y"]
assert x_df.design_info.column_names == ["x"]
assert y_df.design_info.term_names == ["y"]
assert x_df.design_info.term_names == ["x"]
# Same with mix of pandas and unindexed info, even if in different
# matrices
y_df, x_df = build_design_matrices([y_builder, x_builder],
{"y": [7, 8, 9], "x": data["x"]},
return_type="dataframe")
assert isinstance(y_df, pandas.DataFrame)
assert isinstance(x_df, pandas.DataFrame)
assert np.array_equal(y_df, [[7], [8], [9]])
assert np.array_equal(x_df, [[1], [2], [3]])
assert np.array_equal(y_df.index, [10, 20, 30])
assert np.array_equal(x_df.index, [10, 20, 30])
assert np.array_equal(y_df.columns, ["y"])
assert np.array_equal(x_df.columns, ["x"])
assert y_df.design_info.column_names == ["y"]
assert x_df.design_info.column_names == ["x"]
assert y_df.design_info.term_names == ["y"]
assert x_df.design_info.term_names == ["x"]
# Check categorical works for carrying index too
(x_a_df,) = build_design_matrices([x_a_builder],
{"x": [-1, -2, -3], "a": data["a"]},
return_type="dataframe")
assert isinstance(x_a_df, pandas.DataFrame)
assert np.array_equal(x_a_df, [[1, 0, -1], [0, 1, -2], [1, 0, -3]])
assert np.array_equal(x_a_df.index, [10, 20, 30])
# And if we have no indexed input, then we let pandas make up an index as
# per its usual rules:
(x_y_df,) = build_design_matrices([x_y_builder],
{"y": [7, 8, 9], "x": [10, 11, 12]},
return_type="dataframe")
assert isinstance(x_y_df, pandas.DataFrame)
assert np.array_equal(x_y_df, [[10, 7], [11, 8], [12, 9]])
assert np.array_equal(x_y_df.index, [0, 1, 2])
# If 'data' is a DataFrame, then that suffices, even if no factors are
# available.
(int_df,) = build_design_matrices([int_builder], data,
return_type="dataframe")
assert isinstance(int_df, pandas.DataFrame)
assert np.array_equal(int_df, [[1], [1], [1]])
assert int_df.index.equals([10, 20, 30])
import patsy.build
had_pandas = patsy.build.have_pandas
try:
patsy.build.have_pandas = False
# return_type="dataframe" gives a nice error if pandas is not available
assert_raises(PatsyError,
build_design_matrices,
[x_builder], {"x": [1, 2, 3]}, return_type="dataframe")
finally:
patsy.build.have_pandas = had_pandas
x_df, = build_design_matrices([x_a_builder],
{"x": [1.0, np.nan, 3.0],
"a": np.asarray([None, "a2", "a1"],
dtype=object)},
NA_action="drop",
return_type="dataframe")
assert x_df.index.equals([2])
def test_data_mismatch():
test_cases_twoway = [
# Data type mismatch
([1, 2, 3], [True, False, True]),
(C(["a", "b", "c"], levels=["c", "b", "a"]),
C(["a", "b", "c"], levels=["a", "b", "c"])),
# column number mismatches
([[1], [2], [3]], [[1, 1], [2, 2], [3, 3]]),
([[1, 1, 1], [2, 2, 2], [3, 3, 3]], [[1, 1], [2, 2], [3, 3]]),
]
test_cases_oneway = [
([1, 2, 3], ["a", "b", "c"]),
([1, 2, 3], C(["a", "b", "c"])),
([True, False, True], C(["a", "b", "c"])),
([True, False, True], ["a", "b", "c"]),
]
setup_predict_only = [
# This is not an error if both are fed in during make_builders, but it
# is an error to pass one to make_builders and the other to
# make_matrices.
(["a", "b", "c"], ["a", "b", "d"]),
]
termlist = make_termlist(["x"])
def t_incremental(data1, data2):
def iter_maker():
yield {"x": data1}
yield {"x": data2}
try:
builders = design_matrix_builders([termlist], iter_maker)
build_design_matrices(builders, {"x": data1})
build_design_matrices(builders, {"x": data2})
except PatsyError:
pass
else:
raise AssertionError
def t_setup_predict(data1, data2):
def iter_maker():
yield {"x": data1}
builders = design_matrix_builders([termlist], iter_maker)
assert_raises(PatsyError,
build_design_matrices, builders, {"x": data2})
for (a, b) in test_cases_twoway:
t_incremental(a, b)
t_incremental(b, a)
t_setup_predict(a, b)
t_setup_predict(b, a)
for (a, b) in test_cases_oneway:
t_incremental(a, b)
t_setup_predict(a, b)
for (a, b) in setup_predict_only:
t_setup_predict(a, b)
t_setup_predict(b, a)
assert_raises(PatsyError,
make_matrix, {"x": [1, 2, 3], "y": [1, 2, 3, 4]},
2, [["x"], ["y"]])
def test_data_independent_builder():
data = {"x": [1, 2, 3]}
def iter_maker():
yield data
# Trying to build a matrix that doesn't depend on the data at all is an
# error, if:
# - the index argument is not given
# - the data is not a DataFrame
# - there are no other matrices
null_builder = design_matrix_builders([make_termlist()], iter_maker)[0]
assert_raises(PatsyError, build_design_matrices, [null_builder], data)
intercept_builder = design_matrix_builders([make_termlist([])],
iter_maker)[0]
assert_raises(PatsyError, build_design_matrices, [intercept_builder], data)
assert_raises(PatsyError,
build_design_matrices,
[null_builder, intercept_builder], data)
# If data is a DataFrame, it sets the number of rows.
if have_pandas:
int_m, null_m = build_design_matrices([intercept_builder,
null_builder],
pandas.DataFrame(data))
assert np.allclose(int_m, [[1], [1], [1]])
assert null_m.shape == (3, 0)
# If there are other matrices that do depend on the data, we make the
# data-independent matrices have the same number of rows.
x_termlist = make_termlist(["x"])
builders = design_matrix_builders([x_termlist, make_termlist()],
iter_maker)
x_m, null_m = build_design_matrices(builders, data)
assert np.allclose(x_m, [[1], [2], [3]])
assert null_m.shape == (3, 0)
builders = design_matrix_builders([x_termlist, make_termlist([])],
iter_maker)
x_m, intercept_m = build_design_matrices(builders, data)
assert np.allclose(x_m, [[1], [2], [3]])
assert np.allclose(intercept_m, [[1], [1], [1]])
def test_same_factor_in_two_matrices():
data = {"x": [1, 2, 3], "a": ["a1", "a2", "a1"]}
def iter_maker():
yield data
t1 = make_termlist(["x"])
t2 = make_termlist(["x", "a"])
builders = design_matrix_builders([t1, t2], iter_maker)
m1, m2 = build_design_matrices(builders, data)
check_design_matrix(m1, 1, t1, column_names=["x"])
assert np.allclose(m1, [[1], [2], [3]])
check_design_matrix(m2, 2, t2, column_names=["x:a[a1]", "x:a[a2]"])
assert np.allclose(m2, [[1, 0], [0, 2], [3, 0]])
def test_categorical():
data_strings = {"a": ["a1", "a2", "a1"]}
data_categ = {"a": C(["a2", "a1", "a2"])}
datas = [data_strings, data_categ]
if have_pandas_categorical:
data_pandas = {"a": | pandas.Categorical.from_array(["a1", "a2", "a2"]) | pandas.Categorical.from_array |
# Library imports
print('Starting library imports')
import pandas as pd
import numpy as np
import warnings
import inflection
import re
import pymysql
import pickle
import boto3
import boto3.session
import s3fs
from datetime import datetime
from sqlalchemy import create_engine
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score
from sklearn.ensemble import RandomForestRegressor
import umap.umap_ as umap
print('Library imports: OK')
# Loading data ===========================================================================
print('Loading data from SQL database')
db_credentials = pd.read_csv('s3://gustavoawsbucketds/db_credentials.txt', header=None)
# DB credentials
user = db_credentials[0][0]
psw = db_credentials[1][0]
host = db_credentials[2][0]
port = db_credentials[3][0]
schema = db_credentials[4][0]
schema_2 = db_credentials[5][0]
# Selecting data from database - SQL query ('purchases' table - ecommerce schema)
query = """
SELECT *
FROM purchases
"""
# creating the connection to the existing db
connection = create_engine('mysql+pymysql://{}:{}@{}:{}/{}'.format(user, psw, host, port, schema))
# executing sql query
df = pd.read_sql_query(query, con=connection)
# closing database connection
connection.dispose()
print('Loading data from SQL database done')
# 1 - Data description ========================================================================
print('\nStarting Data description step')
# Adjusting column names
df.columns = list(map(lambda x: inflection.underscore(x), df.columns)) #changing to underscore + lower(snakecase)
# Replace NA's
df = df.dropna(subset=['description', 'customer_id'])
# Changing data types
# Changing column 'invoice_date' to datetime
df['invoice_date'] = pd.to_datetime(df['invoice_date'], format='%d-%b-%y')
# Changing column 'customer_id' to int
df['customer_id'] = df['customer_id'].astype(int)
print('Data description step done')
# 2 - Data filtering ========================================================================
print('\nStarting Data filtering step')
# Removing inconsistencies
# 'unit_price' column:
# we are going to ignore 'unit_price'==0 and keep only rows with 'unit_price'>0.04
df = df.loc[df['unit_price']>0.04, :]
# 'stock_code' column:
# removing the rows where 'stock_code' is one of: ['POST', 'D', 'M', 'PADS', 'DOT', 'CRUK']
df = df.loc[~df['stock_code'].isin(['POST', 'D', 'M', 'PADS', 'DOT', 'CRUK'])]
# 'description' column:
# removing the 'description' column, assuming it does not carry relevant information
df = df.drop(columns='description')
# 'country' column (map)
# removing rows where 'country' == 'European Community', 'Unspecified'
df = df.loc[~df['country'].isin(['European Community', 'Unspecified']), :]
# 'quantity' column:
# getting a dataframe with only returns operations
df_2_returns = df.loc[df['quantity'] < 0, :]
# getting a dataframe with only purchases operations
df_2_purchases = df.loc[df['quantity'] >= 0, :]
# Removing inconsistencies in observations:
# based on previous univariate analysis, we investigated for some potential outliers (customers with unusual purchase behaviour)
## we are going to remove these observations
# 'customer_id' == 16446 (this customer had two records that do not represent actual purchases, and 2 more records with only 1 item purchased each)
## should be removed because this customer is generating distortion in the avg_ticket calculation
df_2_purchases = df_2_purchases[~df_2_purchases['customer_id'].isin([16446])]
# ***********************************************
# Saving cleaned purchases table into a sql database to be further consumed by an external visualization tool via sql query
# creating the connection to the existing db
connection = create_engine('mysql+pymysql://{}:{}@{}:{}/{}?charset=utf8mb4'.format(user, psw, host, port, schema_2))
# inserting data to database
df_2_purchases.to_sql( 'purchases', con=connection, if_exists='append', index=False)
# ***********************************************
# closing database connection
connection.dispose()
print('Purchases cleaned table saved - SQL')
print('Data filtering step done')
# 3 - Feature engineering ========================================================================
print('\nStarting feature engineering step')
# Feature creation
# Creating reference dataframe
df_ref = df.drop(['invoice_no', 'stock_code', 'quantity', 'invoice_date',
'unit_price', 'country'], axis=1).drop_duplicates(ignore_index=True)
# Creating 'gross_revenue' column to df_2_purchases (gross_revenue = unit_price * quantity)
df_2_purchases['gross_revenue'] = df_2_purchases['unit_price'] * df_2_purchases['quantity']
# Monetary (grouping 'gross_revenue' by customer)
df_monetary = df_2_purchases[['customer_id', 'gross_revenue']].groupby('customer_id').sum().reset_index()
# Joining df_ref and df_monetary
df_ref = pd.merge(left=df_ref, right=df_monetary, how='left', on='customer_id')
# Recency - last purchase day (grouping 'invoice_date' by customer and getting the max date)
df_recency = df_2_purchases[['customer_id', 'invoice_date']].groupby('customer_id').max().reset_index() # creating df_recency
df_recency['recency'] = df_2_purchases['invoice_date'].max() - df_recency['invoice_date'] # adding 'recency' column (max date of the dataset - last purchase day of each customer)
df_recency['recency'] = df_recency['recency'].apply( lambda x: x.days) # extracting the number of days from the Timedelta
# Joining df_ref and df_recency
df_ref = pd.merge(left=df_ref, right=df_recency[['customer_id','recency']], how='left', on='customer_id')
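# Illustrative example of the recency computed above (dates invented for clarity):
# if the dataset's most recent invoice_date is 2011-12-09 and a given customer's
# last purchase was 2011-12-01, then
#   recency = (Timestamp('2011-12-09') - Timestamp('2011-12-01')).days == 8
# i.e. the number of days since that customer's most recent purchase.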
# Quantity of purchases by customer - purchase frequency of each customer
df_purch_cost = df_2_purchases[['customer_id','invoice_no']].drop_duplicates().groupby('customer_id').count().reset_index()
df_purch_cost.columns = ['customer_id', 'purchase_by_costumer'] # renaming column 'invoice_no' to 'purchase_by_costumer'
# Joining df_ref and df_purch_cost
df_ref = pd.merge(left=df_ref, right=df_purch_cost, how='left', on='customer_id')
# Number of items purchased
df_total_purchased = df_2_purchases[['customer_id','quantity']].groupby('customer_id').sum().reset_index()
df_total_purchased.columns = ['customer_id', 'number_items_purchased'] # renaming column 'quantity' to 'number_items_purchased'
# Joining df_ref and df_total_purchased
df_ref = pd.merge(left=df_ref, right=df_total_purchased, how='left', on='customer_id')
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
import numpy as np
import json
import import_db_assessment
def createTransformersVariable(transformerRule):
# Convert the JSON fields into variables like dictionaries, lists, string and numbers and return it
if str(transformerRule['action_details']['datatype']).upper() == 'DICTIONARY':
# For dictionaries
return json.loads(str(transformerRule['action_details']['value']).strip())
elif str(transformerRule['action_details']['datatype']).upper() == 'LIST':
        # For lists, the value is expected to be comma-separated
return str(transformerRule['action_details']['value']).split(',')
elif str(transformerRule['action_details']['datatype']).upper() == 'STRING':
# For strings we just need to carry out the content
return str(transformerRule['action_details']['value'])
elif str(transformerRule['action_details']['datatype']).upper() == 'NUMBER':
# For number we are casting it to float
return float(transformerRule['action_details']['value'])
else:
# If the JSON file has any value not expected
return None
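# Illustrative sketch (the rule below is hypothetical, not taken from the shipped
# transformers.json): a rule whose action_details declare a LIST datatype is
# returned as a Python list split on commas.
def _example_create_transformers_variable():
    example_rule = {"action_details": {"datatype": "LIST",
                                       "value": "chr1,chr2,chr3"}}
    return createTransformersVariable(example_rule)  # -> ['chr1', 'chr2', 'chr3']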
def runRules(transformerRules, dataFrames, singleRule, args, collectionKey, transformersTablesSchema, fileList, rulesAlreadyExecuted, transformersParameters):
# Variable to keep track of rules executed and its results and status
transformerResults = {}
# Variable to keep track and make available all the variables from the JSON file
transformersRulesVariables = {}
# Standardize Statuses
# Executed
EXECUTEDSTATUS = 'EXECUTED'
FAILEDSTATUS = 'FAILED'
if singleRule:
# If parameter is set then we will run only 1 rule
sorted_keys = []
sorted_keys.append(singleRule)
else:
# Getting ordered list of keys by priority to iterate over the dictionary
sorted_keys = sorted(transformerRules, key=lambda x: (transformerRules[x]['priority']))
# Looping on ALL rules from transformers.json
for ruleItem in sorted_keys:
stringExpression = getParsedRuleExpr(transformerRules[ruleItem]['expr1'])
iferrorExpression = getParsedRuleExpr(transformerRules[ruleItem]['iferror'])
if str(transformerRules[ruleItem]['status']).upper() == "ENABLED":
if ruleItem not in rulesAlreadyExecuted:
print('Processing rule item: "{}"\nPriority: "{}"'.format(ruleItem,transformerRules[ruleItem]['priority']))
if str(transformerRules[ruleItem]['type']).upper() == "VARIABLE" and str(transformerRules[ruleItem]['action']).upper() == "CREATE":
# transformers.json asking to create a variable which is a dictionary
try:
transformerResults[ruleItem] = {'Status': EXECUTEDSTATUS, 'Result Value': createTransformersVariable(transformerRules[ruleItem])}
transformersRulesVariables[transformerRules[ruleItem]['action_details']['varname']] = transformerResults[ruleItem]['Result Value']
except:
# In case of any issue the rule will be marked as FAILEDSTATUS
transformerResults[ruleItem] = {'Status': FAILEDSTATUS, 'Result Value': None}
transformersRulesVariables[transformerRules[ruleItem]['action_details']['varname']] = None
elif str(transformerRules[ruleItem]['type']).upper() in ("NUMBER","FREESTYLE") and str(transformerRules[ruleItem]['action']).upper() == "ADD_OR_UPDATE_COLUMN":
# transformers.json asking to add a column that is type number meaning it can be a calculation and the column to be added is NUMBER too
# Where the result of expr1 will be saved initially
dfTargetName = transformerRules[ruleItem]['action_details']['dataframe_name']
columnTargetName = transformerRules[ruleItem]['action_details']['column_name']
ruleCondition = True
try:
ruleConditionString = str(transformerRules[ruleItem]['ifcondition1'])
except KeyError:
ruleConditionString = None
# In case ifcondition1 (transformers.json) is set for the rule
if ruleConditionString is not None and ruleConditionString != "":
try:
ruleCondition = eval (ruleConditionString)
print ('ruleCondition = {}'.format(ruleCondition))
except:
print ('\n Error processing ifcondition1 "{}" for rule "{}". So, this rule will be skipped.\n'.format(ruleConditionString,ruleItem))
continue
if not ruleCondition:
print ('WARNING: This rule "{}" will be skipped because of "ifcondition1" from transformers.json is FALSE.'.format(ruleItem))
continue
try:
dataFrames[str(dfTargetName).upper()][str(columnTargetName).upper()] = execStringExpression(stringExpression,iferrorExpression, dataFrames)
df = dataFrames[str(dfTargetName).upper()]
except KeyError:
print ('\n WARNING: The rule "{}" could not be executed because the variable "{}" used in the transformers.json could not be found.\n'.format(ruleItem, str(dfTargetName).upper()))
continue
newTableName = str(transformerRules[ruleItem]['action_details']['target_dataframe_name']).lower()
fileName = str(getattr(args,'fileslocation')) + '/opdbt__' + newTableName + '__' + collectionKey
resCSVCreation, transformersTablesSchema = createCSVFromDataframe(df, transformerRules[ruleItem]['action_details'], args, fileName, transformersTablesSchema, newTableName, False)
# Creating the new dataframe
dataFrames[str(newTableName).upper()] = df
if resCSVCreation:
# If CSV creation was successfully then we will add this to the list of files to be imported
fileList.append(fileName)
elif str(transformerRules[ruleItem]['type']).upper() == "FREESTYLE" and str(transformerRules[ruleItem]['action']).upper() == "CREATE_OR_REPLACE_DATAFRAME":
#
df = execStringExpression(stringExpression,iferrorExpression,dataFrames)
if df is None:
print('\n WARNING: The rule "{}" could not be executed because the expression "{}" used in the transformers.json could not be executed.\n'.format(ruleItem,stringExpression))
continue
newTableName = str(transformerRules[ruleItem]['action_details']['dataframe_name']).lower()
fileName = str(getattr(args,'fileslocation')) + '/opdbt__' + newTableName + '__' + collectionKey
resCSVCreation, transformersTablesSchema = createCSVFromDataframe(df, transformerRules[ruleItem]['action_details'], args, fileName, transformersTablesSchema, newTableName, False)
# Creating the new dataframe
dataFrames[str(transformerRules[ruleItem]['action_details']['dataframe_name']).upper()] = df
if resCSVCreation:
# If CSV creation was successfully then we will add this to the list of files to be imported
fileList.append(fileName)
elif str(transformerRules[ruleItem]['type']).upper() == "FREESTYLE" and str(transformerRules[ruleItem]['action']).upper() == "FREESTYLE":
try:
eval (stringExpression)
except KeyError:
print ('\n WARNING: The rule "{}" could not be executed because the expr1 "{}" used in the transformers.json could not be executed.\n'.format(ruleItem, stringExpression))
continue
newTableName = str(transformerRules[ruleItem]['action_details']['target_dataframe_name']).lower()
fileName = str(getattr(args,'fileslocation')) + '/opdbt__' + newTableName + '__' + collectionKey
resCSVCreation, transformersTablesSchema = createCSVFromDataframe(df, transformerRules[ruleItem]['action_details'], args, fileName, transformersTablesSchema, newTableName, False)
# Creating the new dataframe
dataFrames[str(newTableName).upper()] = df
if resCSVCreation:
# If CSV creation was successfully then we will add this to the list of files to be imported
fileList.append(fileName)
return transformerResults, transformersRulesVariables, fileList, dataFrames
def execStringExpression(stringExpression,iferrorExpression, dataFrames):
try:
res = eval (stringExpression)
except:
try:
res = eval (iferrorExpression)
except:
res = None
return res
def getParsedRuleExpr(ruleExpr):
# Function to get a clean string to be executed in eval function. The input is a string with many components separated by ; coming from transformers.json
ruleComponents = []
ruleComponents = str(ruleExpr).split(';')
finalExpression = ''
for ruleItem in ruleComponents:
ruleItem = ruleItem.strip()
finalExpression = str(finalExpression) + str(ruleItem) + ' '
return finalExpression
def getRulesFromJSON(jsonFileName):
# Read JSON file from the OS and turn it into a hash table
with open(jsonFileName) as f:
transformerRules = json.load(f)
return transformerRules
def getDataFrameFromCSV(csvFileName,tableName,skipRows,separatorString,transformersTablesSchema):
# Read CSV files from OS and turn it into a dataframe
paramCleanDFHeaders = False
paramGetHeadersFromConfig = True
try:
if paramGetHeadersFromConfig:
if transformersTablesSchema.get(tableName):
try:
tableHeaders = getDFHeadersFromTransformers(tableName,transformersTablesSchema)
tableHeaders = [header.upper() for header in tableHeaders]
df = pd.read_csv(csvFileName, skiprows=skipRows+1, header=None, names=tableHeaders)
except Exception as dataframeHeaderErr:
print ('\nThe filename {} for the table {} could not be imported using the column names {}.\n'.format(csvFileName,tableName,tableHeaders))
paramCleanDFHeaders = True
df = pd.read_csv(csvFileName, skiprows=skipRows)
else:
df = pd.read_csv(csvFileName, skiprows=skipRows)
# In case we need to clean some headers from dataframe
if paramCleanDFHeaders:
columList = df.columns.values.tolist()
columList = cleanCSVHeaders(columList)
columList = str(columList).strip().split(',')
columList = [column.strip() for column in columList]
df.columns = columList
except Exception as generalErr:
print ('\nThe filename {} is most likely empty.\n'.format(csvFileName))
return False
return df
def getDFHeadersFromTransformers(tableName,transformersTablesSchema):
tableConfig = transformersTablesSchema.get(tableName)
tableHeaders = [header[0] for header in tableConfig]
return tableHeaders
def getAllDataFrames(fileList, skipRows, collectionKey, args, transformersTablesSchema, dbAssessmentDataframes, transformersParameters):
    # Function to read from CSVs and store the data into a dataframe. The dataframe is then placed into a Hash Table.
# This function returns a dictionary with dataframes from CSVs
separatorString = args.sep
# Hash table to store dataframes after being loaded from CSVs
dataFrames = dbAssessmentDataframes
fileList.sort()
for fileName in fileList:
        # Verifying whether the file came from the SQL Script or whether it is the result of a previous transformers.json execution in which a file had been saved. I.E: Reshaped Dataframes
collectionType = import_db_assessment.getObjNameFromFiles(str(fileName),'__',0)
collectionType = collectionType.split('/')[-1]
if collectionType == 'opdbt':
# This file is not from SQL Script. Meaning this is a file generated by Optimus Prime in a prior execution. Skipping CSV files that are result of a previous transformation execution
continue
# Final table name from the CSV file names
tableName = import_db_assessment.getObjNameFromFiles(fileName,'__',1)
# Storing Dataframe in a Hash Table using as a key the final Table name coming from CSV filename
df = getDataFrameFromCSV(fileName,tableName,skipRows,separatorString,transformersTablesSchema)
# Checking if no error was found during loading CSV from OS
if df is not False:
# Trimming the data before storing it
dataFrames[str(tableName).upper()] = trimDataframe(df)
transformersTablesSchema = processSchemaDetection(args.schemadetection,transformersTablesSchema, transformersParameters, tableName, df)
return dataFrames, transformersTablesSchema
def processSchemaDetection(schemadetection,transformersTablesSchema, transformersParameters, tableName, df):
if str(schemadetection).upper() == 'AUTO':
# In the arguments if we want to use AUTO schema detection
# Replaces whatever is in there
transformersTablesSchema[tableName] = addBQDataType(list(df.columns), 'STRING')
elif str(schemadetection).upper() == 'FILLGAP':
# In the arguments if we want to try to only use it when the configuration file do not have it already
if transformersTablesSchema.get(str(tableName).lower()) is None:
# Adds configuration whenever this is not present
transformersTablesSchema[str(tableName).lower()] = addBQDataType(list(df.columns), 'STRING')
print('WARNING: Optimus Prime is filling the gap in the transformers.json schema definition for {} table.\n'.format(tableName))
return transformersTablesSchema
def addBQDataType(columList, dataType):
newColumnList = []
# Cleaning header
columList = cleanCSVHeaders(columList)
columList = str(columList).split(',')
for column in columList:
newColumnList.append([column,dataType])
return newColumnList
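# Illustrative sketch: addBQDataType pairs every CSV header with a BigQuery type,
# which is how the AUTO/FILLGAP schema detection above builds a table schema.
# The column names here are invented for the example.
def _example_add_bq_datatype():
    return addBQDataType(['HOST_NAME', 'DB_NAME', 'METRIC_VALUE'], 'STRING')
    # -> [['HOST_NAME', 'STRING'], ['DB_NAME', 'STRING'], ['METRIC_VALUE', 'STRING']]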
def cleanCSVHeaders(headerString):
headerString = str(headerString).replace("'||","").replace("||'","").replace("'","").replace('"','').replace("[","").replace("]","").replace(" ","").strip()
return headerString
def trimDataframe(df):
# Removing spaces (TRIM/Strip) for ALL columns
df.columns = df.columns.str.replace(' ', '')
cols = list(df.columns)
#df[cols] = df[cols].apply(lambda x: x.str.strip())
#df = df.applymap(lambda x: x.strip() if isinstance(x, str) else x)
for column in cols:
try:
df[column] = df[column].str.strip()
except:
None
# trimmed dataframe
return df
def rewriteTrimmedCSVData(dataFrames, transformersParameters, transformersTablesSchema, fileList):
    # To be Deleted: dead code - it references names (args, reshapedTableName, collectionKey, transformersResults, tableName) that are not defined in this scope
if transformersParameters.get('op_trim_csv_data') is not None:
for csvTableData in list(transformersParameters['op_trim_csv_data']):
if dataFrames.get(str(csvTableData).upper()) is not None:
df = dataFrames[str(csvTableData).upper()]
df = trimDataframe(df)
# collectionKey already contains .log
fileName = str(getattr(args,'fileslocation')) + '/opdbt__' + reshapedTableName + '__' + str(collectionKey)
# Writes CSVs from Dataframes when parameter store in CSV_ONLY or BIGQUERY
resCSVCreation, transformersTablesSchema = createCSVFromDataframe(df, transformersResults[str(tableName)], args, fileName, transformersTablesSchema, str(reshapedTableName).lower(), True)
if resCSVCreation:
# If CSV creation was successfully then we will add this to the list of files to be imported
fileList.append(fileName)
def getAllReShapedDataframes(dataFrames, transformersTablesSchema, transformersParameters, transformerRulesConfig, args, collectionKey, fileList):
# Function to iterate on getReShapedDataframe to reShape some dataframes accordingly with targetTableNames
# dataFrames is expected to be a Hash Table of dataframes
# targetTableNames is expected to be a list with the right keys from the Hash table dataFrames
if transformersParameters.get('op_enable_reshape_for') is not None:
# if the parameter is set to any value
executedRulesList = []
for tableName_RuleID in transformersParameters.get('op_enable_reshape_for').split(','):
            # This parameter accepts multiple values
tableName = str(tableName_RuleID).split(':')[0]
ruleID = str(tableName_RuleID).split(':')[1]
resCSVCreation = False
transformerParameterResults, transformersResults, fileList, dataFrames = runRules(transformerRulesConfig, dataFrames, ruleID, args, None, transformersTablesSchema, fileList, executedRulesList, transformersParameters)
print('Reshaping Rule Executed: {} for the table name {}'.format(ruleID,tableName))
# Including runes already executed to be avoided
executedRulesList.append(ruleID)
if dataFrames.get(str(tableName)) is not None:
if transformersResults.get(str(tableName)) is not None:
reshapedTableName = str(tableName).lower() + '_rs'
try:
df = getReShapedDataframe(dataFrames[str(tableName)], transformersResults[str(tableName)])
dataFrames[reshapedTableName.upper()] = df
except:
df = None
print('WARNING: Optimus Prime could not ReShape the table {} due to a fatal error.\n'.format(tableName))
# collectionKey already contains .log
fileName = str(getattr(args,'fileslocation')) + '/opdbt__' + reshapedTableName + '__' + str(collectionKey)
if df is not None:
# Writes CSVs from Dataframes when parameter store in CSV_ONLY or BIGQUERY
resCSVCreation, transformersTablesSchema = createCSVFromDataframe(dataFrames[reshapedTableName.upper()], transformersResults[str(tableName)], args, fileName, transformersTablesSchema, str(reshapedTableName).lower(), True)
if resCSVCreation:
# If CSV creation was successfully then we will add this to the list of files to be imported
fileList.append(fileName)
else:
print ('\nThere is not parameter set to define the reshape process for: {}'.format(str(tableName)))
print ('This is all valid reshape configurations found: {}\n'.format(str(transformersParameters.keys())))
else:
print ('\nWARNING: There is no data parsed from CSVs named {}'.format(str(tableName)))
print ('WARNING: This is all valid CSVs names {}\n'.format(str(dataFrames.keys())))
return dataFrames, fileList, transformersTablesSchema, executedRulesList
def createCSVFromDataframe(df, transformersParameters, args, fileName, transformersTablesSchema, tableName, fixDataframeColumns):
if transformersParameters['store'] in ('CSV_ONLY', 'BIGQUERY'):
#STEP: Creating 1 row empty in the file
# Make sure file will have same format (skipping first line as others) as the ones coming from oracle_db_assessment.sql
        df1 = pd.DataFrame({'a':[np.nan] * 1})
import multiprocessing
import os
import time
from datetime import datetime, timedelta
import sys
from functools import partial
import mongo_proxy
from pymongo import UpdateOne, ReplaceOne, DeleteMany, MongoClient
sys.path.extend([sys.argv[1]])
import settings
from mongo_orm import MongoDB, AnyField
from project_customization.flexcoop.models import DataPoint, Device
from project_customization.flexcoop.reports.telemetry_usage import get_data_model
from project_customization.flexcoop.timeseries_utils import timeseries_mapping, indoor_sensing, occupancy, meter, \
status_devices, device_status, atw_heatpumps
from project_customization.flexcoop.utils import convert_snake_case
import pandas as pd
import numpy as np
import pytz
"""We define the cronjobs to be executed to deal with the raw data recieved"""
#define the final timeseries models:
timezone = pytz.timezone("Europe/Madrid")
NUM_PROCESSES = 10
DEVICES_BY_PROC = 10
device_exception = ["76f899f2-323b-11ea-92d1-ac1f6b403fbc"]
def no_outliers_stats(series, lowq=2.5, highq=97.5):
hh = series[(series <= np.nanquantile(series, highq/100))& (series >= np.nanquantile(series, lowq/100))]
return {"mean": hh.mean(), "median": hh.median(), "std": hh.std()}
def clean_znorm_data(series, th, lowq=2.5, highq=97.5):
series1 = series.round(2).value_counts()
series1 = series1 / series1.sum()
series2 = series.copy()
for c in series1.iteritems():
if c[1] > 0.20:
series2 = series[series.round(2) != c[0]]
else:
break
stats = no_outliers_stats(series2, lowq, highq)
zscore = np.abs( (series - stats['median']) / stats['std'])
return series[zscore < th]
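# Minimal sketch (synthetic readings, not part of the deployed job) of the modified
# z-score filter above: values far from the robust median/std are dropped.
def _example_clean_znorm():
    series = pd.Series([20.0, 21.0, 19.5, 20.5, 500.0])  # 500.0 is an obvious outlier
    return clean_znorm_data(series, th=3)  # keeps the four plausible readings, drops 500.0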
def znorm_value(series, window, th, lowq=2.5, highq=97.5):
val_index = int(window / 2)
if len(series) < val_index:
return 0
current = series.iloc[val_index]
stats = no_outliers_stats(series, lowq, highq)
if np.isnan(stats['std']):
zscore = 0
else:
zscore = np.abs((current - stats['median']) / stats['std'])
return zscore
def clean_znorm_window(series, th, lowq=2.5, highq=97.5):
zscore = series.rolling(window=49, center=True, min_periods=1).apply(znorm_value, raw=False, args=(49, th))
return series[zscore < th]
def clean_threshold_data(series, min_th=None, max_th=None):
if min_th is not None and max_th is not None:
return series[(min_th<=series) & (series<=max_th)]
elif min_th is not None:
return series[series >= min_th]
elif max_th is not None:
return series[series <= max_th]
else:
return series
def cleaning_data(series, period, operations):
df = pd.DataFrame(series)
for operation in operations:
if operation['type'] == 'threshold':
df.value = clean_threshold_data(df.value, min_th=operation['params'][0], max_th=operation['params'][1])
if operation['type'] == "znorm":
df.value = clean_znorm_data(df.value, operation['params'])
# if period == "backups":
# #print(len(series))
# df.value = clean_znorm_window(df.value, operation['params'])
return df.value
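# Illustrative sketch of the `operations` structure consumed by cleaning_data above.
# In production these come from timeseries_mapping in timeseries_utils; the numbers
# below are invented for the example.
def _example_cleaning_data():
    series = pd.Series(list(np.linspace(19.0, 21.0, 21)) + [500.0])  # one absurd reading
    operations = [{'type': 'threshold', 'params': [0, 100]},  # [min, max] physical bounds
                  {'type': 'znorm', 'params': 3}]             # modified z-score threshold
    # the 500.0 reading is removed by the threshold step; in-range values survive
    return cleaning_data(series, period="hourly", operations=operations)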
def clean_device_data_status(today, now, devices):
conn = mongo_proxy.MongoProxy(MongoClient(settings.MONGO_URI))
databasem = conn.get_database("flexcoop")
devicep = databasem['devices']
for device in devices:
print("starting ", device)
point = devicep.find_one({"device_id": device})
if not point:
continue
device_df = []
for key in point['status'].keys():
try:
database = "{}_{}".format("status",convert_snake_case(key))
value = status_devices[database]
except:
continue
raw_model = databasem[database]
data = list(raw_model.find({"device_id": device}))
print("readed data ", key)
if not data:
continue
df = pd.DataFrame.from_records(data)
df.index = pd.to_datetime(df.dtstart, errors='coerce')
df = df[~df.index.isna()]
df = df.sort_index()
account_id = df.account_id.unique()[0]
aggregator_id = df.aggregator_id.unique()[0]
device_class = point['rid']
            # instant values, expand the value to the current time
df = df[['value']].append(pd.DataFrame({"value": np.nan}, index=[now]))
data_clean = df.fillna(method="pad")
if data_clean.empty:
continue
df = pd.DataFrame(data_clean)
df = df.rename(columns={"value": value['field']})
device_df.append(df)
print("treated data")
if device_df:
device_df_final = device_df.pop(0)
device_df_final = device_df_final.join(device_df, how="outer")
device_df_final = device_df_final.fillna(method="pad")
device_df_final['account_id'] = account_id
device_df_final['aggregator_id'] = aggregator_id
device_df_final['device_class'] = device_class
device_df_final['device_id'] = device
device_df_final['timestamp'] = device_df_final.index.to_pydatetime()
device_df_final['_created_at'] = datetime.utcnow()
device_df_final['_updated_at'] = datetime.utcnow()
device_df_final = device_df_final[device_df_final.index >= today.replace(tzinfo=None)]
df_ini = min(device_df_final.index)
df_max = max(device_df_final.index)
documents = device_df_final.to_dict('records')
print("writting_status_data {}".format(len(documents)))
databasem['device_status'].delete_many({"device_id": device, "timestamp": {"$gte":df_ini.to_pydatetime(), "$lte": df_max.to_pydatetime()}})
databasem['device_status'].insert_many(documents)
def aggregate_device_status(now):
print("********* START STATUS CLEAN {} *************", datetime.now())
today = timezone.localize(datetime(now.year,now.month,now.day)).astimezone(pytz.UTC)
devices = set()
for key, value in status_devices.items():
raw_model = get_data_model(key)
devices.update(raw_model.__mongo__.distinct("device_id"))
devices = list(devices)
# iterate for each device to obtain the clean data of each type.
a_pool = multiprocessing.Pool(NUM_PROCESSES)
devices_per_thread = DEVICES_BY_PROC;
a_pool.map(partial(clean_device_data_status, today, now), [devices[x:x+devices_per_thread] for x in range(0, len(devices), devices_per_thread)])
print("********* END STATUS CLEAN {} *************", datetime.now())
"""
data_clean = pd.DataFrame(df.value.resample("1s").mean())
mask = pd.DataFrame(data_clean.copy())
data_clean = mask.copy()
grp = ((mask.notnull() != mask.shift().notnull()).cumsum())
grp['ones'] = 1
mask['value'] = (grp.groupby('value')['ones'].transform('count') < 3600) | data_clean['value'].notnull()
data_clean.value = data_clean.value.interpolate(limit_direction="backward")[mask.value].diff()
data_clean.value = clean_threshold_data(data_clean.value, 0 , 0.004166)
data_clean_value = data_clean.value.resample(freq).mean()
data_clean_value = data_clean_value * 60 * 15
data_clean = pd.DataFrame(data_clean_value)
plt.plot(data_clean.value)
plt.show()
"""
def clean_device_data_timeseries(today, now, last_period, freq, period, device):
conn = MongoClient(settings.MONGO_URI)
database = conn.get_database("flexcoop")
datap = database['data_points']
print("starting ", device)
point = datap.find_one({"device_id": device})
if not point:
conn.close()
return
atw_heatpumps_df = []
indoor_sensing_df = []
occupancy_df = []
meter_df = []
for key in point['reporting_items'].keys():
try:
value = timeseries_mapping[key]
except:
continue
raw_model = database[key]
data = list(raw_model.find({"device_id": device, "dtstart":{"$lte":now.strftime("%Y-%m-%dT%H:%M:%S.%f"), "$gte": last_period.strftime("%Y-%m-%dT%H:%M:%S.%f")}}))
if not data:
#no data in the last period, get the last value ever.
print("nodata")
data =list(raw_model.find({"device_id": device, "dtstart": {"$lte": now.strftime("%Y-%m-%dT%H:%M:%S.%f")}}))
if not data:
print("nodata2")
continue
else:
print("data2")
#get the last value of the request
df = pd.DataFrame.from_records(data)
df.index = pd.to_datetime(df.dtstart, errors='coerce')
df = df[~df.index.isna()]
df = df.sort_index()
df = df.iloc[[-1]]
else:
df = pd.DataFrame.from_records(data)
df.index = pd.to_datetime(df.dtstart, errors='coerce')
df = df[~df.index.isna()]
df = df.sort_index()
# get the data_point information
point_info = point['reporting_items'][key]
reading_type = point_info['reading_type']
account_id = df.account_id.unique()[0]
aggregator_id = df.aggregator_id.unique()[0]
device_class = point['rid']
df = df.loc[~df.index.duplicated(keep='last')]
print("readed data ", key)
if reading_type == "Direct Read":
if value['operation'] == "SUM":
try:
df.value = pd.to_numeric(df.value)
except:
print("AVG is only valid for numeric values")
continue
data_check = df.value.diff()
data_clean = df.value[data_check.shift(-1) >=0]
data_clean = data_clean[data_check >= 0]
data_clean = pd.DataFrame(data_clean.resample("1s").mean())
data_clean['verified'] = data_clean.value.notna()
data_clean.verified = data_clean.verified[data_clean.value.notna()]
copy = pd.DataFrame(data_clean.value.resample("3H", label='right').max())
copy['verified'] = False
copy.value = copy.value.fillna(method='ffill')
data_clean = pd.concat([data_clean, copy], sort=True)
data_clean = data_clean[~data_clean.index.duplicated(keep='last')]
data_clean = data_clean.sort_index()
data_clean.value = data_clean.value.interpolate(limit_direction="backward").diff()
data_clean['verified_0'] = data_clean.verified.fillna(method='ffill')
data_clean['verified_1'] = data_clean.verified.fillna(method='bfill')
data_clean['verified'] = data_clean.verified_0 & data_clean.verified_1
data_clean.value = clean_threshold_data(data_clean.value, 0 , 0.004166)
data_clean_value = data_clean.value.resample(freq).mean()
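                # Assumption (not stated in the source): `value` is a cumulative kWh
                # meter reading sampled/interpolated per second and `freq` is 15 minutes,
                # so the mean per-second increment times 60 s * 15 min below recovers the
                # energy (kWh) consumed in each interval.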
data_clean_value = data_clean_value * 60 * 15
data_clean_verified = data_clean.verified.resample(freq).apply(all)
data_clean = pd.DataFrame(data_clean_value)
data_clean['verified_kwh'] = data_clean_verified
else:
data_clean = pd.DataFrame()
elif reading_type == "Net":
# instant values, expand the value tu the current time
df = df[['value']].append(pd.DataFrame({"value": np.nan}, index=[now]))
if value['operation'] == "AVG":
# average is applied to numeric values
try:
df.value = pd.to_numeric(df.value)
except:
print("AVG is only valid for numeric values")
continue
data_clean = df.resample("1s").pad().dropna().resample(freq).mean()
if value['cleaning'] and not data_clean.empty:
data_clean.value = cleaning_data(data_clean.value, period, value['cleaning'])
elif value['operation'] == "FIRST":
# first is applied to all types
data_clean = df.resample("1s").pad().dropna().resample(freq).first()
elif value['operation'] == "MAX":
# max is applied to numeric values
try:
df.value = pd.to_numeric(df.value)
except:
print("MAX is only valid for numeric values")
continue
data_clean = df.resample("1s").pad().dropna().resample(freq).max()
if value['cleaning'] and not data_clean.empty:
data_clean.value = cleaning_data(data_clean.value, period, value['cleaning'])
else:
data_clean = pd.DataFrame()
else:
            data_clean = pd.DataFrame()
#!/usr/bin/env python3
# @Author : <NAME>
# @FileName : meth_stats_tool.py
# @Software : NANOME project
# @Organization : JAX Li Lab
# @Website : https://github.com/TheJacksonLaboratory/nanome
"""
Tool for pre-processing results
"""
import argparse
import glob
import gzip
import sys
from collections import defaultdict
from multiprocessing import Pool
import h5py
import numpy as np
import pandas as pd
from Bio import SeqIO
from ont_fast5_api.fast5_interface import get_fast5_file
from tqdm import tqdm
from nanocompare.eval_common import load_tombo_df, load_deepmod_df, get_dna_base_from_reference, \
load_sam_as_strand_info_df, load_nanopolish_df
from nanocompare.global_config import *
from nanocompare.global_settings import humanChrSet
def add_strand_info_for_nanopolish(
nanopolish_fn='/projects/li-lab/yang/results/12-09/K562.nanopolish/K562.methylation_calls.tsv',
sam_fn='/projects/li-lab/yang/results/12-09/K562.nanopolish/K562.sam'):
"""
No need for new nanopolish output
    Combine the nanopolish output tsv results with strand-info from SAM files. This will add the strand-info as the last column.
    This is needed because the original nanopolish output contains no strand-info, so we derive it from the alignment.
Return results columns are:
[(0, 'chromosome'), (1, 'start'), (2, 'end'), (3, 'read_name'), (4, 'log_lik_ratio'), (5, 'log_lik_methylated'), (6, 'log_lik_unmethylated'), (7, 'num_calling_strands'), (8, 'num_cpgs'), (9, 'sequence'), (10, 'strand-info')]
:param nanopolish_fn: nanopolish file name
:param sam_fn: SAM file name for strand-info
:return:
"""
if args.i is not None:
nanopolish_fn = args.i
if args.ibam is not None:
sam_fn = args.ibam
df2 = load_sam_as_strand_info_df(infn=sam_fn)
df1 = load_nanopolish_df(infn=nanopolish_fn)
df = df1.merge(df2, left_on='read_name', right_on='read-name', how='left')
df = df.drop('read-name', axis=1)
logger.info(df)
logger.info(list(enumerate(df.columns)))
if len(df1) != len(df):
raise Exception(
"We found the read-name of Nanopolish results is not mapped all to SAM/BAM file, please check if the BAM file is used for Nanopolish")
# df = df.iloc[:, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]]
outfn = os.path.join(pic_base_dir,
f'{os.path.splitext(os.path.basename(nanopolish_fn))[0]}-nanopolish-strand-info.tsv')
df.to_csv(outfn, sep='\t', index=False)
logger.info(f'save to {outfn}')
return df
def sanity_check_get_dna_seq(chrstr):
"""
Check 0-based start, input as 'chr1:123'
:param chrstr:
:return:
"""
chr, start = chrstr.strip().split(':')
start = int(start)
show_arrow = ''.join(['~'] * 5 + ['↑'] + ['~'] * 5)
ret = get_dna_base_from_reference(chr, start, ref_fasta=ref_fasta)
logger.info(f'chr={chr}, start={start}\nSEQ={ret}\nPOS={show_arrow}')
def filter_noncg_sites_ref_seq(df, tagname, ntask=1, ttask=1, num_seq=5, chr_col=0, start_col=1, strand_col=5,
toolname='tombo'):
"""
Filter out rows that are non-CG patterns in Tombo results, reference sequence is based on BAM files
from SAM to BAM (with index) script is as follows:
samtools view -S -b K562.sam > K562.bam
samtools sort -o K562.sorted.bam K562.bam
samtools index K562.sorted.bam
:param tombo_fn:
:param sam_fn:
:return:
"""
chrs = df.iloc[:, chr_col].unique()
chrs = np.sort(chrs)
logger.info(chrs)
logger.info(len(chrs))
all_list = list(range(len(df)))
cpg_pattern_index = subset_of_list(all_list, ntask, ttask)
# sel_chrs = subset_of_list(chrs, ntask, ttask)
# logger.info(sel_chrs)
# df = df[df[0].isin(sel_chrs)]
df = df.iloc[cpg_pattern_index, :]
logger.info(df)
rep_chr = df.iloc[0, chr_col]
seq_col = []
cpg_pattern_index = []
print_first = True
for index, row in tqdm(df.iterrows()):
if print_first:
logger.info(f"index={index}, row={row}")
print_first = False
chr = row[chr_col]
start = int(row[start_col])
strand_info = row[strand_col]
# ret = get_dna_sequence_from_samfile(chr, start, start + num_seq, samfile) # may return None, if no sequence at all reads
ret = get_dna_base_from_reference(chr, start, num_seq=num_seq, ref_fasta=ref_fasta)
seq_col.append(ret)
if toolname == 'tombo':
if ret[5:7] == 'CG':
cpg_pattern_index.append(index)
elif toolname == 'deepmod':
if strand_info == '+':
if ret[5:7] == 'CG':
cpg_pattern_index.append(index)
elif strand_info == '-':
if ret[4:6] == 'CG':
cpg_pattern_index.append(index)
# TODO: using ret if it is CG pattern, or will remove later
# logger.info(f'chr={chr}, start={start}, strand={strand_info}, ret={ret}')
# if index > 10000:
# break
df['sequence'] = seq_col
logger.debug(f'before filter:{len(df)}, after non-CG filter:{len(cpg_pattern_index)}')
df = df.loc[cpg_pattern_index, :]
# tagname is like 'K562.tombo.perReadsStats.combine'
# then outfn is like 'K562.tombo.perReadsStats.combine-with-seq-info-n300-t001-chr1.tsv'
outfn = os.path.join(args.o, f'{tagname}-with-seq-info-n{ntask}-t{ttask:03d}-{rep_chr}.tsv')
df.to_csv(outfn, sep='\t', header=False, index=False)
logger.info(f"save to {outfn}")
def filter_noncg_sites_ref_seq_mpi(df, tagname, ntask=1, ttask=1, num_dna_seq=5, chr_col=0, start_col=1, strand_col=5,
toolname='tombo', print_first=False):
"""
MPI version
invoke like: res = p.apply_async(testFunc, args=(2, 4), kwds={'calcY': False})
or pool.apply_async(test, (t,), dict(arg2=5))
Filter out rows that are non-CG patterns in Tombo results, reference sequence is based on BAM files
:param tombo_fn:
:param sam_fn:
:return:
"""
rep_chr = df.iloc[0, chr_col]
seq_col = []
only_cpg_pattern_index = []
for index, row in df.iterrows():
if print_first:
logger.info(f"index={index}, row={row}")
print_first = False
chr = row[chr_col]
start = int(row[start_col])
strand_info = row[strand_col]
ret = get_dna_base_from_reference(chr, start, num_seq=num_dna_seq, ref_fasta=ref_fasta)
seq_col.append(ret)
if toolname == 'tombo':
if ret[5:7] == 'CG':
only_cpg_pattern_index.append(index)
elif toolname in ['deepmod', 'deepmod-read-level']:
if strand_info == '+':
if ret[5:7] == 'CG':
only_cpg_pattern_index.append(index)
elif strand_info == '-':
if ret[4:6] == 'CG':
only_cpg_pattern_index.append(index)
df['sequence'] = seq_col
# logger.debug(f'Subprocess [{ttask}:{ntask}] finished, before filter:{len(df)}, after non-CG filter:{len(only_cpg_pattern_index)}')
df = df.loc[only_cpg_pattern_index, :]
# tagname is like 'K562.tombo.perReadsStats.combine'
# then outfn is like 'K562.tombo.perReadsStats.combine-with-seq-info-n300-t001-chr1.tsv'
# outfn = os.path.join(args.o, f'{tagname}-with-seq-info-n{ntask}-t{ttask:03d}-{rep_chr}.tsv')
# df.to_csv(outfn, sep='\t', header=False, index=False)
# logger.info(f"save to {outfn}")
logger.info(f"Finished of subprocess {ttask}:{ntask}")
return df
def filter_noncg_sites_for_tombo(
tombo_fn='/projects/li-lab/yang/workspace/nano-compare/data/tools-call-data/K562/K562.tombo_perReadsStats.bed',
sam_fn='/projects/li-lab/yang/results/12-09/K562.nanopolish/K562.sorted.bam', ntask=1, ttask=1, num_seq=5):
if args.i is not None:
tombo_fn = args.i
df = load_tombo_df(infn=tombo_fn)
basefn = os.path.basename(tombo_fn)
basename = os.path.splitext(basefn)[0]
filter_noncg_sites_ref_seq(df=df, tagname=basename, ntask=ntask, ttask=ttask, num_seq=num_seq)
def convert_bismark_add_strand_and_seq(indf, outfn, report_num=None):
"""
Check start pointer, if point to CG's C, it is positive strand, or else, it is reverse strand
    Note: input file is 1-based start, we also output to a 1-based format that is compatible with our Bismark import functions.
:param indf:
:param outf:
:param report_num:
:return:
"""
logger.debug(f'Start add strand and seq to bismark cov file, total len={len(indf)}')
outf = gzip.open(outfn, 'wt')
for index, row in tqdm(indf.iterrows()):
# if report_num and index % report_num == 0:
# logger.debug(f'processed index={index}')
chr = row['chr']
start = int(row['start']) # Keep raw 1-based format of bismark results
ret = get_dna_base_from_reference(chr, start - 1, ref_fasta=ref_fasta)
if ret[5] == 'C': # strand is +
strand = '+'
elif ret[5] == 'G':
strand = '-'
else:
raise Exception(f'We can not identify this bg-truth file with non-CG results, such as row={row}')
outstr = '\t'.join([chr, str(start), strand, str(row['mcount']), str(row['ccount']), ret[4:7]])
outf.write(f'{outstr}\n')
outf.close()
logger.info(f'save to {outfn}')
logger.debug(f'Finish add strand info task')
def convert_bismark_cov_to_gw_format(df):
"""
Save adding strand info and dna seq format, which is in same format of Bismark Genome-wide output files
:param df:
:return:
"""
basefn = os.path.basename(args.i)
basename = os.path.splitext(basefn)[0]
outfn = os.path.join(args.o, f'{basename}.convert.add.strand.tsv.gz')
convert_bismark_add_strand_and_seq(df, outfn)
def filter_noncg_sites_mpi(df, ntask=300, toolname='tombo'):
"""
MPI version of filter out non-CG patterns
:return:
"""
basefn = os.path.basename(args.i)
basename = os.path.splitext(basefn)[0]
all_list = list(range(len(df)))
# Store each sub-process return results
df_list = []
with Pool(processes=args.processors) as pool:
for epoch in range(ntask):
cpg_pattern_index = subset_of_list(all_list, ntask, epoch + 1)
seldf = df.iloc[cpg_pattern_index, :]
if toolname == 'tombo':
df_list.append(pool.apply_async(filter_noncg_sites_ref_seq_mpi, (seldf, basename, ntask, epoch + 1)))
elif toolname == 'deepmod':
df_list.append(pool.apply_async(filter_noncg_sites_ref_seq_mpi, (seldf, basename, ntask, epoch + 1),
dict(chr_col=0, start_col=1, strand_col=5, toolname='deepmod')))
elif toolname == 'deepmod-read-level':
df_list.append(pool.apply_async(filter_noncg_sites_ref_seq_mpi, (seldf, basename, ntask, epoch + 1),
dict(chr_col=0, start_col=1, strand_col=5,
toolname='deepmod-read-level')))
else:
raise Exception(f"{toolname} is no valid.")
pool.close()
pool.join()
# Combine df
logger.debug("Start to combine all results")
df_list = [df1.get() for df1 in df_list]
retdf = pd.concat(df_list)
logger.debug(retdf)
## Note: original input=K562.tombo.perReadsStats.combine.tsv
## output=K562.tombo.perReadsStatsOnlyCpG.combine.tsv
if toolname == 'tombo':
basefn = basefn.replace("perReadsStats", "perReadsStatsOnlyCG").replace("combined", "combine")
elif toolname == 'deepmod':
## Example: HL60.deepmod.C.combined.tsv
basefn = basefn.replace(".C.", ".C_OnlyCG.").replace("combined", "combine")
else:
raise Exception(f"{toolname} is no valid.")
outfn = os.path.join(args.o, f'{basefn}')
retdf.to_csv(outfn, sep='\t', index=False, header=False)
logger.debug(f"Save to {outfn}")
def filter_noncg_sites_for_deepmod(
deepmod_fn='/projects/li-lab/yang/workspace/nano-compare/data/tools-call-data/K562/K562.deepmod_combined.bed',
sam_fn='/projects/li-lab/yang/results/12-09/K562.nanopolish/K562.sorted.bam', ntask=1, ttask=1, num_seq=5):
if args.i is not None:
deepmod_fn = args.i
df = load_deepmod_df(infn=deepmod_fn)
basefn = os.path.basename(deepmod_fn)
basename = os.path.splitext(basefn)[0]
filter_noncg_sites_ref_seq(df=df, tagname=basename, ntask=ntask, ttask=ttask, num_seq=num_seq, chr_col=0,
start_col=1, strand_col=5, toolname='deepmod')
def subset_of_list(alist, n, t):
"""
Subset of a list for multi-processing
    n = total number of tasks
    t = current task id, 1 to n (inclusive)
return subset list of alist
:param alist:
:param n:
:param t:
:return:
"""
if t < 1 or t > n:
raise Exception(f't={t} is not accept, must be 1-N (include)')
    if n > len(alist):  # if n is bigger than the list length, return a single-item list for t <= len(alist)
if t <= len(alist):
return [alist[t - 1]]
else:
return None
m = int(len(alist) / n) # each task of a section of list
start_index = int((t - 1) * m)
if t == n:
sublist = alist[start_index:]
else:
sublist = alist[start_index:start_index + m]
# logger.debug(f'n={n}, t={t}, section={m}, index={start_index}:{start_index + m}')
return sublist
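# Worked example (illustrative only): splitting a 10-item list across n=3 tasks gives
# chunk size m = int(10/3) = 3, so t=1 -> items[0:3], t=2 -> items[3:6], and the last
# task absorbs the remainder: t=3 -> items[6:10].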
def get_f5_readid_map(flist):
f5_readid_map = defaultdict(str)
for fn in flist:
basename = os.path.basename(fn)
with get_fast5_file(fn, mode="r") as f5:
# if len(f5.get_reads()) >= 2:
# raise Exception(f'We can only deal with one read in fast5, but fn={fn}, contains {len(f5.get_reads())} multiple reads')
for read in f5.get_reads():
f5_readid_map[basename] = str(read.read_id)
return f5_readid_map
def build_map_fast5_to_readid_mp(
basedir='/fastscratch/liuya/nanocompare/K562-Runs/K562-DeepMod-N50/K562-DeepMod-N50-basecall', ntask=300):
patfn = os.path.join(basedir, '**', '*.fast5')
fast5_flist = glob.glob(patfn, recursive=True)
logger.info(f'Total fast5 files: {len(fast5_flist)}')
ret_list = []
with Pool(processes=args.processors) as pool:
for epoch in range(ntask):
subflist = subset_of_list(fast5_flist, ntask, epoch + 1)
ret_list.append(pool.apply_async(get_f5_readid_map, (subflist,)))
pool.close()
pool.join()
logger.debug('Finish fast5 to read-id mapping')
f5_readid_map = defaultdict(str)
for ret in ret_list:
f5_readid_map.update(ret.get())
# for fn in fast5_flist[:]:
# # logger.debug(fn)
# basename = os.path.basename(fn)
#
# with get_fast5_file(fn, mode="r") as f5:
# for read in f5.get_reads():
# # logger.debug(read.read_id)
# f5_readid_map[basename] = str(read.read_id)
return f5_readid_map
def process_pred_detail_f5file(fn, f5_readid_map):
"""
For each deepmod prediction results file, we analyze a df result of read-level results
:param fn:
:param f5_readid_map:
:return:
"""
f5_pred_key = '/pred/pred_0/predetail'
dflist = []
with h5py.File(fn, 'r') as mr:
# m_pred = mr[f5_pred_key].value
# logger.debug(m_pred)
for name in mr['/pred']:
# logger.debug(name)
pred_num_key = f'/pred/{name}'
f5file = os.path.basename(mr[pred_num_key].attrs['f5file'])
mapped_chr = mr[pred_num_key].attrs['mapped_chr']
mapped_strand = mr[pred_num_key].attrs['mapped_strand']
# logger.debug(f'{pred_num_key}: chr={mapped_chr}, strand={mapped_strand}, f5file={f5file}')
pred_detail_key = f'{pred_num_key}/predetail'
# m_pred = mr[pred_detail_key].value
m_pred = mr[pred_detail_key][()]
m_pred = np.array(m_pred, dtype=[('refbase', 'U1'), ('readbase', 'U1'), ('refbasei', np.uint64),
('readbasei', np.uint64), ('mod_pred', np.int)])
dataset = []
for mi in range(len(m_pred)):
if m_pred['refbase'][mi] not in ['C']:
continue
if m_pred['refbase'][mi] in ['-', 'N', 'n']:
continue
# if m_pred['readbase'][mi] == '-':
# continue
# Filter non-CG patterns results
ret = get_dna_base_from_reference(mapped_chr, int(m_pred['refbasei'][mi]), ref_fasta=ref_fasta)
if mapped_strand == '+':
if ret[5:7] != 'CG':
continue
elif mapped_strand == '-':
if ret[4:6] != 'CG':
continue
if -0.1 < m_pred['mod_pred'][mi] - 1 < 0.1:
meth_indicator = 1
else:
meth_indicator = 0
# sp_options['4NA'][m_pred['refbase'][mi]][(cur_chr, cur_strand, int(m_pred['refbasei'][mi]) )][0] += 1
ret = {'start': int(m_pred['refbasei'][mi]), 'pred': meth_indicator, 'base': m_pred['refbase'][mi],
'sequence': ret}
dataset.append(ret)
df = pd.DataFrame(dataset)
if len(df) < 1:
continue
df['chr'] = str(mapped_chr)
df['end'] = df['start'] + 1
df['strand'] = str(mapped_strand)
df['read-id'] = f5_readid_map[f5file]
df = df[['chr', 'start', 'end', 'read-id', 'base', 'strand', 'sequence', 'pred']]
# logger.info(df)
dflist.append(df)
sumdf = pd.concat(dflist)
# logger.debug(f'Process pred detail file {fn} finished, total reads={len(sumdf)}.')
return sumdf
def extract_deepmod_read_level_results_mp(
basecallDir='/fastscratch/liuya/nanocompare/K562-Runs/K562-DeepMod-N50/K562-DeepMod-N50-basecall',
methcallDir='/fastscratch/liuya/nanocompare/K562-Runs/K562-DeepMod-N50/K562-DeepMod-N50-methcall', ntask=50):
f5_readid_map = build_map_fast5_to_readid_mp(basedir=basecallDir, ntask=ntask)
# logger.debug(f5_readid_map)
# dirname = '/fastscratch/liuya/nanocompare/K562-Runs/K562-DeepMod-N50/K562-DeepMod-N50-methcall/**/rnn.pred.detail.fast5.*'
dirname = os.path.join(methcallDir, '**', 'rnn.pred.detail.fast5.*')
fast5_flist = glob.glob(dirname, recursive=True)
logger.info(f'Total deepmod fast5 files:{len(fast5_flist)}')
dflist = []
with Pool(processes=args.processors) as pool:
for fn in fast5_flist[:]:
# df = process_pred_detail_f5file(fn, f5_readid_map)
dflist.append(pool.apply_async(process_pred_detail_f5file, (fn, f5_readid_map,)))
# logger.debug(df)
# logger.debug(df.iloc[1, :])
# logger.debug(fn)
# pass
pool.close()
pool.join()
dflist = [df.get() for df in dflist]
sumdf = pd.concat(dflist)
logger.debug('Finish get df from deepmod fast5 predetail files')
cpgDict = defaultdict(lambda: [0, 0]) # 0:cov, 1:meth-cov
for index, row in sumdf.iterrows():
chr = row['chr']
start = row['start']
strand = row['strand']
basekey = (chr, start, strand)
cpgDict[basekey][0] += 1
if row['pred'] == 1:
cpgDict[basekey][1] += 1
logger.debug(f'CpG sites={len(cpgDict)}')
dataset = []
for site in cpgDict:
ret = {'chr': site[0], 'start': site[1], 'end': site[1] + 1, 'base': 'C', 'cap-cov': cpgDict[site][0],
'strand': site[2], 'no-use1': '', 'start1': site[1], 'end1': site[1] + 1, 'no-use2': '0,0,0',
'cov': cpgDict[site][0], 'meth-freq': int(100 * cpgDict[site][1] / cpgDict[site][0]),
'meth-cov': cpgDict[site][1]}
dataset.append(ret)
beddf = pd.DataFrame(dataset)
beddf = beddf[
['chr', 'start', 'end', 'base', 'cap-cov', 'strand', 'no-use1', 'start1', 'end1', 'no-use2', 'cov', 'meth-freq',
'meth-cov']]
logger.debug('Finish bed df, extract all DONE.')
return sumdf, beddf
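# For illustration: the aggregation above collapses read-level predictions into site-level
# counts. For example, a CpG site covered by 4 reads of which 3 are predicted methylated
# yields cov=4, meth-cov=3 and meth-freq=int(100 * 3 / 4)=75 in the resulting bed-style df.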
def parse_arguments():
"""
:return:
"""
parser = argparse.ArgumentParser(description='Multi-task')
parser.add_argument("cmd", help="name of command: compute, combine, or gen-pixel-info")
parser.add_argument('-n', type=int, help="the total number of tasks (1-27)", default=1)
parser.add_argument('-t', type=int, help="the current task id (1-N)", default=1)
parser.add_argument('-i', type=str, help="input file", default=None)
parser.add_argument('-o', type=str, help="output dir or file", default=pic_base_dir)
parser.add_argument('--o2', type=str, help="second output dir or file", default=None)
parser.add_argument('--ibam', type=str, help="input bam/sam file", default=None)
parser.add_argument('--basecallDir', type=str, help="basecallDir dir name", default=None)
parser.add_argument('--methcallDir', type=str, help="methcallDir dir name", default=None)
parser.add_argument('--processors', type=int, help="Number of processors", default=8)
parser.add_argument('--mpi', action='store_true')
parser.add_argument('--chrs', nargs='+', help='all chrs need to check', default=[])
return parser.parse_args()
def output_bed_by_bin(bin_id):
num_bins = 5
density_col = 4
output_cols = [0, 1, 2]
bin_value = int(bin_id / num_bins * 100 + 1e-5)
logger.info(f"start with bin_id={bin_id}, bin_value={bin_value}")
ndf = df[df[density_col] == bin_value]
ndf = ndf.iloc[:, output_cols]
logger.info(f"start to save, df={len(df):,}, ndf={len(ndf):,}, for bin_value={bin_value}")
outfn = os.path.join(args.o, f"hg38.gc5Base.bin{bin_value}.bed.gz")
ndf.to_csv(outfn, sep='\t', header=False, index=False)
logger.info(f"save to {outfn}")
def output_bed_by_bin2(infn, num_bins):
inf = gzip.open(infn, 'rt')
outf_list = []
for bin_id in range(0, num_bins + 1):
bin_value = int(bin_id / num_bins * 100 + 1e-5)
outf_list.append(gzip.open(os.path.join(args.o, f"hg38.gc5Base.bin{bin_value}.bed.gz"), 'wt'))
for row in tqdm(inf):
tmp = row.strip().split("\t")
density_col = 4
bin_value = int(float(tmp[density_col]) + 1e-5)
bin_id = bin_value // 20
if bin_id not in range(0, num_bins + 1):
logger.error(f"Error found: bin_value={bin_value}, bin_id={bin_id}, for row={row}")
raise Exception(f"Error found: bin_value={bin_value}, bin_id={bin_id}, for row={row}")
outf_list[bin_id].write(f"{tmp[0]}\t{tmp[1]}\t{tmp[2]}\n")
    for outf in outf_list:
        outf.close()
logger.info("Finished bin bed for gc density")
def save_tss_bed_for_5hmc(infn, outfn):
logger.info(f"open infn={infn}")
df = pd.read_csv(infn, sep='\t', header=None)
logger.debug(df)
df = df.iloc[:, [0, 1, 2, 4, 7]]
df.columns = ['chr', 'start', 'end', '5hmc_level', 'strand']
df['n1'] = '.'
df['start'] = df['start'].astype(int) - 1
df['end'] = df['end'].astype(int) - 1
df['5hmc_level'] = df['5hmc_level'].astype(float)
df = df[['chr', 'start', 'end', '5hmc_level', 'n1', 'strand']]
logger.info(f"df['5hmc_level'] = {df['5hmc_level'].describe()}")
logger.info(f"len(df['5hmc_level'] >= 1.0) = {(df.loc[:, '5hmc_level'] >= 1.0 - 1e-3).sum()}")
df.to_csv(outfn, sep='\t', header=False, index=False)
logger.info(f"save to {outfn}")
pass
if __name__ == '__main__':
set_log_debug_level()
args = parse_arguments()
logger.debug(args)
ref_fasta = None
if args.cmd in ['tombo-add-seq', 'deepmod-add-seq', 'deepmod-read-level', 'sanity-check-seq',
'bismark-convert']: # These command will use reference genome
ref_fn = '/projects/li-lab/Ziwei/Nanopore/data/reference/hg38.fa'
ref_fasta = SeqIO.to_dict(SeqIO.parse(open(ref_fn), 'fasta'))
if args.cmd == 'tombo-add-seq':
if args.mpi:
logger.debug('in mpi mode')
import multiprocessing
logger.debug(
"There are %d CPUs on this machine by multiprocessing.cpu_count()" % multiprocessing.cpu_count())
df = load_tombo_df(infn=args.i)
filter_noncg_sites_mpi(df)
else:
filter_noncg_sites_for_tombo(ntask=args.n, ttask=args.t)
elif args.cmd == 'deepmod-add-seq':
if args.mpi:
logger.debug('in mpi mode')
import multiprocessing
logger.debug(
"There are %d CPUs on this machine by multiprocessing.cpu_count()" % multiprocessing.cpu_count())
df = load_deepmod_df(infn=args.i)
filter_noncg_sites_mpi(df, toolname='deepmod')
else:
filter_noncg_sites_for_deepmod(ntask=args.n, ttask=args.t)
elif args.cmd == 'nanopolish-add-strand':
add_strand_info_for_nanopolish()
elif args.cmd == 'sanity-check-seq':
## bash meth_stats_tool.sh sanity-check-seq --chrs chr4:10164 chr4:10298
for chrstr in args.chrs:
# logger.info(chrstr)
sanity_check_get_dna_seq(chrstr)
elif args.cmd == 'deepmod-read-level':
### Running bash:
"""
sbatch meth_stats_tool_mpi.sh deepmod-read-level --basecallDir /fastscratch/liuya/nanocompare/K562-Runs/K562-DeepMod-N50/K562-DeepMod-N50-basecall --methcallDir /fastscratch/liuya/nanocompare/K562-Runs/K562-DeepMod-N50/K562-DeepMod-N50-methcall -o /fastscratch/liuya/nanocompare/deepmod-read-level1.tsv --o2 /fastscratch/liuya/nanocompare/deepmod-read-level1-extract-output.bed
"""
sumdf, beddf = extract_deepmod_read_level_results_mp(basecallDir=args.basecallDir, methcallDir=args.methcallDir)
logger.info(sumdf)
logger.info(sumdf.iloc[1, :])
logger.info(sumdf['chr'].unique())
# outfn = os.path.join('/fastscratch/liuya/nanocompare/', 'deepmod-read-level.tsv')
# Save read level results
outfn = args.o
sumdf.to_csv(outfn, sep='\t', index=False, header=False)
logger.info(f'save to {outfn}')
if args.o2: # Save CpG base level results bed file for cluster module use
outfn = args.o2
beddf.to_csv(outfn, sep=' ', index=False, header=False)
logger.info(f'save to {outfn}')
elif args.cmd == 'bismark-convert': # Convert non-strand info bismark to strand
## bash meth_stats_tool.sh bismark-convert -i /pod/2/li-lab/Ziwei/Nanopore_methyl_compare/result/APL_BSseq/APL-bs_R1_val_1_bismark_bt2_pe.deduplicated.sorted.bed
## sbatch meth_stats_tool.sh bismark-convert -i /pod/2/li-lab/Ziwei/Nanopore_methyl_compare/result/APL_BSseq/APL-bs_R1_val_1_bismark_bt2_pe.deduplicated.sorted.bed
df = pd.read_csv(args.i, sep='\t', header=None)
if len(df.columns) != 6:
raise Exception(f"Can no recognize input file format for infn={args.i}, df={df}")
df.columns = ['chr', 'start', 'end', 'freq100', 'mcount', 'ccount']
logger.debug(df)
convert_bismark_cov_to_gw_format(df)
elif args.cmd == 'gc-density-bed':
# sbatch meth_stats_tool.sh gc-density-bed
infn = "/projects/li-lab/yang/workspace/nano-compare/data/genome-annotation/hg38.gc5Base.bed.gz"
output_bed_by_bin2(infn, num_bins=5)
if True:
sys.exit(0)
        df = pd.read_csv(infn, sep='\t', header=None)
import pandas as pd
import numpy as np
import datetime
from contextlib import contextmanager
from xbbg import const, pipeline
from xbbg.io import logs, files, storage
from xbbg.core import utils, conn, process
def bsrch(tickers, flds, domain, variables, **kwargs):
    logger = logs.get_logger(bsrch, **kwargs)
service = conn.bbg_service(service='//blp/exrsvc', **kwargs)
request = service.createRequest("ExcelGetGridRequest")
request.set("Domain", domain)
overrides = request.getElement("Overrides")
for key, value in variables.items():
override1 = overrides.appendElement()
override1.setElement("name", key)
override1.setElement("value", value)
process.init_request(request=request, tickers=tickers, flds=flds, **kwargs)
logger.debug(f'Sending request to Bloomberg ...\n{request}')
conn.send_request(request=request, **kwargs)
res = pd.DataFrame(process.rec_events(func=process.process_ref))
if kwargs.get('raw', False): return res
if res.empty or any(fld not in res for fld in ['ticker', 'field']):
return pd.DataFrame()
col_maps = kwargs.get('col_maps', None)
cols = res.field.unique()
return (
res
.set_index(['ticker', 'field'])
.unstack(level=1)
.rename_axis(index=None, columns=[None, None])
.droplevel(axis=1, level=0)
.loc[:, cols]
.pipe(pipeline.standard_cols, col_maps=col_maps)
)
def bdp(tickers, flds, **kwargs) -> pd.DataFrame:
"""
Bloomberg reference data
Args:
tickers: tickers
flds: fields to query
**kwargs: Bloomberg overrides
Returns:
pd.DataFrame
"""
logger = logs.get_logger(bdp, **kwargs)
if isinstance(tickers, str): tickers = [tickers]
if isinstance(flds, str): flds = [flds]
service = conn.bbg_service(service='//blp/refdata', **kwargs)
request = service.createRequest('ReferenceDataRequest')
process.init_request(request=request, tickers=tickers, flds=flds, **kwargs)
logger.debug(f'Sending request to Bloomberg ...\n{request}')
conn.send_request(request=request, **kwargs)
res = pd.DataFrame(process.rec_events(func=process.process_ref))
if kwargs.get('raw', False): return res
if res.empty or any(fld not in res for fld in ['ticker', 'field']):
return pd.DataFrame()
col_maps = kwargs.get('col_maps', None)
cols = res.field.unique()
return (
res
.set_index(['ticker', 'field'])
.unstack(level=1)
.rename_axis(index=None, columns=[None, None])
.droplevel(axis=1, level=0)
.loc[:, cols]
.pipe(pipeline.standard_cols, col_maps=col_maps)
)
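# Example usage (illustrative sketch only; it assumes a live Bloomberg session and the
# ticker/field mnemonics below, which are not guaranteed by this module):
#   bdp('NVDA US Equity', ['Security_Name', 'GICS_Sector_Name'])
# returns one row per ticker and one column per requested field, after the
# set_index/unstack/standard_cols pipeline above.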
def bds(tickers, flds, **kwargs) -> pd.DataFrame:
"""
Bloomberg block data
Args:
tickers: ticker(s)
flds: field
**kwargs: other overrides for query
Returns:
pd.DataFrame: block data
"""
logger = logs.get_logger(bds, **kwargs)
service = conn.bbg_service(service='//blp/refdata', **kwargs)
request = service.createRequest('ReferenceDataRequest')
if isinstance(tickers, str):
data_file = storage.ref_file(
ticker=tickers, fld=flds, has_date=True, ext='pkl', **kwargs
)
if files.exists(data_file):
logger.debug(f'Loading Bloomberg data from: {data_file}')
return pd.DataFrame(pd.read_pickle(data_file))
process.init_request(request=request, tickers=tickers, flds=flds, **kwargs)
logger.debug(f'Sending request to Bloomberg ...\n{request}')
conn.send_request(request=request, **kwargs)
res = pd.DataFrame(process.rec_events(func=process.process_ref))
if kwargs.get('raw', False): return res
if res.empty or any(fld not in res for fld in ['ticker', 'field']):
return pd.DataFrame()
data = (
res
.set_index(['ticker', 'field'])
.droplevel(axis=0, level=1)
.rename_axis(index=None)
.pipe(pipeline.standard_cols, col_maps=kwargs.get('col_maps', None))
)
if data_file:
logger.debug(f'Saving Bloomberg data to: {data_file}')
files.create_folder(data_file, is_file=True)
data.to_pickle(data_file)
return data
else:
return pd.DataFrame(pd.concat([
bds(tickers=ticker, flds=flds, **kwargs) for ticker in tickers
], sort=False))
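# Example usage (illustrative; the bulk field mnemonic is an assumption):
#   bds('AAPL US Equity', 'DVD_Hist_All')
# Bulk fields come back as one row per record (e.g. one row per historical dividend); for a
# single ticker the result is cached to a pickle file via storage.ref_file, as handled above.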
def bdh(
tickers, flds=None, start_date=None, end_date='today', adjust=None, **kwargs
) -> pd.DataFrame:
"""
Bloomberg historical data
Args:
tickers: ticker(s)
flds: field(s)
start_date: start date
end_date: end date - default today
adjust: `all`, `dvd`, `normal`, `abn` (=abnormal), `split`, `-` or None
exact match of above words will adjust for corresponding events
Case 0: `-` no adjustment for dividend or split
Case 1: `dvd` or `normal|abn` will adjust for all dividends except splits
            Case 2: `split` will adjust for splits and ignore all dividends
Case 3: `all` == `dvd|split` == adjust for all
Case 4: None == Bloomberg default OR use kwargs
**kwargs: overrides
Returns:
pd.DataFrame
"""
logger = logs.get_logger(bdh, **kwargs)
if flds is None: flds = ['Last_Price']
e_dt = utils.fmt_dt(end_date, fmt='%Y%m%d')
if start_date is None: start_date = pd.Timestamp(e_dt) - pd.Timedelta(weeks=8)
s_dt = utils.fmt_dt(start_date, fmt='%Y%m%d')
service = conn.bbg_service(service='//blp/refdata', **kwargs)
request = service.createRequest('HistoricalDataRequest')
process.init_request(
request=request, tickers=tickers, flds=flds,
start_date=s_dt, end_date=e_dt, adjust=adjust, **kwargs
)
logger.debug(f'Sending request to Bloomberg ...\n{request}')
conn.send_request(request=request, **kwargs)
res = pd.DataFrame(process.rec_events(process.process_hist))
if kwargs.get('raw', False): return res
if res.empty or any(fld not in res for fld in ['ticker', 'date']):
return pd.DataFrame()
return (
res
.set_index(['ticker', 'date'])
.unstack(level=0)
.rename_axis(index=None, columns=[None, None])
.swaplevel(0, 1, axis=1)
)
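# Example usage (illustrative; ticker and dates are assumptions):
#   bdh('SPY US Equity', flds=['High', 'Low', 'Last_Price'],
#       start_date='2020-01-01', end_date='2020-01-31', adjust='all')
# adjust='all' corresponds to Case 3 in the docstring (adjust for both dividends and splits);
# the result is indexed by date with a (ticker, field) column MultiIndex.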
def bdib(
ticker: str, dt, session='allday', typ='TRADE', **kwargs
) -> pd.DataFrame:
"""
Bloomberg intraday bar data
Args:
ticker: ticker name
dt: date to download
session: [allday, day, am, pm, pre, post]
typ: [TRADE, BID, ASK, BID_BEST, ASK_BEST, BEST_BID, BEST_ASK]
**kwargs:
batch: whether is batch process to download data
log: level of logs
Returns:
pd.DataFrame
"""
from xbbg.core import missing
logger = logs.get_logger(bdib, **kwargs)
exch = const.exch_info(ticker=ticker)
if exch.empty: raise KeyError(f'Cannot find exchange info for {ticker}')
ss_rng = process.time_range(dt=dt, ticker=ticker, session=session, tz=exch.tz)
data_file = storage.bar_file(ticker=ticker, dt=dt, typ=typ)
if files.exists(data_file) and kwargs.get('cache', True) \
and (not kwargs.get('reload', False)):
res = (
pd.read_parquet(data_file)
.pipe(pipeline.add_ticker, ticker=ticker)
.loc[ss_rng[0]:ss_rng[1]]
)
if not res.empty:
logger.debug(f'Loading Bloomberg intraday data from: {data_file}')
return res
t_1 = pd.Timestamp('today').date() - pd.Timedelta('1D')
whole_day = pd.Timestamp(dt).date() < t_1
batch = kwargs.pop('batch', False)
if (not whole_day) and batch:
logger.warning(f'Querying date {t_1} is too close, ignoring download ...')
return pd.DataFrame()
cur_dt = pd.Timestamp(dt).strftime('%Y-%m-%d')
info_log = f'{ticker} / {cur_dt} / {typ}'
q_tckr = ticker
if exch.get('is_fut', False):
if 'freq' not in exch:
logger.error(f'[freq] missing in info for {info_log} ...')
is_sprd = exch.get('has_sprd', False) and (len(ticker[:-1]) != exch['tickers'][0])
if not is_sprd:
q_tckr = fut_ticker(gen_ticker=ticker, dt=dt, freq=exch['freq'])
if q_tckr == '':
logger.error(f'cannot find futures ticker for {ticker} ...')
return pd.DataFrame()
info_log = f'{q_tckr} / {cur_dt} / {typ}'
miss_kw = dict(ticker=ticker, dt=dt, typ=typ, func='bdib')
cur_miss = missing.current_missing(**miss_kw)
if cur_miss >= 2:
        if batch: return pd.DataFrame()
from datetime import timedelta
from functools import partial
from operator import attrgetter
import dateutil
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs import OutOfBoundsDatetime, conversion
import pandas as pd
from pandas import (
DatetimeIndex, Index, Timestamp, date_range, datetime, offsets,
to_datetime)
from pandas.core.arrays import DatetimeArray, period_array
import pandas.util.testing as tm
class TestDatetimeIndex(object):
@pytest.mark.parametrize('dt_cls', [DatetimeIndex,
DatetimeArray._from_sequence])
def test_freq_validation_with_nat(self, dt_cls):
# GH#11587 make sure we get a useful error message when generate_range
# raises
msg = ("Inferred frequency None from passed values does not conform "
"to passed frequency D")
with pytest.raises(ValueError, match=msg):
dt_cls([pd.NaT, pd.Timestamp('2011-01-01')], freq='D')
with pytest.raises(ValueError, match=msg):
dt_cls([pd.NaT, pd.Timestamp('2011-01-01').value],
freq='D')
def test_categorical_preserves_tz(self):
# GH#18664 retain tz when going DTI-->Categorical-->DTI
# TODO: parametrize over DatetimeIndex/DatetimeArray
# once CategoricalIndex(DTA) works
dti = pd.DatetimeIndex(
[pd.NaT, '2015-01-01', '1999-04-06 15:14:13', '2015-01-01'],
tz='US/Eastern')
ci = pd.CategoricalIndex(dti)
carr = pd.Categorical(dti)
cser = pd.Series(ci)
for obj in [ci, carr, cser]:
result = pd.DatetimeIndex(obj)
tm.assert_index_equal(result, dti)
def test_dti_with_period_data_raises(self):
# GH#23675
data = pd.PeriodIndex(['2016Q1', '2016Q2'], freq='Q')
with pytest.raises(TypeError, match="PeriodDtype data is invalid"):
DatetimeIndex(data)
with pytest.raises(TypeError, match="PeriodDtype data is invalid"):
to_datetime(data)
with pytest.raises(TypeError, match="PeriodDtype data is invalid"):
DatetimeIndex(period_array(data))
with pytest.raises(TypeError, match="PeriodDtype data is invalid"):
to_datetime(period_array(data))
def test_dti_with_timedelta64_data_deprecation(self):
# GH#23675
data = np.array([0], dtype='m8[ns]')
with tm.assert_produces_warning(FutureWarning):
result = DatetimeIndex(data)
assert result[0] == Timestamp('1970-01-01')
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = to_datetime(data)
assert result[0] == Timestamp('1970-01-01')
with tm.assert_produces_warning(FutureWarning):
result = DatetimeIndex(pd.TimedeltaIndex(data))
assert result[0] == Timestamp('1970-01-01')
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = to_datetime(pd.TimedeltaIndex(data))
assert result[0] == Timestamp('1970-01-01')
def test_construction_caching(self):
df = pd.DataFrame({'dt': pd.date_range('20130101', periods=3),
'dttz': pd.date_range('20130101', periods=3,
tz='US/Eastern'),
'dt_with_null': [pd.Timestamp('20130101'), pd.NaT,
pd.Timestamp('20130103')],
'dtns': pd.date_range('20130101', periods=3,
freq='ns')})
assert df.dttz.dtype.tz.zone == 'US/Eastern'
@pytest.mark.parametrize('kwargs', [
{'tz': 'dtype.tz'},
{'dtype': 'dtype'},
{'dtype': 'dtype', 'tz': 'dtype.tz'}])
def test_construction_with_alt(self, kwargs, tz_aware_fixture):
tz = tz_aware_fixture
i = pd.date_range('20130101', periods=5, freq='H', tz=tz)
kwargs = {key: attrgetter(val)(i) for key, val in kwargs.items()}
result = DatetimeIndex(i, **kwargs)
tm.assert_index_equal(i, result)
@pytest.mark.parametrize('kwargs', [
{'tz': 'dtype.tz'},
{'dtype': 'dtype'},
{'dtype': 'dtype', 'tz': 'dtype.tz'}])
def test_construction_with_alt_tz_localize(self, kwargs, tz_aware_fixture):
tz = tz_aware_fixture
i = pd.date_range('20130101', periods=5, freq='H', tz=tz)
kwargs = {key: attrgetter(val)(i) for key, val in kwargs.items()}
if str(tz) in ('UTC', 'tzutc()'):
warn = None
else:
warn = FutureWarning
with tm.assert_produces_warning(warn, check_stacklevel=False):
result = DatetimeIndex(i.tz_localize(None).asi8, **kwargs)
expected = DatetimeIndex(i, **kwargs)
tm.assert_index_equal(result, expected)
# localize into the provided tz
i2 = DatetimeIndex(i.tz_localize(None).asi8, tz='UTC')
expected = i.tz_localize(None).tz_localize('UTC')
tm.assert_index_equal(i2, expected)
# incompat tz/dtype
pytest.raises(ValueError, lambda: DatetimeIndex(
i.tz_localize(None).asi8, dtype=i.dtype, tz='US/Pacific'))
def test_construction_index_with_mixed_timezones(self):
# gh-11488: no tz results in DatetimeIndex
result = Index([Timestamp('2011-01-01'),
Timestamp('2011-01-02')], name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01'),
Timestamp('2011-01-02')], name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is None
# same tz results in DatetimeIndex
result = Index([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='Asia/Tokyo')],
name='idx')
exp = DatetimeIndex(
[Timestamp('2011-01-01 10:00'), Timestamp('2011-01-02 10:00')
], tz='Asia/Tokyo', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is not None
assert result.tz == exp.tz
# same tz results in DatetimeIndex (DST)
result = Index([Timestamp('2011-01-01 10:00', tz='US/Eastern'),
Timestamp('2011-08-01 10:00', tz='US/Eastern')],
name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00'),
Timestamp('2011-08-01 10:00')],
tz='US/Eastern', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is not None
assert result.tz == exp.tz
# Different tz results in Index(dtype=object)
result = Index([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
name='idx')
exp = Index([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
dtype='object', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert not isinstance(result, DatetimeIndex)
result = Index([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
name='idx')
exp = Index([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
dtype='object', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert not isinstance(result, DatetimeIndex)
# length = 1
result = Index([Timestamp('2011-01-01')], name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01')], name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is None
# length = 1 with tz
result = Index(
[Timestamp('2011-01-01 10:00', tz='Asia/Tokyo')], name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00')], tz='Asia/Tokyo',
name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is not None
assert result.tz == exp.tz
def test_construction_index_with_mixed_timezones_with_NaT(self):
# see gh-11488
result = Index([pd.NaT, Timestamp('2011-01-01'),
pd.NaT, Timestamp('2011-01-02')], name='idx')
exp = DatetimeIndex([pd.NaT, Timestamp('2011-01-01'),
pd.NaT, Timestamp('2011-01-02')], name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is None
# Same tz results in DatetimeIndex
result = Index([pd.NaT, Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
pd.NaT, Timestamp('2011-01-02 10:00',
tz='Asia/Tokyo')],
name='idx')
exp = DatetimeIndex([pd.NaT, Timestamp('2011-01-01 10:00'),
pd.NaT, Timestamp('2011-01-02 10:00')],
tz='Asia/Tokyo', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is not None
assert result.tz == exp.tz
# same tz results in DatetimeIndex (DST)
result = Index([Timestamp('2011-01-01 10:00', tz='US/Eastern'),
pd.NaT,
Timestamp('2011-08-01 10:00', tz='US/Eastern')],
name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00'), pd.NaT,
Timestamp('2011-08-01 10:00')],
tz='US/Eastern', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is not None
assert result.tz == exp.tz
# different tz results in Index(dtype=object)
result = Index([pd.NaT, Timestamp('2011-01-01 10:00'),
pd.NaT, Timestamp('2011-01-02 10:00',
tz='US/Eastern')],
name='idx')
exp = Index([pd.NaT, Timestamp('2011-01-01 10:00'),
pd.NaT, Timestamp('2011-01-02 10:00', tz='US/Eastern')],
dtype='object', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert not isinstance(result, DatetimeIndex)
result = Index([pd.NaT, Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
pd.NaT, Timestamp('2011-01-02 10:00',
tz='US/Eastern')], name='idx')
exp = Index([pd.NaT, Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
pd.NaT, Timestamp('2011-01-02 10:00', tz='US/Eastern')],
dtype='object', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert not isinstance(result, DatetimeIndex)
# all NaT
result = Index([pd.NaT, pd.NaT], name='idx')
exp = DatetimeIndex([pd.NaT, pd.NaT], name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is None
# all NaT with tz
result = Index([pd.NaT, pd.NaT], tz='Asia/Tokyo', name='idx')
exp = DatetimeIndex([pd.NaT, pd.NaT], tz='Asia/Tokyo', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is not None
assert result.tz == exp.tz
def test_construction_dti_with_mixed_timezones(self):
# GH 11488 (not changed, added explicit tests)
# no tz results in DatetimeIndex
result = DatetimeIndex(
[Timestamp('2011-01-01'), Timestamp('2011-01-02')], name='idx')
exp = DatetimeIndex(
[Timestamp('2011-01-01'), Timestamp('2011-01-02')], name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
# same tz results in DatetimeIndex
result = DatetimeIndex([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00',
tz='Asia/Tokyo')],
name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00')],
tz='Asia/Tokyo', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
# same tz results in DatetimeIndex (DST)
result = DatetimeIndex([Timestamp('2011-01-01 10:00', tz='US/Eastern'),
Timestamp('2011-08-01 10:00',
tz='US/Eastern')],
name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00'),
Timestamp('2011-08-01 10:00')],
tz='US/Eastern', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
        # tz mismatch among the tz-aware timestamps (or with the tz keyword) raises TypeError/ValueError
with pytest.raises(ValueError):
DatetimeIndex([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
name='idx')
msg = 'cannot be converted to datetime64'
with pytest.raises(ValueError, match=msg):
DatetimeIndex([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
tz='Asia/Tokyo', name='idx')
with pytest.raises(ValueError):
DatetimeIndex([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
tz='US/Eastern', name='idx')
with pytest.raises(ValueError, match=msg):
# passing tz should results in DatetimeIndex, then mismatch raises
# TypeError
Index([pd.NaT, Timestamp('2011-01-01 10:00'),
pd.NaT, Timestamp('2011-01-02 10:00', tz='US/Eastern')],
tz='Asia/Tokyo', name='idx')
def test_construction_base_constructor(self):
arr = [pd.Timestamp('2011-01-01'), pd.NaT, pd.Timestamp('2011-01-03')]
tm.assert_index_equal(pd.Index(arr), pd.DatetimeIndex(arr))
tm.assert_index_equal(pd.Index(np.array(arr)),
pd.DatetimeIndex(np.array(arr)))
arr = [np.nan, pd.NaT, pd.Timestamp('2011-01-03')]
tm.assert_index_equal(pd.Index(arr), pd.DatetimeIndex(arr))
tm.assert_index_equal(pd.Index(np.array(arr)),
pd.DatetimeIndex(np.array(arr)))
def test_construction_outofbounds(self):
# GH 13663
dates = [datetime(3000, 1, 1), datetime(4000, 1, 1),
datetime(5000, 1, 1), datetime(6000, 1, 1)]
exp = Index(dates, dtype=object)
# coerces to object
tm.assert_index_equal(Index(dates), exp)
with pytest.raises(OutOfBoundsDatetime):
# can't create DatetimeIndex
DatetimeIndex(dates)
def test_construction_with_ndarray(self):
# GH 5152
dates = [datetime(2013, 10, 7),
datetime(2013, 10, 8),
datetime(2013, 10, 9)]
data = DatetimeIndex(dates, freq=pd.offsets.BDay()).values
result = DatetimeIndex(data, freq=pd.offsets.BDay())
expected = DatetimeIndex(['2013-10-07',
'2013-10-08',
'2013-10-09'],
freq='B')
tm.assert_index_equal(result, expected)
def test_verify_integrity_deprecated(self):
# GH#23919
with tm.assert_produces_warning(FutureWarning):
DatetimeIndex(['1/1/2000'], verify_integrity=False)
def test_range_kwargs_deprecated(self):
# GH#23919
with tm.assert_produces_warning(FutureWarning):
DatetimeIndex(start='1/1/2000', end='1/10/2000', freq='D')
def test_integer_values_and_tz_deprecated(self):
# GH-24559
values = np.array([946684800000000000])
with tm.assert_produces_warning(FutureWarning):
result = DatetimeIndex(values, tz='US/Central')
expected = pd.DatetimeIndex(['2000-01-01T00:00:00'], tz="US/Central")
tm.assert_index_equal(result, expected)
# but UTC is *not* deprecated.
with tm.assert_produces_warning(None):
result = DatetimeIndex(values, tz='UTC')
expected = pd.DatetimeIndex(['2000-01-01T00:00:00'], tz="US/Central")
def test_constructor_coverage(self):
rng = date_range('1/1/2000', periods=10.5)
exp = date_range('1/1/2000', periods=10)
tm.assert_index_equal(rng, exp)
msg = 'periods must be a number, got foo'
with pytest.raises(TypeError, match=msg):
date_range(start='1/1/2000', periods='foo', freq='D')
with pytest.raises(ValueError):
with tm.assert_produces_warning(FutureWarning):
DatetimeIndex(start='1/1/2000', end='1/10/2000')
with pytest.raises(TypeError):
DatetimeIndex('1/1/2000')
# generator expression
gen = (datetime(2000, 1, 1) + timedelta(i) for i in range(10))
result = DatetimeIndex(gen)
expected = DatetimeIndex([datetime(2000, 1, 1) + timedelta(i)
for i in range(10)])
tm.assert_index_equal(result, expected)
# NumPy string array
strings = np.array(['2000-01-01', '2000-01-02', '2000-01-03'])
result = DatetimeIndex(strings)
expected = DatetimeIndex(strings.astype('O'))
tm.assert_index_equal(result, expected)
from_ints = DatetimeIndex(expected.asi8)
tm.assert_index_equal(from_ints, expected)
# string with NaT
strings = np.array(['2000-01-01', '2000-01-02', 'NaT'])
result = DatetimeIndex(strings)
expected = DatetimeIndex(strings.astype('O'))
tm.assert_index_equal(result, expected)
from_ints = DatetimeIndex(expected.asi8)
tm.assert_index_equal(from_ints, expected)
# non-conforming
pytest.raises(ValueError, DatetimeIndex,
['2000-01-01', '2000-01-02', '2000-01-04'], freq='D')
pytest.raises(ValueError, date_range, start='2011-01-01',
freq='b')
pytest.raises(ValueError, date_range, end='2011-01-01',
freq='B')
pytest.raises(ValueError, date_range, periods=10, freq='D')
@pytest.mark.parametrize('freq', ['AS', 'W-SUN'])
def test_constructor_datetime64_tzformat(self, freq):
# see GH#6572: ISO 8601 format results in pytz.FixedOffset
idx = date_range('2013-01-01T00:00:00-05:00',
'2016-01-01T23:59:59-05:00', freq=freq)
expected = date_range('2013-01-01T00:00:00', '2016-01-01T23:59:59',
freq=freq, tz=pytz.FixedOffset(-300))
tm.assert_index_equal(idx, expected)
# Unable to use `US/Eastern` because of DST
expected_i8 = date_range('2013-01-01T00:00:00',
'2016-01-01T23:59:59', freq=freq,
tz='America/Lima')
tm.assert_numpy_array_equal(idx.asi8, expected_i8.asi8)
idx = date_range('2013-01-01T00:00:00+09:00',
'2016-01-01T23:59:59+09:00', freq=freq)
expected = date_range('2013-01-01T00:00:00', '2016-01-01T23:59:59',
freq=freq, tz=pytz.FixedOffset(540))
tm.assert_index_equal(idx, expected)
expected_i8 = date_range('2013-01-01T00:00:00',
'2016-01-01T23:59:59', freq=freq,
tz='Asia/Tokyo')
tm.assert_numpy_array_equal(idx.asi8, expected_i8.asi8)
# Non ISO 8601 format results in dateutil.tz.tzoffset
idx = date_range('2013/1/1 0:00:00-5:00', '2016/1/1 23:59:59-5:00',
freq=freq)
expected = date_range('2013-01-01T00:00:00', '2016-01-01T23:59:59',
freq=freq, tz=pytz.FixedOffset(-300))
tm.assert_index_equal(idx, expected)
# Unable to use `US/Eastern` because of DST
expected_i8 = date_range('2013-01-01T00:00:00',
'2016-01-01T23:59:59', freq=freq,
tz='America/Lima')
tm.assert_numpy_array_equal(idx.asi8, expected_i8.asi8)
idx = date_range('2013/1/1 0:00:00+9:00',
'2016/1/1 23:59:59+09:00', freq=freq)
expected = date_range('2013-01-01T00:00:00', '2016-01-01T23:59:59',
freq=freq, tz=pytz.FixedOffset(540))
tm.assert_index_equal(idx, expected)
expected_i8 = date_range('2013-01-01T00:00:00',
'2016-01-01T23:59:59', freq=freq,
tz='Asia/Tokyo')
tm.assert_numpy_array_equal(idx.asi8, expected_i8.asi8)
def test_constructor_dtype(self):
# passing a dtype with a tz should localize
idx = DatetimeIndex(['2013-01-01', '2013-01-02'],
dtype='datetime64[ns, US/Eastern]')
expected = DatetimeIndex(['2013-01-01', '2013-01-02']
).tz_localize('US/Eastern')
tm.assert_index_equal(idx, expected)
idx = DatetimeIndex(['2013-01-01', '2013-01-02'],
tz='US/Eastern')
tm.assert_index_equal(idx, expected)
# if we already have a tz and its not the same, then raise
idx = DatetimeIndex(['2013-01-01', '2013-01-02'],
dtype='datetime64[ns, US/Eastern]')
pytest.raises(ValueError,
lambda: DatetimeIndex(idx,
dtype='datetime64[ns]'))
# this is effectively trying to convert tz's
pytest.raises(TypeError,
lambda: DatetimeIndex(idx,
dtype='datetime64[ns, CET]'))
pytest.raises(ValueError,
lambda: DatetimeIndex(
idx, tz='CET',
dtype='datetime64[ns, US/Eastern]'))
result = DatetimeIndex(idx, dtype='datetime64[ns, US/Eastern]')
tm.assert_index_equal(idx, result)
def test_constructor_name(self):
idx = date_range(start='2000-01-01', periods=1, freq='A',
name='TEST')
assert idx.name == 'TEST'
def test_000constructor_resolution(self):
# 2252
t1 = Timestamp((1352934390 * 1000000000) + 1000000 + 1000 + 1)
idx = DatetimeIndex([t1])
assert idx.nanosecond[0] == t1.nanosecond
def test_disallow_setting_tz(self):
# GH 3746
dti = DatetimeIndex(['2010'], tz='UTC')
with pytest.raises(AttributeError):
dti.tz = pytz.timezone('US/Pacific')
@pytest.mark.parametrize('tz', [
None, 'America/Los_Angeles', pytz.timezone('America/Los_Angeles'),
Timestamp('2000', tz='America/Los_Angeles').tz])
def test_constructor_start_end_with_tz(self, tz):
# GH 18595
start = Timestamp('2013-01-01 06:00:00', tz='America/Los_Angeles')
end = Timestamp('2013-01-02 06:00:00', tz='America/Los_Angeles')
result = date_range(freq='D', start=start, end=end, tz=tz)
expected = DatetimeIndex(['2013-01-01 06:00:00',
'2013-01-02 06:00:00'],
tz='America/Los_Angeles')
tm.assert_index_equal(result, expected)
# Especially assert that the timezone is consistent for pytz
assert pytz.timezone('America/Los_Angeles') is result.tz
@pytest.mark.parametrize('tz', ['US/Pacific', 'US/Eastern', 'Asia/Tokyo'])
def test_constructor_with_non_normalized_pytz(self, tz):
# GH 18595
non_norm_tz = Timestamp('2010', tz=tz).tz
result = DatetimeIndex(['2010'], tz=non_norm_tz)
assert pytz.timezone(tz) is result.tz
def test_constructor_timestamp_near_dst(self):
# GH 20854
ts = [Timestamp('2016-10-30 03:00:00+0300', tz='Europe/Helsinki'),
Timestamp('2016-10-30 03:00:00+0200', tz='Europe/Helsinki')]
result = DatetimeIndex(ts)
expected = DatetimeIndex([ts[0].to_pydatetime(),
ts[1].to_pydatetime()])
tm.assert_index_equal(result, expected)
# TODO(GH-24559): Remove the xfail for the tz-aware case.
@pytest.mark.parametrize('klass', [Index, DatetimeIndex])
@pytest.mark.parametrize('box', [
np.array, partial(np.array, dtype=object), list])
@pytest.mark.parametrize('tz, dtype', [
pytest.param('US/Pacific', 'datetime64[ns, US/Pacific]',
marks=[pytest.mark.xfail(),
pytest.mark.filterwarnings(
"ignore:\\n Passing:FutureWarning")]),
[None, 'datetime64[ns]'],
])
def test_constructor_with_int_tz(self, klass, box, tz, dtype):
# GH 20997, 20964
ts = Timestamp('2018-01-01', tz=tz)
result = klass(box([ts.value]), dtype=dtype)
expected = klass([ts])
assert result == expected
# This is the desired future behavior
@pytest.mark.xfail(reason="Future behavior", strict=False)
@pytest.mark.filterwarnings("ignore:\\n Passing:FutureWarning")
def test_construction_int_rountrip(self, tz_naive_fixture):
# GH 12619
# TODO(GH-24559): Remove xfail
tz = tz_naive_fixture
result = 1293858000000000000
expected = DatetimeIndex([1293858000000000000], tz=tz).asi8[0]
assert result == expected
def test_construction_from_replaced_timestamps_with_dst(self):
# GH 18785
index = pd.date_range(pd.Timestamp(2000, 1, 1),
pd.Timestamp(2005, 1, 1),
freq='MS', tz='Australia/Melbourne')
test = pd.DataFrame({'data': range(len(index))}, index=index)
test = test.resample('Y').mean()
result = pd.DatetimeIndex([x.replace(month=6, day=1)
for x in test.index])
expected = pd.DatetimeIndex(['2000-06-01 00:00:00',
'2001-06-01 00:00:00',
'2002-06-01 00:00:00',
'2003-06-01 00:00:00',
'2004-06-01 00:00:00',
'2005-06-01 00:00:00'],
tz='Australia/Melbourne')
tm.assert_index_equal(result, expected)
def test_construction_with_tz_and_tz_aware_dti(self):
# GH 23579
        dti = date_range('2016-01-01', periods=3, tz='US/Central')
import sys
import pandas as pd
import numpy as np
from sklearn import datasets
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split, cross_val_predict
from sklearn.model_selection import KFold
from sklearn.linear_model import LinearRegression
from sklearn import metrics
from scipy import stats
from sklearn.metrics import r2_score
algoName = "LR" # -a
fileName = "iris" # -f
foldNum = 1 # -n
testFileName = "" # -t
ratio = 1 # -r
det_brf = "" # -e
classLevel = "" # -c
X = pd.DataFrame({})
Y = pd.DataFrame({})
X_t = pd.DataFrame({})
Y_t = pd.DataFrame({})
X_test = pd.DataFrame({})
Y_test = pd.DataFrame({})
X_train = pd.DataFrame({})
Y_train = pd.DataFrame({})
file = open("Report.txt", "w")
def read_switch():
global algoName, fileName, foldNum, testFileName, ratio, det_brf, classLevel
    for i in range(1, len(sys.argv)):
if sys.argv[i].replace(" ", "") == '-a':
algoName = sys.argv[i+1]
elif sys.argv[i].replace(" ", "") == '-f':
fileName = sys.argv[i+1]
elif sys.argv[i].replace(" ", "") == '-n':
foldNum = sys.argv[i + 1]
elif sys.argv[i].replace(" ", "") == '-t':
testFileName = sys.argv[i + 1]
elif sys.argv[i].replace(" ", "") == '-r':
ratio = sys.argv[i + 1]
elif sys.argv[i].replace(" ", "") == '-e':
det_brf = sys.argv[i + 1]
elif sys.argv[i].replace(" ", "") == '-c':
classLevel = sys.argv[i + 1]
print("Algorithm Name is (-a): ", algoName)
print("File Name is (-f): ", fileName)
print("Fold Number is (-n): ", foldNum)
print("Test File Name is (-t): ", testFileName)
print("Ratio is (-r): ", ratio)
print("Explanation Type (-e): ", det_brf)
print("Class Level is (-c): ", classLevel)
def read_dataset():
global fileName
if fileName == "iris":
data = datasets.load_iris()
dataset = pd.DataFrame(data.data, columns=data.feature_names)
elif fileName == "boston":
data = datasets.load_boston()
dataset = pd.DataFrame(data.data, columns=data.feature_names)
elif fileName == "breast_cancer":
data = datasets.load_breast_cancer()
dataset = pd.DataFrame(data.data, columns=data.feature_names)
elif fileName == "diabetes":
data = datasets.load_diabetes()
dataset = pd.DataFrame(data.data, columns=data.feature_names)
elif fileName == "digits":
data = datasets.load_digits()
dataset = pd.DataFrame(data.data, columns=data.feature_names)
elif fileName == "files":
data = datasets.load_files()
dataset = pd.DataFrame(data.data, columns=data.feature_names)
elif fileName == "linnerud":
data = datasets.load_linnerud()
dataset = pd.DataFrame(data.data, columns=data.feature_names)
elif fileName == "wine":
data = datasets.load_wine()
dataset = pd.DataFrame(data.data, columns=data.feature_names)
else:
dataset = pd.read_csv(fileName)
return dataset
def read_testset():
global testFileName
testset = pd.DataFrame({})
if len(testFileName) != 0:
testset = pd.read_csv(testFileName)
return testset
else:
return testset
def encoding_dataset(dataset):
global fileName
if fileName != "iris" and dataset.shape[0] != 0:
Label_Encoder = LabelEncoder()
columns = [column for column in dataset.columns if dataset[column].dtype in ['O']]
dataset[columns] = dataset[columns].apply(LabelEncoder().fit_transform)
return dataset
else:
return dataset
def dataset_target_split(dataset):
global X, Y, classLevel
if len(classLevel) != 0:
Y = dataset[classLevel]
X = dataset.drop(classLevel, axis=1)
else:
Y = dataset.iloc[:, -1]
X = dataset
X = X.iloc[: , :-1]
def testset_target_split(testset):
global X_t, Y_t, classLevel
if testset.shape[0] != 0:
if len(classLevel) != 0:
Y_t = testset[classLevel]
X_t = testset.drop(classLevel, axis=1)
else:
Y_t = testset.iloc[:, -1]
X_t = testset
X_t = X_t.iloc[:, :-1]
else:
        X_t = pd.DataFrame({})
from __future__ import division # brings in Python 3.0 mixed type calculation rules
import datetime
import inspect
import numpy as np
import numpy.testing as npt
import os.path
import pandas as pd
import sys
from tabulate import tabulate
import unittest
print("Python version: " + sys.version)
print("Numpy version: " + np.__version__)
from ..ted_exe import Ted
test = {}
class TestTed(unittest.TestCase):
"""
Unit tests for TED model.
"""
print("ted unittests conducted at " + str(datetime.datetime.today()))
def setUp(self):
"""
Setup routine for ted unit tests.
:return:
"""
pass
def tearDown(self):
"""
Teardown routine for ted unit tests.
:return:
"""
pass
# teardown called after each test
# e.g. maybe write test results to some text file
def create_ted_object(self):
# create empty pandas dataframes to create empty object for testing
df_empty = pd.DataFrame()
# create an empty ted object
ted_empty = Ted(df_empty, df_empty)
return ted_empty
def test_daily_app_flag(self):
"""
        :description generates a daily flag to denote whether a pesticide is applied that day or not (1 - applied, 0 - not applied)
:param num_apps; number of applications
:param app_interval; number of days between applications
        :NOTE in TED model there are two application scenarios per simulation (one each for the min and max exposure scenarios)
(this is why the parameters are passed in)
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
ted_empty = self.create_ted_object()
expected_results = pd.Series([], dtype='bool')
result = pd.Series([[]], dtype='bool')
expected_results = [[True, False, False, True, False, False, True, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False],
[True, False, False, False, False, False, False, True, False, False,
False, False, False, False, True, False, False, False, False, False,
False, True, False, False, False, False, False, False, True, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False],
[True, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False]]
try:
# internal model constants
ted_empty.num_simulation_days = 366
# input varialbles that change per simulation
ted_empty.num_apps_min = pd.Series([3, 5, 1])
ted_empty.app_interval_min = pd.Series([3, 7, 1])
for i in range (3):
result[i] = ted_empty.daily_app_flag(ted_empty.num_apps_min[i], ted_empty.app_interval_min[i])
np.array_equal(result[i],expected_results[i])
finally:
for i in range(3):
tab = [result[i], expected_results[i]]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_set_drift_parameters(self):
"""
        :description provides parameter values to use when calculating distances from the edge of the application
                     source area to the concentration of interest
:param app_method; application method (aerial/ground/airblast)
:param boom_hgt; height of boom (low/high) - 'NA' if not ground application
:param drop_size; droplet spectrum for application (see list below for aerial/ground - 'NA' if airblast)
        :param param_a (result[i][0]): parameter a for spray drift distance calculation
        :param param_b (result[i][1]): parameter b for spray drift distance calculation
        :param param_c (result[i][2]): parameter c for spray drift distance calculation
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
ted_empty = self.create_ted_object()
expected_results = pd.Series([], dtype='float')
result = pd.Series(9*[[0.,0.,0.]], dtype='float')
expected_results = [[0.0292,0.822,0.6539],[0.043,1.03,0.5],[0.0721,1.0977,0.4999],[0.1014,1.1344,0.4999],
[1.0063,0.9998,1.0193],[5.5513,0.8523,1.0079],[0.1913,1.2366,1.0552],
[2.4154,0.9077,1.0128],[0.0351,2.4586,0.4763]]
try:
# input variable that change per simulation
ted_empty.app_method_min = pd.Series(['aerial','aerial','aerial','aerial','ground','ground','ground','ground','airblast'])
ted_empty.boom_hgt_min = pd.Series(['','','','','low','low','high','high',''])
ted_empty.droplet_spec_min = pd.Series(['very_fine_to_fine','fine_to_medium','medium_to_coarse','coarse_to_very_coarse',
'very_fine_to_fine','fine_to_medium-coarse','very_fine_to_fine','fine_to_medium-coarse',''])
for i in range (9): # test that the nine combinations are accessed
result[i][0], result[i][1], result[i][2] = ted_empty.set_drift_parameters(ted_empty.app_method_min[i], ted_empty.boom_hgt_min[i], ted_empty.droplet_spec_min[i])
npt.assert_allclose(result[i],expected_results[i],rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
for i in range (9):
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_drift_distance_calc(self):
"""
        :description calculates the distance from the edge of the application source area to the point where
                     the concentration of interest occurs
:param app_rate_frac; fraction of active ingredient application rate equivalent to the health threshold of concern
:param param_a; parameter a for spray drift distance calculation
:param param_b; parameter b for spray drift distance calculation
:param param_c; parameter c for spray drift distance calculation
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
ted_empty = self.create_ted_object()
expected_results = pd.Series([], dtype='float')
result = pd.Series([], dtype='float')
expected_results = [302.050738, 11.484378, 0.0]
try:
# internal model constants
ted_empty.max_distance_from_source = 1000.
# input variable that is internally specified from among options
param_a = pd.Series([0.0292, 0.1913, 0.0351], dtype='float')
param_b = pd.Series([0.822, 1.2366, 2.4586], dtype='float')
param_c = pd.Series([0.6539, 1.0522, 0.4763], dtype='float')
# internally calculated variables
app_rate_frac = pd.Series([0.1,0.25,0.88], dtype='float')
for i in range(3):
result[i] = ted_empty.drift_distance_calc(app_rate_frac[i], param_a[i], param_b[i], param_c[i], ted_empty.max_distance_from_source)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_conc_timestep(self):
"""
:description unittest for function conc_timestep:
:param conc_ini; initial concentration for day (actually previous day concentration)
        :param half_life; half-life of pesticide representing either the foliar dissipation half-life or the aerobic soil metabolism half-life (days)
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
ted_empty = self.create_ted_object()
expected_results = pd.Series([], dtype='float')
result = pd.Series([], dtype='float')
expected_results = [9.803896e-4, 0.106066, 1.220703e-3]
try:
# input variable that is internally specified from among options
half_life = pd.Series([35., 2., .1])
# internally calculated variables
conc_ini = pd.Series([1.e-3, 0.15, 1.25])
result = ted_empty.conc_timestep(conc_ini, half_life)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
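    # Sanity check (for illustration): the expected values above are consistent with simple
    # first-order decay over a one-day timestep, conc_ini * 0.5 ** (1. / half_life), e.g.
    # 1.e-3 * 0.5 ** (1. / 35.) = 9.803896e-4 and 0.15 * 0.5 ** (1. / 2.) = 0.106066.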
def test_conc_initial_canopy_air(self):
"""
:description calculates initial (1st application day) air concentration of pesticide within plant canopy (ug/mL)
:param application rate; active ingredient application rate (lbs a.i./acre)
:param mass_pest; mass of pesticide on treated field (mg)
:param volume_air; volume of air in 1 hectare to a height equal to the height of the crop canopy
:param biotransfer_factor; the volume_based biotransfer factor; function of Henry's las constant and Log Kow
NOTE: this represents Eq 24 (and supporting eqs 25,26,27) of Attachment 1-7 of 'Biological Evaluation Chapters for Diazinon ESA Assessment'
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
ted_empty = self.create_ted_object()
expected_results = pd.Series([], dtype='float')
result = pd.Series([], dtype='float')
expected_results = [1.152526e-7, 1.281910e-5, 7.925148e-8]
try:
# internal model constants
ted_empty.hectare_to_acre = 2.47105
ted_empty.gms_to_mg = 1000.
ted_empty.lbs_to_gms = 453.592
ted_empty.crop_hgt = 1. #m
ted_empty.hectare_area = 10000. #m2
ted_empty.m3_to_liters = 1000.
ted_empty.mass_plant = 25000. # kg/hectare
ted_empty.density_plant = 0.77 #kg/L
# input variables that change per simulation
ted_empty.log_kow = pd.Series([2., 4., 6.], dtype='float')
ted_empty.log_unitless_hlc = pd.Series([-5., -3., -4.], dtype='float')
ted_empty.app_rate_min = pd.Series([1.e-3, 0.15, 1.25]) # lbs a.i./acre
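            # hedged sketch (assumption, not taken from the model source): these expected values are
            # consistent with Eqs 24-27 implemented roughly as
            #   mass_pest = app_rate * lbs_to_gms * gms_to_mg * hectare_to_acre      (mg a.i. per hectare)
            #   vol_plant = mass_plant / density_plant                               (L)
            #   vol_air   = crop_hgt * hectare_area * m3_to_liters - vol_plant       (L)
            #   log_kpa   = 1.065 * log_kow - log_unitless_hlc - 1.654               (biotransfer factor)
            #   conc_air  = mass_pest / (vol_air + 10.**log_kpa * vol_plant)         (mg/L, i.e. ug/mL)
            # e.g. simulation 0: ~1120.9 / (9.9675e6 + 10.**5.476 * 32467.5) ~= 1.1525e-7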
for i in range(3): #let's do 3 iterations
result[i] = ted_empty.conc_initial_canopy_air(i, ted_empty.app_rate_min[i])
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_conc_initial_soil_h2o(self):
"""
:description calculates initial (1st application day) concentration in soil pore water or surface puddles(ug/L)
:param application rate; active ingredient application rate (lbs a.i./acre)
:param soil_depth
:param soil_bulk_density; kg/L
:param porosity; soil porosity
:param frac_org_cont_soil; fraction organic carbon in soil
:param app_rate_conv; conversion factor used to convert units of application rate (lbs a.i./acre) to (ug a.i./mL)
:NOTE this represents Eq 3 of Attachment 1-7 of 'Biological Evaluation Chapters for Diazinon ESA Assessment'
(the depth of water in this equation is assumed to be 0.0 and therefore not included here)
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
ted_empty = self.create_ted_object()
expected_results = pd.Series([], dtype='float')
result = pd.Series([], dtype='float')
expected_results = [5.067739e-3, 1.828522, 6.13194634]
try:
# internal model constants
ted_empty.app_rate_conv1 = 11.2
ted_empty.soil_depth = 2.6 # cm
ted_empty.soil_porosity = 0.35
ted_empty.soil_bulk_density = 1.5 # kg/L
ted_empty.soil_foc = 0.015
ted_empty.h2o_depth_soil = 0.0
ted_empty.h2o_depth_puddles = 1.3
# internally specified variable
ted_empty.water_type = pd.Series(["puddles", "pore_water", "puddles"])
# input variables that change per simulation
ted_empty.koc = pd.Series([1.e-3, 0.15, 1.25])
ted_empty.app_rate_min = pd.Series([1.e-3, 0.15, 1.25]) # lbs a.i./acre
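            # hedged sketch (assumption, not taken from the model source): these expected values are
            # consistent with Eq 3 in the form
            #   conc = (app_rate * app_rate_conv1) /
            #          (h2o_depth + soil_depth * (soil_porosity + soil_bulk_density * koc * soil_foc))
            # where h2o_depth is h2o_depth_puddles for 'puddles' and h2o_depth_soil for 'pore_water';
            # e.g. simulation 0: (1.e-3 * 11.2) / (1.3 + 2.6 * (0.35 + 1.5 * 1.e-3 * 0.015)) ~= 5.0677e-3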
for i in range(3): #let's do 3 iterations
result[i] = ted_empty.conc_initial_soil_h2o(i, ted_empty.app_rate_min[i], ted_empty.water_type[i])
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_conc_initial_plant(self):
"""
:description calculates initial (1st application day) dietary based EEC (residue concentration) from pesticide application
(mg/kg-diet for food items including short/tall grass, broadleaf plants, seeds/fruit/pods, and above ground arthropods)
:param application rate; active ingredient application rate (lbs a.i./acre)
:param food_multiplier; factor by which application rate of active ingredient is multiplied to estimate dietary based EECs
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
ted_empty = self.create_ted_object()
expected_results = pd.Series([], dtype='float')
result = pd.Series([], dtype='float')
expected_results = [1.5e-2, 22.5, 300.]
try:
# input variables that change per simulation
ted_empty.food_multiplier = pd.Series([15., 150., 240.])
ted_empty.app_rate_min = pd.Series([1.e-3, 0.15, 1.25]) # lbs a.i./acre
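            # hedged sketch (assumption, not taken from the model source): the expected values are simply
            # app_rate * food_multiplier, e.g. 0.15 * 150. = 22.5 for simulation 1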
result = ted_empty.conc_initial_plant(ted_empty.app_rate_min, ted_empty.food_multiplier)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_animal_dietary_intake(self):
"""
:description generates pesticide intake via consumption of diet containing pesticide for animals (mammals, birds, amphibians, reptiles)
:param a1; coefficient of allometric expression
:param b1; exponent of allometric expression
:param body_wgt; body weight of species (g)
:param frac_h2o; fraction of water in food item
        # this represents Eq 6 of Attachment 1-7 of 'Biological Evaluation Chapters for Diazinon ESA Assessment'
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
ted_empty = self.create_ted_object()
expected_results = pd.Series([], dtype='float')
result = pd.Series([], dtype='float')
expected_results = [8.050355, 3.507997, 64.92055]
try:
# internally specified parameters
a1 = pd.Series([.398, .013, .621], dtype='float')
b1 = pd.Series([.850, .773, .564], dtype='float')
# variables from external database
body_wgt = pd.Series([10., 120., 450.], dtype='float')
frac_h2o = pd.Series([0.65, 0.85, 0.7], dtype='float')
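            # hedged sketch (assumption, not taken from the model source): these expected values are
            # consistent with an allometric wet-weight intake rate of the form
            #   intake = (a1 * body_wgt**b1) / (1. - frac_h2o)
            # e.g. (0.398 * 10.**0.850) / (1. - 0.65) ~= 8.050355 g/day-ww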
result = ted_empty.animal_dietary_intake(a1, b1, body_wgt, frac_h2o)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_animal_dietary_dose(self):
"""
:description generates pesticide dietary-based dose for animals (mammals, birds, amphibians, reptiles)
:param body_wgt; body weight of species (g)
:param frac_h2o; fraction of water in food item
:param food_intake_rate; ingestion rate of food item (g/day-ww)
:param food_pest_conc; pesticide concentration in food item (mg a.i./kg)
        # this represents Eq 5 of Attachment 1-7 of 'Biological Evaluation Chapters for Diazinon ESA Assessment'
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
ted_empty = self.create_ted_object()
expected_results = pd.Series([], dtype='float')
result = pd.Series([], dtype='float')
expected_results = [3.e-4, 3.45e-2, 4.5]
try:
# variables from external database
body_wgt = pd.Series([10., 120., 450.], dtype='float')
# internally calculated variables
food_intake_rate = pd.Series([3., 12., 45.], dtype='float')
food_pest_conc = pd.Series([1.e-3, 3.45e-1, 4.50e+1], dtype='float')
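            # hedged sketch (assumption, not taken from the model source): these expected values are
            # consistent with dose = food_intake_rate * food_pest_conc / body_wgt
            # (mg a.i./kg-bw with intake in g/day-ww, concentration in mg a.i./kg, and body weight in g);
            # e.g. 3. * 1.e-3 / 10. = 3.e-4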
result = ted_empty.animal_dietary_dose(body_wgt, food_intake_rate, food_pest_conc)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_daily_plant_timeseries(self):
"""
:description generates annual timeseries of daily pesticide residue concentration (EECs) for a food item
:param i; simulation number/index
:param application rate; active ingredient application rate (lbs a.i./acre)
:param food_multiplier; factor by which application rate of active ingredient is multiplied to estimate dietary based EECs
:param daily_flag; daily flag denoting if pesticide is applied (0 - not applied, 1 - applied)
:Notes # calculations are performed daily from day of first application (assumed day 0) through the last day of a year
# note: day numbers are synchronized with 0-based array indexing; thus the year does not have a calendar specific
               # association, rather it is one year from the day of 1st pesticide application
#expected results generated by running OPP spreadsheet with appropriate inputs
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
ted_empty = self.create_ted_object()
expected_results = pd.Series([], dtype='float')
result = pd.Series([], dtype='float')
expected_results = [[2.700000E+00,2.578072E+00,2.461651E+00,5.050487E+00,4.822415E+00,4.604642E+00,7.096704E+00,
6.776228E+00,6.470225E+00,6.178040E+00,5.899049E+00,5.632658E+00,5.378296E+00,5.135421E+00,
4.903513E+00,4.682078E+00,4.470643E+00,4.268756E+00,4.075986E+00,3.891921E+00,3.716168E+00,
3.548352E+00,3.388114E+00,3.235112E+00,3.089020E+00,2.949525E+00,2.816329E+00,2.689148E+00,
2.567710E+00,2.451757E+00,2.341039E+00,2.235322E+00,2.134378E+00,2.037993E+00,1.945961E+00,
1.858084E+00,1.774176E+00,1.694057E+00,1.617556E+00,1.544510E+00,1.474762E+00,1.408164E+00,
1.344574E+00,1.283855E+00,1.225878E+00,1.170520E+00,1.117661E+00,1.067189E+00,1.018997E+00,
9.729803E-01,9.290420E-01,8.870880E-01,8.470285E-01,8.087781E-01,7.722549E-01,7.373812E-01,
7.040822E-01,6.722870E-01,6.419276E-01,6.129392E-01,5.852598E-01,5.588304E-01,5.335945E-01,
5.094983E-01,4.864901E-01,4.645210E-01,4.435440E-01,4.235143E-01,4.043890E-01,3.861275E-01,
3.686906E-01,3.520411E-01,3.361435E-01,3.209638E-01,3.064696E-01,2.926299E-01,2.794152E-01,
2.667973E-01,2.547491E-01,2.432451E-01,2.322605E-01,2.217720E-01,2.117571E-01,2.021945E-01,
1.930637E-01,1.843453E-01,1.760206E-01,1.680717E-01,1.604819E-01,1.532348E-01,1.463150E-01,
1.397076E-01,1.333986E-01,1.273746E-01,1.216225E-01,1.161303E-01,1.108860E-01,1.058786E-01,
1.010973E-01,9.653187E-02,9.217264E-02,8.801028E-02,8.403587E-02,8.024095E-02,7.661739E-02,
7.315748E-02,6.985380E-02,6.669932E-02,6.368728E-02,6.081127E-02,5.806513E-02,5.544300E-02,
5.293928E-02,5.054863E-02,4.826593E-02,4.608632E-02,4.400514E-02,4.201794E-02,4.012047E-02,
3.830870E-02,3.657874E-02,3.492690E-02,3.334966E-02,3.184364E-02,3.040563E-02,2.903256E-02,
2.772150E-02,2.646964E-02,2.527431E-02,2.413297E-02,2.304316E-02,2.200257E-02,2.100897E-02,
2.006024E-02,1.915435E-02,1.828937E-02,1.746345E-02,1.667483E-02,1.592182E-02,1.520282E-02,
1.451628E-02,1.386075E-02,1.323482E-02,1.263716E-02,1.206648E-02,1.152158E-02,1.100128E-02,
1.050448E-02,1.003012E-02,9.577174E-03,9.144684E-03,8.731725E-03,8.337415E-03,7.960910E-03,
7.601408E-03,7.258141E-03,6.930375E-03,6.617410E-03,6.318579E-03,6.033242E-03,5.760790E-03,
5.500642E-03,5.252242E-03,5.015059E-03,4.788587E-03,4.572342E-03,4.365863E-03,4.168707E-03,
3.980455E-03,3.800704E-03,3.629070E-03,3.465187E-03,3.308705E-03,3.159289E-03,3.016621E-03,
2.880395E-03,2.750321E-03,2.626121E-03,2.507530E-03,2.394294E-03,2.286171E-03,2.182931E-03,
2.084354E-03,1.990228E-03,1.900352E-03,1.814535E-03,1.732594E-03,1.654353E-03,1.579645E-03,
1.508310E-03,1.440198E-03,1.375161E-03,1.313061E-03,1.253765E-03,1.197147E-03,1.143086E-03,
1.091466E-03,1.042177E-03,9.951138E-04,9.501760E-04,9.072676E-04,8.662969E-04,8.271763E-04,
7.898223E-04,7.541552E-04,7.200988E-04,6.875803E-04,6.565303E-04,6.268824E-04,5.985734E-04,
5.715428E-04,5.457328E-04,5.210884E-04,4.975569E-04,4.750880E-04,4.536338E-04,4.331484E-04,
4.135881E-04,3.949112E-04,3.770776E-04,3.600494E-04,3.437901E-04,3.282651E-04,3.134412E-04,
2.992867E-04,2.857714E-04,2.728664E-04,2.605442E-04,2.487784E-04,2.375440E-04,2.268169E-04,
2.165742E-04,2.067941E-04,1.974556E-04,1.885388E-04,1.800247E-04,1.718951E-04,1.641326E-04,
1.567206E-04,1.496433E-04,1.428857E-04,1.364332E-04,1.302721E-04,1.243892E-04,1.187720E-04,
1.134085E-04,1.082871E-04,1.033970E-04,9.872779E-05,9.426940E-05,9.001235E-05,8.594753E-05,
8.206628E-05,7.836030E-05,7.482167E-05,7.144285E-05,6.821660E-05,6.513605E-05,6.219461E-05,
5.938600E-05,5.670423E-05,5.414355E-05,5.169852E-05,4.936390E-05,4.713470E-05,4.500617E-05,
4.297377E-05,4.103314E-05,3.918015E-05,3.741084E-05,3.572142E-05,3.410830E-05,3.256803E-05,
3.109731E-05,2.969300E-05,2.835211E-05,2.707178E-05,2.584926E-05,2.468195E-05,2.356735E-05,
2.250309E-05,2.148688E-05,2.051657E-05,1.959007E-05,1.870542E-05,1.786071E-05,1.705415E-05,
1.628401E-05,1.554865E-05,1.484650E-05,1.417606E-05,1.353589E-05,1.292463E-05,1.234097E-05,
1.178368E-05,1.125154E-05,1.074344E-05,1.025829E-05,9.795037E-06,9.352709E-06,8.930356E-06,
8.527075E-06,8.142006E-06,7.774326E-06,7.423250E-06,7.088028E-06,6.767944E-06,6.462315E-06,
6.170487E-06,5.891838E-06,5.625772E-06,5.371721E-06,5.129143E-06,4.897519E-06,4.676355E-06,
4.465178E-06,4.263538E-06,4.071003E-06,3.887163E-06,3.711625E-06,3.544014E-06,3.383972E-06,
3.231157E-06,3.085243E-06,2.945919E-06,2.812886E-06,2.685860E-06,2.564571E-06,2.448759E-06,
2.338177E-06,2.232589E-06,2.131769E-06,2.035502E-06,1.943582E-06,1.855813E-06,1.772007E-06,
1.691986E-06,1.615579E-06,1.542622E-06,1.472959E-06,1.406443E-06,1.342930E-06,1.282286E-06,
1.224380E-06,1.169089E-06,1.116294E-06,1.065884E-06,1.017751E-06,9.717908E-07,9.279063E-07,
8.860035E-07,8.459930E-07,8.077893E-07,7.713109E-07,7.364797E-07,7.032215E-07,6.714651E-07,
6.411428E-07,6.121898E-07,5.845443E-07,5.581472E-07,5.329422E-07,5.088754E-07,4.858954E-07,
4.639531E-07,4.430018E-07],
[5.500000E+01,5.349602E+01,5.203317E+01,5.061032E+01,4.922638E+01,4.788028E+01,4.657099E+01,
1.002975E+02,9.755487E+01,9.488722E+01,9.229253E+01,8.976878E+01,8.731405E+01,8.492644E+01,
1.376041E+02,1.338413E+02,1.301814E+02,1.266216E+02,1.231591E+02,1.197913E+02,1.165156E+02,
1.683295E+02,1.637265E+02,1.592494E+02,1.548947E+02,1.506591E+02,1.465394E+02,1.425322E+02,
1.936347E+02,1.883397E+02,1.831896E+02,1.781802E+02,1.733079E+02,1.685688E+02,1.639593E+02,
1.594758E+02,1.551149E+02,1.508733E+02,1.467476E+02,1.427348E+02,1.388317E+02,1.350354E+02,
1.313428E+02,1.277512E+02,1.242579E+02,1.208600E+02,1.175551E+02,1.143406E+02,1.112139E+02,
1.081728E+02,1.052148E+02,1.023377E+02,9.953925E+01,9.681734E+01,9.416987E+01,9.159479E+01,
8.909012E+01,8.665395E+01,8.428439E+01,8.197963E+01,7.973789E+01,7.755746E+01,7.543664E+01,
7.337382E+01,7.136741E+01,6.941587E+01,6.751769E+01,6.567141E+01,6.387562E+01,6.212894E+01,
6.043002E+01,5.877756E+01,5.717028E+01,5.560696E+01,5.408638E+01,5.260739E+01,5.116884E+01,
4.976962E+01,4.840867E+01,4.708493E+01,4.579739E+01,4.454506E+01,4.332697E+01,4.214220E+01,
4.098981E+01,3.986895E+01,3.877873E+01,3.771832E+01,3.668691E+01,3.568371E+01,3.470793E+01,
3.375884E+01,3.283571E+01,3.193781E+01,3.106447E+01,3.021501E+01,2.938878E+01,2.858514E+01,
2.780348E+01,2.704319E+01,2.630369E+01,2.558442E+01,2.488481E+01,2.420434E+01,2.354247E+01,
2.289870E+01,2.227253E+01,2.166349E+01,2.107110E+01,2.049491E+01,1.993447E+01,1.938936E+01,
1.885916E+01,1.834346E+01,1.784185E+01,1.735397E+01,1.687942E+01,1.641785E+01,1.596891E+01,
1.553224E+01,1.510751E+01,1.469439E+01,1.429257E+01,1.390174E+01,1.352160E+01,1.315185E+01,
1.279221E+01,1.244241E+01,1.210217E+01,1.177123E+01,1.144935E+01,1.113627E+01,1.083174E+01,
1.053555E+01,1.024745E+01,9.967237E+00,9.694682E+00,9.429580E+00,9.171728E+00,8.920927E+00,
8.676983E+00,8.439711E+00,8.208926E+00,7.984453E+00,7.766118E+00,7.553753E+00,7.347195E+00,
7.146286E+00,6.950870E+00,6.760798E+00,6.575924E+00,6.396105E+00,6.221203E+00,6.051084E+00,
5.885617E+00,5.724674E+00,5.568133E+00,5.415872E+00,5.267774E+00,5.123727E+00,4.983618E+00,
4.847341E+00,4.714790E+00,4.585864E+00,4.460463E+00,4.338492E+00,4.219855E+00,4.104463E+00,
3.992226E+00,3.883059E+00,3.776876E+00,3.673597E+00,3.573143E+00,3.475435E+00,3.380399E+00,
3.287962E+00,3.198052E+00,3.110601E+00,3.025542E+00,2.942808E+00,2.862337E+00,2.784066E+00,
2.707936E+00,2.633887E+00,2.561863E+00,2.491809E+00,2.423670E+00,2.357395E+00,2.292932E+00,
2.230232E+00,2.169246E+00,2.109928E+00,2.052232E+00,1.996113E+00,1.941529E+00,1.888438E+00,
1.836799E+00,1.786571E+00,1.737718E+00,1.690200E+00,1.643981E+00,1.599026E+00,1.555301E+00,
1.512771E+00,1.471404E+00,1.431169E+00,1.392033E+00,1.353968E+00,1.316944E+00,1.280932E+00,
1.245905E+00,1.211835E+00,1.178698E+00,1.146466E+00,1.115116E+00,1.084623E+00,1.054964E+00,
1.026116E+00,9.980566E-01,9.707647E-01,9.442191E-01,9.183994E-01,8.932857E-01,8.688588E-01,
8.450998E-01,8.219905E-01,7.995131E-01,7.776504E-01,7.563855E-01,7.357021E-01,7.155843E-01,
6.960166E-01,6.769840E-01,6.584718E-01,6.404659E-01,6.229523E-01,6.059176E-01,5.893488E-01,
5.732330E-01,5.575579E-01,5.423115E-01,5.274819E-01,5.130579E-01,4.990283E-01,4.853824E-01,
4.721095E-01,4.591997E-01,4.466428E-01,4.344294E-01,4.225499E-01,4.109952E-01,3.997565E-01,
3.888252E-01,3.781927E-01,3.678510E-01,3.577921E-01,3.480083E-01,3.384920E-01,3.292359E-01,
3.202329E-01,3.114761E-01,3.029588E-01,2.946744E-01,2.866165E-01,2.787790E-01,2.711557E-01,
2.637410E-01,2.565290E-01,2.495142E-01,2.426912E-01,2.360548E-01,2.295998E-01,2.233214E-01,
2.172147E-01,2.112749E-01,2.054976E-01,1.998783E-01,1.944126E-01,1.890964E-01,1.839255E-01,
1.788961E-01,1.740041E-01,1.692460E-01,1.646180E-01,1.601165E-01,1.557381E-01,1.514794E-01,
1.473372E-01,1.433082E-01,1.393895E-01,1.355779E-01,1.318705E-01,1.282645E-01,1.247571E-01,
1.213456E-01,1.180274E-01,1.147999E-01,1.116607E-01,1.086073E-01,1.056375E-01,1.027488E-01,
9.993914E-02,9.720630E-02,9.454818E-02,9.196276E-02,8.944803E-02,8.700207E-02,8.462300E-02,
8.230898E-02,8.005823E-02,7.786904E-02,7.573970E-02,7.366860E-02,7.165412E-02,6.969474E-02,
6.778893E-02,6.593524E-02,6.413224E-02,6.237854E-02,6.067279E-02,5.901369E-02,5.739996E-02,
5.583036E-02,5.430367E-02,5.281874E-02,5.137440E-02,4.996957E-02,4.860315E-02,4.727409E-02,
4.598138E-02,4.472402E-02,4.350104E-02,4.231150E-02,4.115449E-02,4.002912E-02,3.893452E-02,
3.786985E-02,3.683430E-02,3.582706E-02,3.484737E-02,3.389447E-02,3.296762E-02,3.206612E-02,
3.118927E-02,3.033640E-02,2.950685E-02,2.869998E-02,2.791518E-02,2.715184E-02,2.640937E-02,
2.568720E-02,2.498478E-02,2.430157E-02,2.363705E-02,2.299069E-02,2.236201E-02,2.175052E-02,
2.115575E-02,2.057724E-02,2.001456E-02,1.946726E-02,1.893493E-02,1.841715E-02,1.791353E-02,
1.742368E-02,1.694723E-02],
[3.000000E+02,2.941172E+02,2.883497E+02,2.826954E+02,2.771519E+02,2.717171E+02,2.663889E+02,
2.611652E+02,2.560439E+02,2.510230E+02,2.461006E+02,2.412747E+02,2.365435E+02,2.319050E+02,
2.273575E+02,2.228991E+02,2.185282E+02,2.142430E+02,2.100418E+02,2.059231E+02,2.018850E+02,
1.979262E+02,1.940450E+02,1.902399E+02,1.865094E+02,1.828520E+02,1.792664E+02,1.757511E+02,
1.723048E+02,1.689260E+02,1.656134E+02,1.623658E+02,1.591820E+02,1.560605E+02,1.530002E+02,
1.500000E+02,1.470586E+02,1.441749E+02,1.413477E+02,1.385759E+02,1.358585E+02,1.331944E+02,
1.305826E+02,1.280219E+02,1.255115E+02,1.230503E+02,1.206374E+02,1.182717E+02,1.159525E+02,
1.136787E+02,1.114496E+02,1.092641E+02,1.071215E+02,1.050209E+02,1.029615E+02,1.009425E+02,
9.896309E+01,9.702249E+01,9.511994E+01,9.325469E+01,9.142602E+01,8.963322E+01,8.787556E+01,
8.615238E+01,8.446298E+01,8.280671E+01,8.118292E+01,7.959098E+01,7.803025E+01,7.650012E+01,
7.500000E+01,7.352930E+01,7.208743E+01,7.067384E+01,6.928797E+01,6.792927E+01,6.659722E+01,
6.529129E+01,6.401097E+01,6.275575E+01,6.152515E+01,6.031868E+01,5.913587E+01,5.797625E+01,
5.683937E+01,5.572479E+01,5.463206E+01,5.356076E+01,5.251046E+01,5.148076E+01,5.047126E+01,
4.948155E+01,4.851124E+01,4.755997E+01,4.662735E+01,4.571301E+01,4.481661E+01,4.393778E+01,
4.307619E+01,4.223149E+01,4.140336E+01,4.059146E+01,3.979549E+01,3.901512E+01,3.825006E+01,
3.750000E+01,3.676465E+01,3.604372E+01,3.533692E+01,3.464398E+01,3.396464E+01,3.329861E+01,
3.264565E+01,3.200548E+01,3.137788E+01,3.076258E+01,3.015934E+01,2.956793E+01,2.898813E+01,
2.841969E+01,2.786239E+01,2.731603E+01,2.678038E+01,2.625523E+01,2.574038E+01,2.523563E+01,
2.474077E+01,2.425562E+01,2.377998E+01,2.331367E+01,2.285651E+01,2.240830E+01,2.196889E+01,
2.153809E+01,2.111575E+01,2.070168E+01,2.029573E+01,1.989774E+01,1.950756E+01,1.912503E+01,
1.875000E+01,1.838232E+01,1.802186E+01,1.766846E+01,1.732199E+01,1.698232E+01,1.664931E+01,
1.632282E+01,1.600274E+01,1.568894E+01,1.538129E+01,1.507967E+01,1.478397E+01,1.449406E+01,
1.420984E+01,1.393120E+01,1.365801E+01,1.339019E+01,1.312762E+01,1.287019E+01,1.261781E+01,
1.237039E+01,1.212781E+01,1.188999E+01,1.165684E+01,1.142825E+01,1.120415E+01,1.098445E+01,
1.076905E+01,1.055787E+01,1.035084E+01,1.014787E+01,9.948872E+00,9.753781E+00,9.562515E+00,
9.375000E+00,9.191162E+00,9.010929E+00,8.834230E+00,8.660996E+00,8.491159E+00,8.324653E+00,
8.161412E+00,8.001371E+00,7.844469E+00,7.690644E+00,7.539835E+00,7.391984E+00,7.247031E+00,
7.104921E+00,6.965598E+00,6.829007E+00,6.695094E+00,6.563808E+00,6.435095E+00,6.308907E+00,
6.185193E+00,6.063905E+00,5.944996E+00,5.828418E+00,5.714127E+00,5.602076E+00,5.492223E+00,
5.384524E+00,5.278936E+00,5.175420E+00,5.073933E+00,4.974436E+00,4.876890E+00,4.781258E+00,
4.687500E+00,4.595581E+00,4.505464E+00,4.417115E+00,4.330498E+00,4.245580E+00,4.162326E+00,
4.080706E+00,4.000686E+00,3.922235E+00,3.845322E+00,3.769918E+00,3.695992E+00,3.623516E+00,
3.552461E+00,3.482799E+00,3.414504E+00,3.347547E+00,3.281904E+00,3.217548E+00,3.154454E+00,
3.092597E+00,3.031953E+00,2.972498E+00,2.914209E+00,2.857063E+00,2.801038E+00,2.746111E+00,
2.692262E+00,2.639468E+00,2.587710E+00,2.536966E+00,2.487218E+00,2.438445E+00,2.390629E+00,
2.343750E+00,2.297790E+00,2.252732E+00,2.208558E+00,2.165249E+00,2.122790E+00,2.081163E+00,
2.040353E+00,2.000343E+00,1.961117E+00,1.922661E+00,1.884959E+00,1.847996E+00,1.811758E+00,
1.776230E+00,1.741400E+00,1.707252E+00,1.673774E+00,1.640952E+00,1.608774E+00,1.577227E+00,
1.546298E+00,1.515976E+00,1.486249E+00,1.457105E+00,1.428532E+00,1.400519E+00,1.373056E+00,
1.346131E+00,1.319734E+00,1.293855E+00,1.268483E+00,1.243609E+00,1.219223E+00,1.195314E+00,
1.171875E+00,1.148895E+00,1.126366E+00,1.104279E+00,1.082625E+00,1.061395E+00,1.040582E+00,
1.020176E+00,1.000171E+00,9.805587E-01,9.613305E-01,9.424794E-01,9.239979E-01,9.058789E-01,
8.881152E-01,8.706998E-01,8.536259E-01,8.368868E-01,8.204760E-01,8.043869E-01,7.886134E-01,
7.731492E-01,7.579882E-01,7.431245E-01,7.285523E-01,7.142658E-01,7.002595E-01,6.865278E-01,
6.730654E-01,6.598670E-01,6.469274E-01,6.342416E-01,6.218045E-01,6.096113E-01,5.976572E-01,
5.859375E-01,5.744476E-01,5.631831E-01,5.521394E-01,5.413123E-01,5.306975E-01,5.202908E-01,
5.100882E-01,5.000857E-01,4.902793E-01,4.806652E-01,4.712397E-01,4.619990E-01,4.529395E-01,
4.440576E-01,4.353499E-01,4.268129E-01,4.184434E-01,4.102380E-01,4.021935E-01,3.943067E-01,
3.865746E-01,3.789941E-01,3.715622E-01,3.642761E-01,3.571329E-01,3.501297E-01,3.432639E-01,
3.365327E-01,3.299335E-01,3.234637E-01,3.171208E-01,3.109023E-01,3.048056E-01,2.988286E-01,
2.929687E-01,2.872238E-01,2.815915E-01,2.760697E-01,2.706561E-01,2.653487E-01,2.601454E-01,
2.550441E-01,2.500429E-01,2.451397E-01,2.403326E-01,2.356198E-01,2.309995E-01,2.264697E-01,
2.220288E-01,2.176749E-01]]
try:
# internal model constants
ted_empty.num_simulation_days = 366
# internally specified variable (from internal database)
food_multiplier = pd.Series([15., 110., 240.])
# input variables that change per simulation
ted_empty.foliar_diss_hlife = pd.Series([15., 25., 35.])
ted_empty.app_rate_min = pd.Series([0.18, 0.5, 1.25]) # lbs a.i./acre
# application scenarios generated from 'daily_app_flag' tests and reused here
daily_flag = pd.Series([[True, False, False, True, False, False, True, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False],
[True, False, False, False, False, False, False, True, False, False,
False, False, False, False, True, False, False, False, False, False,
False, True, False, False, False, False, False, False, True, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False],
[True, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False]], dtype='bool')
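            # hedged sketch (assumption, not taken from the model source): the expected series are
            # consistent with a daily apply-then-decay recursion: on flagged days app_rate * food_multiplier
            # is added, and every day the residue decays as conc[t] = conc[t-1] * 0.5**(1. / foliar_diss_hlife);
            # e.g. simulation 0: day 0 = 0.18 * 15. = 2.7, day 1 = 2.7 * 0.5**(1. / 15.) ~= 2.578072,
            # day 3 = 2.461651 * 0.5**(1. / 15.) + 2.7 ~= 5.050487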
for i in range(3):
result[i] = ted_empty.daily_plant_timeseries(i, ted_empty.app_rate_min[i], food_multiplier[i], daily_flag[i])
npt.assert_allclose(result[i],expected_results[i],rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
for i in range(3):
tab = [result[i], expected_results[i]]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_daily_soil_h2o_timeseries(self):
"""
:description generates annual timeseries of daily pesticide concentrations in soil pore water and surface puddles
:param i; simulation number/index
:param application rate; active ingredient application rate (lbs a.i./acre)
:param daily_flag; daily flag denoting if pesticide is applied (0 - not applied, 1 - applied)
:param water_type; type of water (pore water or surface puddles)
:Notes # calculations are performed daily from day of first application (assumed day 0) through the last day of a year
# note: day numbers are synchronized with 0-based array indexing; thus the year does not have a calendar specific
               # association, rather it is one year from the day of 1st pesticide application
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
ted_empty = self.create_ted_object()
expected_results = pd.Series([], dtype='float')
result = pd.Series([], dtype='float')
expected_results = [[2.235571E-02,2.134616E-02,2.038220E-02,4.181749E-02,3.992908E-02,3.812594E-02,
5.875995E-02,5.610644E-02,5.357277E-02,5.115350E-02,4.884349E-02,4.663780E-02,
4.453171E-02,4.252073E-02,4.060056E-02,3.876711E-02,3.701645E-02,3.534484E-02,
3.374873E-02,3.222469E-02,3.076947E-02,2.937997E-02,2.805322E-02,2.678638E-02,
2.557675E-02,2.442175E-02,2.331890E-02,2.226586E-02,2.126037E-02,2.030028E-02,
1.938355E-02,1.850822E-02,1.767242E-02,1.687436E-02,1.611234E-02,1.538474E-02,
1.468999E-02,1.402661E-02,1.339319E-02,1.278838E-02,1.221087E-02,1.165945E-02,
1.113293E-02,1.063018E-02,1.015014E-02,9.691777E-03,9.254112E-03,8.836211E-03,
8.437182E-03,8.056172E-03,7.692368E-03,7.344993E-03,7.013305E-03,6.696596E-03,
6.394188E-03,6.105437E-03,5.829725E-03,5.566464E-03,5.315091E-03,5.075070E-03,
4.845888E-03,4.627056E-03,4.418105E-03,4.218591E-03,4.028086E-03,3.846184E-03,
3.672497E-03,3.506653E-03,3.348298E-03,3.197094E-03,3.052718E-03,2.914863E-03,
2.783232E-03,2.657546E-03,2.537535E-03,2.422944E-03,2.313528E-03,2.209053E-03,
2.109295E-03,2.014043E-03,1.923092E-03,1.836248E-03,1.753326E-03,1.674149E-03,
1.598547E-03,1.526359E-03,1.457431E-03,1.391616E-03,1.328773E-03,1.268768E-03,
1.211472E-03,1.156764E-03,1.104526E-03,1.054648E-03,1.007022E-03,9.615460E-04,
9.181242E-04,8.766632E-04,8.370745E-04,7.992735E-04,7.631796E-04,7.287156E-04,
6.958080E-04,6.643864E-04,6.343838E-04,6.057361E-04,5.783820E-04,5.522632E-04,
5.273239E-04,5.035108E-04,4.807730E-04,4.590621E-04,4.383316E-04,4.185372E-04,
3.996368E-04,3.815898E-04,3.643578E-04,3.479040E-04,3.321932E-04,3.171919E-04,
3.028680E-04,2.891910E-04,2.761316E-04,2.636619E-04,2.517554E-04,2.403865E-04,
2.295310E-04,2.191658E-04,2.092686E-04,1.998184E-04,1.907949E-04,1.821789E-04,
1.739520E-04,1.660966E-04,1.585960E-04,1.514340E-04,1.445955E-04,1.380658E-04,
1.318310E-04,1.258777E-04,1.201933E-04,1.147655E-04,1.095829E-04,1.046343E-04,
9.990919E-05,9.539745E-05,9.108945E-05,8.697600E-05,8.304830E-05,7.929798E-05,
7.571701E-05,7.229775E-05,6.903290E-05,6.591548E-05,6.293885E-05,6.009663E-05,
5.738276E-05,5.479145E-05,5.231715E-05,4.995459E-05,4.769873E-05,4.554473E-05,
4.348800E-05,4.152415E-05,3.964899E-05,3.785850E-05,3.614887E-05,3.451645E-05,
3.295774E-05,3.146942E-05,3.004831E-05,2.869138E-05,2.739572E-05,2.615858E-05,
2.497730E-05,2.384936E-05,2.277236E-05,2.174400E-05,2.076208E-05,1.982449E-05,
1.892925E-05,1.807444E-05,1.725822E-05,1.647887E-05,1.573471E-05,1.502416E-05,
1.434569E-05,1.369786E-05,1.307929E-05,1.248865E-05,1.192468E-05,1.138618E-05,
1.087200E-05,1.038104E-05,9.912247E-06,9.464626E-06,9.037219E-06,8.629112E-06,
8.239435E-06,7.867356E-06,7.512079E-06,7.172845E-06,6.848931E-06,6.539644E-06,
6.244324E-06,5.962341E-06,5.693091E-06,5.436000E-06,5.190519E-06,4.956124E-06,
4.732313E-06,4.518609E-06,4.314556E-06,4.119718E-06,3.933678E-06,3.756039E-06,
3.586423E-06,3.424465E-06,3.269822E-06,3.122162E-06,2.981170E-06,2.846545E-06,
2.718000E-06,2.595260E-06,2.478062E-06,2.366156E-06,2.259305E-06,2.157278E-06,
2.059859E-06,1.966839E-06,1.878020E-06,1.793211E-06,1.712233E-06,1.634911E-06,
1.561081E-06,1.490585E-06,1.423273E-06,1.359000E-06,1.297630E-06,1.239031E-06,
1.183078E-06,1.129652E-06,1.078639E-06,1.029929E-06,9.834195E-07,9.390098E-07,
8.966056E-07,8.561164E-07,8.174555E-07,7.805405E-07,7.452926E-07,7.116364E-07,
6.795000E-07,6.488149E-07,6.195154E-07,5.915391E-07,5.648262E-07,5.393195E-07,
5.149647E-07,4.917097E-07,4.695049E-07,4.483028E-07,4.280582E-07,4.087278E-07,
3.902703E-07,3.726463E-07,3.558182E-07,3.397500E-07,3.244074E-07,3.097577E-07,
2.957696E-07,2.824131E-07,2.696598E-07,2.574824E-07,2.458549E-07,2.347525E-07,
2.241514E-07,2.140291E-07,2.043639E-07,1.951351E-07,1.863231E-07,1.779091E-07,
1.698750E-07,1.622037E-07,1.548789E-07,1.478848E-07,1.412065E-07,1.348299E-07,
1.287412E-07,1.229274E-07,1.173762E-07,1.120757E-07,1.070145E-07,1.021819E-07,
9.756757E-08,9.316157E-08,8.895455E-08,8.493750E-08,8.110186E-08,7.743943E-08,
7.394239E-08,7.060327E-08,6.741494E-08,6.437059E-08,6.146372E-08,5.868811E-08,
5.603785E-08,5.350727E-08,5.109097E-08,4.878378E-08,4.658079E-08,4.447727E-08,
4.246875E-08,4.055093E-08,3.871971E-08,3.697119E-08,3.530163E-08,3.370747E-08,
3.218529E-08,3.073186E-08,2.934406E-08,2.801893E-08,2.675364E-08,2.554549E-08,
2.439189E-08,2.329039E-08,2.223864E-08,2.123438E-08,2.027546E-08,1.935986E-08,
1.848560E-08,1.765082E-08,1.685373E-08,1.609265E-08,1.536593E-08,1.467203E-08,
1.400946E-08,1.337682E-08,1.277274E-08,1.219595E-08,1.164520E-08,1.111932E-08,
1.061719E-08,1.013773E-08,9.679929E-09,9.242799E-09,8.825409E-09,8.426867E-09,
8.046324E-09,7.682965E-09,7.336014E-09,7.004732E-09,6.688409E-09,6.386371E-09,
6.097973E-09,5.822598E-09,5.559659E-09,5.308594E-09,5.068866E-09,4.839964E-09,
4.621399E-09,4.412704E-09,4.213434E-09,4.023162E-09,3.841482E-09,3.668007E-09],
[9.391514E-02,8.762592E-02,8.175787E-02,7.628279E-02,7.117436E-02,6.640803E-02,
6.196088E-02,1.517267E-01,1.415660E-01,1.320858E-01,1.232404E-01,1.149873E-01,
1.072870E-01,1.001023E-01,1.873139E-01,1.747700E-01,1.630662E-01,1.521461E-01,
1.419574E-01,1.324509E-01,1.235811E-01,2.092203E-01,1.952095E-01,1.821369E-01,
1.699397E-01,1.585594E-01,1.479411E-01,1.380340E-01,2.227054E-01,2.077915E-01,
1.938763E-01,1.808930E-01,1.687791E-01,1.574765E-01,1.469307E-01,1.370912E-01,
1.279106E-01,1.193449E-01,1.113527E-01,1.038957E-01,9.693814E-02,9.044648E-02,
8.438955E-02,7.873824E-02,7.346537E-02,6.854562E-02,6.395532E-02,5.967242E-02,
5.567634E-02,5.194786E-02,4.846907E-02,4.522324E-02,4.219478E-02,3.936912E-02,
3.673269E-02,3.427281E-02,3.197766E-02,2.983621E-02,2.783817E-02,2.597393E-02,
2.423454E-02,2.261162E-02,2.109739E-02,1.968456E-02,1.836634E-02,1.713640E-02,
1.598883E-02,1.491811E-02,1.391909E-02,1.298697E-02,1.211727E-02,1.130581E-02,
1.054869E-02,9.842280E-03,9.183172E-03,8.568202E-03,7.994415E-03,7.459053E-03,
6.959543E-03,6.493483E-03,6.058634E-03,5.652905E-03,5.274347E-03,4.921140E-03,
4.591586E-03,4.284101E-03,3.997208E-03,3.729527E-03,3.479771E-03,3.246741E-03,
3.029317E-03,2.826453E-03,2.637174E-03,2.460570E-03,2.295793E-03,2.142051E-03,
1.998604E-03,1.864763E-03,1.739886E-03,1.623371E-03,1.514658E-03,1.413226E-03,
1.318587E-03,1.230285E-03,1.147896E-03,1.071025E-03,9.993019E-04,9.323816E-04,
8.699428E-04,8.116854E-04,7.573292E-04,7.066131E-04,6.592934E-04,6.151425E-04,
5.739482E-04,5.355126E-04,4.996509E-04,4.661908E-04,4.349714E-04,4.058427E-04,
3.786646E-04,3.533066E-04,3.296467E-04,3.075712E-04,2.869741E-04,2.677563E-04,
2.498255E-04,2.330954E-04,2.174857E-04,2.029213E-04,1.893323E-04,1.766533E-04,
1.648233E-04,1.537856E-04,1.434871E-04,1.338782E-04,1.249127E-04,1.165477E-04,
1.087429E-04,1.014607E-04,9.466615E-05,8.832664E-05,8.241167E-05,7.689281E-05,
7.174353E-05,6.693908E-05,6.245637E-05,5.827385E-05,5.437143E-05,5.073034E-05,
4.733308E-05,4.416332E-05,4.120584E-05,3.844640E-05,3.587176E-05,3.346954E-05,
3.122818E-05,2.913693E-05,2.718571E-05,2.536517E-05,2.366654E-05,2.208166E-05,
2.060292E-05,1.922320E-05,1.793588E-05,1.673477E-05,1.561409E-05,1.456846E-05,
1.359286E-05,1.268258E-05,1.183327E-05,1.104083E-05,1.030146E-05,9.611601E-06,
8.967941E-06,8.367385E-06,7.807046E-06,7.284232E-06,6.796428E-06,6.341292E-06,
5.916635E-06,5.520415E-06,5.150730E-06,4.805801E-06,4.483971E-06,4.183692E-06,
3.903523E-06,3.642116E-06,3.398214E-06,3.170646E-06,2.958317E-06,2.760208E-06,
2.575365E-06,2.402900E-06,2.241985E-06,2.091846E-06,1.951762E-06,1.821058E-06,
1.699107E-06,1.585323E-06,1.479159E-06,1.380104E-06,1.287682E-06,1.201450E-06,
1.120993E-06,1.045923E-06,9.758808E-07,9.105289E-07,8.495535E-07,7.926615E-07,
7.395793E-07,6.900519E-07,6.438412E-07,6.007251E-07,5.604963E-07,5.229616E-07,
4.879404E-07,4.552645E-07,4.247768E-07,3.963307E-07,3.697897E-07,3.450260E-07,
3.219206E-07,3.003625E-07,2.802482E-07,2.614808E-07,2.439702E-07,2.276322E-07,
2.123884E-07,1.981654E-07,1.848948E-07,1.725130E-07,1.609603E-07,1.501813E-07,
1.401241E-07,1.307404E-07,1.219851E-07,1.138161E-07,1.061942E-07,9.908269E-08,
9.244741E-08,8.625649E-08,8.048015E-08,7.509063E-08,7.006204E-08,6.537019E-08,
6.099255E-08,5.690806E-08,5.309710E-08,4.954134E-08,4.622371E-08,4.312824E-08,
4.024007E-08,3.754532E-08,3.503102E-08,3.268510E-08,3.049627E-08,2.845403E-08,
2.654855E-08,2.477067E-08,2.311185E-08,2.156412E-08,2.012004E-08,1.877266E-08,
1.751551E-08,1.634255E-08,1.524814E-08,1.422702E-08,1.327427E-08,1.238534E-08,
1.155593E-08,1.078206E-08,1.006002E-08,9.386329E-09,8.757755E-09,8.171274E-09,
7.624068E-09,7.113507E-09,6.637137E-09,6.192668E-09,5.777963E-09,5.391030E-09,
5.030009E-09,4.693165E-09,4.378877E-09,4.085637E-09,3.812034E-09,3.556754E-09,
3.318569E-09,3.096334E-09,2.888982E-09,2.695515E-09,2.515005E-09,2.346582E-09,
2.189439E-09,2.042819E-09,1.906017E-09,1.778377E-09,1.659284E-09,1.548167E-09,
1.444491E-09,1.347758E-09,1.257502E-09,1.173291E-09,1.094719E-09,1.021409E-09,
9.530086E-10,8.891884E-10,8.296421E-10,7.740835E-10,7.222454E-10,6.738788E-10,
6.287512E-10,5.866456E-10,5.473597E-10,5.107046E-10,4.765043E-10,4.445942E-10,
4.148211E-10,3.870417E-10,3.611227E-10,3.369394E-10,3.143756E-10,2.933228E-10,
2.736798E-10,2.553523E-10,2.382521E-10,2.222971E-10,2.074105E-10,1.935209E-10,
1.805614E-10,1.684697E-10,1.571878E-10,1.466614E-10,1.368399E-10,1.276762E-10,
1.191261E-10,1.111486E-10,1.037053E-10,9.676043E-11,9.028068E-11,8.423485E-11,
7.859390E-11,7.333070E-11,6.841996E-11,6.383808E-11,5.956303E-11,5.557428E-11,
5.185263E-11,4.838022E-11,4.514034E-11,4.211743E-11,3.929695E-11,3.666535E-11,
3.420998E-11,3.191904E-11,2.978152E-11,2.778714E-11,2.592632E-11,2.419011E-11,
2.257017E-11,2.105871E-11,1.964847E-11,1.833267E-11,1.710499E-11,1.595952E-11],
[1.172251E-01,1.132320E-01,1.093749E-01,1.056492E-01,1.020504E-01,9.857420E-02,
9.521640E-02,9.197298E-02,8.884005E-02,8.581383E-02,8.289069E-02,8.006713E-02,
7.733975E-02,7.470528E-02,7.216054E-02,6.970249E-02,6.732817E-02,6.503472E-02,
6.281940E-02,6.067954E-02,5.861257E-02,5.661601E-02,5.468746E-02,5.282461E-02,
5.102521E-02,4.928710E-02,4.760820E-02,4.598649E-02,4.442002E-02,4.290691E-02,
4.144535E-02,4.003357E-02,3.866988E-02,3.735264E-02,3.608027E-02,3.485124E-02,
3.366408E-02,3.251736E-02,3.140970E-02,3.033977E-02,2.930629E-02,2.830801E-02,
2.734373E-02,2.641230E-02,2.551260E-02,2.464355E-02,2.380410E-02,2.299325E-02,
2.221001E-02,2.145346E-02,2.072267E-02,2.001678E-02,1.933494E-02,1.867632E-02,
1.804014E-02,1.742562E-02,1.683204E-02,1.625868E-02,1.570485E-02,1.516989E-02,
1.465314E-02,1.415400E-02,1.367187E-02,1.320615E-02,1.275630E-02,1.232178E-02,
1.190205E-02,1.149662E-02,1.110501E-02,1.072673E-02,1.036134E-02,1.000839E-02,
9.667469E-03,9.338160E-03,9.020068E-03,8.712811E-03,8.416021E-03,8.129340E-03,
7.852425E-03,7.584943E-03,7.326572E-03,7.077002E-03,6.835933E-03,6.603076E-03,
6.378151E-03,6.160888E-03,5.951025E-03,5.748312E-03,5.552503E-03,5.363364E-03,
5.180668E-03,5.004196E-03,4.833735E-03,4.669080E-03,4.510034E-03,4.356406E-03,
4.208010E-03,4.064670E-03,3.926212E-03,3.792471E-03,3.663286E-03,3.538501E-03,
3.417966E-03,3.301538E-03,3.189075E-03,3.080444E-03,2.975513E-03,2.874156E-03,
2.776251E-03,2.681682E-03,2.590334E-03,2.502098E-03,2.416867E-03,2.334540E-03,
2.255017E-03,2.178203E-03,2.104005E-03,2.032335E-03,1.963106E-03,1.896236E-03,
1.831643E-03,1.769250E-03,1.708983E-03,1.650769E-03,1.594538E-03,1.540222E-03,
1.487756E-03,1.437078E-03,1.388126E-03,1.340841E-03,1.295167E-03,1.251049E-03,
1.208434E-03,1.167270E-03,1.127508E-03,1.089101E-03,1.052003E-03,1.016168E-03,
9.815531E-04,9.481178E-04,9.158214E-04,8.846252E-04,8.544916E-04,8.253845E-04,
7.972689E-04,7.701110E-04,7.438782E-04,7.185389E-04,6.940629E-04,6.704205E-04,
6.475836E-04,6.255245E-04,6.042168E-04,5.836350E-04,5.637542E-04,5.445507E-04,
5.260013E-04,5.080838E-04,4.907766E-04,4.740589E-04,4.579107E-04,4.423126E-04,
4.272458E-04,4.126923E-04,3.986344E-04,3.850555E-04,3.719391E-04,3.592695E-04,
3.470314E-04,3.352103E-04,3.237918E-04,3.127622E-04,3.021084E-04,2.918175E-04,
2.818771E-04,2.722753E-04,2.630006E-04,2.540419E-04,2.453883E-04,2.370295E-04,
2.289554E-04,2.211563E-04,2.136229E-04,2.063461E-04,1.993172E-04,1.925277E-04,
1.859695E-04,1.796347E-04,1.735157E-04,1.676051E-04,1.618959E-04,1.563811E-04,
1.510542E-04,1.459087E-04,1.409386E-04,1.361377E-04,1.315003E-04,1.270209E-04,
1.226941E-04,1.185147E-04,1.144777E-04,1.105782E-04,1.068115E-04,1.031731E-04,
9.965861E-05,9.626387E-05,9.298477E-05,8.981737E-05,8.675786E-05,8.380257E-05,
8.094794E-05,7.819056E-05,7.552710E-05,7.295437E-05,7.046928E-05,6.806884E-05,
6.575016E-05,6.351047E-05,6.134707E-05,5.925736E-05,5.723884E-05,5.528908E-05,
5.340573E-05,5.158653E-05,4.982930E-05,4.813194E-05,4.649239E-05,4.490868E-05,
4.337893E-05,4.190128E-05,4.047397E-05,3.909528E-05,3.776355E-05,3.647719E-05,
3.523464E-05,3.403442E-05,3.287508E-05,3.175523E-05,3.067354E-05,2.962868E-05,
2.861942E-05,2.764454E-05,2.670286E-05,2.579327E-05,2.491465E-05,2.406597E-05,
2.324619E-05,2.245434E-05,2.168946E-05,2.095064E-05,2.023699E-05,1.954764E-05,
1.888178E-05,1.823859E-05,1.761732E-05,1.701721E-05,1.643754E-05,1.587762E-05,
1.533677E-05,1.481434E-05,1.430971E-05,1.382227E-05,1.335143E-05,1.289663E-05,
1.245733E-05,1.203298E-05,1.162310E-05,1.122717E-05,1.084473E-05,1.047532E-05,
1.011849E-05,9.773820E-06,9.440888E-06,9.119297E-06,8.808660E-06,8.508605E-06,
8.218770E-06,7.938809E-06,7.668384E-06,7.407170E-06,7.154855E-06,6.911134E-06,
6.675716E-06,6.448316E-06,6.228663E-06,6.016492E-06,5.811548E-06,5.613585E-06,
5.422366E-06,5.237660E-06,5.059247E-06,4.886910E-06,4.720444E-06,4.559648E-06,
4.404330E-06,4.254302E-06,4.109385E-06,3.969404E-06,3.834192E-06,3.703585E-06,
3.577428E-06,3.455567E-06,3.337858E-06,3.224158E-06,3.114332E-06,3.008246E-06,
2.905774E-06,2.806793E-06,2.711183E-06,2.618830E-06,2.529623E-06,2.443455E-06,
2.360222E-06,2.279824E-06,2.202165E-06,2.127151E-06,2.054693E-06,1.984702E-06,
1.917096E-06,1.851793E-06,1.788714E-06,1.727784E-06,1.668929E-06,1.612079E-06,
1.557166E-06,1.504123E-06,1.452887E-06,1.403396E-06,1.355592E-06,1.309415E-06,
1.264812E-06,1.221728E-06,1.180111E-06,1.139912E-06,1.101082E-06,1.063576E-06,
1.027346E-06,9.923511E-07,9.585480E-07,9.258963E-07,8.943569E-07,8.638918E-07,
8.344645E-07,8.060396E-07,7.785829E-07,7.520615E-07,7.264435E-07,7.016982E-07,
6.777958E-07,6.547076E-07,6.324058E-07,6.108638E-07,5.900555E-07,5.699560E-07,
5.505412E-07,5.317878E-07,5.136731E-07,4.961755E-07,4.792740E-07,4.629482E-07,
4.471784E-07,4.319459E-07,4.172322E-07,4.030198E-07,3.892914E-07,3.760307E-07]]
try:
# internal model constants
ted_empty.num_simulation_days = 366
ted_empty.app_rate_conv1 = 11.2
ted_empty.h2o_depth_puddles = 1.3
ted_empty.soil_depth = 2.6
ted_empty.soil_porosity = 0.4339623
ted_empty.soil_bulk_density = 1.5
ted_empty.h2o_depth_soil = 0.0
ted_empty.soil_foc = 0.015
# internally specified variable
water_type = ['puddles', 'pore_water', 'puddles']
# input variables that change per simulation
ted_empty.aerobic_soil_meta_hlife = pd.Series([15., 10., 20.], dtype='float')
ted_empty.koc = pd.Series([1500., 1000., 2000.], dtype='float')
ted_empty.app_rate_min = pd.Series([0.18, 0.5, 1.25]) # lbs a.i./acre
# application scenarios generated from 'daily_app_flag' tests and reused here
daily_flag = pd.Series([[True, False, False, True, False, False, True, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False],
[True, False, False, False, False, False, False, True, False, False,
False, False, False, False, True, False, False, False, False, False,
False, True, False, False, False, False, False, False, True, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False],
[True, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False]], dtype='bool')
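            # hedged sketch (assumption, not taken from the model source): the expected series are
            # consistent with the same apply-then-decay recursion, seeded by conc_initial_soil_h2o on
            # flagged days and decayed daily with aerobic_soil_meta_hlife;
            # e.g. simulation 0 (puddles): day 0 = (0.18 * 11.2) /
            # (1.3 + 2.6 * (0.4339623 + 1.5 * 1500. * 0.015)) ~= 2.235571e-2,
            # day 1 = 2.235571e-2 * 0.5**(1. / 15.) ~= 2.134616e-2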
for i in range(3):
result[i] = ted_empty.daily_soil_h2o_timeseries(i, ted_empty.app_rate_min[i], daily_flag[i], water_type[i])
npt.assert_allclose(result[i],expected_results[i],rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
for i in range(3):
tab = [result[i], expected_results[i]]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_daily_plant_dew_timeseries(self):
"""
:description generates annual timeseries of daily pesticide concentrations in dew that resides on broad leaf plants
:param i; simulation number/index
:param blp_conc; daily values of pesticide concentration in broad leaf plant dew
:Notes # calculations are performed daily from day of first application (assumed day 0) through the last day of a year
# note: day numbers are synchronized with 0-based array indexing; thus the year does not have a calendar specific
               # association, rather it is one year from the day of 1st pesticide application
#this represents Eq 11 of Attachment 1-7 of 'Biological Evaluation Chapters for Diazinon ESA Assessment'
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
ted_empty = self.create_ted_object()
expected_results = pd.Series([], dtype='float')
        result = pd.Series([[]], dtype='float')
import pandas as pd
import re
import xlrd
def count_periodicos(lido):
resultados = re.findall(r'\t\d\d\d\d\d\d\d\w\t', lido)
pontuacao = 0
    # since the program also finds some useless numbers, they are filtered out here.
tudocerto = []
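    # hedged note (assumption about intent): each 8-character match appears to be an ISSN-like code,
    # reformatted below as 'NNNN-NNNC' before counting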
for x in resultados:
subs = x[1:5]
subs = subs + '-'
subs = subs + x[5:9]
tudocerto.append(subs)
    artigos_validados = pd.Series(tudocerto)
import pandas as pd
import argparse
import os
parser = argparse.ArgumentParser()
parser.add_argument("--twas_folder", help="Folder with TWAS results")
parser.add_argument("--models_folder", help="Folder where the prediction models are saved")
parser.add_argument("--models",help="Comma separated list of models used for TWAS analysis")
parser.add_argument("--out",help="Output file")
args = parser.parse_args()
###Load TWAS results###
data=[]
for file in os.listdir(args.twas_folder):
temp=pd.read_csv(os.path.join(args.twas_folder,file))
temp["model"]=file.replace(".asso.csv","")
data.append(temp)
data=pd.concat(data)
###Get number of gene-model pairs##
models=args.models.split(",")
npairs=0
for model in models:
npairs += pd.read_csv(f"{args.models_folder}/{model}/output/{model}.cov", sep="\t", usecols=["GENE"])
from distutils.version import LooseVersion
from warnings import catch_warnings
import numpy as np
import pytest
from pandas._libs.tslibs import Timestamp
import pandas as pd
from pandas import (
DataFrame,
HDFStore,
Index,
MultiIndex,
Series,
_testing as tm,
bdate_range,
concat,
date_range,
isna,
read_hdf,
)
from pandas.tests.io.pytables.common import (
_maybe_remove,
ensure_clean_path,
ensure_clean_store,
tables,
)
from pandas.io.pytables import Term
pytestmark = pytest.mark.single
def test_select_columns_in_where(setup_path):
# GH 6169
# recreate multi-indexes when columns is passed
# in the `where` argument
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["foo_name", "bar_name"],
)
# With a DataFrame
df = DataFrame(np.random.randn(10, 3), index=index, columns=["A", "B", "C"])
with ensure_clean_store(setup_path) as store:
store.put("df", df, format="table")
expected = df[["A"]]
tm.assert_frame_equal(store.select("df", columns=["A"]), expected)
tm.assert_frame_equal(store.select("df", where="columns=['A']"), expected)
# With a Series
s = Series(np.random.randn(10), index=index, name="A")
with ensure_clean_store(setup_path) as store:
store.put("s", s, format="table")
tm.assert_series_equal(store.select("s", where="columns=['A']"), s)
def test_select_with_dups(setup_path):
# single dtypes
df = DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"])
df.index = date_range("20130101 9:30", periods=10, freq="T")
with ensure_clean_store(setup_path) as store:
store.append("df", df)
result = store.select("df")
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=df.columns)
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=["A"])
expected = df.loc[:, ["A"]]
tm.assert_frame_equal(result, expected)
# dups across dtypes
df = concat(
[
DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"]),
DataFrame(
np.random.randint(0, 10, size=20).reshape(10, 2), columns=["A", "C"]
),
],
axis=1,
)
df.index = date_range("20130101 9:30", periods=10, freq="T")
with ensure_clean_store(setup_path) as store:
store.append("df", df)
result = store.select("df")
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=df.columns)
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
expected = df.loc[:, ["A"]]
result = store.select("df", columns=["A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
expected = df.loc[:, ["B", "A"]]
result = store.select("df", columns=["B", "A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
# duplicates on both index and columns
with ensure_clean_store(setup_path) as store:
store.append("df", df)
store.append("df", df)
expected = df.loc[:, ["B", "A"]]
expected = concat([expected, expected])
result = store.select("df", columns=["B", "A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
def test_select(setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
# select with columns=
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df")
store.append("df", df)
result = store.select("df", columns=["A", "B"])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# equivalently
result = store.select("df", [("columns=['A', 'B']")])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# with a data column
_maybe_remove(store, "df")
store.append("df", df, data_columns=["A"])
result = store.select("df", ["A > 0"], columns=["A", "B"])
expected = df[df.A > 0].reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# all a data columns
_maybe_remove(store, "df")
store.append("df", df, data_columns=True)
result = store.select("df", ["A > 0"], columns=["A", "B"])
expected = df[df.A > 0].reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# with a data column, but different columns
_maybe_remove(store, "df")
store.append("df", df, data_columns=["A"])
result = store.select("df", ["A > 0"], columns=["C", "D"])
expected = df[df.A > 0].reindex(columns=["C", "D"])
tm.assert_frame_equal(expected, result)
def test_select_dtypes(setup_path):
with ensure_clean_store(setup_path) as store:
# with a Timestamp data column (GH #2637)
df = DataFrame(
{
"ts": bdate_range("2012-01-01", periods=300),
"A": np.random.randn(300),
}
)
_maybe_remove(store, "df")
store.append("df", df, data_columns=["ts", "A"])
result = store.select("df", "ts>=Timestamp('2012-02-01')")
expected = df[df.ts >= Timestamp("2012-02-01")]
tm.assert_frame_equal(expected, result)
# bool columns (GH #2849)
df = DataFrame(np.random.randn(5, 2), columns=["A", "B"])
df["object"] = "foo"
df.loc[4:5, "object"] = "bar"
df["boolv"] = df["A"] > 0
_maybe_remove(store, "df")
store.append("df", df, data_columns=True)
expected = df[df.boolv == True].reindex(columns=["A", "boolv"]) # noqa
for v in [True, "true", 1]:
result = store.select("df", f"boolv == {v}", columns=["A", "boolv"])
tm.assert_frame_equal(expected, result)
expected = df[df.boolv == False].reindex(columns=["A", "boolv"]) # noqa
for v in [False, "false", 0]:
result = store.select("df", f"boolv == {v}", columns=["A", "boolv"])
tm.assert_frame_equal(expected, result)
# integer index
df = DataFrame({"A": np.random.rand(20), "B": np.random.rand(20)})
_maybe_remove(store, "df_int")
store.append("df_int", df)
result = store.select("df_int", "index<10 and columns=['A']")
expected = df.reindex(index=list(df.index)[0:10], columns=["A"])
tm.assert_frame_equal(expected, result)
# float index
df = DataFrame(
{
"A": np.random.rand(20),
"B": np.random.rand(20),
"index": np.arange(20, dtype="f8"),
}
)
_maybe_remove(store, "df_float")
store.append("df_float", df)
result = store.select("df_float", "index<10.0 and columns=['A']")
expected = df.reindex(index=list(df.index)[0:10], columns=["A"])
tm.assert_frame_equal(expected, result)
with ensure_clean_store(setup_path) as store:
# floats w/o NaN
df = DataFrame({"cols": range(11), "values": range(11)}, dtype="float64")
df["cols"] = (df["cols"] + 10).apply(str)
store.append("df1", df, data_columns=True)
result = store.select("df1", where="values>2.0")
expected = df[df["values"] > 2.0]
tm.assert_frame_equal(expected, result)
# floats with NaN
df.iloc[0] = np.nan
expected = df[df["values"] > 2.0]
store.append("df2", df, data_columns=True, index=False)
result = store.select("df2", where="values>2.0")
tm.assert_frame_equal(expected, result)
# https://github.com/PyTables/PyTables/issues/282
# bug in selection when 0th row has a np.nan and an index
# store.append('df3',df,data_columns=True)
# result = store.select(
# 'df3', where='values>2.0')
# tm.assert_frame_equal(expected, result)
# not in first position float with NaN ok too
df = DataFrame({"cols": range(11), "values": range(11)}, dtype="float64")
df["cols"] = (df["cols"] + 10).apply(str)
df.iloc[1] = np.nan
expected = df[df["values"] > 2.0]
store.append("df4", df, data_columns=True)
result = store.select("df4", where="values>2.0")
tm.assert_frame_equal(expected, result)
# test selection with comparison against numpy scalar
# GH 11283
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
expected = df[df["A"] > 0]
store.append("df", df, data_columns=True)
np_zero = np.float64(0) # noqa
result = store.select("df", where=["A>np_zero"])
tm.assert_frame_equal(expected, result)
def test_select_with_many_inputs(setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(
{
"ts": bdate_range("2012-01-01", periods=300),
"A": np.random.randn(300),
"B": range(300),
"users": ["a"] * 50
+ ["b"] * 50
+ ["c"] * 100
+ [f"a{i:03d}" for i in range(100)],
}
)
_maybe_remove(store, "df")
store.append("df", df, data_columns=["ts", "A", "B", "users"])
# regular select
result = store.select("df", "ts>=Timestamp('2012-02-01')")
expected = df[df.ts >= Timestamp("2012-02-01")]
tm.assert_frame_equal(expected, result)
# small selector
result = store.select("df", "ts>=Timestamp('2012-02-01') & users=['a','b','c']")
expected = df[
(df.ts >= Timestamp("2012-02-01")) & df.users.isin(["a", "b", "c"])
]
tm.assert_frame_equal(expected, result)
# big selector along the columns
selector = ["a", "b", "c"] + [f"a{i:03d}" for i in range(60)]
result = store.select("df", "ts>=Timestamp('2012-02-01') and users=selector")
expected = df[(df.ts >= Timestamp("2012-02-01")) & df.users.isin(selector)]
tm.assert_frame_equal(expected, result)
selector = range(100, 200)
result = store.select("df", "B=selector")
expected = df[df.B.isin(selector)]
tm.assert_frame_equal(expected, result)
assert len(result) == 100
# big selector along the index
selector = Index(df.ts[0:100].values)
result = store.select("df", "ts=selector")
expected = df[df.ts.isin(selector.values)]
tm.assert_frame_equal(expected, result)
assert len(result) == 100
def test_select_iterator(setup_path):
# single table
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame(500)
_maybe_remove(store, "df")
store.append("df", df)
expected = store.select("df")
results = list(store.select("df", iterator=True))
result = concat(results)
tm.assert_frame_equal(expected, result)
results = list(store.select("df", chunksize=100))
assert len(results) == 5
result = concat(results)
tm.assert_frame_equal(expected, result)
results = list(store.select("df", chunksize=150))
result = concat(results)
tm.assert_frame_equal(result, expected)
with ensure_clean_path(setup_path) as path:
df = tm.makeTimeDataFrame(500)
df.to_hdf(path, "df_non_table")
msg = "can only use an iterator or chunksize on a table"
with pytest.raises(TypeError, match=msg):
read_hdf(path, "df_non_table", chunksize=100)
with pytest.raises(TypeError, match=msg):
read_hdf(path, "df_non_table", iterator=True)
with ensure_clean_path(setup_path) as path:
df = tm.makeTimeDataFrame(500)
df.to_hdf(path, "df", format="table")
results = list(read_hdf(path, "df", chunksize=100))
result = concat(results)
assert len(results) == 5
tm.assert_frame_equal(result, df)
tm.assert_frame_equal(result, read_hdf(path, "df"))
# multiple
with ensure_clean_store(setup_path) as store:
df1 = tm.makeTimeDataFrame(500)
store.append("df1", df1, data_columns=True)
df2 = tm.makeTimeDataFrame(500).rename(columns="{}_2".format)
df2["foo"] = "bar"
store.append("df2", df2)
df = concat([df1, df2], axis=1)
# full selection
expected = store.select_as_multiple(["df1", "df2"], selector="df1")
results = list(
store.select_as_multiple(["df1", "df2"], selector="df1", chunksize=150)
)
result = concat(results)
tm.assert_frame_equal(expected, result)
def test_select_iterator_complete_8014(setup_path):
# GH 8014
# using iterator and where clause
chunksize = 1e4
# no iterator
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[0]
end_dt = expected.index[-1]
# select w/o iteration and no where clause works
result = store.select("df")
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, single term, begin
# of range, works
where = f"index >= '{beg_dt}'"
result = store.select("df", where=where)
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, single term, end
# of range, works
where = f"index <= '{end_dt}'"
result = store.select("df", where=where)
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, inclusive range,
# works
where = f"index >= '{beg_dt}' & index <= '{end_dt}'"
result = store.select("df", where=where)
tm.assert_frame_equal(expected, result)
# with iterator, full range
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[0]
end_dt = expected.index[-1]
# select w/iterator and no where clause works
results = list(store.select("df", chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, single term, begin of range
where = f"index >= '{beg_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, single term, end of range
where = f"index <= '{end_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, inclusive range
where = f"index >= '{beg_dt}' & index <= '{end_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
def test_select_iterator_non_complete_8014(setup_path):
# GH 8014
# using iterator and where clause
chunksize = 1e4
# with iterator, non complete range
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[1]
end_dt = expected.index[-2]
# select w/iterator and where clause, single term, begin of range
where = f"index >= '{beg_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[expected.index >= beg_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, single term, end of range
where = f"index <= '{end_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[expected.index <= end_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, inclusive range
where = f"index >= '{beg_dt}' & index <= '{end_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[(expected.index >= beg_dt) & (expected.index <= end_dt)]
tm.assert_frame_equal(rexpected, result)
# with iterator, empty where
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
end_dt = expected.index[-1]
# select w/iterator and where clause, single term, begin of range
where = f"index > '{end_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
assert 0 == len(results)
def test_select_iterator_many_empty_frames(setup_path):
# GH 8014
# using iterator and where clause can return many empty
# frames.
chunksize = 10_000
# with iterator, range limited to the first chunk
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100000, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[0]
end_dt = expected.index[chunksize - 1]
# select w/iterator and where clause, single term, begin of range
where = f"index >= '{beg_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[expected.index >= beg_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, single term, end of range
where = f"index <= '{end_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
assert len(results) == 1
result = concat(results)
rexpected = expected[expected.index <= end_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, inclusive range
where = f"index >= '{beg_dt}' & index <= '{end_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
# should be 1, is 10
assert len(results) == 1
result = concat(results)
rexpected = expected[(expected.index >= beg_dt) & (expected.index <= end_dt)]
tm.assert_frame_equal(rexpected, result)
import multiprocessing
import xml.etree.ElementTree as ET
import pandas as pd
import os
from typing import List, Dict
from watchlib.utils import ECG, WorkoutRoute
from abc import ABC
from multiprocessing import Pool
import json
import numpy as np
import logging
logging.basicConfig(level=logging.INFO, filename="watchlib.log", filemode="w", format="%(asctime)s - %(levelname)s - %(message)s")
class DataManager(ABC):
def __init__(self, path: str) -> None:
self.path = path
self.export_path = os.path.join(path, "Export.xml")
self.ecg_path = os.path.join(path, "electrocardiograms")
self.workout_path = os.path.join(path, "workout-routes")
self.export_path_exists = os.path.exists(self.export_path)
self.ecg_path_exists = os.path.exists(self.ecg_path)
self.workout_path_exists = os.path.exists(self.workout_path)
def get_filenames_for(self, path: str) -> List[str]:
filenames = os.listdir(path)
filenames = [f for f in filenames if os.path.isfile(
os.path.join(path, f)) and not f.startswith(".")]
return filenames
def get_identifier_name(self, id):
if "Identifier" in id:
return id.split("Identifier")[1]
else:
return id.split("Type")[1]
class DataLoader(DataManager):
def __init__(self, path: str) -> None:
super().__init__(path)
def supports(self, data: str):
if data == "ecg":
return os.path.exists(self.ecg_path)
if data == "routes":
return os.path.exists(self.workout_path)
if data == "health":
return os.path.exists(self.export_path)
def load_health_data(self) -> Dict[str, pd.DataFrame]:
if self.supports("health"):
tree = ET.parse(self.export_path)
root = tree.getroot()
records = root.findall('Record')
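# each <Record> element carries 'type', 'value' and 'creationDate' attributes;
# records are bucketed into one list (later one DataFrame) per type below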
data = {}
# Init all arrays
for record in records:
data[record.get("type")] = []
logging.info(f"[Data Loader] Loading {len(data)} health dataframes")
for record in records:
key = record.get("type")
key = self.get_identifier_name(key)
value = record.get("value")
time = record.get("creationDate")
data[key].append((time, value))
for key in data.keys():
df = pd.DataFrame(data[key], columns=["time", "value"])
df["time"] = pd.to_datetime(df["time"])
data[key] = df
return data
else:
logging.error("The health data path (Export.xml) doesnt exist")
return {}
# ----------
# ECG
# ----------
def load_ecg(self, ecg_name: str) -> ECG:
with open(os.path.join(self.ecg_path, ecg_name), "r", encoding="utf-8") as f:
return ECG(f.read(), ecg_name)
def load_ecgs(self) -> List[ECG]:
if self.supports("ecg"):
filenames = self.get_filenames_for(self.ecg_path)
logging.info(f"[Data Loader]\t\tLoading {len(filenames)} ECGs")
return [self.load_ecg(filename) for filename in filenames]
else:
logging.error("The ecg path doesnt exist")
return []
# ----------
# Workout Routes
# ----------
def load_route(self, route_name: str) -> WorkoutRoute:
with open(os.path.join(self.workout_path, route_name), "rb") as f:
route = ET.parse(f, parser=ET.XMLParser(
encoding="utf-8")).getroot()
return WorkoutRoute(route, route_name)
def load_routes(self, parallel=True) -> List[WorkoutRoute]:
if not self.supports("routes"):
logging.error("The workout routes path doesnt exist")
else:
if parallel:
return self.load_routes_par()
else:
return self.load_routes_seq()
def load_routes_seq(self) -> List[WorkoutRoute]:
filenames = self.get_filenames_for(self.workout_path)
print(f"[Data Loader]\t\tLoading {len(filenames)} workout routes...")
return [self.load_route(filename) for filename in filenames]
def load_routes_par(self) -> List[WorkoutRoute]:
filenames = self.get_filenames_for(self.workout_path)
pool = Pool(multiprocessing.cpu_count())
routes = pool.map(self.load_route, filenames)
pool.close()
pool.join()
print(f"[Data Loader]\t\tLoading {len(filenames)} workout routes in parallel...")
return routes
def count_routes(self):
return len(self.get_filenames_for(self.workout_path))
class CacheHandler(DataManager):
def __init__(self, path: str) -> None:
super().__init__(path)
self.cached_routes_path = os.path.join(self.workout_path, "cached_routes")
self.cached_export_data_path = os.path.join(path, "cached_export_data")
self.cached_route_animations_path = os.path.join(self.workout_path, "cached_animations")
self.__check_folders()
def __check_folders(self):
if "apple_health_export" in self.workout_path: # Only create folders if path is an apple export path
if not os.path.exists(self.cached_routes_path):
os.makedirs(self.cached_routes_path, exist_ok=True)
if not os.path.exists(self.cached_export_data_path):
os.makedirs(self.cached_export_data_path, exist_ok=True)
if not os.path.exists(self.cached_route_animations_path):
os.makedirs(self.cached_route_animations_path, exist_ok=True)
def delete_all_caches(self):
self.delete_all_health_data_caches()
self.delete_all_route_caches()
def delete_all_health_data_caches(self):
for file in self.get_filenames_for(self.cached_export_data_path):
self.delete_health_data_cache(file)
def delete_all_route_caches(self):
for file in self.get_filenames_for(self.cached_routes_path):
self.delete_route_cache(file)
def delete_all_animation_caches(self):
for file in self.get_filenames_for(self.cached_route_animations_path):
self.delete_animation_cache(file)
# Delete individual caches
def delete_health_data_cache(self, name: str):
logging.info("[Cache Handler] DELETE " + name)
os.remove(os.path.join(self.cached_export_data_path, name))
def delete_route_cache(self, name: str):
logging.info("[Cache Handler] DELETE " + name)
os.remove(os.path.join(self.cached_routes_path, name))
def delete_animation_cache(self, name: str):
logging.info("[Cache Handler] DELETE " + name)
os.remove(os.path.join(self.cached_route_animations_path, name))
def isCached(self, data: str) -> bool:
if data == "routes":
return len(self.get_filenames_for(self.cached_routes_path)) > 1
elif data == "health":
return len(self.get_filenames_for(self.cached_export_data_path)) > 1
elif data == "animation":
return
# ----------
# Cache routes
# ----------
def __cache_route(self, route: WorkoutRoute):
route.route.to_csv(os.path.join(
self.cached_routes_path, route.name), index=False)
def cache_routes(self, routes: List[WorkoutRoute]):
self.__check_folders()
logging.info(f"[Cache Handler] Caching {len(routes)} routes" )
for route in routes:
self.__cache_route(route)
def __load_route(self, filename) -> WorkoutRoute:
return WorkoutRoute(pd.read_csv(os.path.join(self.cached_routes_path, filename)), filename)
def load_routes(self) -> List[WorkoutRoute]:
if self.is_routes_cached():
routes = []
filenames = self.get_filenames_for(self.cached_routes_path)
print(f"[Cache Handler]\t\tLoadig {len(filenames)} cached routes...")
for filename in filenames:
routes.append(self.__load_route(filename))
return routes
else:
print("[ERROR]\t\tThe routes havent been cached yet.")
return []
def is_routes_cached(self):
self.__check_folders()
return self.isCached("routes")
# ---------
# Cache route animations
# ---------
def cache_route_animation(self, html: str, name: str):
self.__check_folders()
print(f"[Cache Handler]\t\tCaching animation: {name}")
with open(os.path.join(self.cached_route_animations_path, name), "w") as f:
f.write(html)
def load_route_animation(self, name: str) -> str:
if self.is_animation_cached(name):
print(f"[Cache Handler]\t\tLoading cached animation: {name}")
with open(os.path.join(self.cached_route_animations_path, name), "r") as f:
return f.read()
else:
print(f"[ERROR]\t\tThe animation {name} hasnt been cached yet.")
def is_animation_cached(self, name: str):
return os.path.exists(os.path.join(self.cached_route_animations_path, name))
# ----------
# Cached export data
# ----------
def cache_health_data(self, data: dict):
self.__check_folders()
print(f"[Cache Handler]\t\tCaching {len(data)} health dataframes...")
for key in data:
df = data[key]
df.to_csv(os.path.join(self.cached_export_data_path,
f"{key}.csv"), index=False)
def load_health_data_by_key(self, key: str) -> pd.DataFrame:
return pd.read_csv(os.path.join(self.cached_export_data_path, key))
def load_health_data(self) -> Dict[str, pd.DataFrame]:
if self.is_health_data_cached():
data = {}
filenames = self.get_filenames_for(self.cached_export_data_path)
print(f"[Cache Handler]\t\tLoading {len(filenames)} cached health dataframes...")
for filename in filenames:
id = self.get_identifier_name(filename.split(".csv")[0])
data[id] = self.load_health_data_by_key(filename)
return data
else:
logging.error("Health data hasnt been cached yet")
return {}
def is_health_data_cached(self):
return os.path.exists(self.cached_export_data_path)
class HealthDataHandler(DataManager):
def __init__(self, health_data: dict):
self.identifiers = self.load_identifiers()
self.health_data = health_data
def load_identifiers(self):
with open(os.path.join(os.path.dirname(__file__), "hk_identifiers.json"), "r") as f:
return json.load(f)
def get_event_identfiers(self):
events = []
for id in self.identifiers:
events.extend(self.identifiers[id]["event"])
return events
def get_quantity_identfiers(self, types: List[str] = ["sum", "mean"]):
quantities = []
for id in self.identifiers:
quantity = self.identifiers[id]["quantity"]
if "sum" in types:
quantities.extend(quantity["sum"])
if "mean" in types:
quantities.extend(quantity["mean"])
return quantities
def get_data_for(self, identifiers: List[str]):
data = {}
for id in identifiers:
if id in self.health_data:
data[id] = self.health_data[id]
#else:
#print("[WARNING]\t\tThe identifier {id} is not in health data.")
return data
def is_identifier(self, identifier: str, aggregate: str):
for id in self.identifiers:
if identifier in self.identifiers[id]["quantity"][aggregate]:
return True
return False
def group(self, identifiers, by = lambda x: x.split(" ")[0]):
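# by default rows are grouped by the date part of the 'time' index (this assumes
# string timestamps like 'YYYY-MM-DD hh:mm:ss'); 'sum' identifiers are summed per
# group while 'mean' identifiers are averaged, then joined column-wise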
data = self.get_data_for(identifiers)
grouped_dfs = []
for d in data:
if self.is_identifier(d, "sum"):
x = data[d].set_index("time").groupby(by).sum()
if self.is_identifier(d, "mean"):
x = data[d].set_index("time").groupby(by).mean()
x.columns = [d]
grouped_dfs.append(x)
return pd.concat(grouped_dfs, axis=1)
def drop_outliers(self, data, method, threshold):
if isinstance(data, pd.DataFrame):
cleaned = pd.DataFrame()
#!/usr/bin/env python
import json
import os
import sys
import numpy as np
import pandas as pd
import sklearn.metrics
import report_resources
import matplotlib.pyplot as plt
import matplotlib
SCORES_COLS = ('id', 'label', 'score')
LINUX_C_FILES = 20630 # in release 3.18
VULNERABLE_FILE_PROPORTION = 0.03
MIN_PRECISION = 0.5
def main(run_ids_file: str):
report_resources.ensure_output_dir_exists()
run_scores = _read_split_scores(run_ids_file)
run_pr_curves = {run: _pr_curve(scores) for run, scores in run_scores.items()}
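# expected count of vulnerable C files under the assumed base rate: 0.03 * 20630 ~= 619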
vulnerable_c_files = VULNERABLE_FILE_PROPORTION * LINUX_C_FILES
all_metrics = _aggregate(pd.concat([_add_alert_volume(pr_curve, vulnerable_c_files) for pr_curve in run_pr_curves.values()]))
_write_plot(all_metrics, output_file=os.path.join(report_resources.OUTPUT_DIR, 'lmc-alert-volume.pdf'))
def _write_plot(metrics, output_file: str):
fig = plt.figure()
gs = matplotlib.gridspec.GridSpec(2, 1, height_ratios=[1, 1])
ax0 = plt.subplot(gs[0])
ax0.plot(metrics.index, metrics['Precision'], label='Precision')
ax0.plot(metrics.index, metrics['Recall'], label='Recall')
ax0.set_ylabel('Score')
ax0.legend(loc='best')
ax1 = plt.subplot(gs[1], sharex=ax0)
tp_color = plt.get_cmap(report_resources.PALETTE)(2)
fp_color = plt.get_cmap(report_resources.PALETTE)(3)
ax1.bar(metrics.index, metrics['True positives'],
width=0.001, color=tp_color, edgecolor=tp_color,
label='True positives')
ax1.bar(metrics.index, metrics['False positives'], bottom=metrics['True positives'],
width=0.001, color=fp_color, edgecolor=fp_color,
label='False positives')
ax1.set_ylabel('Alerted files')
ax1.set_xlabel('Threshold')
ax1.legend(loc='best')
plt.subplots_adjust(hspace=0.0)
fig.savefig(os.path.join(output_file), bbox_inches='tight')
def _pr_curve(scores):
y_true = scores['label']
y_scores = scores['score']
precision, recall, threshold = sklearn.metrics.precision_recall_curve(y_true, y_scores)
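# precision_recall_curve returns one fewer threshold than precision/recall points,
# so pad the thresholds with 1.0 to make all three columns the same length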
threshold = list(threshold) + [1.0]
return pd.DataFrame(dict(Threshold=threshold,
Precision=precision,
Recall=recall))
def _add_alert_volume(pr_curve, vulnerable_c_files: int):
def volume_tp(row):
if row['Precision'] < MIN_PRECISION:
return None
else:
return row['Recall'] * vulnerable_c_files
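# since precision P = TP / (TP + FP), the false-positive volume follows as FP = TP * (1/P - 1)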
def volume_fp(row):
if row['Precision'] < MIN_PRECISION:
return None
else:
return row['True positives'] * (1/row['Precision'] - 1)
result = pr_curve
result['True positives'] = result.apply(volume_tp, axis=1)
result['False positives'] = result.apply(volume_fp, axis=1)
return result
def _aggregate(metrics):
return metrics.groupby('Threshold').agg({'Precision': np.mean,
'Recall': np.mean,
'True positives': np.mean,
'False positives': np.mean})
def _read_split_scores(run_ids_file: str) -> dict:
split_dir = os.path.dirname(run_ids_file)
with open(run_ids_file) as f:
runs = [line.rstrip() for line in f.readlines()]
return {run: _read_run_scores(split_dir, run) for run in runs}
def _read_run_scores(split_dir: str, run: str):
with open(os.path.join(split_dir, "results_{}.json".format(run))) as f:
examples = json.load(f)['test']['examples']
return pd.DataFrame(examples, columns=SCORES_COLS)
from itertools import groupby
from collections import Counter
import numpy as np
import pandas as pd
from sklearn import metrics, cross_validation
from sklearn import linear_model
from sklearn import naive_bayes
import IPython
import viz
from viz.geom import hist
from tsa import stdout, logging
from tsa.lib import tabular
from tsa.lib.timer import Timer
from tsa.models import Source, Document, create_session
from tsa.science import numpy_ext as npx
from tsa.science import features, models
from tsa.science.corpora import MulticlassCorpus
from tsa.science.plot import plt, figure_path, distinct_styles
from tsa.science.summarization import metrics_dict
logger = logging.getLogger(__name__)
def source_corpus(source_name):
documents = Source.from_name(source_name)
corpus = MulticlassCorpus(documents)
corpus.apply_labelfunc(lambda doc: doc.label)
# assume the corpus is suitably balanced
corpus.extract_features(lambda doc: doc.document,
features.ngrams, ngram_max=2, min_df=2, max_df=1.0)
# corpus.extract_features(documents, features.liwc)
# corpus.extract_features(documents, features.afinn)
# corpus.extract_features(documents, features.anew)
return corpus
def sb5b_source_corpus():
# mostly like source_corpus except it selects just For/Against labels
documents = Source.from_name('sb5b')
corpus = MulticlassCorpus(documents)
corpus.apply_labelfunc(lambda doc: doc.label)
# polar_indices = (corpus.y == corpus.class_lookup['For']) | (corpus.y == corpus.class_lookup['Against'])
polar_classes = [corpus.class_lookup[label] for label in ['For', 'Against']]
polar_indices = np.in1d(corpus.y, polar_classes)
corpus = corpus.subset(polar_indices)
# ngram_max=2, min_df=0.001, max_df=0.95
corpus.extract_features(lambda doc: doc.document,
features.ngrams, ngram_max=2, min_df=2, max_df=1.0)
return corpus
def sample_corpus():
# return the corpus
session = create_session()
sb5b_documents = session.query(Document).join(Source).\
filter(Source.name == 'sb5b').all()
# filter(Document.label.in_(['For', 'Against'])).\
sample_documents = session.query(Document).join(Source).\
filter(Source.name == 'twitter-sample').all()
corpus = MulticlassCorpus(sb5b_documents + sample_documents)
corpus.apply_labelfunc(lambda doc: doc.source.name)
corpus.extract_features(lambda doc: doc.document,
features.ngrams, ngram_max=2, min_df=2, max_df=1.0)
return corpus
def debate_corpus():
session = create_session()
documents = session.query(Document).join(Source).\
filter(Source.name == 'debate08').\
filter(Document.label.in_(['Positive', 'Negative'])).\
order_by(Document.published).all()
corpus = MulticlassCorpus(documents)
corpus.apply_labelfunc(lambda doc: doc.label)
corpus.extract_features(lambda doc: doc.document,
features.ngrams, ngram_max=2, min_df=2, max_df=1.0)
return corpus
def iter_corpora():
# yield (corpus, title) tuples
yield source_corpus('rt-polarity'), 'Rotten Tomatoes Polarity'
yield sample_corpus(), 'In-sample/Out-of-sample'
yield sb5b_source_corpus(), 'SB-5 For/Against'
yield source_corpus('convote'), 'Congressional vote'
yield debate_corpus(), 'Debate08'
yield source_corpus('stanford-politeness-wikipedia'), 'Politeness on Wikipedia'
yield source_corpus('stanford-politeness-stackexchange'), 'Politeness on StackExchange'
def grid_plots(analysis_options):
for corpus, title in iter_corpora():
print(title)
grid_plot(corpus)
plt.title(title)
plt.savefig(figure_path('model-grid-%s' % title))
plt.cla()
def representation(analysis_options):
corpus = sb5b_source_corpus()
print('Tweets per person, by label')
for class_name in ['For', 'Against']:
print('Class =', class_name)
indices = corpus.y == corpus.class_lookup[class_name]
keyfunc = lambda doc: doc.details['Author'].split()[0].lower()
data = sorted(corpus.data[indices], key=keyfunc)
author_groups = groupby(data, keyfunc)
# map values . sum
lengths = np.array([len(list(group_iter)) for author, group_iter in author_groups])
# print 'Hist for authors with more than one tweet:'
# hist(lengths[lengths > 1])
print('Average # of documents per user', lengths.mean())
inlier_max = np.percentile(lengths, 99)
inliers = lengths[lengths < inlier_max]
print(' ditto excluding the {:d} values at/above the 99th percentile: {:.1f}'.format(
lengths.size - inliers.size, inliers.mean()))
IPython.embed()
def corpus_sandbox(analysis_options):
print('Exploring SB-5 corpus')
session = create_session()
sb5b_documents = session.query(Document).join(Source).\
filter(Source.name == 'sb5b').all()
print('Found %d documents' % len(sb5b_documents))
rows = [dict(
label=document.label,
inferred=bool(document.details.get('Inferred')),
source=document.details.get('Source', 'NA')) for document in sb5b_documents]
df = pd.DataFrame.from_records(rows)
# df_agg = df.groupby(['label', 'inferred'])
# df.pivot_table(values=['label'], rows=['inferred'], aggfunc=[len])
df.pivot_table(rows=['label', 'inferred'], aggfunc=[len])
df.pivot_table(rows=['label', 'source'], aggfunc=[len])
df.pivot_table(rows=['source'], aggfunc=[len])
# df_agg.plot(x='train', y='accuracy')
for document in sb5b_documents:
# 'weareohio' in document.document.lower(), .document
print(document.details.get('Source'), document.label)
IPython.embed()
def sb5_self_train(analysis_options):
incestuous_model = linear_model.LogisticRegression(fit_intercept=False, penalty=penalty)
incestuous_model.fit(unlabeled_corpus.X, unlabeled_pred_y)
# apply model to data we know for sure
incestuous_pred_y = incestuous_model.predict(labeled_corpus.X)
# evaluate predictions
# print metrics_summary(labeled_corpus.y, incestuous_pred_y)
print('accuracy on training set after extrapolation', metrics.accuracy_score(labeled_corpus.y, incestuous_pred_y))
# we want to compare the confidence of the bootstrap on the things it gets wrong vs. a straight logistic regression
bootstrap_model = models.Bootstrap(linear_model.LogisticRegression,
fit_intercept=False, penalty=penalty, C=1.0)
bootstrap_model.fit(labeled_corpus.X, labeled_corpus.y, n_iter=100, proportion=1.0)
bootstrap_model.predict(labeled_corpus.X)
bootstrap_mean_coef = np.mean(bootstrap_model.coefs_, axis=0)
bootstrap_var_coef = np.var(bootstrap_model.coefs_, axis=0)
print('bootstrap_model')
hist(bootstrap_mean_coef)
print(' {:.2%} coefs == 0'.format((bootstrap_mean_coef == 0).mean()))
def grid_hists(analysis_options):
for corpus, corpus_name in iter_corpora():
grid_hist(corpus)
plt.title(corpus_name)
plt.gcf().set_size_inches(8, 5)
plt.savefig(figure_path('grid-hist-%s.pdf' % corpus_name))
plt.cla()
def grid_hist(corpus):
corpus.X = corpus.X.tocsr()
logger.info('X.shape = %s, y.shape = %s', corpus.X.shape, corpus.y.shape)
# model = linear_model.RandomizedLogisticRegression(penalty='l2')
# http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.RandomizedLogisticRegression.html
bootstrap_coefs = models.Bootstrap(corpus.X, corpus.y, n_iter=100, proportion=1.0, penalty='l1', C=1.0)
coefs_means = np.mean(bootstrap_coefs, axis=0)
coefs_variances = np.var(bootstrap_coefs, axis=0)
# bootstrap_coefs = bootstrap_model(X, y, n_iter=n_iter, proportion=0.5)
# fit_intercept=False, penalty=penalty, C=C
logger.info('coefs_means.shape = %s, coefs_variances.shape = %s', coefs_means.shape, coefs_variances.shape)
nonzero = coefs_means != 0
substantial = np.abs(coefs_means) > 0.1
print('nonzero coef density = {:.2%}'.format(nonzero.mean()))
print('> 0.1 coef density = {:.2%}'.format(substantial.mean()))
means = coefs_means[nonzero]
plt.cla()
plt.hist(means, bins=25, normed=True)
plt.xlabel('Frequency of (L1 Logistic Regression) bootstrapped coefficients')
plt.xlim(-2, 2)
IPython.embed()
def many_models(analysis_options):
# recreate 10fold-multiple-models.pdf
filepath = 'data/incremental-training-multiple-models-10folds.tsv'
table = pd.io.parsers.read_table(filepath)
from datetime import datetime
from typing import Dict, Optional, List
import numpy as np
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
from src.constants import remove_from_plots
from src.data.prices import get_prices, round_price
from src.plot.asset_history import plot_asset_history
import logging
logger = logging.getLogger(__name__)
QUOTE_COINS = ['USDT', 'BUSD', 'RUB']
class OrdersAnalyser:
def __init__(self, client_helper, orders):
self._orders = self.prepare_dataframe(orders)
self.client_helper = client_helper
self.width = 1200
self.height = 400
@property
def orders(self):
return self._orders
@staticmethod
def prepare_dataframe(orders: pd.DataFrame):
orders = orders.copy()
numerical_columns = ['price', 'origQty', 'executedQty', 'cummulativeQuoteQty']
for col in numerical_columns:
orders[col] = orders[col].astype(float)
# Use only filled orders
orders = orders[orders['status'] == 'FILLED']
# Treat BUSD-quoted payments as USDT to simplify the analysis
orders.loc[orders['quote_coin'] == 'BUSD', 'quote_coin'] = 'USDT'
# Calculate executedCorrectedQty, needed for calculation mean buying price
updated_orders = []
for _, pair_orders in orders.groupby(['base_coin']):
updated_orders.append(calculate_corrected_balance_for_pair(pair_orders))
orders = pd.concat(updated_orders)
assert np.all(np.isin(orders['quote_coin'].unique(), QUOTE_COINS)), f'Only {QUOTE_COINS} quote coins allowed'
return orders
def calculate_mean_price(self):
orders = self._orders
average_prices = []
for base_coin, pair_orders in orders.groupby(['base_coin']):
quote_coin = pair_orders['quote_coin'].unique()
if len(quote_coin) > 1:
msg = f'can calculate average purchase price only with single quote_coin, ' \
f'but for {base_coin} there is several: {quote_coin}'
raise ValueError(msg)
quote_coin = quote_coin[0]
mask_buy = pair_orders['side'] == 'BUY'
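# weighted average of BUY prices, weighting each purchase by its sale-corrected quantity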
average_price = (pair_orders.loc[mask_buy, 'price'] * pair_orders.loc[
mask_buy, 'executedCorrectedQty']).sum() / pair_orders.loc[mask_buy, 'executedCorrectedQty'].sum()
average_prices.append(
{'base_coin': base_coin, 'quote_coin': quote_coin, 'average_price': average_price,
'n_purchases': mask_buy.sum(), 'n_sales': (~mask_buy).sum()})
average_prices = pd.DataFrame(average_prices)
return average_prices
def plot_transactions(self, base_coin: str = 'BTC', price_history: pd.DataFrame = None,
add_mean_price: bool = True, add_last_price: bool = True):
plot_df = self.orders[self.orders['base_coin'] == base_coin]
assert np.all(
np.isin(plot_df['quote_coin'].unique(), QUOTE_COINS)), f'Only {QUOTE_COINS} quote coins are acceptable'
fig = px.scatter(plot_df, x='date', y="price", size='executedQty', color='side',
title=f'{base_coin} transactions', size_max=10, hover_data=['cummulativeQuoteQty'])
if price_history is not None:
fig.add_trace(
go.Scatter(x=price_history['date'], y=price_history['Close'], mode='lines', name='history',
marker_color='grey'))
if add_mean_price:
mean_price = self.calculate_mean_price()
mean_price = mean_price.loc[mean_price['base_coin'] == base_coin, 'average_price'].item()
fig.add_hline(y=mean_price, line_dash="dot",
annotation_text=f'average purchase price = {round_price(mean_price)} usdt',
annotation_position="bottom right")
if add_last_price:
last_price = price_history.iloc[-1]
fig.add_annotation(
x=last_price['date'],
y=last_price['Close'],
text=f"Last price = {round_price(last_price['Close'])} usdt",
arrowhead=2,
)
fig.update_xaxes(
rangeslider_visible=True,
rangeselector=dict(
buttons=list([
dict(count=1, label="1m", step="month", stepmode="backward"),
dict(count=6, label="6m", step="month", stepmode="backward"),
dict(count=1, label="1y", step="year", stepmode="backward"),
dict(step="all"),
])
),
type='date'
)
fig.update_layout(yaxis_title='USDT', width=self.width, height=self.height, xaxis_fixedrange=False,
yaxis_fixedrange=False)
return fig
def plot_transactions_many(self, coins):
fig_dict = {}
for base_coin in coins: # ['LTC', 'ETH']:
price_history = self.client_helper.get_historical_prices(base_coin + 'USDT', start_date='1 Jan, 2021')
fig = self.plot_transactions(base_coin, price_history)
fig_dict[base_coin] = fig
return fig_dict
def prepare_coins_asset_history(self) -> Dict[str, pd.DataFrame]:
coins_asset_history = {}
prices = get_prices(self.client_helper)
for base_coin, pair_orders in self.orders.groupby('base_coin'):
if base_coin in remove_from_plots:
continue
price_history = prices[['date', base_coin]]
price_history.columns = ['date', 'price']
asset_history = calculate_asset_worth_history(pair_orders, price_history)
coins_asset_history[base_coin] = asset_history
return coins_asset_history
def plot_coins_asset_history(self, coins_asset_history: Dict[str, pd.DataFrame], items: Optional[List] = None):
fig_dict = {}
if items is None:
items = coins_asset_history.keys()
for item in items:
plot_df = coins_asset_history[item]
fig = plot_asset_history(plot_df, title=f'{item} asset value history', width=self.width, height=self.height)
fig_dict[item] = fig
return fig_dict
def plot_full_asset_history(self, coins_asset_history: Dict[str, pd.DataFrame], items: Optional[List] = None):
cash_df = []
coin_df = []
if items is None:
items = coins_asset_history.keys()
for item in items:
plot_df = coins_asset_history[item]
cash_df.append(plot_df[['date', 'usdt_cash_in_cum']].set_index('date'))
coin_df.append(plot_df[['date', 'coin_cum_usdt_value']].set_index('date'))
cash_df = pd.concat(cash_df, axis=1).ffill().sum(axis=1)
cash_df.name = 'usdt_cash_in_cum'
coin_df = pd.concat(coin_df, axis=1).ffill().sum(axis=1)
coin_df.name = 'coin_cum_usdt_value'
full_asset_history = pd.concat([cash_df, coin_df], axis=1).reset_index().ffill()
fig = plot_asset_history(full_asset_history, title='Asset usdt value history', width=self.width,
height=self.height)
return fig
def asset_usdt_composition(self, prices: pd.DataFrame):
asset_df = self._orders[self._orders['side'] == 'BUY'].groupby('base_coin')[
'executedCorrectedQty'].sum().reset_index()
def convert_to_usd_price(raw, ):
try:
return raw['executedCorrectedQty'] * prices.loc[prices['base_coin'] == raw['base_coin'], 'price'].item()
except ValueError:
logger.info(f'{raw["base_coin"]} coin is not listed in binance, price is not available')
return None
asset_df['usdt_value'] = asset_df.apply(convert_to_usd_price, axis=1)
asset_df = asset_df[asset_df['base_coin'] != 'USDT']
return asset_df
def plot_asset_usdt_composition(self, prices: pd.DataFrame):
"""
Plot the current asset composition in usdt.
Args:
prices: Table with columns ['price', 'base_coin', 'quote_coin'].
Returns:
"""
plot_df = self.asset_usdt_composition(prices)
fig = px.pie(plot_df, values='usdt_value', names='base_coin')
fig.update_layout(
title=f'Asset composition in usdt. Total value = {plot_df["usdt_value"].sum().round(1)} usdt.',
autosize=False, width=500, height=500)
return fig
def generate_asset_table(order_analyser: OrdersAnalyser, current_prices: pd.DataFrame) -> pd.DataFrame:
asset_df = order_analyser.calculate_mean_price()
asset_df = pd.merge(asset_df, current_prices.rename(columns={'price': 'current_price'}),
on=['base_coin', 'quote_coin'], how='left')
asset_df['price_change_usd'] = asset_df['current_price'] - asset_df['average_price']
asset_df['price_change_percent'] = asset_df['price_change_usd'] / asset_df['average_price'] * 100
asset_df = pd.merge(asset_df, order_analyser.asset_usdt_composition(current_prices), how='outer', on='base_coin')
asset_df = asset_df.rename(columns={'executedCorrectedQty': 'coins_count'})
asset_df['usdt_share_percent'] = asset_df['usdt_value'] / asset_df['usdt_value'].sum() * 100
return asset_df.sort_values('usdt_share_percent', ascending=False).reset_index(drop=True)
def calculate_corrected_balance_for_pair(pair_orders: pd.DataFrame):
assert len(pair_orders[
'base_coin'].unique()) == 1, f'DataFrame should contain one base coin, but there are several: {pair_orders["base_coin"].unique()}'
pair_orders['executedCorrectedQty'] = pair_orders['executedQty'] # coin quantity corrected for tokens sold later
pair_orders['usdtValue'] = np.nan # usdt value of bought tokens
pair_orders['usdtValueCorrection'] = np.nan # delta correction of the usdt value of bought tokens
pair_orders.loc[pair_orders['side'] == 'SELL', ['executedCorrectedQty', 'usdtValue']] = None
pair_orders['usdtQtyWeight'] = np.nan # weights of buy orders in usdt terms
pair_orders = pair_orders.reset_index(drop=True)
cash_usdt_amount = 0
for i in range(len(pair_orders)):
mask_slice = pair_orders.index <= i
mask_buy = pair_orders['side'] == 'BUY'
mask_slice_buy = mask_slice & mask_buy
if pair_orders.loc[mask_slice, 'side'].iloc[-1] == 'SELL':
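# a SELL realizes value from earlier BUYs: each prior BUY is weighted by its current
# usdt value, and its corrected quantity is shaved by the share of the sale attributed to it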
pair_orders.loc[mask_slice_buy, 'usdtValue'] = pair_orders.loc[mask_slice_buy, 'cummulativeQuoteQty'] * \
pair_orders.loc[mask_slice, 'price'].iloc[-1] / \
pair_orders.loc[mask_slice_buy, 'price']
pair_orders.loc[mask_slice_buy, 'usdtQtyWeight'] = pair_orders.loc[mask_slice_buy, 'usdtValue'] / \
pair_orders.loc[mask_slice_buy, 'usdtValue'].sum()
sell_amount = pair_orders.loc[mask_slice, 'cummulativeQuoteQty'].iloc[-1]
pair_orders.loc[mask_slice_buy, 'usdtValueCorrection'] = sell_amount * pair_orders.loc[
mask_slice_buy, 'usdtQtyWeight']
cash_usdt_amount += sell_amount
# Reduce the corrected coin quantity by the share of its usdtValue consumed by the sell order
if pair_orders.loc[mask_slice_buy, 'executedCorrectedQty'].sum() == 0:
raise ValueError(
"Bad balance error, looks like not all orders are listed. There are no coins available for selling")
# pair_orders.loc[mask_slice_buy, 'executedCorrectedQty'] -= pair_orders.loc[mask_slice_buy, 'usdtValueCorrection']/pair_orders.loc[mask_slice_buy, 'price']
pair_orders.loc[mask_slice_buy, 'executedCorrectedQty'] *= 1 - pair_orders.loc[
mask_slice_buy, 'usdtValueCorrection'] / pair_orders.loc[mask_slice_buy, 'usdtValue']
pair_orders = pair_orders.drop(['usdtValue', 'usdtValueCorrection', 'usdtQtyWeight'], axis=1)
return pair_orders
def calculate_asset_worth_history(pair_orders, price_history):
price_history = price_history.copy()
pair_orders = pair_orders.apply(calc_transfers, axis=1)
pair_orders['usdt_cash_in_cum'] = pair_orders['usdt_cash_transfer'].cumsum()
pair_orders['coin_cum'] = pair_orders['coin_transfer'].cumsum()
price_history.date = price_history.date.dt.to_period('D')
asset_history = pair_orders[['date', 'usdt_cash_in_cum', 'coin_cum']]
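# append a NaN row stamped with today's date so the daily resample extends the
# series up to the present before forward-filling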
asset_history_last_row = asset_history.iloc[0].copy()
asset_history_last_row[:] = np.nan
asset_history_last_row['date'] = pd.Timestamp(datetime.today().strftime('%Y-%m-%d'))
asset_history = pd.concat([asset_history, asset_history_last_row.to_frame().T])
asset_history = asset_history.set_index('date').resample('D', label='right',
closed='right').last().ffill().bfill().reset_index()
asset_history.date = asset_history.date.dt.to_period('D')
asset_history = pd.merge(asset_history, price_history, on='date', how='left')
asset_history['coin_cum_usdt_value'] = asset_history['coin_cum'] * asset_history['price']
asset_history.date = asset_history.date.dt.to_timestamp()
asset_history.date -= pd.Timedelta('1 day')
# Copyright 2017-2021 TensorHub, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
import functools
import importlib
import inspect
import logging
import os
import sys
import threading
import warnings
import six
with warnings.catch_warnings():
warnings.simplefilter("ignore", Warning)
warnings.filterwarnings("ignore", message="numpy.dtype size changed")
warnings.filterwarnings("ignore", message="numpy.ufunc size changed")
try:
import pandas as pd
except ImportError:
raise RuntimeError(
"guild.ipy requires pandas - install it first before using "
"this module (see https://pandas.pydata.org/pandas-docs/stable/"
"install.html for help)"
)
# ipy makes use of the full Guild API and so, like main_bootstrap,
# requires the external modules.
from guild import main_bootstrap
main_bootstrap.ensure_external_path()
from guild import batch_util
from guild import click_util
from guild import config
from guild import exit_code
from guild import index as indexlib
from guild import model_proxy
from guild import op_util
from guild import opref as opreflib
from guild import run as runlib
from guild import run_util
from guild import summary
from guild import util
from guild import var
from guild.commands import runs_impl
log = logging.getLogger("guild")
RUN_DETAIL = [
"id",
"operation",
"status",
"started",
"stopped",
"label",
"run_dir",
]
DEFAULT_MAX_TRIALS = 20
class RunException(Exception):
def __init__(self, run, from_exc):
super(RunException, self).__init__(run, from_exc)
self.run = run
self.from_exc = from_exc
class RunError(RunException):
pass
class RunTerminated(RunException):
pass
class OutputTee(object):
def __init__(self, fs, lock):
self._fs = fs
self._lock = lock
def write(self, s):
with self._lock:
for f in self._fs:
f.write(s)
def flush(self):
with self._lock:
for f in self._fs:
f.flush()
class RunOutput(object):
def __init__(self, run, summary=None):
self.run = run
self.summary = summary
self._f = None
self._f_lock = None
self._stdout = None
self._stderr = None
def __enter__(self):
self._f = open(self.run.guild_path("output"), "w")
self._f_lock = threading.Lock()
self._stdout = sys.stdout
sys.stdout = OutputTee(self._tee_fs(sys.stdout), self._f_lock)
self._stderr = sys.stderr
sys.stderr = OutputTee(self._tee_fs(sys.stderr), self._f_lock)
def _tee_fs(self, iof):
fs = [iof, self._f]
if self.summary:
fs.append(self.summary)
return fs
def __exit__(self, *exc):
with self._f_lock:
self._f.close()
if self.summary:
self.summary.close()
sys.stdout = self._stdout
sys.stderr = self._stderr
@functools.total_ordering
class RunIndex(object):
def __init__(self, run, fmt):
self.value = run
self.run = run # backward compatible alias
self.fmt = fmt
def __str__(self):
return self.value.short_id
def __eq__(self, x):
return self._x_id(x) == self.value.id
def __lt__(self, x):
return self.value.id < self._x_id(x)
@staticmethod
def _x_id(x):
if isinstance(x, six.string_types):
return x
elif isinstance(x, RunIndex):
return x.value.id
return None
class RunsSeries(pd.Series):
@property
def _constructor(self):
return RunsSeries
@property
def _constructor_expanddim(self):
return RunsDataFrame
def delete(self, **kw):
self.to_frame().delete(**kw)
def info(self, **kw):
_print_run_info(self[0], **kw)
def scalars(self):
return _runs_scalars([self[0].value])
def scalars_detail(self):
return _runs_scalars_detail([self[0].value])
def flags(self):
return _runs_flags([self[0].value])
def compare(self):
return _runs_compare([self[0]])
class RunsDataFrame(pd.DataFrame):
@property
def _constructor(self):
return RunsDataFrame
@property
def _constructor_sliced(self):
return RunsSeries
@property
def _constructor_expanddim(self):
return RunsDataFrame
def delete(self, permanent=False):
runs = self._runs()
var.delete_runs(runs, permanent)
return [run.id for run in runs]
def _runs(self):
return [row[1][0].value for row in self.iterrows()]
def _items(self):
return [row[1][0] for row in self.iterrows()]
# pylint: disable=arguments-differ
def info(self, *args, **kw):
self.loc[0].info(*args, **kw)
def scalars(self):
return _runs_scalars(self._runs())
def scalars_detail(self):
return _runs_scalars_detail(self._runs())
def flags(self):
return _runs_flags(self._runs())
def compare(self):
return _runs_compare(self._items())
class Batch(object):
def __init__(self, gen_trials, op, flag_vals, opts):
self.gen_trials = gen_trials
self.op = op
self.flag_vals = _coerce_range_functions(flag_vals)
self.opts = opts
def __call__(self):
runs = []
results = []
prev_results_cb = lambda: (runs, results)
for trial in self.gen_trials(self.flag_vals, prev_results_cb, **self.opts):
trial_flag_vals, trial_attrs = _split_gen_trial(trial)
print(
"Running %s (%s):"
% (self.op.__name__, op_util.flags_desc(trial_flag_vals))
)
run, result = _run(self.op, trial_flag_vals, self.opts, trial_attrs)
runs.append(run)
results.append(result)
return runs, results
def _split_gen_trial(trial):
if isinstance(trial, tuple):
assert len(trial) == 2, ("generated trial must be a two-tuple or a dict", trial)
return trial
else:
return trial, {}
def _coerce_range_functions(flag_vals):
return {name: _coerce_range_function(val) for name, val in flag_vals.items()}
def _coerce_range_function(val):
if isinstance(val, RangeFunction):
return str(val)
return val
class RangeFunction(object):
def __init__(self, name, *args):
self.name = name
self.args = args
def __str__(self):
args = ":".join([str(arg) for arg in self.args])
return "%s[%s]" % (self.name, args)
def batch_gen_trials(flag_vals, _prev_trials_cb, max_trials=None, **kw):
if kw:
log.warning("ignoring batch config: %s", kw)
max_trials = max_trials or DEFAULT_MAX_TRIALS
trials = 0
for trial_flag_vals in batch_util.expand_flags(flag_vals):
if trials >= max_trials:
return
trials += 1
yield trial_flag_vals
def optimizer_trial_generator(model_op):
main_mod = _optimizer_module(model_op.module_name)
try:
return main_mod.gen_trials
except AttributeError:
raise TypeError(
"%s optimizer module does not implement gen_trials" % main_mod.__name__
)
def _optimizer_module(module_name):
return importlib.import_module(module_name)
def uniform(low, high):
return RangeFunction("uniform", low, high)
def loguniform(low, high):
return RangeFunction("loguniform", low, high)
def run(op, *args, **kw):
if not callable(op):
raise ValueError("op must be callable")
opts = _pop_opts(kw)
flag_vals = _init_flag_vals(op, args, kw)
run = _init_runner(op, flag_vals, opts)
return run()
def _pop_opts(kw):
opts = {}
for name in list(kw):
if name[:1] == "_":
opts[name[1:]] = kw.pop(name)
return opts
def _init_flag_vals(op, args, kw):
# pylint: disable=deprecated-method
op_f = _op_f(op)
op_flag_vals = inspect.getcallargs(op_f, *args, **kw)
_remove_bound_method_self(op_f, op_flag_vals)
return _coerce_slice_vals(op_flag_vals)
def _op_f(op):
assert callable(op), repr(op)
if inspect.isfunction(op) or inspect.ismethod(op):
return op
assert hasattr(op, "__call__")
return op.__call__
def _remove_bound_method_self(op, op_flag_vals):
im_self = util.find_apply(
[
lambda: getattr(op, "__self__", None),
lambda: getattr(op, "im_self", None),
]
)
if im_self:
for key, val in op_flag_vals.items():
if val is im_self:
del op_flag_vals[key]
break
else:
assert False, (op_flag_vals, im_self)
def _coerce_slice_vals(flag_vals):
return {name: _coerce_slice_val(val) for name, val in flag_vals.items()}
def _coerce_slice_val(val):
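# lets callers pass a flag as a Python slice, e.g. lr=slice(0.01, 0.1),
# which is treated as uniform(0.01, 0.1)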
if isinstance(val, slice):
return uniform(val.start, val.stop)
return val
def _init_runner(op, flag_vals, opts):
return util.find_apply(
[_optimize_runner, _batch_runner, _single_runner], op, flag_vals, opts
)
def _optimize_runner(op, flag_vals, opts):
optimizer = opts.get("optimizer")
if not optimizer:
return _maybe_random_runner(op, flag_vals, opts)
opts = _filter_kw(opts, ["optimizer"])
return Batch(_init_gen_trials(optimizer), op, flag_vals, opts)
def _filter_kw(opts, keys):
return {k: v for k, v in opts.items() if k not in keys}
def _maybe_random_runner(op, flag_vals, opts):
assert not opts.get("optimizer"), opts
for val in flag_vals.values():
if isinstance(val, RangeFunction):
return Batch(_init_gen_trials("random"), op, flag_vals, opts)
return None
def _init_gen_trials(optimizer):
try:
model_op, _name = model_proxy.resolve_plugin_model_op(optimizer)
except model_proxy.NotSupported:
raise TypeError("optimizer %r is not supported" % optimizer)
else:
return optimizer_trial_generator(model_op)
def _batch_runner(op, flag_vals, opts):
for val in flag_vals.values():
if isinstance(val, list):
return Batch(batch_gen_trials, op, flag_vals, opts)
return None
def _single_runner(op, flag_vals, opts):
return lambda: _run(op, flag_vals, opts)
def _run(op, flag_vals, opts, extra_attrs=None):
run = _init_run()
_init_run_attrs(run, op, flag_vals, opts, extra_attrs)
summary = _init_output_scalars(run, opts)
try:
with RunOutput(run, summary):
_write_proc_lock(run)
with util.Chdir(run.path):
result = op(**flag_vals)
except KeyboardInterrupt as e:
exit_status = exit_code.KEYBOARD_INTERRUPT
util.raise_from(RunTerminated(run, e), e)
except Exception as e:
exit_status = exit_code.DEFAULT_ERROR
util.raise_from(RunError(run, e), e)
else:
exit_status = 0
return run, result
finally:
_finalize_run(run, exit_status)
def _init_run():
run_id = runlib.mkid()
run_dir = os.path.join(var.runs_dir(), run_id)
run = runlib.Run(run_id, run_dir)
run.init_skel()
return run
def _init_run_attrs(run, op, flag_vals, opts, extra_attrs):
opref = opreflib.OpRef("func", "", "", "", _op_name(op, opts))
run.write_opref(opref)
run.write_attr("started", runlib.timestamp())
run.write_attr("flags", flag_vals)
run.write_attr("label", _run_label(flag_vals, opts))
if extra_attrs:
for name, val in extra_attrs.items():
run.write_attr(name, val)
def _op_name(op, opts):
return opts.get("op_name") or _default_op_name(op)
def _default_op_name(op):
if inspect.isfunction(op) or inspect.ismethod(op):
return op.__name__
return op.__class__.__name__
def _run_label(flag_vals, opts):
return op_util.run_label(_label_template(opts), flag_vals)
def _label_template(opts):
return util.find_apply([_explicit_label, _tagged_label], opts)
def _explicit_label(opts):
return opts.get("label")
def _tagged_label(opts):
try:
tag = opts["tag"]
except KeyError:
return None
else:
return "%s ${default_label}" % tag
def _init_output_scalars(run, opts):
config = opts.get("output_scalars", summary.DEFAULT_OUTPUT_SCALARS)
if not config:
return None
abs_guild_path = os.path.abspath(run.guild_path())
return summary.OutputScalars(config, abs_guild_path)
def _write_proc_lock(run):
op_util.write_proc_lock(os.getpid(), run)
def _finalize_run(run, exit_status):
run.write_attr("exit_status", exit_status)
run.write_attr("stopped", runlib.timestamp())
op_util.delete_proc_lock(run)
def runs(**kw):
runs = runs_impl.filtered_runs(_runs_cmd_args(**kw))
data, cols = _format_runs(runs)
return RunsDataFrame(data=data, columns=cols)
def _runs_cmd_args(
operations=None,
labels=None,
tags=None,
comments=None,
running=False,
completed=False,
error=False,
terminated=False,
pending=False,
staged=False,
unlabeled=None,
marked=False,
unmarked=False,
started=None,
digest=None,
deleted=None,
remote=None,
):
operations = operations or ()
labels = labels or ()
tags = tags or ()
comments = comments or ()
return click_util.Args(
filter_ops=operations,
filter_labels=labels,
filter_tags=tags,
filter_comments=comments,
status_running=running,
status_completed=completed,
status_error=error,
status_terminated=terminated,
status_pending=pending,
status_staged=staged,
filter_unlabeled=unlabeled,
filter_marked=marked,
filter_unmarked=unmarked,
filter_started=started,
filter_digest=digest,
deleted=deleted,
remote=remote,
)
def _format_runs(runs):
cols = (
"run",
"operation",
"started",
"status",
"label",
)
data = [_format_run(run, cols) for run in runs]
return data, cols
def _format_run(run, cols):
fmt = run_util.format_run(run)
return [_run_attr(run, name, fmt) for name in cols]
def _run_attr(run, name, fmt):
if name == "run":
return RunIndex(run, fmt)
elif name in ("operation",):
return fmt[name]
elif name in ("started", "stopped"):
return _datetime(run.get(name))
elif name in ("label",):
return run.get(name, "")
elif name == "time":
return _run_time(run)
else:
return getattr(run, name)
def _datetime(ts):
if ts is None:
return None
return datetime.datetime.fromtimestamp(int(ts / 1000000))
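# Illustrative note: the "started"/"stopped" attributes appear to be stored as
# microsecond timestamps, hence the int(ts / 1000000) conversion to seconds.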
def _run_time(run):
formatted_time = util.format_duration(run.get("started"), run.get("stopped"))
return pd.to_timedelta(formatted_time)
def _print_run_info(item, output=False, scalars=False):
for name in RUN_DETAIL:
print("%s: %s" % (name, item.fmt.get(name, "")))
print("flags:", end="")
print(run_util.format_attr(item.value.get("flags", "")))
if scalars:
print("scalars:")
for s in indexlib.iter_run_scalars(item.value):
print(" %s: %f (step %i)" % (s["tag"], s["last_val"], s["last_step"]))
if output:
print("output:")
for line in run_util.iter_output(item.value):
print(" %s" % line, end="")
def _runs_scalars(runs):
data = []
cols = [
"run",
"prefix",
"tag",
"first_val",
"first_step",
"last_val",
"last_step",
"min_val",
"min_step",
"max_val",
"max_step",
"avg_val",
"count",
"total",
]
for run in runs:
for s in indexlib.iter_run_scalars(run):
data.append(s)
return pd.DataFrame(data, columns=cols)
def _runs_scalars_detail(runs):
from guild import tfevent
data = []
cols = [
"run",
"path",
"tag",
"val",
"step",
]
for run in runs:
for path, _run_id, scalars in tfevent.scalar_readers(run.dir):
rel_path = os.path.relpath(path, run.dir)
for tag, val, step in scalars:
data.append([run, rel_path, tag, val, step])
return pd.DataFrame(data, columns=cols)
def _runs_flags(runs):
data = [_run_flags_data(run) for run in runs]
return pd.DataFrame(data)
def _run_flags_data(run):
data = run.get("flags") or {}
data[_run_flags_key(data)] = run.id
return data
def _run_flags_key(flag_vals):
run_key = "run"
while run_key in flag_vals:
run_key = "_" + run_key
return run_key
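# Example (illustrative): if a run defines a flag literally named "run", the run
# id column is stored under "_run" (then "__run", and so on) so the flag value is
# not clobbered in the flags dataframe.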
def _runs_compare(items):
core_cols = ["run", "operation", "started", "time", "status", "label"]
flag_cols = set()
scalar_cols = set()
data = []
for item in items:
row_data = {}
data.append(row_data)
# Order matters here - we want flag vals to take precedence
# over scalar vals with the same name.
_apply_scalar_data(item.value, scalar_cols, row_data)
_apply_flag_data(item.value, flag_cols, row_data)
_apply_run_core_data(item, core_cols, row_data)
cols = core_cols + sorted(flag_cols) + _sort_scalar_cols(scalar_cols, flag_cols)
return | pd.DataFrame(data, columns=cols) | pandas.DataFrame |
#!/usr/bin/env python3
import unittest
import os
import pathlib
import math
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import logging
from neuralprophet import (
NeuralProphet,
df_utils,
time_dataset,
configure,
)
log = logging.getLogger("NP.test")
log.setLevel("WARNING")
log.parent.setLevel("WARNING")
DIR = pathlib.Path(__file__).parent.parent.absolute()
DATA_DIR = os.path.join(DIR, "example_data")
PEYTON_FILE = os.path.join(DATA_DIR, "wp_log_peyton_manning.csv")
AIR_FILE = os.path.join(DATA_DIR, "air_passengers.csv")
YOS_FILE = os.path.join(DATA_DIR, "yosemite_temps.csv")
class UnitTests(unittest.TestCase):
plot = False
def test_impute_missing(self):
"""Debugging data preprocessing"""
log.info("testing: Impute Missing")
allow_missing_dates = False
df = pd.read_csv(PEYTON_FILE)
name = "test"
df[name] = df["y"].values
if not allow_missing_dates:
df_na, _ = df_utils.add_missing_dates_nan(df.copy(deep=True), freq="D")
else:
df_na = df.copy(deep=True)
to_fill = pd.isna(df_na["y"])
# TODO fix debugging printout error
log.debug("sum(to_fill): {}".format(sum(to_fill.values)))
# df_filled, remaining_na = df_utils.fill_small_linear_large_trend(
# df.copy(deep=True),
# column=name,
# allow_missing_dates=allow_missing_dates
# )
df_filled = df.copy(deep=True)
df_filled.loc[:, name], remaining_na = df_utils.fill_linear_then_rolling_avg(
df_filled[name], limit_linear=5, rolling=20
)
# TODO fix debugging printout error
log.debug("sum(pd.isna(df_filled[name])): {}".format(sum(pd.isna(df_filled[name]).values)))
if self.plot:
if not allow_missing_dates:
df, _ = df_utils.add_missing_dates_nan(df)
df = df.loc[200:250]
fig1 = plt.plot(df["ds"], df[name], "b-")
fig1 = plt.plot(df["ds"], df[name], "b.")
df_filled = df_filled.loc[200:250]
# fig3 = plt.plot(df_filled['ds'], df_filled[name], 'kx')
fig4 = plt.plot(df_filled["ds"][to_fill], df_filled[name][to_fill], "kx")
plt.show()
def test_time_dataset(self):
# manually load any file that stores a time series, for example:
df_in = pd.read_csv(AIR_FILE, index_col=False)
log.debug("Infile shape: {}".format(df_in.shape))
n_lags = 3
n_forecasts = 1
valid_p = 0.2
df_train, df_val = df_utils.split_df(df_in, n_lags, n_forecasts, valid_p, inputs_overbleed=True)
# create a tabularized dataset from time series
df = df_utils.check_dataframe(df_train)
data_params = df_utils.init_data_params(df, normalize="minmax")
df = df_utils.normalize(df, data_params)
inputs, targets = time_dataset.tabularize_univariate_datetime(
df,
n_lags=n_lags,
n_forecasts=n_forecasts,
)
log.debug(
"tabularized inputs: {}".format(
"; ".join(["{}: {}".format(inp, values.shape) for inp, values in inputs.items()])
)
)
def test_normalize(self):
for add in [0, -1, 0.00000001, -0.99999999]:
length = 1000
days = pd.date_range(start="2017-01-01", periods=length)
y = np.zeros(length)
y[1] = 1
y = y + add
df = pd.DataFrame({"ds": days, "y": y})
m = NeuralProphet(
normalize="soft",
)
data_params = df_utils.init_data_params(
df,
normalize=m.normalize,
covariates_config=m.config_covar,
regressor_config=m.regressors_config,
events_config=m.events_config,
)
df_norm = df_utils.normalize(df, data_params)
def test_auto_batch_epoch(self):
check = {
"1": (1, 1000),
"10": (2, 1000),
"100": (8, 320),
"1000": (32, 64),
"10000": (128, 12),
"100000": (128, 5),
}
for n_data in [1, 10, int(1e2), int(1e3), int(1e4), int(1e5)]:
c = configure.Train(
learning_rate=None, epochs=None, batch_size=None, loss_func="mse", ar_sparsity=None, train_speed=0
)
c.set_auto_batch_epoch(n_data)
log.debug("n_data: {}, batch: {}, epoch: {}".format(n_data, c.batch_size, c.epochs))
batch, epoch = check["{}".format(n_data)]
assert c.batch_size == batch
assert c.epochs == epoch
def test_train_speed(self):
df = pd.read_csv(PEYTON_FILE, nrows=102)[:100]
batch_size = 16
epochs = 2
learning_rate = 1.0
check = {
"-2": (int(batch_size / 4), int(epochs * 4), learning_rate / 4),
"-1": (int(batch_size / 2), int(epochs * 2), learning_rate / 2),
"0": (batch_size, epochs, learning_rate),
"1": (int(batch_size * 2), max(1, int(epochs / 2)), learning_rate * 2),
"2": (int(batch_size * 4), max(1, int(epochs / 4)), learning_rate * 4),
}
for train_speed in [-1, 0, 2]:
m = NeuralProphet(
learning_rate=learning_rate,
batch_size=batch_size,
epochs=epochs,
train_speed=train_speed,
)
m.fit(df, freq="D")
c = m.config_train
log.debug(
"train_speed: {}, batch: {}, epoch: {}, learning_rate: {}".format(
train_speed, c.batch_size, c.epochs, c.learning_rate
)
)
batch, epoch, lr = check["{}".format(train_speed)]
assert c.batch_size == batch
assert c.epochs == epoch
assert math.isclose(c.learning_rate, lr)
batch_size = 8
epochs = 320
check2 = {
"-2": (int(batch_size / 4), int(epochs * 4)),
"-1": (int(batch_size / 2), int(epochs * 2)),
"0": (batch_size, epochs),
"1": (int(batch_size * 2), int(epochs / 2)),
"2": (int(batch_size * 4), int(epochs / 4)),
}
for train_speed in [2]:
m = NeuralProphet(
train_speed=train_speed,
)
m.fit(df, freq="D")
c = m.config_train
log.debug("train_speed: {}, batch: {}, epoch: {}".format(train_speed, c.batch_size, c.epochs))
batch, epoch = check2["{}".format(train_speed)]
assert c.batch_size == batch
assert c.epochs == epoch
def test_split_impute(self):
def check_split(df_in, df_len_expected, n_lags, n_forecasts, freq, p=0.1):
m = NeuralProphet(
n_lags=n_lags,
n_forecasts=n_forecasts,
)
df_in = df_utils.check_dataframe(df_in, check_y=False)
df_in = m._handle_missing_data(df_in, freq=freq, predicting=False)
assert df_len_expected == len(df_in)
total_samples = len(df_in) - n_lags - 2 * n_forecasts + 2
df_train, df_test = m.split_df(df_in, freq=freq, valid_p=0.1, inputs_overbleed=True)
n_train = len(df_train) - n_lags - n_forecasts + 1
n_test = len(df_test) - n_lags - n_forecasts + 1
assert total_samples == n_train + n_test
n_test_expected = max(1, int(total_samples * p))
n_train_expected = total_samples - n_test_expected
assert n_train == n_train_expected
assert n_test == n_test_expected
log.info("testing: SPLIT: daily data")
df = pd.read_csv(PEYTON_FILE)
check_split(df_in=df, df_len_expected=len(df) + 59, freq="D", n_lags=10, n_forecasts=3)
log.info("testing: SPLIT: monthly data")
df = pd.read_csv(AIR_FILE)
check_split(df_in=df, df_len_expected=len(df), freq="MS", n_lags=10, n_forecasts=3)
log.info("testing: SPLIT: 5min data")
df = | pd.read_csv(YOS_FILE) | pandas.read_csv |
"""Tests for :py:mod:`features.daytime`"""
import pytest
import pandas as pd
import numpy as np
from pvlib.location import Location
from pvanalytics.features import daytime
@pytest.fixture(scope='module',
params=['H', '15T', pytest.param('T', marks=pytest.mark.slow)])
def clearsky_january(request, albuquerque):
return albuquerque.get_clearsky(
pd.date_range(
start='1/1/2020',
end='1/30/2020',
tz='MST',
freq=request.param
),
model='simplified_solis'
)
def _assert_daytime_no_shoulder(clearsky, output):
# every night-time value in `output` has low or 0 irradiance
assert all(clearsky[~output] < 3)
if pd.infer_freq(clearsky.index) == 'T':
# Blur the boundaries between night and day if testing
# high-frequency data since the daytime filtering algorithm does
# not have one-minute accuracy.
clearsky = clearsky.rolling(window=30, center=True).max()
# every day-time value is within 15 minutes of a non-zero
# irradiance measurement
assert all(clearsky[output] > 0)
def test_daytime_with_clipping(clearsky_january):
ghi = clearsky_january['ghi'].copy()
ghi.loc[ghi >= 500] = 500
_assert_daytime_no_shoulder(
clearsky_january['ghi'],
daytime.power_or_irradiance(ghi)
)
# Include a period where data goes to zero during clipping and
# returns to normal after the clipping is done
ghi.loc[ghi['1/3/2020'].between_time('12:30', '15:30').index] = 0
_assert_daytime_no_shoulder(
clearsky_january['ghi'],
daytime.power_or_irradiance(ghi)
)
def test_daytime_overcast(clearsky_january):
ghi = clearsky_january['ghi'].copy()
ghi.loc['1/3/2020':'1/5/2020'] *= 0.5
ghi.loc['1/7/2020':'1/8/2020'] *= 0.6
_assert_daytime_no_shoulder(
clearsky_january['ghi'],
daytime.power_or_irradiance(ghi)
)
def test_daytime_split_day():
location = Location(35, -150)
clearsky = location.get_clearsky(
| pd.date_range(start='1/1/2020', end='1/10/2020', freq='15T') | pandas.date_range |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 17 15:12:37 2021
@author: suriyaprakashjambunathan
"""
# Fitting the nan values with the average
def avgfit(l):
na = pd.isna(l)
arr = []
for i in range(len(l)):
if na[i] == False:
arr.append(l[i])
avg = sum(arr)/len(arr)
fit_arr = []
for i in range(len(l)):
if na[i] == False:
fit_arr.append(l[i])
elif na[i] == True:
fit_arr.append(avg)
return(fit_arr)
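# Example (illustrative): avgfit([1.0, np.nan, 3.0]) -> [1.0, 2.0, 3.0], i.e.
# missing entries are replaced by the mean of the non-missing values.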
# Weighted Mean Absolute Percentage Error
def mean_absolute_percentage_error(y_true, y_pred):
y_true, y_pred = list(y_true), list(y_pred)
l = len(y_true)
num = 0
den = 0
for i in range(l):
num = num + (abs(y_pred[i] - y_true[i]))
den = den + y_true[i]
return abs(num/den) * 100
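# Example (illustrative): despite the sklearn-style name, this returns the
# *weighted* MAPE, e.g. y_true=[100, 200], y_pred=[110, 180] gives
# (10 + 20) / 300 * 100 = 10.0 (percent).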
# Importing the Libraries
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from sklearn.metrics import explained_variance_score
from sklearn.metrics import confusion_matrix, accuracy_score
from sklearn.utils import class_weight
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestRegressor
import warnings
warnings.simplefilter(action='ignore')
# Importing the Dataset
dataset = pd.read_csv('antenna.csv')
#X
X = dataset.loc[:, dataset.columns != 'vswr']
X = X.loc[:, X.columns != 'gain']
X = X.loc[:, X.columns != 'bandwidth']
Xi = X.iloc[:, :-3]
Xi = pd.DataFrame(Xi)
#y
bw = avgfit(list(dataset['bandwidth']))
dataset['bandwidth'] = bw
for i in range(len(bw)):
if bw[i] < 100:
bw[i] = 'Class 1'
elif bw[i] >= 100 and bw[i] < 115:
bw[i] = 'Class 2'
elif bw[i] >= 115 and bw[i] < 120:
bw[i] = 'Class 3'
elif bw[i] >= 120 and bw[i] < 121:
bw[i] = 'Class 4'
elif bw[i] >= 121 and bw[i] < 122:
bw[i] = 'Class 5'
elif bw[i] >= 122 :
bw[i] = 'Class 6'
gain =avgfit(list(dataset['gain']))
dataset['gain'] = gain
for i in range(len(gain)):
if gain[i] < 1.3:
gain[i] = 'Class 1'
elif gain[i] >= 1.3 and gain[i] < 1.5:
gain[i] = 'Class 2'
elif gain[i] >= 1.5 and gain[i] < 2.4:
gain[i] = 'Class 3'
elif gain[i] >= 2.4 and gain[i] < 2.7:
gain[i] = 'Class 4'
elif gain[i] >= 2.7 and gain[i] < 2.9:
gain[i] = 'Class 5'
elif gain[i] >= 2.9 and gain[i] < 3.5:
gain[i] = 'Class 6'
vswr =avgfit(list(dataset['vswr']))
dataset['vswr'] = vswr
for i in range(len(vswr)):
if vswr[i] >= 1 and vswr[i] < 1.16:
vswr[i] = 'Class 1'
elif vswr[i] >= 1.16 and vswr[i] < 1.32:
vswr[i] = 'Class 2'
elif vswr[i] >= 1.32 and vswr[i] < 1.5:
vswr[i] = 'Class 3'
elif vswr[i] >= 1.5 and vswr[i] < 2:
vswr[i] = 'Class 4'
elif vswr[i] >= 2 and vswr[i] < 4:
vswr[i] = 'Class 5'
elif vswr[i] >= 4:
vswr[i] = 'Class 6'
y1 = pd.DataFrame(bw)
y2 = pd.DataFrame(gain)
y3 = pd.DataFrame(vswr)
# Accuracy list
acc_list = []
params = ['bandwidth','gain','vswr']
y = | pd.DataFrame() | pandas.DataFrame |
import base64
import io
import textwrap
import dash
import dash_core_components as dcc
import dash_html_components as html
import gunicorn
import plotly.graph_objs as go
from dash.dependencies import Input, Output, State
import flask
import pandas as pd
import urllib.parse
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.decomposition import PCA
import numpy as np
import math
import scipy.stats
import dash_table
from dash_table.Format import Format, Scheme
from colour import Color
import dash_bootstrap_components as dbc
# from waitress import serve
external_stylesheets = [dbc.themes.BOOTSTRAP, 'https://codepen.io/chriddyp/pen/bWLwgP.css',
"https://codepen.io/sutharson/pen/dyYzEGZ.css",
"https://fonts.googleapis.com/css2?family=Raleway&display=swap",
"https://codepen.io/chriddyp/pen/brPBPO.css"]
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
server = app.server
# "external_url": "https://codepen.io/chriddyp/pen/brPBPO.css"
# https://raw.githubusercontent.com/aaml-analytics/pca-explorer/master/LoadingStatusStyleSheet.css
styles = {
'pre': {
'border': 'thin lightgrey solid',
'overflowX': 'scroll'
}
}
tabs_styles = {'height': '40px', 'font-family': 'Raleway', 'fontSize': 14}
tab_style = {
'borderBottom': '1px solid #d6d6d6',
'padding': '6px',
'Weight': 'bold'
}
tab_selected_style = {
'borderTop': '3px solid #333333',
'borderBottom': '1px solid #d6d6d6 ',
'backgroundColor': '#f6f6f6',
'color': '#333333',
# 'fontColor': '#004a4a',
'fontWeight': 'bold',
'padding': '6px'
}
# APP ABOUT DESCRIPTION
MOF_tool_about = textwrap.wrap(' These tools aim to provide a reproducible and consistent data visualisation platform '
'where experimental and computational researchers can use big data and statistical '
'analysis to find the best materials for specific applications. Principal Component '
'Analysis (PCA) is a dimension reduction technique that can be used to reduce a large '
'set of observable variables to a smaller set of latent variables that still contain '
'most of the information in the large set (feature extraction). This is done by '
'transforming a number of (possibly) correlated variables into some number of orthogonal '
'(uncorrelated) variables called principal components to find the directions of maximal '
'variance. PCA can be used to ease data visualisation by having fewer dimensions to plot '
'or be used as a pre-processing step before using another Machine Learning (ML)'
' algorithm for regression '
'and classification tasks. PCA can be used to improve an ML algorithm performance, '
'reduce overfitting and reduce noise in data.',
width=50)
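# Minimal sketch (illustrative, not wired into the app): the PCA pipeline used by
# the callbacks below reduces to standardise -> fit -> project, e.g.
# x = StandardScaler().fit_transform(dff.values)
# pca = PCA(n_components=len(dff.columns))
# scores = pca.fit_transform(x) # principal-component scores
# loadings = pca.components_.T * np.sqrt(pca.explained_variance_) # loadings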
Scree_plot_about = textwrap.wrap(' The Principal Component Analysis Visualisation Tools run PCA for the user and '
                                 'populate a Scree plot. This plot allows the user to determine if PCA is suitable '
                                 'for their dataset and whether they can accept an X% drop in explained variance '
                                 'in exchange for fewer dimensions.', width=50)
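# Illustrative note: the scree callbacks below compute the cumulative explained
# variance as pd.DataFrame(pca.explained_variance_ratio_).cumsum() * 100 and use
# np.interp to estimate how many components are needed to pass the 70% mark.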
Feature_correlation_filter = textwrap.wrap("Feature correlation heatmaps provide users with feature analysis and "
"feature principal component analysis. This tool will allow users to see the"
" correlation between variables and the"
" covariances/correlations between original variables and the "
"principal components (loadings)."
, width=50)
plots_analysis = textwrap.wrap('Users can keep all variables as features or drop certain variables to produce a '
'Biplot, cos2 plot and contribution plot. The score plot is used to look for clusters, '
'trends, and outliers in the first two principal components. The loading plot is used to'
' visually interpret the first two principal components. The biplot overlays the score '
'plot and the loading plot on the same graph. The squared cosine (cos2) plot shows '
'the importance of a component for a given observation i.e. measures '
'how much a variable is represented in a component. The contribution plot contains the '
'contributions (%) of the variables to the principal components', width=50, )
data_table_download = textwrap.wrap("The user's inputs from the 'Plots' tab will provide the output of the data tables."
" The user can download the scores, eigenvalues, explained variance, "
"cumulative explained variance, loadings, "
"cos2 and contributions from the populated data tables. "
"Note: Wait for user inputs to be"
" computed (faded tab app will return to the original colour) before downloading the"
" data tables. ", width=50)
MOF_GH = textwrap.wrap(" to explore AAML's sample data and read more on"
" AAML's Principal Component Analysis Visualisation Tool Manual, FAQ's & Troubleshooting"
" on GitHub... ", width=50)
####################
# APP LAYOUT #
####################
fig = go.Figure()
fig1 = go.Figure()
app.layout = html.Div([
html.Div([
html.Img(
src='https://raw.githubusercontent.com/aaml-analytics/mof-explorer/master/UOC.png',
height='35', width='140', style={'display': 'inline-block', 'padding-left': '1%'}),
html.Img(src='https://raw.githubusercontent.com/aaml-analytics/mof-explorer/master/A2ML-logo.png',
height='50', width='125', style={'float': 'right', 'display': 'inline-block', 'padding-right': '2%'}),
html.H1("Principal Component Analysis Visualisation Tools",
style={'display': 'inline-block', 'padding-left': '11%', 'text-align': 'center', 'fontSize': 36,
'color': 'white', 'font-family': 'Raleway'}),
html.H1("...", style={'fontColor': '#3c3c3c', 'fontSize': 6})
], style={'backgroundColor': '#333333'}),
html.Div([html.A('Refresh', href='/')], style={}),
html.Div([
html.H2("Upload Data", style={'fontSize': 24, 'font-family': 'Raleway', 'color': '#333333'}, ),
html.H3("Upload .txt, .csv or .xls files to starting exploring data...", style={'fontSize': 16,
'font-family': 'Raleway'}),
dcc.Store(id='csv-data', storage_type='session', data=None),
html.Div([dcc.Upload(
id='data-table-upload',
children=html.Div([html.Button('Upload File')],
style={'height': "60px", 'borderWidth': '1px',
'borderRadius': '5px',
'textAlign': 'center',
}),
multiple=False
),
html.Div(id='output-data-upload'),
]), ], style={'display': 'inline-block', 'padding-left': '1%', }),
html.Div([dcc.Tabs([
dcc.Tab(label='About', style=tab_style, selected_style=tab_selected_style,
children=[html.Div([html.H2(" What are AAML's Principal Component Analysis Visualisation Tools?",
style={'fontSize': 18, 'font-family': 'Raleway', 'font-weight': 'bold'
}),
html.Div([' '.join(MOF_tool_about)]
, style={'font-family': 'Raleway'}),
html.H2(["Scree Plot"],
style={'fontSize': 18,
'font-family': 'Raleway', 'font-weight': 'bold'}),
html.Div([' '.join(Scree_plot_about)], style={'font-family': 'Raleway'}),
html.H2(["Feature Correlation"], style={'fontSize': 18,
'font-weight': 'bold',
'font-family': 'Raleway'}),
html.Div([' '.join(Feature_correlation_filter)], style={'font-family': 'Raleway', }),
html.H2(["Plots"],
style={'fontSize': 18, 'font-weight': 'bold',
'font-family': 'Raleway'}),
html.Div([' '.join(plots_analysis)], style={'font-family': 'Raleway'}),
html.H2(["Data tables"],
style={'fontSize': 18, 'font-weight': 'bold',
'font-family': 'Raleway'}),
html.Div([' '.join(data_table_download)], style={'font-family': 'Raleway'}),
# ADD LINK
html.Div([html.Plaintext(
[' Click ', html.A('here ',
href='https://github.com/aaml-analytics/pca-explorer')],
style={'display': 'inline-block',
'fontSize': 14, 'font-family': 'Raleway'}),
html.Div([' '.join(MOF_GH)], style={'display': 'inline-block',
'fontSize': 14,
'font-family': 'Raleway'}),
html.Img(
src='https://raw.githubusercontent.com/aaml-analytics/mof'
'-explorer/master/github.png',
height='40', width='40',
style={'display': 'inline-block', 'float': "right"
})
]
, style={'display': 'inline-block'})
], style={'backgroundColor': '#ffffff', 'padding-left': '1%'}
)]),
dcc.Tab(label='Scree Plot', style=tab_style, selected_style=tab_selected_style,
children=[
html.Div([dcc.Graph(id='PC-Eigen-plot')
],
style={'display': 'inline-block',
'width': '49%'}),
html.Div([dcc.Graph(id='PC-Var-plot')
], style={'display': 'inline-block', 'float': 'right',
'width': '49%'}),
html.Div(
[html.Label(["Remove outliers (if any) in analysis:", dcc.RadioItems(
id='outlier-value',
options=[{'label': 'Yes', 'value': 'Yes'},
{'label': 'No', 'value': 'No'}],
value='No')
])
], style={'display': 'inline-block',
'width': '49%', 'padding-left': '1%'}),
html.Div([html.Label(["Select the type of matrix used to calculate the principal components:",
dcc.RadioItems(
id='matrix-type-scree',
options=[{'label': 'Correlation', 'value': 'Correlation'},
{'label': 'Covariance', 'value': 'Covariance'}],
value='Correlation')
])], style={'display': 'inline-block',
'width': '49%', }),
html.Div([html.P(
"Note: Use a correlation matrix when your variables have different scales and you want to weight "
"all the variables equally. Use a covariance matrix when your variables have different scales and"
" you want to give more emphasis to variables with higher variances. When unsure"
" use a correlation matrix.")],
style={'padding-left': '1%'}),
html.Div([
html.Label(["You should attempt to use at least..."
, html.Div(id='var-output-container-filter')])
], style={'padding-left': '1%'}
),
html.Div([
html.Label(["As a rule of thumb for the Scree Plot"
" Eigenvalues, the point where the slope of the curve "
"is clearly "
"leveling off (the elbow), indicates the number of "
"components that "
"should be retained as significant."])
], style={'padding-left': '1%'}),
]),
dcc.Tab(label='Feature correlation', style=tab_style,
selected_style=tab_selected_style,
children=[html.Div([html.Div([dcc.Graph(id='PC-feature-heatmap')
], style={'width': '47%',
'display': 'inline-block',
'float': 'right'}),
html.Div([dcc.Graph(id='feature-heatmap')
], style={'width': '51%',
'display': 'inline-block',
'float': 'left'}),
html.Div([html.Label(["Loading colour bar range:"
, html.Div(
id='color-range-container')])
], style={
'fontSize': 12,
'float': 'right',
'width': '100%',
'padding-left': '85%'}
),
html.Div(
[html.Label(
["Remove outliers (if any) in analysis:",
dcc.RadioItems(
id='PC-feature-outlier-value',
options=[{'label': 'Yes', 'value': 'Yes'},
{'label': 'No', 'value': 'No'}],
value='No')
])
], style={'display': 'inline-block',
'width': '29%', 'padding-left': '1%'}),
html.Div([html.Label(
["Select the type of matrix used to calculate the principal components:",
dcc.RadioItems(
id='matrix-type-heatmap',
options=[{'label': 'Correlation', 'value': 'Correlation'},
{'label': 'Covariance', 'value': 'Covariance'}],
value='Correlation')
])], style={'display': 'inline-block',
'width': '39%', }),
html.Div([html.Label(["Select color scale:",
dcc.RadioItems(
id='colorscale',
options=[{'label': i, 'value': i}
for i in
['Viridis', 'Plasma']],
value='Plasma'
)]),
], style={'display': 'inline-block',
'width': '29%', 'padding-left': '1%'}),
html.Div([html.P(
"Note: Use a correlation matrix when your variables have different scales and you want to weight "
"all the variables equally. Use a covariance matrix when your variables have different scales and"
" you want to give more emphasis to variables with higher variances. When unsure"
" use a correlation matrix.")],
style={'padding-left': '1%'}),
html.Div([
html.P("There are usually two ways multicollinearity, "
"which is when there are a number of variables "
"that are highly correlated, is dealt with:"),
html.P("1) Use PCA to obtain a set of orthogonal ("
"not correlated) variables to analyse."),
html.P("2) Use correlation of determination (R²) to "
"determine which variables are highly "
"correlated and use only 1 in analysis. "
"Cut off for highly correlated variables "
"is ~0.7."),
html.P(
"In any case, it depends on the machine learning algorithm you may apply later. For correlation robust algorithms,"
" such as Random Forest, correlation of features will not be a concern. For non-correlation robust algorithms such as Linear Discriminant Analysis, "
"all high correlation variables should be removed.")
], style={'padding-left': '1%'}
),
html.Div([
html.Label(["Note: Data has been standardised (scale)"])
], style={'padding-left': '1%'})
])
]),
dcc.Tab(label='Plots', style=tab_style,
selected_style=tab_selected_style,
children=[html.Div([
html.Div([html.P("Selecting Features")], style={'padding-left': '1%',
'font-weight': 'bold'}),
html.Div([
html.P("Input here affects all plots, datatables and downloadable data output"),
html.Label([
"Would you like to analyse all variables or choose custom variables to "
"analyse:",
dcc.RadioItems(
id='all-custom-choice',
options=[{'label': 'All',
'value': 'All'},
{'label': 'Custom',
'value': 'Custom'}],
value='All'
)])
], style={'padding-left': '1%'}),
html.Div([
html.P("For custom variables input variables you would not like as features in your PCA:"),
html.Label(
[
"Note: Only input numerical variables (non-numerical variables have already "
"been removed from your dataframe)",
dcc.Dropdown(id='feature-input',
multi=True,
)])
], style={'padding': 10, 'padding-left': '1%'}),
]), dcc.Tabs(id='sub-tabs1', style=tabs_styles,
children=[
dcc.Tab(label='Biplot (Scores + loadings)', style=tab_style,
selected_style=tab_selected_style,
children=[
html.Div([dcc.Graph(id='biplot', figure=fig)
], style={'height': '100%', 'width': '75%',
'padding-left': '20%'},
),
html.Div(
[html.Label(
["Remove outliers (if any) in analysis:",
dcc.RadioItems(
id='outlier-value-biplot',
options=[
{'label': 'Yes', 'value': 'Yes'},
{'label': 'No', 'value': 'No'}],
value='No')
])
], style={'display': 'inline-block',
'width': '29%', 'padding-left': '1%'}),
html.Div([html.Label([
"Select the type of matrix used to calculate the principal components:",
dcc.RadioItems(
id='matrix-type-biplot',
options=[{'label': 'Correlation',
'value': 'Correlation'},
{'label': 'Covariance',
'value': 'Covariance'}],
value='Correlation')
])], style={'display': 'inline-block',
'width': '39%', }),
html.Div([
html.Label([
"Graph Update to show either loadings (Loading Plot) or "
"scores and loadings (Biplot):",
dcc.RadioItems(
id='customvar-graph-update',
options=[{'label': 'Biplot',
'value': 'Biplot'},
{'label': 'Loadings',
'value': 'Loadings'}],
value='Biplot')
])
], style={'display': 'inline-block',
'width': '29%', 'padding-left': '1%'}),
html.Div([html.P(
"Note: Use a correlation matrix when your variables have different scales and you want to weight "
"all the variables equally. Use a covariance matrix when your variables have different scales and"
" you want to give more emphasis to variables with higher variances. When unsure"
" use a correlation matrix. PCA is an unsupervised machine learning technique - it only "
"looks at the input features and does not take "
"into account the output or the target"
" (response) variable.")],
style={'padding-left': '1%'}),
html.Div([
html.P("For variables you have dropped..."),
html.Label([
"Would you like to introduce a first target variable"
" into your data visualisation?"
" (Graph type must be Biplot): "
"",
dcc.RadioItems(
id='radio-target-item',
options=[{'label': 'Yes',
'value': 'Yes'},
{'label': 'No',
'value': 'No'}],
value='No'
)])
], style={'width': '49%', 'padding-left': '1%',
'display': 'inline-block'}),
html.Div([
html.Label([
"Select first target variable for color scale of scores: ",
dcc.Dropdown(
id='color-scale-scores',
)])
], style={'width': '49%', 'padding-left': '1%',
'display': 'inline-block'}),
html.Div([
html.Label([
"Would you like to introduce a second target variable"
" into your data visualisation??"
" (Graph type must be Biplot):",
dcc.RadioItems(
id='radio-target-item-second',
options=[{'label': 'Yes',
'value': 'Yes'},
{'label': 'No',
'value': 'No'}],
value='No'
)])
], style={'width': '49%', 'padding-left': '1%',
'display': 'inline-block'}),
html.Div([
html.Label([
"Select second target variable for size scale of scores:",
dcc.Dropdown(
id='size-scale-scores',
)])
], style={'width': '49%', 'padding-left': '1%',
'display': 'inline-block'}),
html.Div([html.Label(["Size range:"
, html.Div(
id='size-second-target-container')])
], style={'display': 'inline-block',
'float': 'right',
'padding-right': '5%'}
),
html.Div([
html.Br(),
html.P(
"A loading plot shows how "
"strongly each characteristic (variable)"
" influences a principal component. The angles between the vectors"
" tell us how characteristics correlate with one another: "),
html.P("1) When two vectors are close, forming a small angle, the two "
"variables they represent are positively correlated. "),
html.P(
"2) If they meet each other at 90°, they are not likely to be correlated. "),
html.P(
"3) When they diverge and form a large angle (close to 180°), they are negative correlated."),
html.P(
"The Score Plot involves the projection of the data onto the PCs in two dimensions."
"The plot contains the original data but in the rotated (PC) coordinate system"),
html.P(
"A biplot merges a score plot and loading plot together.")
], style={'padding-left': '1%'}
),
]),
dcc.Tab(label='Cos2', style=tab_style,
selected_style=tab_selected_style,
children=[
html.Div([dcc.Graph(id='cos2-plot', figure=fig)
], style={'width': '65%',
'padding-left': '25%'},
),
html.Div(
[html.Label(["Remove outliers (if any) in analysis:",
dcc.RadioItems(
id='outlier-value-cos2',
options=[
{'label': 'Yes', 'value': 'Yes'},
{'label': 'No', 'value': 'No'}],
value='No')
])
], style={'display': 'inline-block',
'padding-left': '1%',
'width': '49%'}),
html.Div([html.Label([
"Select the type of matrix used to calculate the principal components:",
dcc.RadioItems(
id='matrix-type-cos2',
options=[{'label': 'Correlation',
'value': 'Correlation'},
{'label': 'Covariance',
'value': 'Covariance'}],
value='Correlation')
])], style={'display': 'inline-block',
'width': '49%', }),
html.Div([html.P(
"Note: Use a correlation matrix when your variables have different scales and you want to weight "
"all the variables equally. Use a covariance matrix when your variables have different scales and"
" you want to give more emphasis to variables with higher variances. When unsure"
" use a correlation matrix.")],
style={'padding-left': '1%'}),
html.Div([
html.P("The squared cosine shows the importance of a "
"component for a given observation i.e. "
"measures "
" how much a variable is represented in a "
"component")
], style={'padding-left': '1%'}),
]),
dcc.Tab(label='Contribution', style=tab_style,
selected_style=tab_selected_style,
children=[
html.Div([dcc.Graph(id='contrib-plot', figure=fig)
], style={'width': '65%',
'padding-left': '25%'},
),
html.Div(
[html.Label(["Remove outliers (if any) in analysis:",
dcc.RadioItems(
id='outlier-value-contrib',
options=[
{'label': 'Yes', 'value': 'Yes'},
{'label': 'No', 'value': 'No'}],
value='No')
], style={'padding-left': '1%'})
], style={'display': 'inline-block',
'width': '49%'}),
html.Div([html.Label([
"Select the type of matrix used to calculate the principal components:",
dcc.RadioItems(
id='matrix-type-contrib',
options=[{'label': 'Correlation',
'value': 'Correlation'},
{'label': 'Covariance',
'value': 'Covariance'}],
value='Correlation')
])], style={'display': 'inline-block',
'width': '49%', }),
html.Div([html.P(
"Note: Use a correlation matrix when your variables have different scales and you want to weight "
"all the variables equally. Use a covariance matrix when your variables have different scales and"
" you want to give more emphasis to variables with higher variances. When unsure"
" use a correlation matrix.")],
style={'padding-left': '1%'}),
html.Div([
html.P("The contribution plot contains the "
"contributions (in percentage) of the "
"variables to the principal components")
], style={'padding-left': '1%'}),
])
])
]),
dcc.Tab(label='Data tables', style=tab_style,
selected_style=tab_selected_style,
children=[html.Div([
html.Div([
html.Label(
["Note: Input in 'Plots' tab will provide output of data tables and the"
" downloadable PCA data"])
], style={'font-weight': 'bold', 'padding-left': '1%'}),
html.Div([html.A(
'Download PCA Data (scores for each principal component)',
id='download-link',
href="",
target="_blank"
)], style={'padding-left': '1%'}),
html.Div([html.Label(["Remove outliers (if any) in analysis:",
dcc.RadioItems(id="eigenA-outlier",
options=[{'label': 'Yes',
'value': 'Yes'},
{'label': 'No',
'value': 'No'}],
value='No'
)])], style={'padding-left': '1%',
'display': 'inline-block', 'width': '49%'}),
html.Div([html.Label(["Select the type of matrix used to calculate the principal components:",
dcc.RadioItems(
id='matrix-type-data-table',
options=[{'label': 'Correlation', 'value': 'Correlation'},
{'label': 'Covariance', 'value': 'Covariance'}],
value='Correlation')
])], style={'display': 'inline-block',
'width': '49%', }),
html.Div([html.P(
"Note: Use a correlation matrix when your variables have different scales and you want to weight "
"all the variables equally. Use a covariance matrix when your variables have different scales and"
" you want to give more emphasis to variables with higher variances. When unsure"
" use a correlation matrix.")],
style={'padding-left': '1%'}),
html.Div([
html.Div([
html.Label(["Correlation between Features"])
], style={'font-weight': 'bold'}),
html.Div([
dash_table.DataTable(id='data-table-correlation',
editable=False,
filter_action='native',
sort_action='native',
sort_mode='multi',
selected_columns=[],
selected_rows=[],
page_action='native',
column_selectable='single',
page_current=0,
page_size=20,
style_data={'height': 'auto'},
style_table={'overflowX': 'scroll',
'maxHeight': '300px',
'overflowY': 'scroll'},
style_cell={
'minWidth': '0px', 'maxWidth': '220px',
'whiteSpace': 'normal',
}
),
html.Div(id='data-table-correlation-container'),
]),
html.Div([html.A(
'Download Feature Correlation data',
id='download-link-correlation',
href="",
target="_blank"
)]),
], style={'padding': 20}),
html.Div([
html.Div([
html.Label(["Eigen Analysis of the correlation matrix"]),
], style={'font-weight': 'bold'}),
html.Div([
dash_table.DataTable(id='data-table-eigenA',
editable=False,
filter_action='native',
sort_action='native',
sort_mode='multi',
selected_columns=[],
selected_rows=[],
page_action='native',
column_selectable='single',
page_current=0,
page_size=20,
style_data={'height': 'auto'},
style_table={'overflowX': 'scroll',
'maxHeight': '300px',
'overflowY': 'scroll'},
style_cell={
'minWidth': '0px', 'maxWidth': '220px',
'whiteSpace': 'normal',
}
),
html.Div(id='data-table-eigenA-container'),
]),
html.Div([html.A(
'Download Eigen Analysis data',
id='download-link-eigenA',
href="",
download='Eigen_Analysis_data.csv',
target="_blank"
)]),
], style={'padding': 20}),
html.Div([
html.Div([
html.Label(["Loadings (Feature and PC correlation) from PCA"]),
], style={'font-weight': 'bold'}),
html.Div([
dash_table.DataTable(id='data-table-loadings',
editable=False,
filter_action='native',
sort_action='native',
sort_mode='multi',
selected_columns=[],
selected_rows=[],
page_action='native',
column_selectable='single',
page_current=0,
page_size=20,
style_data={'height': 'auto'},
style_table={'overflowX': 'scroll',
'maxHeight': '300px',
'overflowY': 'scroll'},
style_cell={
'minWidth': '0px', 'maxWidth': '220px',
'whiteSpace': 'normal',
}
),
html.Div(id='data-table-loadings-container'),
]),
html.Div([html.A(
'Download Loadings data',
id='download-link-loadings',
download='Loadings_data.csv',
href="",
target="_blank"
)]),
], style={'padding': 20}),
html.Div([
html.Div([
html.Label(["Cos2 from PCA"])
], style={'font-weight': 'bold'}),
html.Div([
dash_table.DataTable(id='data-table-cos2',
editable=False,
filter_action='native',
sort_action='native',
sort_mode='multi',
selected_columns=[],
selected_rows=[],
page_action='native',
column_selectable='single',
page_current=0,
page_size=20,
style_data={'height': 'auto'},
style_table={'overflowX': 'scroll',
'maxHeight': '300px',
'overflowY': 'scroll'},
style_cell={
'minWidth': '0px', 'maxWidth': '220px',
'whiteSpace': 'normal',
}
),
html.Div(id='data-table-cos2-container'),
]),
html.Div([html.A(
'Download Cos2 data',
id='download-link-cos2',
download='Cos2_data.csv',
href="",
target="_blank"
)]),
], style={'padding': 20}),
html.Div([
html.Div([
html.Label(["Contributions from PCA"])
], style={'font-weight': 'bold'}),
html.Div([
dash_table.DataTable(id='data-table-contrib',
editable=False,
filter_action='native',
sort_action='native',
sort_mode='multi',
selected_columns=[],
selected_rows=[],
page_action='native',
column_selectable='single',
page_current=0,
page_size=20,
style_data={'height': 'auto'},
style_table={'overflowX': 'scroll',
'maxHeight': '300px',
'overflowY': 'scroll'},
style_cell={
'minWidth': '0px', 'maxWidth': '220px',
'whiteSpace': 'normal',
}
),
html.Div(id='data-table-contrib-container'),
]),
html.Div([html.A(
'Download Contributions data',
id='download-link-contrib',
download='Contributions_data.csv',
href="",
target="_blank"
)]),
], style={'padding': 20}),
])])
])
], style={'font-family': 'Raleway'})])
# READ FILE
def parse_contents(contents, filename):
content_type, content_string = contents.split(',')
decoded = base64.b64decode(content_string)
try:
if 'csv' in filename:
# Assume that the user uploaded a CSV file
df = pd.read_csv(io.StringIO(decoded.decode('utf-8')))
            df = df.fillna(0)
        elif 'xls' in filename:
            # Assume that the user uploaded an excel file
            df = pd.read_excel(io.BytesIO(decoded))
            df = df.fillna(0)
        elif 'txt' in filename or 'tsv' in filename:
            df = pd.read_csv(io.StringIO(decoded.decode('utf-8')), delimiter=r'\s+')
            df = df.fillna(0)
except Exception as e:
print(e)
return html.Div([
'There was an error processing this file.'
])
return df
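# Illustrative note: parse_contents returns a DataFrame on success and an html.Div
# error message otherwise; the upload callback below serialises the frame with
# to_json(orient='split') so downstream callbacks can rebuild it via
# pd.read_json(data, orient='split').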
@app.callback(Output('csv-data', 'data'),
[Input('data-table-upload', 'contents')],
[State('data-table-upload', 'filename')])
def parse_uploaded_file(contents, filename):
if not filename:
return dash.no_update
df = parse_contents(contents, filename)
    df = df.fillna(0)
return df.to_json(date_format='iso', orient='split')
@app.callback(Output('PC-Var-plot', 'figure'),
[Input('outlier-value', 'value'),
Input('matrix-type-scree', 'value'),
Input('csv-data', 'data')],
)
def update_graph_stat(outlier, matrix_type, data):
traces = []
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
if outlier == 'No' and matrix_type == 'Correlation':
features1 = dff.columns
features = list(features1)
x = dff.loc[:, features].values
# Separating out the target (if any)
# Standardizing the features to {mean, variance} = {0, 1}
x = StandardScaler().fit_transform(x)
pca = PCA(n_components=len(features))
principalComponents = pca.fit_transform(x)
principalDf = pd.DataFrame(data=principalComponents
, columns=['PC' + str(i + 1) for i in range(len(features))])
finalDf = pd.concat([df[[df.columns[0]]], principalDf], axis=1)
loading = pca.components_.T * np.sqrt(pca.explained_variance_)
loading_df = pd.DataFrame(data=loading[0:, 0:], index=features,
columns=['PC' + str(i + 1) for i in range(loading.shape[1])])
Var = pca.explained_variance_ratio_
PC_df = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features))], columns=['Principal Component'])
Var_df = pd.DataFrame(data=Var, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum = Var_df.cumsum()
Var_dff = pd.concat([PC_df, (Var_cumsum * 100)], axis=1)
data = Var_dff
elif outlier == 'Yes' and matrix_type == 'Correlation':
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier].values
# Standardizing the features
x_outlier = StandardScaler().fit_transform(x_outlier)
pca_outlier = PCA(n_components=len(features_outlier))
principalComponents_outlier = pca_outlier.fit_transform(x_outlier)
principalDf_outlier = pd.DataFrame(data=principalComponents_outlier
, columns=['PC' + str(i + 1) for i in range(len(features_outlier))])
# combining principle components and target
finalDf_outlier = pd.concat([outlier_names, principalDf_outlier], axis=1)
# calculating loading
loading_outlier = pca_outlier.components_.T * np.sqrt(pca_outlier.explained_variance_)
loading_df_outlier = pd.DataFrame(data=loading_outlier[0:, 0:], index=features_outlier,
columns=['PC' + str(i + 1) for i in range(loading_outlier.shape[1])])
Var_outlier = pca_outlier.explained_variance_ratio_
PC_df_outlier = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_outlier))],
columns=['Principal Component'])
Var_df_outlier = pd.DataFrame(data=Var_outlier, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_outlier = Var_df_outlier.cumsum()
Var_dff_outlier = pd.concat([PC_df_outlier, (Var_cumsum_outlier * 100)], axis=1)
data = Var_dff_outlier
elif outlier == 'No' and matrix_type == 'Covariance':
features1_covar = dff.columns
features_covar = list(features1_covar)
x = dff.loc[:, features_covar].values
pca_covar = PCA(n_components=len(features_covar))
principalComponents_covar = pca_covar.fit_transform(x)
principalDf_covar = pd.DataFrame(data=principalComponents_covar
, columns=['PC' + str(i + 1) for i in range(len(features_covar))])
finalDf_covar = pd.concat([df[[df.columns[0]]], principalDf_covar], axis=1)
loading_covar = pca_covar.components_.T * np.sqrt(pca_covar.explained_variance_)
loading_df_covar = pd.DataFrame(data=loading_covar[0:, 0:], index=features_covar,
columns=['PC' + str(i + 1) for i in range(loading_covar.shape[1])])
Var_covar = pca_covar.explained_variance_ratio_
PC_df_covar = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_covar))],
columns=['Principal Component'])
Var_df_covar = pd.DataFrame(data=Var_covar, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_covar = Var_df_covar.cumsum()
Var_dff_covar = pd.concat([PC_df_covar, (Var_cumsum_covar * 100)], axis=1)
data = Var_dff_covar
elif outlier == 'Yes' and matrix_type == 'Covariance':
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier_covar = outlier_dff.columns
features_outlier_covar = list(features1_outlier_covar)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier_covar].values
pca_outlier_covar = PCA(n_components=len(features_outlier_covar))
principalComponents_outlier_covar = pca_outlier_covar.fit_transform(x_outlier)
principalDf_outlier_covar = pd.DataFrame(data=principalComponents_outlier_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_outlier_covar))])
# combining principle components and target
finalDf_outlier_covar = pd.concat([outlier_names, principalDf_outlier_covar], axis=1)
# calculating loading
loading_outlier_covar = pca_outlier_covar.components_.T * np.sqrt(pca_outlier_covar.explained_variance_)
loading_df_outlier_covar = pd.DataFrame(data=loading_outlier_covar[0:, 0:], index=features_outlier_covar,
columns=['PC' + str(i + 1) for i in
range(loading_outlier_covar.shape[1])])
Var_outlier_covar = pca_outlier_covar.explained_variance_ratio_
PC_df_outlier_covar = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_outlier_covar))],
columns=['Principal Component'])
Var_df_outlier_covar = pd.DataFrame(data=Var_outlier_covar,
columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_outlier_covar = Var_df_outlier_covar.cumsum()
Var_dff_outlier_covar = pd.concat([PC_df_outlier_covar, (Var_cumsum_outlier_covar * 100)], axis=1)
data = Var_dff_outlier_covar
traces.append(go.Scatter(x=data['Principal Component'], y=data['Cumulative Proportion of Explained Variance'],
mode='lines', line=dict(color='Red')))
return {'data': traces,
'layout': go.Layout(title='<b>Cumulative Scree Plot Proportion of Explained Variance</b>',
titlefont=dict(family='Helvetica', size=16),
xaxis={'title': 'Principal Component',
'mirror': True,
'ticks': 'outside',
'showline': True,
'showspikes': True
}, yaxis={'title': 'Cumulative Explained Variance',
'mirror': True,
'ticks': 'outside',
'showline': True,
'showspikes': True,
'range': [0, 100]},
hovermode='closest', font=dict(family="Helvetica"), template="simple_white")
}
@app.callback(
Output('var-output-container-filter', 'children'),
[Input('outlier-value', 'value'),
Input('matrix-type-scree', 'value'),
Input('csv-data', 'data')],
)
def update_output(outlier, matrix_type, data):
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
if outlier == 'No' and matrix_type == 'Correlation':
features1 = dff.columns
features = list(features1)
x = dff.loc[:, features].values
# Separating out the target (if any)
# Standardizing the features to {mean, variance} = {0, 1}
x = StandardScaler().fit_transform(x)
pca = PCA(n_components=len(features))
principalComponents = pca.fit_transform(x)
principalDf = pd.DataFrame(data=principalComponents
, columns=['PC' + str(i + 1) for i in range(len(features))])
# combining principle components and target
finalDf = pd.concat([df[[df.columns[0]]], principalDf], axis=1)
dfff = finalDf
loading = pca.components_.T * np.sqrt(pca.explained_variance_)
loading_df = pd.DataFrame(data=loading[0:, 0:], index=features,
columns=['PC' + str(i + 1) for i in range(loading.shape[1])])
loading_dff = loading_df.T
Var = pca.explained_variance_ratio_
PC_df = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features))], columns=['Principal Component'])
PC_num = [float(i + 1) for i in range(len(features))]
Var_df = pd.DataFrame(data=Var, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum = Var_df.cumsum()
Var_dff = pd.concat([PC_df, (Var_cumsum * 100)], axis=1)
PC_interp = np.interp(70, Var_dff['Cumulative Proportion of Explained Variance'], PC_num)
PC_interp_int = math.ceil(PC_interp)
return "'{}' principal components (≥70% of explained variance) to avoid losing too much of your " \
"data. Note that there is no required threshold in order for PCA to be valid." \
" ".format(PC_interp_int)
elif outlier == 'Yes' and matrix_type == "Correlation":
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier].values
# Separating out the target (if any)
y_outlier = outlier_dff.loc[:, ].values
# Standardizing the features
x_outlier = StandardScaler().fit_transform(x_outlier)
pca_outlier = PCA(n_components=len(features_outlier))
principalComponents_outlier = pca_outlier.fit_transform(x_outlier)
principalDf_outlier = pd.DataFrame(data=principalComponents_outlier
, columns=['PC' + str(i + 1) for i in range(len(features_outlier))])
# combining principle components and target
finalDf_outlier = pd.concat([outlier_names, principalDf_outlier], axis=1)
dfff_outlier = finalDf_outlier
# calculating loading
loading_outlier = pca_outlier.components_.T * np.sqrt(pca_outlier.explained_variance_)
loading_df_outlier = pd.DataFrame(data=loading_outlier[0:, 0:], index=features_outlier,
columns=['PC' + str(i + 1) for i in range(loading_outlier.shape[1])])
loading_dff_outlier = loading_df_outlier.T
Var_outlier = pca_outlier.explained_variance_ratio_
PC_df_outlier = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_outlier))],
columns=['Principal Component'])
PC_num_outlier = [float(i + 1) for i in range(len(features_outlier))]
Var_df_outlier = pd.DataFrame(data=Var_outlier, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_outlier = Var_df_outlier.cumsum()
Var_dff_outlier = pd.concat([PC_df_outlier, (Var_cumsum_outlier * 100)], axis=1)
PC_interp_outlier = np.interp(70, Var_dff_outlier['Cumulative Proportion of Explained Variance'],
PC_num_outlier)
PC_interp_int_outlier = math.ceil(PC_interp_outlier)
return "'{}' principal components (≥70% of explained variance) to avoid losing too much of your " \
"data. Note that there is no required threshold in order for PCA to be valid." \
" ".format(PC_interp_int_outlier)
elif outlier == 'No' and matrix_type == 'Covariance':
features1_covar = dff.columns
features_covar = list(features1_covar)
x = dff.loc[:, features_covar].values
pca_covar = PCA(n_components=len(features_covar))
principalComponents_covar = pca_covar.fit_transform(x)
principalDf_covar = pd.DataFrame(data=principalComponents_covar
, columns=['PC' + str(i + 1) for i in range(len(features_covar))])
finalDf_covar = pd.concat([df[[df.columns[0]]], principalDf_covar], axis=1)
loading_covar = pca_covar.components_.T * np.sqrt(pca_covar.explained_variance_)
loading_df_covar = pd.DataFrame(data=loading_covar[0:, 0:], index=features_covar,
columns=['PC' + str(i + 1) for i in range(loading_covar.shape[1])])
Var_covar = pca_covar.explained_variance_ratio_
PC_df_covar = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_covar))],
columns=['Principal Component'])
PC_num_covar = [float(i + 1) for i in range(len(features_covar))]
Var_df_covar = pd.DataFrame(data=Var_covar, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_covar = Var_df_covar.cumsum()
Var_dff_covar = pd.concat([PC_df_covar, (Var_cumsum_covar * 100)], axis=1)
PC_interp_covar = np.interp(70, Var_dff_covar['Cumulative Proportion of Explained Variance'], PC_num_covar)
PC_interp_int_covar = math.ceil(PC_interp_covar)
return "'{}' principal components (≥70% of explained variance) to avoid losing too much of your " \
"data. Note that there is no required threshold in order for PCA to be valid." \
" ".format(PC_interp_int_covar)
elif outlier == 'Yes' and matrix_type == 'Covariance':
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier_covar = outlier_dff.columns
features_outlier_covar = list(features1_outlier_covar)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier_covar].values
pca_outlier_covar = PCA(n_components=len(features_outlier_covar))
principalComponents_outlier_covar = pca_outlier_covar.fit_transform(x_outlier)
principalDf_outlier_covar = pd.DataFrame(data=principalComponents_outlier_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_outlier_covar))])
# combining principle components and target
finalDf_outlier_covar = pd.concat([outlier_names, principalDf_outlier_covar], axis=1)
# calculating loading
loading_outlier_covar = pca_outlier_covar.components_.T * np.sqrt(pca_outlier_covar.explained_variance_)
loading_df_outlier_covar = pd.DataFrame(data=loading_outlier_covar[0:, 0:], index=features_outlier_covar,
columns=['PC' + str(i + 1) for i in
range(loading_outlier_covar.shape[1])])
Var_outlier_covar = pca_outlier_covar.explained_variance_ratio_
PC_df_outlier_covar = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_outlier_covar))],
columns=['Principal Component'])
PC_num_outlier_covar = [float(i + 1) for i in range(len(features_outlier_covar))]
Var_df_outlier_covar = pd.DataFrame(data=Var_outlier_covar,
columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_outlier_covar = Var_df_outlier_covar.cumsum()
Var_dff_outlier_covar = pd.concat([PC_df_outlier_covar, (Var_cumsum_outlier_covar * 100)], axis=1)
PC_interp_outlier = np.interp(70, Var_dff_outlier_covar['Cumulative Proportion of Explained Variance'],
PC_num_outlier_covar)
PC_interp_int_outlier = math.ceil(PC_interp_outlier)
return "'{}' principal components (≥70% of explained variance) to avoid losing too much of your " \
"data. Note that there is no required threshold in order for PCA to be valid." \
" ".format(PC_interp_int_outlier)
@app.callback(Output('PC-Eigen-plot', 'figure'),
[Input('outlier-value', 'value'),
Input('matrix-type-scree', 'value'),
Input('csv-data', 'data')]
)
def update_graph_stat(outlier, matrix_type, data):
traces = []
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
if outlier == 'No' and matrix_type == "Correlation":
features1 = dff.columns
features = list(features1)
x = dff.loc[:, features].values
# Separating out the target (if any)
y = dff.loc[:, ].values
# Standardizing the features to {mean, variance} = {0, 1}
x = StandardScaler().fit_transform(x)
pca = PCA(n_components=len(features))
principalComponents = pca.fit_transform(x)
principalDf = pd.DataFrame(data=principalComponents
, columns=['PC' + str(i + 1) for i in range(len(features))])
        # combining principal components and target
finalDf = pd.concat([df[[df.columns[0]]], principalDf], axis=1)
dfff = finalDf
loading = pca.components_.T * np.sqrt(pca.explained_variance_)
loading_df = pd.DataFrame(data=loading[0:, 0:], index=features,
columns=['PC' + str(i + 1) for i in range(loading.shape[1])])
loading_dff = loading_df.T
Var = pca.explained_variance_ratio_
PC_df = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features))], columns=['Principal Component'])
PC_num = [float(i + 1) for i in range(len(features))]
Var_df = pd.DataFrame(data=Var, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum = Var_df.cumsum()
Var_dff = pd.concat([PC_df, (Var_cumsum * 100)], axis=1)
PC_interp = np.interp(70, Var_dff['Cumulative Proportion of Explained Variance'], PC_num)
PC_interp_int = math.ceil(PC_interp)
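        # np.interp returns the (fractional) component count at which the cumulative explained
        # variance first reaches 70%; math.ceil rounds it up to a whole number of components.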
eigenvalues = pca.explained_variance_
Eigen_df = pd.DataFrame(data=eigenvalues, columns=['Eigenvalues'])
Eigen_dff = pd.concat([PC_df, Eigen_df], axis=1)
data = Eigen_dff
elif outlier == 'Yes' and matrix_type == "Correlation":
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier].values
# Separating out the target (if any)
y_outlier = outlier_dff.loc[:, ].values
# Standardizing the features
x_outlier = StandardScaler().fit_transform(x_outlier)
pca_outlier = PCA(n_components=len(features_outlier))
principalComponents_outlier = pca_outlier.fit_transform(x_outlier)
principalDf_outlier = pd.DataFrame(data=principalComponents_outlier
, columns=['PC' + str(i + 1) for i in range(len(features_outlier))])
        # combining principal components and target
finalDf_outlier = pd.concat([outlier_names, principalDf_outlier], axis=1)
dfff_outlier = finalDf_outlier
# calculating loading
loading_outlier = pca_outlier.components_.T * np.sqrt(pca_outlier.explained_variance_)
loading_df_outlier = pd.DataFrame(data=loading_outlier[0:, 0:], index=features_outlier,
columns=['PC' + str(i + 1) for i in range(loading_outlier.shape[1])])
loading_dff_outlier = loading_df_outlier.T
Var_outlier = pca_outlier.explained_variance_ratio_
PC_df_outlier = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_outlier))],
columns=['Principal Component'])
PC_num_outlier = [float(i + 1) for i in range(len(features_outlier))]
Var_df_outlier = pd.DataFrame(data=Var_outlier, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_outlier = Var_df_outlier.cumsum()
Var_dff_outlier = pd.concat([PC_df_outlier, (Var_cumsum_outlier * 100)], axis=1)
PC_interp_outlier = np.interp(70, Var_dff_outlier['Cumulative Proportion of Explained Variance'],
PC_num_outlier)
PC_interp_int_outlier = math.ceil(PC_interp_outlier)
eigenvalues_outlier = pca_outlier.explained_variance_
Eigen_df_outlier = pd.DataFrame(data=eigenvalues_outlier, columns=['Eigenvalues'])
Eigen_dff_outlier = pd.concat([PC_df_outlier, Eigen_df_outlier], axis=1)
data = Eigen_dff_outlier
elif outlier == 'No' and matrix_type == "Covariance":
features1_covar = dff.columns
features_covar = list(features1_covar)
x = dff.loc[:, features_covar].values
pca_covar = PCA(n_components=len(features_covar))
principalComponents_covar = pca_covar.fit_transform(x)
principalDf_covar = pd.DataFrame(data=principalComponents_covar
, columns=['PC' + str(i + 1) for i in range(len(features_covar))])
finalDf_covar = pd.concat([df[[df.columns[0]]], principalDf_covar], axis=1)
loading_covar = pca_covar.components_.T * np.sqrt(pca_covar.explained_variance_)
loading_df_covar = pd.DataFrame(data=loading_covar[0:, 0:], index=features_covar,
columns=['PC' + str(i + 1) for i in range(loading_covar.shape[1])])
Var_covar = pca_covar.explained_variance_ratio_
PC_df_covar = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_covar))],
columns=['Principal Component'])
PC_num_covar = [float(i + 1) for i in range(len(features_covar))]
Var_df_covar = pd.DataFrame(data=Var_covar, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_covar = Var_df_covar.cumsum()
Var_dff_covar = pd.concat([PC_df_covar, (Var_cumsum_covar * 100)], axis=1)
PC_interp_covar = np.interp(70, Var_dff_covar['Cumulative Proportion of Explained Variance'], PC_num_covar)
PC_interp_int_covar = math.ceil(PC_interp_covar)
eigenvalues_covar = pca_covar.explained_variance_
Eigen_df_covar = pd.DataFrame(data=eigenvalues_covar, columns=['Eigenvalues'])
Eigen_dff_covar = pd.concat([PC_df_covar, Eigen_df_covar], axis=1)
data = Eigen_dff_covar
elif outlier == 'Yes' and matrix_type == 'Covariance':
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier_covar = outlier_dff.columns
features_outlier_covar = list(features1_outlier_covar)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier_covar].values
pca_outlier_covar = PCA(n_components=len(features_outlier_covar))
principalComponents_outlier_covar = pca_outlier_covar.fit_transform(x_outlier)
principalDf_outlier_covar = pd.DataFrame(data=principalComponents_outlier_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_outlier_covar))])
        # combining principal components and target
finalDf_outlier_covar = pd.concat([outlier_names, principalDf_outlier_covar], axis=1)
# calculating loading
loading_outlier_covar = pca_outlier_covar.components_.T * np.sqrt(pca_outlier_covar.explained_variance_)
loading_df_outlier_covar = pd.DataFrame(data=loading_outlier_covar[0:, 0:], index=features_outlier_covar,
columns=['PC' + str(i + 1) for i in
range(loading_outlier_covar.shape[1])])
Var_outlier_covar = pca_outlier_covar.explained_variance_ratio_
PC_df_outlier_covar = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_outlier_covar))],
columns=['Principal Component'])
PC_num_outlier_covar = [float(i + 1) for i in range(len(features_outlier_covar))]
Var_df_outlier_covar = pd.DataFrame(data=Var_outlier_covar,
columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_outlier_covar = Var_df_outlier_covar.cumsum()
Var_dff_outlier_covar = pd.concat([PC_df_outlier_covar, (Var_cumsum_outlier_covar * 100)], axis=1)
PC_interp_outlier = np.interp(70, Var_dff_outlier_covar['Cumulative Proportion of Explained Variance'],
PC_num_outlier_covar)
PC_interp_int_outlier = math.ceil(PC_interp_outlier)
eigenvalues_outlier_covar = pca_outlier_covar.explained_variance_
Eigen_df_outlier_covar = pd.DataFrame(data=eigenvalues_outlier_covar, columns=['Eigenvalues'])
Eigen_dff_outlier_covar = pd.concat([PC_df_outlier_covar, Eigen_df_outlier_covar], axis=1)
data = Eigen_dff_outlier_covar
traces.append(go.Scatter(x=data['Principal Component'], y=data['Eigenvalues'], mode='lines'))
return {'data': traces,
'layout': go.Layout(title='<b>Scree Plot Eigenvalues</b>', xaxis={'title': 'Principal Component',
'mirror': True,
'ticks': 'outside',
'showline': True,
'showspikes': True},
titlefont=dict(family='Helvetica', size=16),
yaxis={'title': 'Eigenvalues', 'mirror': True,
'ticks': 'outside',
'showline': True,
'showspikes': True}, hovermode='closest',
font=dict(family="Helvetica"), template="simple_white", )
}
def round_up(n, decimals=0):
multiplier = 10 ** decimals
return math.ceil(n * multiplier) / multiplier
def round_down(n, decimals=0):
multiplier = 10 ** decimals
return math.floor(n * multiplier) / multiplier
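# Quick sanity check of the two rounding helpers above (illustrative values only):
# round_up(0.1234, 2)   -> 0.13
# round_down(0.1234, 2) -> 0.12
# The heatmap callback below uses them to report the rounded colour range of the loading matrix.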
@app.callback([Output('PC-feature-heatmap', 'figure'),
Output('color-range-container', 'children')],
[
Input('PC-feature-outlier-value', 'value'),
Input('colorscale', 'value'),
Input("matrix-type-heatmap", "value"),
Input('csv-data', 'data')]
)
def update_graph_stat(outlier, colorscale, matrix_type, data):
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
traces = []
# INCLUDING OUTLIERS
features1 = dff.columns
features = list(features1)
x = dff.loc[:, features].values
# Separating out the target (if any)
y = dff.loc[:, ].values
# Standardizing the features to {mean, variance} = {0, 1}
x = StandardScaler().fit_transform(x)
pca = PCA(n_components=len(features))
principalComponents = pca.fit_transform(x)
principalDf = pd.DataFrame(data=principalComponents
, columns=['PC' + str(i + 1) for i in range(len(features))])
    # combining principal components and target
    finalDf = pd.concat([df[[df.columns[0]]], principalDf], axis=1)
    dfff = finalDf
    # explained variance of the two principal components
    # print(pca.explained_variance_ratio_)
    # Explained variance tells us how much information (variance) can be attributed to each of the principal components
    # loading of each feature in principal components
loading = pca.components_.T * np.sqrt(pca.explained_variance_)
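    # Loadings scale each eigenvector by the square root of its eigenvalue; with standardized
    # inputs each entry approximates the correlation between a feature and a principal component.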
loading_df = pd.DataFrame(data=loading[0:, 0:], index=features,
columns=['PC' + str(i + 1) for i in range(loading.shape[1])])
loading_dff = loading_df.T
# OUTLIERS REMOVED
z_scores_hm = scipy.stats.zscore(dff)
abs_z_scores_hm = np.abs(z_scores_hm)
filtered_entries_hm = (abs_z_scores_hm < 3).all(axis=1)
outlier_dff_hm = dff[filtered_entries_hm]
features1_outlier_hm = outlier_dff_hm.columns
features_outlier2 = list(features1_outlier_hm)
outlier_names1_hm = df[filtered_entries_hm]
outlier_names_hm = outlier_names1_hm.iloc[:, 0]
x_outlier_hm = outlier_dff_hm.loc[:, features_outlier2].values
# Separating out the target (if any)
# Standardizing the features
x_outlier_hm = StandardScaler().fit_transform(x_outlier_hm)
pca_outlier_hm = PCA(n_components=len(features_outlier2))
principalComponents_outlier_hm = pca_outlier_hm.fit_transform(x_outlier_hm)
principalDf_outlier_hm = pd.DataFrame(data=principalComponents_outlier_hm
, columns=['PC' + str(i + 1) for i in range(len(features_outlier2))])
    # combining principal components and target
finalDf_outlier_hm = pd.concat([outlier_names_hm, principalDf_outlier_hm], axis=1)
dfff_outlier_hm = finalDf_outlier_hm
# calculating loading
loading_outlier_hm = pca_outlier_hm.components_.T * np.sqrt(pca_outlier_hm.explained_variance_)
loading_df_outlier_hm = pd.DataFrame(data=loading_outlier_hm[0:, 0:], index=features_outlier2,
columns=['PC' + str(i + 1) for i in range(loading_outlier_hm.shape[1])])
loading_dff_outlier_hm = loading_df_outlier_hm.T
# COVAR MATRIX
features1_covar = dff.columns
features_covar = list(features1_covar)
x = dff.loc[:, features_covar].values
pca_covar = PCA(n_components=len(features_covar))
principalComponents_covar = pca_covar.fit_transform(x)
principalDf_covar = pd.DataFrame(data=principalComponents_covar
, columns=['PC' + str(i + 1) for i in range(len(features_covar))])
finalDf_covar = pd.concat([df[[df.columns[0]]], principalDf_covar], axis=1)
loading_covar = pca_covar.components_.T * np.sqrt(pca_covar.explained_variance_)
loading_df_covar = pd.DataFrame(data=loading_covar[0:, 0:], index=features_covar,
columns=['PC' + str(i + 1) for i in range(loading_covar.shape[1])])
loading_dff_covar = loading_df_covar.T
# COVAR MATRIX OUTLIERS REMOVED
if outlier == 'No' and matrix_type == "Correlation":
data = loading_dff
elif outlier == 'Yes' and matrix_type == "Correlation":
data = loading_dff_outlier_hm
elif outlier == 'No' and matrix_type == "Covariance":
data = loading_dff_covar
elif outlier == "Yes" and matrix_type == "Covariance":
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier_covar = outlier_dff.columns
features_outlier_covar = list(features1_outlier_covar)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier_covar].values
pca_outlier_covar = PCA(n_components=len(features_outlier_covar))
principalComponents_outlier_covar = pca_outlier_covar.fit_transform(x_outlier)
principalDf_outlier_covar = pd.DataFrame(data=principalComponents_outlier_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_outlier_covar))])
        # combining principal components and target
finalDf_outlier_covar = pd.concat([outlier_names, principalDf_outlier_covar], axis=1)
# calculating loading
loading_outlier_covar = pca_outlier_covar.components_.T * np.sqrt(pca_outlier_covar.explained_variance_)
loading_df_outlier_covar = pd.DataFrame(data=loading_outlier_covar[0:, 0:], index=features_outlier_covar,
columns=['PC' + str(i + 1) for i in
range(loading_outlier_covar.shape[1])])
loading_dff_outlier_covar = loading_df_outlier_covar.T
data = loading_dff_outlier_covar
    size_range = [round_up(data.values.min(), 2), round_down(data.values.max(), 2)]
traces.append(go.Heatmap(
z=data, x=features_outlier2, y=['PC' + str(i + 1) for i in range(loading_outlier_hm.shape[1])],
colorscale="Viridis" if colorscale == 'Viridis' else "Plasma",
        # each cell is the loading, i.e. the correlation between a feature and a principal component
colorbar={"title": "Loading",
# 'tickvals': [round_up(data.values.min(), 2),
# round_up((data.values.min() + (data.values.max() + data.values.min())/2)/2, 2),
# round_down((data.values.max() + data.values.min())/2,2),
# round_down((data.values.max() + (data.values.max() + data.values.min())/2)/2, 2),
# round_down(data.values.max(),2), ]
}
))
return {'data': traces,
'layout': go.Layout(title=dict(text='<b>PC and Feature Correlation Analysis</b>'),
xaxis=dict(title_text='Features', title_standoff=50),
titlefont=dict(family='Helvetica', size=16),
hovermode='closest', margin={'b': 110, 't': 50, 'l': 75},
font=dict(family="Helvetica", size=11),
annotations=[
dict(x=-0.16, y=0.5, showarrow=False, text="Principal Components",
xref='paper', yref='paper', textangle=-90,
font=dict(size=12))]
),
}, '{}'.format(size_range)
@app.callback(Output('feature-heatmap', 'figure'),
[
Input('PC-feature-outlier-value', 'value'),
Input('colorscale', 'value'),
Input('csv-data', 'data')])
def update_graph_stat(outlier, colorscale, data):
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
traces = []
if outlier == 'No':
features1 = dff.columns
features = list(features1)
# correlation coefficient and coefficient of determination
correlation_dff = dff.corr(method='pearson', )
r2_dff = correlation_dff * correlation_dff
data = r2_dff
feat = features
elif outlier == 'Yes':
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
# correlation coefficient and coefficient of determination
correlation_dff_outlier = outlier_dff.corr(method='pearson', )
r2_dff_outlier = correlation_dff_outlier * correlation_dff_outlier
data = r2_dff_outlier
feat = features_outlier
traces.append(go.Heatmap(
z=data, x=feat, y=feat, colorscale="Viridis" if colorscale == 'Viridis' else "Plasma",
        # each cell is the coefficient of determination (R²) between a pair of features
colorbar={"title": "R²", 'tickvals': [0, 0.2, 0.4, 0.6, 0.8, 1]}))
return {'data': traces,
'layout': go.Layout(title=dict(text='<b>Feature Correlation Analysis</b>', y=0.97, x=0.6),
xaxis={},
titlefont=dict(family='Helvetica', size=16),
yaxis={},
hovermode='closest', margin={'b': 110, 't': 50, 'l': 180, 'r': 50},
font=dict(family="Helvetica", size=11)),
}
@app.callback(Output('feature-input', 'options'),
[Input('all-custom-choice', 'value'),
Input('csv-data', 'data')])
def activate_input(all_custom, data):
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
if all_custom == 'All':
options = []
elif all_custom == 'Custom':
options = [{'label': i, 'value': i} for i in dff.columns]
return options
@app.callback(Output('color-scale-scores', 'options'),
[Input('feature-input', 'value'),
Input('radio-target-item', 'value'),
Input('outlier-value-biplot', 'value'),
Input('customvar-graph-update', 'value'),
Input('matrix-type-biplot', 'value'),
Input('csv-data', 'data')], )
def populate_color_dropdown(input, target, outlier, graph_type, matrix_type, data):
if not data:
return dash.no_update
if input is None:
raise dash.exceptions.PreventUpdate
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
dff_target = dff[input]
z_scores_target = scipy.stats.zscore(dff_target)
abs_z_scores_target = np.abs(z_scores_target)
filtered_entries_target = (abs_z_scores_target < 3).all(axis=1)
dff_target_outlier = dff_target[filtered_entries_target]
if target == 'Yes' and outlier == 'Yes' and matrix_type == "Correlation" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target_outlier.columns]
elif target == 'Yes' and outlier == 'Yes' and matrix_type == "Covariance" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target_outlier.columns]
elif target == 'Yes' and outlier == 'No' and matrix_type == "Correlation" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target.columns]
elif target == 'Yes' and outlier == 'No' and matrix_type == "Covariance" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target.columns]
elif target == 'No' or graph_type == 'Loadings':
options = []
return options
@app.callback(Output('size-scale-scores', 'options'),
[Input('feature-input', 'value'),
Input('radio-target-item-second', 'value'),
Input('outlier-value-biplot', 'value'),
Input('customvar-graph-update', 'value'),
Input('matrix-type-biplot', 'value'),
Input('csv-data', 'data')])
def populate_color_dropdown(input, target, outlier, graph_type, matrix_type, data):
if not data:
return dash.no_update
if input is None:
raise dash.exceptions.PreventUpdate
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
dff_target = dff[input]
z_scores_target = scipy.stats.zscore(dff_target)
abs_z_scores_target = np.abs(z_scores_target)
filtered_entries_target = (abs_z_scores_target < 3).all(axis=1)
dff_target_outlier = dff_target[filtered_entries_target]
if target == 'Yes' and outlier == 'Yes' and matrix_type == "Correlation" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target_outlier.columns]
elif target == 'Yes' and outlier == 'Yes' and matrix_type == "Covariance" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target_outlier.columns]
elif target == 'Yes' and outlier == 'No' and matrix_type == "Correlation" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target.columns]
elif target == 'Yes' and outlier == 'No' and matrix_type == "Covariance" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target.columns]
elif target == 'No' or graph_type == 'Loadings':
options = []
return options
# resume covar matrix...
@app.callback(Output('biplot', 'figure'),
[
Input('outlier-value-biplot', 'value'),
Input('feature-input', 'value'),
Input('customvar-graph-update', 'value'),
Input('color-scale-scores', 'value'),
Input('radio-target-item', 'value'),
Input('size-scale-scores', 'value'),
Input('radio-target-item-second', 'value'),
Input('all-custom-choice', 'value'),
Input('matrix-type-biplot', 'value'),
Input('csv-data', 'data')
]
)
def update_graph_custom(outlier, input, graph_update, color, target, size, target2, all_custom, matrix_type, data):
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
features1 = dff.columns
features = list(features1)
if all_custom == 'All':
# x_scale = MinMaxScaler(feature_range=(0, 1), copy=True).fit_transform(x_scale)
# OUTLIER DATA
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
# def rescale(data, new_min=0, new_max=1):
# """Rescale the data to be within the range [new_min, new_max]"""
# return (data - data.min()) / (data.max() - data.min()) * (new_max - new_min) + new_min
# ORIGINAL DATA WITH OUTLIERS
x_scale = dff.loc[:, features].values
y_scale = dff.loc[:, ].values
x_scale = StandardScaler().fit_transform(x_scale)
# x_scale = rescale(x_scale, new_min=0, new_max=1)
pca_scale = PCA(n_components=len(features))
principalComponents_scale = pca_scale.fit_transform(x_scale)
principalDf_scale = pd.DataFrame(data=principalComponents_scale
, columns=['PC' + str(i + 1) for i in range(len(features))])
        # combining principal components and target
finalDf_scale = pd.concat([df[[df.columns[0]]], principalDf_scale], axis=1)
dfff_scale = finalDf_scale.fillna(0)
Var_scale = pca_scale.explained_variance_ratio_
# calculating loading vector plot
loading_scale = pca_scale.components_.T * np.sqrt(pca_scale.explained_variance_)
loading_scale_df = pd.DataFrame(data=loading_scale[:, 0:2],
columns=["PC1", "PC2"])
line_group_scale_df = pd.DataFrame(data=features, columns=['line_group'])
loading_scale_dff = pd.concat([loading_scale_df, line_group_scale_df], axis=1)
a = (len(features), 2)
zero_scale = np.zeros(a)
zero_scale_df = pd.DataFrame(data=zero_scale, columns=["PC1", "PC2"])
zero_scale_dff = pd.concat([zero_scale_df, line_group_scale_df], axis=1)
loading_scale_line_graph = | pd.concat([loading_scale_dff, zero_scale_dff], axis=0) | pandas.concat |
#!/usr/bin/env python
import json
import os
import sys
import pandas as pd
import seaborn as sns
import sklearn.metrics
import report_resources
import matplotlib.pyplot as plt
METRICS_COLS = ('Release', 'Model', 'Metric', 'Score')
def main(drf_results_dir: str, crnn_results_dir: str, output_file: str, model_name: str):
report_resources.ensure_output_dir_exists()
drf_split_metrics = _read_drf_split_metrics(drf_results_dir)
crnn_split_metrics = _read_crnn_split_metrics(crnn_results_dir, model_name)
_write_pr_plot(pd.concat([crnn_split_metrics, drf_split_metrics]), output_file)
def _write_pr_plot(metrics, output_file: str):
fig, ax = plt.subplots(nrows=1, ncols=1)
sns.lineplot(data=metrics,
x='Release',
y='Score',
hue='Metric',
style='Model',
ci=95,
ax=ax)
fig.savefig(os.path.join(output_file), bbox_inches='tight')
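# A minimal command-line entry point might look like the sketch below (hypothetical argument
# order; the original script may wire this up differently):
# if __name__ == '__main__':
#     main(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4])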
def _read_drf_split_metrics(results_dir: str):
results = | pd.DataFrame([], columns=METRICS_COLS) | pandas.DataFrame |
#!/usr/bin/env python
import os
import sys
import h5py
import logging
import traceback
import warnings
import numpy as np
import scipy.cluster.hierarchy
import scipy.spatial.distance as ssd
from collections import defaultdict
import inStrain.SNVprofile
import inStrain.readComparer
import inStrain.profile.profile_utilities
import matplotlib
matplotlib.use('Agg')
import matplotlib.ticker as ticker
from matplotlib import pyplot as plt
import matplotlib.patches as mpatches
from matplotlib.backends.backend_pdf import PdfPages
import pandas as pd
import seaborn as sns
import drep.d_cluster
import drep.d_analyze
matplotlib.rcParams['pdf.fonttype'] = 42
def mm_plot(db, left_val='breadth', right_val='coverage', title='',\
minANI=0.9):
'''
The input db for this is "mm_genome_info" from "makeGenomeWide" in genomeUtilities.py
'''
db = db.sort_values('ANI_level')
sns.set_style('white')
# breadth
fig, ax1 = plt.subplots()
ax1.plot(db['ANI_level'], db[left_val], ls='-', color='blue')
if left_val == 'breadth':
ax1.plot(db['ANI_level'], estimate_breadth(db['coverage']), ls='--', color='lightblue')
ax1.set_ylabel(left_val, color='blue')
ax1.set_xlabel('Minimum read ANI level')
ax1.set_ylim(0,1)
# coverage
ax2 = ax1.twinx()
ax2.plot(db['ANI_level'], db[right_val], ls='-', color='red')
ax2.set_ylabel(right_val, color='red')
ax2.set_ylim(0,)
# asthetics
plt.xlim(1, max(minANI, db['ANI_level'].min()))
plt.title(title)
def estimate_breadth(coverage):
'''
Estimate breadth based on coverage
    Based on the function breadth = -1.000 * e^(-0.883 * coverage) + 1.000
'''
return (-1) * np.exp(-1 * ((0.883) * coverage)) + 1
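# Quick numerical check of the model above (rounded): estimate_breadth(1) -> ~0.59,
# estimate_breadth(5) -> ~0.99, i.e. breadth saturates toward 1 beyond roughly 5x coverage.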
# def genome_wide_plot(IS_locs, scaffolds, what='coverage', ANI_levels=[100, 98, 0], window_len=1000):
# '''
# Arguments:
# IS_locs = list of IS objects
# scaffolds = list of scaffolds to profile and plot (in order)
# Keyword arguments:
# ANI_levels = list of ANI levesl to plot
# window_len = length of each window to profile
# '''
# if what == 'coverage':
# item = 'covT'
# elif what == 'clonality':
# item = 'clonT'
# # Load coverages for the scaffolds at each ANI level
# dbs = []
# for IS_loc in IS_locs:
# IS = inStrain.SNVprofile.SNVprofile(IS_loc)
# if what in ['coverage', 'clonality']:
# wdb, breaks = load_windowed_coverage(IS, scaffolds, window_len=window_len, ANI_levels=ANI_levels, item=item)
# elif what in ['linkage']:
# wdb, breaks = load_windowed_linkage(IS, scaffolds, window_len=window_len, ANI_levels=ANI_levels)
# elif what in ['snp_density']:
# wdb, breaks = load_windowed_SNP_density(IS, scaffolds, window_len=window_len, ANI_levels=ANI_levels)
# wdb['name'] = os.path.basename(IS_loc)
# dbs.append(wdb)
# Wdb = pd.concat(dbs, sort=True)
# # Make the plot
# multiple_coverage_plot(Wdb, breaks, thing=what)
# return Wdb, breaks
def load_windowed_metrics(scaffolds, s2l, rLen, metrics=None, window_len=None, ANI_levels=[0, 100],
min_scaff_len=0, report_midpoints=False, covTs=False, clonTs=False,
raw_linkage_table=False, cumulative_snv_table=False):
if metrics is None:
metrics = ['coverage', 'nucl_diversity', 'linkage', 'snp_density']
if type(metrics) != type([]):
print("Metrics must be a list")
return
# Figure out the MMs needed
#rLen = IS.get_read_length()
mms = [_get_mm(None, ANI, rLen=rLen) for ANI in ANI_levels]
# Sort the scaffolds
#s2l = IS.get('scaffold2length')
scaffolds = sorted(scaffolds, key=s2l.get, reverse=True)
if min_scaff_len > 0:
scaffolds = [s for s in scaffolds if s2l[s] >= min_scaff_len]
# Figure out the window length
if window_len == None:
window_len = int(sum([s2l[s] for s in scaffolds]) / 100)
else:
window_len = int(window_len)
# Calculate the breaks
breaks = []
midpoints = {}
tally = 0
for scaffold in scaffolds:
midpoints[scaffold] = tally + int(s2l[scaffold] / 2)
tally += s2l[scaffold]
breaks.append(tally)
dbs = []
if 'coverage' in metrics:
if covTs == False:
logging.error("need covTs for coverage")
raise Exception
cdb = load_windowed_coverage_or_clonality('coverage', covTs, scaffolds, window_len, mms, ANI_levels, s2l)
cdb['metric'] = 'coverage'
dbs.append(cdb)
# if 'clonality' in metrics:
# cdb = load_windowed_coverage_or_clonality(IS, 'clonality', scaffolds, window_len, mms, ANI_levels, s2l)
# cdb['metric'] = 'clonality'
# dbs.append(cdb)
if 'nucl_diversity' in metrics:
if clonTs == False:
logging.error("need clonTs for microdiversity")
raise Exception
cdb = load_windowed_coverage_or_clonality('nucl_diversity', clonTs, scaffolds, window_len, mms, ANI_levels, s2l)
cdb['metric'] = 'nucl_diversity'
dbs.append(cdb)
if 'linkage' in metrics:
if raw_linkage_table is False:
logging.error("need raw_linkage_table for linkage")
raise Exception
cdb = load_windowed_linkage(raw_linkage_table, scaffolds, window_len, mms, ANI_levels, s2l)
cdb['metric'] = 'linkage'
dbs.append(cdb)
if 'snp_density' in metrics:
if cumulative_snv_table is False:
logging.error("need cumulative_snv_table for snp_density")
raise Exception
if len(cumulative_snv_table) > 0:
cdb = load_windowed_SNP_density(cumulative_snv_table, scaffolds, window_len, mms, ANI_levels, s2l)
cdb['metric'] = 'snp_density'
dbs.append(cdb)
if len(dbs) > 0:
Wdb = pd.concat(dbs, sort=True)
Wdb = Wdb.rename(columns={'avg_cov':'value'})
else:
Wdb = pd.DataFrame()
# Add blanks at the breaks
table = defaultdict(list)
for mm, ani in zip(mms, ANI_levels):
for metric in Wdb['metric'].unique():
for bre in breaks:
table['scaffold'].append('break')
table['mm'].append(mm)
table['ANI'].append(ani)
                table['adjusted_start'].append(bre) # The minus one makes sure it doesn't split things it shouldn't
table['adjusted_end'].append(bre)
table['value'].append(np.nan)
table['metric'].append(metric)
bdb = pd.DataFrame(table)
Wdb = pd.concat([Wdb, bdb], sort=False)
if len(Wdb) > 0:
Wdb.loc[:,'midpoint'] = [np.mean([x, y]) for x, y in zip(Wdb['adjusted_start'], Wdb['adjusted_end'])]
Wdb = Wdb.sort_values(['metric', 'mm', 'midpoint', 'scaffold'])
if report_midpoints:
return Wdb, breaks, midpoints
else:
return Wdb, breaks
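# Example call (hypothetical inputs; covTs, clonTs, raw_linkage_table and cumulative_snv_table
# would normally come from an inStrain SNVprofile object):
# Wdb, breaks = load_windowed_metrics(scaffolds, s2l, rLen=150,
#                                     metrics=['coverage', 'snp_density'],
#                                     window_len=1000, ANI_levels=[100, 98],
#                                     covTs=covTs, cumulative_snv_table=snv_table)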
def load_windowed_coverage_or_clonality(thing, covTs, scaffolds, window_len, mms, ANI_levels, s2l):
'''
Get the windowed coverage
Pass in a clonTs for microdiversity and covTs for coverage
'''
if thing == 'coverage':
item = 'covT'
elif thing == 'nucl_diversity':
item = 'clonT'
else:
print("idk what {0} is".format(thing))
return
# Get the covTs
#covTs = IS.get(item, scaffolds=scaffolds)
# Make the windows
dbs = []
tally = 0
breaks = []
for scaffold in scaffolds:
if scaffold not in covTs:
tally += s2l[scaffold]
breaks.append(tally)
continue
else:
covT = covTs[scaffold]
for mm, ani in zip(mms, ANI_levels):
if item == 'covT':
cov = inStrain.profile.profile_utilities.mm_counts_to_counts_shrunk(covT, mm)
if len(cov) == 0:
continue
db = _gen_windowed_cov(cov, window_len, sLen=s2l[scaffold])
elif item == 'clonT':
cov = _get_basewise_clons3(covT, mm)
if len(cov) == 0:
continue
db = _gen_windowed_cov(cov, window_len, sLen=s2l[scaffold], full_len=False)
db.loc[:,'avg_cov'] = [1 - x if x == x else x for x in db['avg_cov']]
db['scaffold'] = scaffold
db['mm'] = mm
db['ANI'] = ani
db.loc[:,'adjusted_start'] = db['start'] + tally
db.loc[:,'adjusted_end'] = db['end'] + tally
dbs.append(db)
tally += s2l[scaffold]
breaks.append(tally)
if len(dbs) > 0:
Wdb = pd.concat(dbs)
else:
Wdb = pd.DataFrame()
return Wdb#, breaks
def load_windowed_linkage(Ldb, scaffolds, window_len, mms, ANI_levels, s2l, on='r2'):
# Get the linkage table
#Ldb = IS.get('raw_linkage_table')
Ldb = Ldb[Ldb['scaffold'].isin(scaffolds)].sort_values('mm')
got_scaffolds = set(Ldb['scaffold'].unique())
# Make the windows
dbs = []
tally = 0
breaks = []
for scaffold in scaffolds:
if scaffold not in got_scaffolds:
tally += s2l[scaffold]
breaks.append(tally)
continue
else:
ldb = Ldb[Ldb['scaffold'] == scaffold]
for mm, ani in zip(mms, ANI_levels):
db = ldb[ldb['mm'] <= int(mm)].drop_duplicates(subset=['scaffold', 'position_A', 'position_B'], keep='last')
cov = db.set_index('position_A')[on].sort_index()
db = _gen_windowed_cov(cov, window_len, sLen=s2l[scaffold], full_len=False)
db['scaffold'] = scaffold
db['mm'] = mm
db['ANI'] = ani
db['adjusted_start'] = db['start'] + tally
db['adjusted_end'] = db['end'] + tally
dbs.append(db)
tally += s2l[scaffold]
breaks.append(tally)
if len(dbs) > 0:
Wdb = pd.concat(dbs)
else:
Wdb = pd.DataFrame()
return Wdb
def load_windowed_SNP_density(Ldb, scaffolds, window_len, mms, ANI_levels, s2l):
# Get the table
#Ldb = IS.get('cumulative_snv_table')
Ldb = Ldb[Ldb['scaffold'].isin(scaffolds)].sort_values('mm')
got_scaffolds = list(Ldb['scaffold'].unique())
# Make the windows
dbs = []
tally = 0
breaks = []
for scaffold in scaffolds:
if scaffold not in got_scaffolds:
tally += s2l[scaffold]
breaks.append(tally)
continue
else:
ldb = Ldb[Ldb['scaffold'] == scaffold]
for mm, ani in zip(mms, ANI_levels):
db = ldb[ldb['mm'] <= int(mm)].drop_duplicates(subset=['scaffold', 'position'], keep='last')
cov = db.set_index('position')['ref_base'].sort_index()
db = _gen_windowed_cov(cov, window_len, sLen=s2l[scaffold], full_len='count')
db['scaffold'] = scaffold
db['mm'] = mm
db['ANI'] = ani
db['adjusted_start'] = db['start'] + tally
db['adjusted_end'] = db['end'] + tally
dbs.append(db)
tally += s2l[scaffold]
breaks.append(tally)
if len(dbs) > 0:
Wdb = | pd.concat(dbs) | pandas.concat |
"""
Library of standardized plotting functions for basic plot formats
"""
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import pandas as pd
import xarray as xr
from scipy.interpolate import interp1d
from scipy.signal import welch
# Standard field labels
# - default: e.g., "Km/s"
# - all superscript: e.g., "K m s^{-1}"
fieldlabels_default_units = {
'wspd': r'Wind speed [m/s]',
'wdir': r'Wind direction [$^\circ$]',
'u': r'u [m/s]',
'v': r'v [m/s]',
'w': r'Vertical wind speed [m/s]',
'theta': r'$\theta$ [K]',
'thetav': r'$\theta_v$ [K]',
'uu': r'$\langle u^\prime u^\prime \rangle \;[\mathrm{m^2/s^2}]$',
'vv': r'$\langle v^\prime v^\prime \rangle \;[\mathrm{m^2/s^2}]$',
'ww': r'$\langle w^\prime w^\prime \rangle \;[\mathrm{m^2/s^2}]$',
'uv': r'$\langle u^\prime v^\prime \rangle \;[\mathrm{m^2/s^2}]$',
'uw': r'$\langle u^\prime w^\prime \rangle \;[\mathrm{m^2/s^2}]$',
'vw': r'$\langle v^\prime w^\prime \rangle \;[\mathrm{m^2/s^2}]$',
'tw': r'$\langle w^\prime \theta^\prime \rangle \;[\mathrm{Km/s}]$',
'TI': r'TI $[-]$',
'TKE': r'TKE $[\mathrm{m^2/s^2}]$',
}
fieldlabels_superscript_units = {
'wspd': r'Wind speed [m s$^{-1}$]',
'wdir': r'Wind direction [$^\circ$]',
'u': r'u [m s$^{-1}$]',
'v': r'v [m s$^{-1}$]',
'w': r'Vertical wind speed [m s$^{-1}$]',
'theta': r'$\theta$ [K]',
'thetav': r'$\theta_v$ [K]',
'uu': r'$\langle u^\prime u^\prime \rangle \;[\mathrm{m^2 s^{-2}}]$',
'vv': r'$\langle v^\prime v^\prime \rangle \;[\mathrm{m^2 s^{-2}}]$',
'ww': r'$\langle w^\prime w^\prime \rangle \;[\mathrm{m^2 s^{-2}}]$',
'uv': r'$\langle u^\prime v^\prime \rangle \;[\mathrm{m^2 s^{-2}}]$',
'uw': r'$\langle u^\prime w^\prime \rangle \;[\mathrm{m^2 s^{-2}}]$',
'vw': r'$\langle v^\prime w^\prime \rangle \;[\mathrm{m^2 s^{-2}}]$',
'tw': r'$\langle w^\prime \theta^\prime \rangle \;[\mathrm{K m s^{-1}}]$',
'TI': r'TI $[-]$',
'TKE': r'TKE $[\mathrm{m^2 s^{-2}}]$',
}
# Standard field labels for frequency spectra
spectrumlabels_default_units = {
'u': r'$E_{uu}\;[\mathrm{m^2/s}]$',
'v': r'$E_{vv}\;[\mathrm{m^2/s}]$',
'w': r'$E_{ww}\;[\mathrm{m^2/s}]$',
'theta': r'$E_{\theta\theta}\;[\mathrm{K^2 s}]$',
'thetav': r'$E_{\theta\theta}\;[\mathrm{K^2 s}]$',
'wspd': r'$E_{UU}\;[\mathrm{m^2/s}]$',
}
spectrumlabels_superscript_units = {
'u': r'$E_{uu}\;[\mathrm{m^2\;s^{-1}}]$',
'v': r'$E_{vv}\;[\mathrm{m^2\;s^{-1}}]$',
'w': r'$E_{ww}\;[\mathrm{m^2\;s^{-1}}]$',
'theta': r'$E_{\theta\theta}\;[\mathrm{K^2\;s}]$',
'thetav': r'$E_{\theta\theta}\;[\mathrm{K^2\;s}]$',
'wspd': r'$E_{UU}\;[\mathrm{m^2\;s^{-1}}]$',
}
# Default settings
default_colors = plt.rcParams['axes.prop_cycle'].by_key()['color']
standard_fieldlabels = fieldlabels_default_units
standard_spectrumlabels = spectrumlabels_default_units
# Supported dimensions and associated names
dimension_names = {
'time': ['datetime','time','Time'],
'height': ['height','heights','z'],
'frequency': ['frequency','f',]
}
# Show debug information
debug = False
def plot_timeheight(datasets,
fields=None,
fig=None,ax=None,
colorschemes={},
fieldlimits=None,
heightlimits=None,
timelimits=None,
fieldlabels={},
labelsubplots=False,
showcolorbars=True,
fieldorder='C',
ncols=1,
subfigsize=(12,4),
plot_local_time=False,
local_time_offset=0,
datasetkwargs={},
**kwargs
):
"""
Plot time-height contours for different datasets and fields
Usage
=====
datasets : pandas.DataFrame or dict
Dataset(s). If more than one set, datasets should
be a dictionary with entries <dataset_name>: dataset
fields : str, list, 'all' (or None)
Fieldname(s) corresponding to particular column(s) of
the datasets. fields can be None if input are MultiIndex Series.
'all' means all fields will be plotted (in this case all
datasets should have the same fields)
fig : figure handle
Custom figure handle. Should be specified together with ax
ax : axes handle, or list or numpy ndarray with axes handles
Customand axes handle(s).
Size of ax should equal ndatasets*nfields
colorschemes : str or dict
Name of colorschemes. If only one field is plotted, colorschemes
can be a string. Otherwise, it should be a dictionary with
entries <fieldname>: name_of_colorschemes
        Missing colorschemes are set to 'viridis'
fieldlimits : list or tuple, or dict
Value range for the various fields. If only one field is
plotted, fieldlimits can be a list or tuple. Otherwise, it
should be a dictionary with entries <fieldname>: fieldlimit.
Missing fieldlimits are set automatically
heightlimits : list or tuple
Height axis limits
timelimits : list or tuple
Time axis limits
fieldlabels : str or dict
Custom field labels. If only one field is plotted, fieldlabels
can be a string. Otherwise it should be a dictionary with
entries <fieldname>: fieldlabel
labelsubplots : bool, list or tuple
Label subplots as (a), (b), (c), ... If a list or tuple is given
their values should be the horizontal and vertical position
relative to each subaxis.
showcolorbars : bool
Show colorbar per subplot
fieldorder : 'C' or 'F'
Index ordering for assigning fields and datasets to axes grid
(row by row). Fields is considered the first axis, so 'C' means
fields change slowest, 'F' means fields change fastest.
ncols : int
Number of columns in axes grid, must be a true divisor of total
number of axes.
subfigsize : list or tuple
Standard size of subfigures
plot_local_time : bool or str
Plot dual x axes with both UTC time and local time. If a str is
provided, then plot_local_time is assumed to be True and the str
is used as the datetime format.
local_time_offset : float
Local time offset from UTC
datasetkwargs : dict
Dataset-specific options that are passed on to the actual
plotting function. These options overwrite general options
specified through **kwargs. The argument should be a dictionary
with entries <dataset_name>: {**kwargs}
**kwargs : other keyword arguments
Options that are passed on to the actual plotting function.
Note that these options should be the same for all datasets and
fields and can not be used to set dataset or field specific
limits, colorschemess, norms, etc.
Example uses include setting shading, rasterized, etc.
"""
args = PlottingInput(
datasets=datasets,
fields=fields,
fieldlimits=fieldlimits,
fieldlabels=fieldlabels,
colorschemes=colorschemes,
fieldorder=fieldorder
)
args.set_missing_fieldlimits()
nfields = len(args.fields)
ndatasets = len(args.datasets)
ntotal = nfields * ndatasets
# Concatenate custom and standard field labels
# (custom field labels overwrite standard fields labels if existent)
args.fieldlabels = {**standard_fieldlabels, **args.fieldlabels}
fig, ax, nrows, ncols = _create_subplots_if_needed(
ntotal,
ncols,
sharex=True,
sharey=True,
subfigsize=subfigsize,
hspace=0.2,
fig=fig,
ax=ax
)
# Create flattened view of axes
axv = np.asarray(ax).reshape(-1)
# Initialise list of colorbars
cbars = []
# Loop over datasets, fields and times
for i, dfname in enumerate(args.datasets):
df = args.datasets[dfname]
heightvalues = _get_dim_values(df,'height')
timevalues = _get_dim_values(df,'time')
assert(heightvalues is not None), 'timeheight plot needs a height axis'
assert(timevalues is not None), 'timeheight plot needs a time axis'
if isinstance(timevalues, pd.DatetimeIndex):
# If plot local time, shift timevalues
if plot_local_time is not False:
timevalues = timevalues + pd.to_timedelta(local_time_offset,'h')
# Convert to days since 0001-01-01 00:00 UTC, plus one
numerical_timevalues = mdates.date2num(timevalues.values)
else:
if isinstance(timevalues, pd.TimedeltaIndex):
timevalues = timevalues.total_seconds()
# Timevalues is already a numerical array
numerical_timevalues = timevalues
# Create time-height mesh grid
tst = _get_staggered_grid(numerical_timevalues)
zst = _get_staggered_grid(heightvalues)
Ts,Zs = np.meshgrid(tst,zst,indexing='xy')
# Create list with available fields only
available_fields = _get_available_fieldnames(df,args.fields)
# Pivot all fields in a dataset at once
df_pivot = _get_pivot_table(df,'height',available_fields)
for j, field in enumerate(args.fields):
# If available_fields is [None,], fieldname is unimportant
if available_fields == [None]:
pass
# Else, check if field is available
elif not field in available_fields:
print('Warning: field "'+field+'" not available in dataset '+dfname)
continue
# Store plotting options in dictionary
plotting_properties = {
'vmin': args.fieldlimits[field][0],
'vmax': args.fieldlimits[field][1],
'cmap': args.cmap[field]
}
# Index of axis corresponding to dataset i and field j
if args.fieldorder=='C':
axi = i*nfields + j
else:
axi = j*ndatasets + i
# Extract data from dataframe
fieldvalues = _get_pivoted_field(df_pivot,field)
# Gather label, color, general options and dataset-specific options
# (highest priority to dataset-specific options, then general options)
try:
plotting_properties = {**plotting_properties,**kwargs,**datasetkwargs[dfname]}
except KeyError:
plotting_properties = {**plotting_properties,**kwargs}
# Plot data
im = axv[axi].pcolormesh(Ts,Zs,fieldvalues.T,**plotting_properties)
# Colorbar mark up
if showcolorbars:
cbar = fig.colorbar(im,ax=axv[axi],shrink=1.0)
# Set field label if known
try:
cbar.set_label(args.fieldlabels[field])
except KeyError:
pass
# Save colorbar
cbars.append(cbar)
# Set title if more than one dataset
if ndatasets>1:
axv[axi].set_title(dfname,fontsize=16)
# Format time axis
if isinstance(timevalues, (pd.DatetimeIndex, pd.TimedeltaIndex)):
ax2 = _format_time_axis(fig,axv[(nrows-1)*ncols:],plot_local_time,local_time_offset,timelimits)
else:
ax2 = None
# Set time limits if specified
if not timelimits is None:
axv[-1].set_xlim(timelimits)
# Set time label
for axi in axv[(nrows-1)*ncols:]:
axi.set_xlabel('time [s]')
if not heightlimits is None:
axv[-1].set_ylim(heightlimits)
# Add y labels
for r in range(nrows):
axv[r*ncols].set_ylabel(r'Height [m]')
# Align time, height and color labels
_align_labels(fig,axv,nrows,ncols)
if showcolorbars:
_align_labels(fig,[cb.ax for cb in cbars],nrows,ncols)
# Number sub figures as a, b, c, ...
if labelsubplots is not False:
try:
hoffset, voffset = labelsubplots
except (TypeError, ValueError):
hoffset, voffset = -0.14, 1.0
for i,axi in enumerate(axv):
axi.text(hoffset,voffset,'('+chr(i+97)+')',transform=axi.transAxes,size=16)
# Return cbar instead of array if ntotal==1
if len(cbars)==1:
cbars=cbars[0]
if (plot_local_time is not False) and ax2 is not None:
return fig, ax, ax2, cbars
else:
return fig, ax, cbars
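# Example usage (hypothetical dataset names; assumes each DataFrame provides time and height
# dimensions as described in the docstring above):
# fig, ax, cbars = plot_timeheight({'WRF': df_wrf, 'Obs': df_obs},
#                                  fields=['wspd', 'theta'],
#                                  heightlimits=(0, 2000),
#                                  showcolorbars=True)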
def plot_timehistory_at_height(datasets,
fields=None,
heights=None,
fig=None,ax=None,
fieldlimits=None,
timelimits=None,
fieldlabels={},
cmap=None,
stack_by_datasets=None,
labelsubplots=False,
showlegend=None,
ncols=1,
subfigsize=(12,3),
plot_local_time=False,
local_time_offset=0,
datasetkwargs={},
**kwargs
):
"""
Plot time history at specified height(s) for various dataset(s)
and/or field(s).
By default, data for multiple datasets or multiple heights are
stacked in a single subplot. When multiple datasets and multiple
heights are specified together, heights are stacked in a subplot
per field and per dataset.
Usage
=====
datasets : pandas.DataFrame or dict
Dataset(s). If more than one set, datasets should
be a dictionary with entries <dataset_name>: dataset
fields : str, list, 'all' (or None)
Fieldname(s) corresponding to particular column(s) of
the datasets. fields can be None if input are Series.
'all' means all fields will be plotted (in this case all
datasets should have the same fields)
heights : float, list, 'all' (or None)
Height(s) for which time history is plotted. heights can be
None if all datasets combined have no more than one height
value. 'all' means the time history for all heights in the
datasets will be plotted (in this case all datasets should
have the same heights)
fig : figure handle
Custom figure handle. Should be specified together with ax
ax : axes handle, or list or numpy ndarray with axes handles
        Custom axes handle(s).
Size of ax should equal nfields * (ndatasets or nheights)
fieldlimits : list or tuple, or dict
Value range for the various fields. If only one field is
plotted, fieldlimits can be a list or tuple. Otherwise, it
should be a dictionary with entries <fieldname>: fieldlimit.
Missing fieldlimits are set automatically
timelimits : list or tuple
Time axis limits
fieldlabels : str or dict
Custom field labels. If only one field is plotted, fieldlabels
can be a string. Otherwise it should be a dictionary with
entries <fieldname>: fieldlabel
cmap : str
Colormap used when stacking heights
stack_by_datasets : bool (or None)
Flag to specify what is plotted ("stacked") together per subfigure.
If True, stack datasets together, otherwise stack by heights. If
None, stack_by_datasets will be set based on the number of heights
and datasets.
labelsubplots : bool, list or tuple
Label subplots as (a), (b), (c), ... If a list or tuple is given
their values should be the horizontal and vertical position
relative to each subaxis.
showlegend : bool (or None)
Label different plots and show legend. If None, showlegend is set
to True if legend will have more than one entry, otherwise it is
set to False.
ncols : int
Number of columns in axes grid, must be a true divisor of total
number of axes.
subfigsize : list or tuple
Standard size of subfigures
plot_local_time : bool or str
Plot dual x axes with both UTC time and local time. If a str is
provided, then plot_local_time is assumed to be True and the str
is used as the datetime format.
local_time_offset : float
Local time offset from UTC
datasetkwargs : dict
Dataset-specific options that are passed on to the actual
plotting function. These options overwrite general options
specified through **kwargs. The argument should be a dictionary
with entries <dataset_name>: {**kwargs}
**kwargs : other keyword arguments
Options that are passed on to the actual plotting function.
Note that these options should be the same for all datasets,
fields and heights, and they can not be used to set dataset,
field or height specific colors, limits, etc.
Example uses include setting linestyle/width, marker, etc.
"""
# Avoid FutureWarning concerning the use of an implicitly registered
# datetime converter for a matplotlib plotting method. The converter
# was registered by pandas on import. Future versions of pandas will
# require explicit registration of matplotlib converters, as done here.
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
args = PlottingInput(
datasets=datasets,
fields=fields,
heights=heights,
fieldlimits=fieldlimits,
fieldlabels=fieldlabels,
)
nfields = len(args.fields)
nheights = len(args.heights)
ndatasets = len(args.datasets)
# Concatenate custom and standard field labels
# (custom field labels overwrite standard fields labels if existent)
args.fieldlabels = {**standard_fieldlabels, **args.fieldlabels}
# Set up subplot grid
if stack_by_datasets is None:
if nheights>1:
stack_by_datasets = False
else:
stack_by_datasets = True
if stack_by_datasets:
ntotal = nfields*nheights
else:
ntotal = nfields*ndatasets
fig, ax, nrows, ncols = _create_subplots_if_needed(
ntotal,
ncols,
sharex=True,
subfigsize=subfigsize,
hspace=0.2,
fig=fig,
ax=ax
)
# Create flattened view of axes
axv = np.asarray(ax).reshape(-1)
# Set showlegend if not specified
if showlegend is None:
if (stack_by_datasets and ndatasets>1) or (not stack_by_datasets and nheights>1):
showlegend = True
else:
showlegend = False
# Loop over datasets and fields
for i,dfname in enumerate(args.datasets):
df = args.datasets[dfname]
timevalues = _get_dim_values(df,'time',default_idx=True)
assert(timevalues is not None), 'timehistory plot needs a time axis'
heightvalues = _get_dim_values(df,'height')
if isinstance(timevalues, pd.TimedeltaIndex):
timevalues = timevalues.total_seconds()
# If plot local time, shift timevalues
if (plot_local_time is not False) and \
isinstance(timevalues, (pd.DatetimeIndex, pd.TimedeltaIndex)):
timevalues = timevalues + | pd.to_timedelta(local_time_offset,'h') | pandas.to_timedelta |
import unittest
import numpy as np
import pandas as pd
from numpy import testing as np_test
from pgmpy.estimators.CITests import pearsonr, chi_square
np.random.seed(42)
class TestPearsonr(unittest.TestCase):
def setUp(self):
self.df_ind = pd.DataFrame(np.random.randn(10000, 3), columns=["X", "Y", "Z"])
Z = np.random.randn(10000)
X = 3 * Z + np.random.normal(loc=0, scale=0.1, size=10000)
Y = 2 * Z + np.random.normal(loc=0, scale=0.1, size=10000)
self.df_cind = pd.DataFrame({"X": X, "Y": Y, "Z": Z})
Z1 = np.random.randn(10000)
Z2 = np.random.randn(10000)
X = 3 * Z1 + 2 * Z2 + np.random.normal(loc=0, scale=0.1, size=10000)
Y = 2 * Z1 + 3 * Z2 + np.random.normal(loc=0, scale=0.1, size=10000)
self.df_cind_mul = pd.DataFrame({"X": X, "Y": Y, "Z1": Z1, "Z2": Z2})
X = np.random.rand(10000)
Y = np.random.rand(10000)
Z = 2 * X + 2 * Y + np.random.normal(loc=0, scale=0.1, size=10000)
self.df_vstruct = pd.DataFrame({"X": X, "Y": Y, "Z": Z})
def test_pearsonr(self):
coef, p_value = pearsonr(X="X", Y="Y", Z=[], data=self.df_ind)
self.assertTrue(coef < 0.1)
self.assertTrue(p_value > 0.05)
coef, p_value = pearsonr(X="X", Y="Y", Z=["Z"], data=self.df_cind)
self.assertTrue(coef < 0.1)
self.assertTrue(p_value > 0.05)
coef, p_value = pearsonr(X="X", Y="Y", Z=["Z1", "Z2"], data=self.df_cind_mul)
self.assertTrue(coef < 0.1)
self.assertTrue(p_value > 0.05)
coef, p_value = pearsonr(X="X", Y="Y", Z=["Z"], data=self.df_vstruct)
self.assertTrue(abs(coef) > 0.9)
self.assertTrue(p_value < 0.05)
class TestChiSquare(unittest.TestCase):
def setUp(self):
self.df_adult = | pd.read_csv("pgmpy/tests/test_estimators/testdata/adult.csv") | pandas.read_csv |
import pandas as __pd
import datetime as __dt
from multiprocessing import Pool as __Pool
import multiprocessing as __mp
from functools import reduce as __red
from seffaflik.__ortak.__araclar import make_requests as __make_requests
from seffaflik.__ortak import __dogrulama as __dogrulama
from seffaflik.elektrik import santraller as __santraller
__first_part_url = "production/"
def organizasyonlar():
"""
    Returns the organizations that are allowed to submit a Finalized Day-Ahead Production Plan (KGÜP).
    Parameters
    ----------
    Returns
    -------
    KGÜP-eligible organization information (Id, Adı, EIC Kodu, Kısa Adı, Durum)
"""
try:
particular_url = __first_part_url + "dpp-organization"
json = __make_requests(particular_url)
df = __pd.DataFrame(json["body"]["organizations"])
df.rename(index=str,
columns={"organizationId": "Id", "organizationName": "Adı",
"organizationETSOCode": "EIC Kodu", "organizationShortName": "Kısa Adı",
"organizationStatus": "Durum"},
inplace=True)
df = df[["Id", "Adı", "EIC Kodu", "Kısa Adı", "Durum"]]
except (KeyError, TypeError):
return __pd.DataFrame()
else:
return df
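# Example usage (assumes the transparency (Şeffaflık) API is reachable):
# df_org = organizasyonlar()
# df_org.head()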
def organizasyon_veris_cekis_birimleri(eic):
"""
    Returns the settlement-based injection/withdrawal unit (UEVÇB) information of the organization with the given
    EIC code that is allowed to submit a Finalized Day-Ahead Production Plan (KGÜP).
    Parameters
    ----------
    eic : organization EIC code as a string
    Returns
    -------
    UEVÇB information of the KGÜP-eligible organization (Id, Adı, EIC Kodu)
"""
if __dogrulama.__kgup_girebilen_organizasyon_dogrulama(eic):
try:
particular_url = __first_part_url + "dpp-injection-unit-name?organizationEIC=" + eic
json = __make_requests(particular_url)
df_unit = __pd.DataFrame(json["body"]["injectionUnitNames"])
df_unit.rename(index=str, columns={"id": "Id", "name": "Adı", "eic": "EIC Kodu"}, inplace=True)
df_unit = df_unit[["Id", "Adı", "EIC Kodu"]]
except (KeyError, TypeError):
return __pd.DataFrame()
else:
return df_unit
def tum_organizasyonlar_veris_cekis_birimleri():
"""
    Returns all organizations that are allowed to submit a Finalized Day-Ahead Production Plan (KGÜP) together with
    their settlement-based injection/withdrawal unit (UEVÇB) information.
    Parameters
    ----------
    Returns
    -------
    KGÜP-eligible organizations and their UEVÇB information (Org Id, Org Adı, Org EIC Kodu, Org Kısa Adı, Org Durum,
    UEVÇB Id, UEVÇB Adı, UEVÇB EIC Kodu)
"""
list_org = organizasyonlar()[["Id", "Adı", "EIC Kodu", "Kısa Adı", "Durum"]].to_dict("records")
with __Pool(__mp.cpu_count()) as p:
list_df_unit = p.map(__organizasyon_cekis_birimleri, list_org, chunksize=1)
return __pd.concat(list_df_unit).reset_index(drop=True)
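# Example usage (queries every organization in parallel, so it may take a while):
# df_all_units = tum_organizasyonlar_veris_cekis_birimleri()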
def kgup(baslangic_tarihi=__dt.datetime.today().strftime("%Y-%m-%d"),
bitis_tarihi=__dt.datetime.today().strftime("%Y-%m-%d"), organizasyon_eic="", uevcb_eic=""):
"""
    Returns the source-based finalized daily production plan (KGÜP) for the given date range.
    Note: If "organizasyon_eic" is provided and "uevcb_eic" is not, the KGÜP is returned as the total over all
    UEVÇBs of the organization. If both values are provided, the KGÜP of the given UEVÇB of the given organization
    is returned.
    Parameters
    ----------
    baslangic_tarihi : start date in %YYYY-%MM-%DD format (default: today)
    bitis_tarihi : end date in %YYYY-%MM-%DD format (default: today)
    organizasyon_eic : organization EIC code as a string (default: "")
    uevcb_eic : UEVÇB EIC code as a string (default: "")
    Returns
    -------
    KGÜP (Tarih, Saat, Doğalgaz, Barajlı, Linyit, Akarsu, İthal Kömür, Rüzgar, Fuel Oil, Jeo Termal, Taş Kömür,
    Biyokütle, Nafta, Diğer, Toplam)
"""
if __dogrulama.__baslangic_bitis_tarih_dogrulama(baslangic_tarihi, bitis_tarihi):
try:
particular_url = __first_part_url + "dpp" + "?startDate=" + baslangic_tarihi + "&endDate=" + bitis_tarihi \
+ "&organizationEIC=" + organizasyon_eic + "&uevcbEIC=" + uevcb_eic
json = __make_requests(particular_url)
df = __pd.DataFrame(json["body"]["dppList"])
df["Saat"] = df["tarih"].apply(lambda h: int(h[11:13]))
df["Tarih"] = __pd.to_datetime(df["tarih"].apply(lambda d: d[:10]))
df.rename(index=str,
columns={"akarsu": "Akarsu", "barajli": "Barajlı", "biokutle": "Biyokütle", "diger": "Diğer",
"dogalgaz": "Doğalgaz", "fuelOil": "Fuel Oil", "ithalKomur": "İthal Kömür",
"jeotermal": "Jeo Termal", "linyit": "Linyit", "nafta": "Nafta",
"ruzgar": "Rüzgar", "tasKomur": "Taş Kömür", "toplam": "Toplam"}, inplace=True)
df = df[["Tarih", "Saat", "Doğalgaz", "Barajlı", "Linyit", "Akarsu", "İthal Kömür", "Rüzgar",
"Fuel Oil", "Jeo Termal", "Taş Kömür", "Biyokütle", "Nafta", "Diğer", "Toplam"]]
except (KeyError, TypeError):
return __pd.DataFrame()
else:
return df
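# Example usage (placeholder EIC code shown purely for illustration):
# df_kgup = kgup(baslangic_tarihi="2021-06-01", bitis_tarihi="2021-06-07",
#                organizasyon_eic="<organization-EIC>")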
def tum_organizasyonlar_kgup(baslangic_tarihi=__dt.datetime.today().strftime("%Y-%m-%d"),
bitis_tarihi=__dt.datetime.today().strftime("%Y-%m-%d")):
"""
    Returns the hourly KGÜP values of all organizations that are allowed to submit a Finalized Day-Ahead Production
    Plan (KGÜP) for the given date range.
    Parameters
    ----------
    baslangic_tarihi : start date in %YYYY-%MM-%DD format (default: today)
    bitis_tarihi : end date in %YYYY-%MM-%DD format (default: today)
    Returns
    -------
    KGÜP values of KGÜP-eligible organizations (Tarih, Saat, KGUP)
"""
if __dogrulama.__baslangic_bitis_tarih_dogrulama(baslangic_tarihi, bitis_tarihi):
org = organizasyonlar()
list_org = org[["EIC Kodu", "Kısa Adı"]].to_dict("records")
org_len = len(list_org)
list_date_org_eic = list(zip([baslangic_tarihi] * org_len, [bitis_tarihi] * org_len, list_org))
list_date_org_eic = list(map(list, list_date_org_eic))
with __Pool(__mp.cpu_count()) as p:
list_df_unit = p.starmap(__kgup, list_date_org_eic, chunksize=1)
list_df_unit = list(filter(lambda x: len(x) > 0, list_df_unit))
df_unit = __red(lambda left, right: __pd.merge(left, right, how="outer", on=["Tarih", "Saat"], sort=True),
list_df_unit)
return df_unit
def tum_uevcb_kgup(baslangic_tarihi=__dt.datetime.today().strftime("%Y-%m-%d"),
bitis_tarihi=__dt.datetime.today().strftime("%Y-%m-%d")):
"""
İlgili tarih aralığı için Kesinleşmiş Gün Öncesi Üretim Planı (KGÜP) girebilecek olan tüm organizasyonların
uzlaştırmaya esas veriş-çekiş birimlerinin saatlik KGUP bilgilerini vermektedir.
Parametreler
------------
baslangic_tarihi : %YYYY-%AA-%GG formatında başlangıç tarihi (Varsayılan: bugün)
bitis_tarihi : %YYYY-%AA-%GG formatında bitiş tarihi (Varsayılan: bugün)
Geri Dönüş Değeri
-----------------
KGÜP Girebilen Organizasyonların UEVCB KGUP Değerleri (Tarih, Saat, KGUP)
"""
if __dogrulama.__baslangic_bitis_tarih_dogrulama(baslangic_tarihi, bitis_tarihi):
org_uevcb = tum_organizasyonlar_veris_cekis_birimleri()
list_org_uevcb = org_uevcb[["Org EIC Kodu", "UEVÇB EIC Kodu", "UEVÇB Adı"]].to_dict("records")
list_org_uevcb_len = len(list_org_uevcb)
list_date_org_uevcb_eic = list(
zip([baslangic_tarihi] * list_org_uevcb_len, [bitis_tarihi] * list_org_uevcb_len, list_org_uevcb))
list_date_org_uevcb_eic = list(map(list, list_date_org_uevcb_eic))
with __Pool(__mp.cpu_count()) as p:
list_df_unit = p.starmap(__kgup_uevcb, list_date_org_uevcb_eic, chunksize=1)
list_df_unit = list(filter(lambda x: len(x) > 0, list_df_unit))
df_unit = __red(lambda left, right: __pd.merge(left, right, how="outer", on=["Tarih", "Saat"], sort=True),
list_df_unit)
return df_unit
def eak(baslangic_tarihi=__dt.datetime.today().strftime("%Y-%m-%d"),
bitis_tarihi=__dt.datetime.today().strftime("%Y-%m-%d"), organizasyon_eic="", uevcb_eic=""):
"""
İlgili tarih aralığı için kaynak bazlı emre amade kapasite (EAK) bilgisini vermektedir.
Not: "organizasyon_eic" değeri girildiği, "uevcb_eic" değeri girilmediği taktirde organizasyona ait tüm uevcb'lerin
toplamı için eak bilgisini vermektedir. Her iki değer de girildiği taktirde ilgili organizasyonun ilgili uevcb'si
için kgüp bilgisini vermektedir.
Parametreler
------------
baslangic_tarihi : %YYYY-%AA-%GG formatında başlangıç tarihi (Varsayılan: bugün)
bitis_tarihi : %YYYY-%AA-%GG formatında bitiş tarihi (Varsayılan: bugün)
organizasyon_eic : metin formatında organizasyon eic kodu (Varsayılan: "")
uevcb_eic : metin formatında metin formatında uevcb eic kodu (Varsayılan: "")
Geri Dönüş Değeri
-----------------
EAK (Tarih, Saat, Doğalgaz, Barajlı, Linyit, Akarsu, İthal Kömür, Rüzgar, Fuel Oil, Jeo Termal, Taş Kömür, Biyokütle,
Nafta, Diğer, Toplam)
"""
if __dogrulama.__baslangic_bitis_tarih_dogrulama(baslangic_tarihi, bitis_tarihi):
try:
particular_url = __first_part_url + "aic" + "?startDate=" + baslangic_tarihi + "&endDate=" + bitis_tarihi \
+ "&organizationEIC=" + organizasyon_eic + "&uevcbEIC=" + uevcb_eic
json = __make_requests(particular_url)
df = __pd.DataFrame(json["body"]["aicList"])
df["Saat"] = df["tarih"].apply(lambda h: int(h[11:13]))
df["Tarih"] = __pd.to_datetime(df["tarih"].apply(lambda d: d[:10]))
df.rename(index=str,
columns={"akarsu": "Akarsu", "barajli": "Barajlı", "biokutle": "Biyokütle", "diger": "Diğer",
"dogalgaz": "Doğalgaz", "fuelOil": "Fuel Oil", "ithalKomur": "İthal Kömür",
"jeotermal": "Jeo Termal", "linyit": "Linyit", "nafta": "Nafta",
"ruzgar": "Rüzgar", "tasKomur": "Taş Kömür", "toplam": "Toplam"}, inplace=True)
df = df[["Tarih", "Saat", "Doğalgaz", "Barajlı", "Linyit", "Akarsu", "İthal Kömür", "Rüzgar",
"Fuel Oil", "Jeo Termal", "Taş Kömür", "Biyokütle", "Nafta", "Diğer", "Toplam"]]
except (KeyError, TypeError):
return __pd.DataFrame()
else:
return df
def tum_organizasyonlar_eak(baslangic_tarihi=__dt.datetime.today().strftime("%Y-%m-%d"),
bitis_tarihi=__dt.datetime.today().strftime("%Y-%m-%d")):
"""
İlgili tarih aralığı için Emre Amade Kapasite (EAK) girebilecek olan tüm organizasyonların saatlik EAK bilgilerini
vermektedir.
Parametreler
------------
baslangic_tarihi : %YYYY-%AA-%GG formatında başlangıç tarihi (Varsayılan: bugün)
bitis_tarihi : %YYYY-%AA-%GG formatında bitiş tarihi (Varsayılan: bugün)
Geri Dönüş Değeri
-----------------
EAK Girebilen Organizasyonların EAK Değerleri (Tarih, Saat, EAK)
"""
if __dogrulama.__baslangic_bitis_tarih_dogrulama(baslangic_tarihi, bitis_tarihi):
org = organizasyonlar()
list_org = org[["EIC Kodu", "Kısa Adı"]].to_dict("records")
org_len = len(list_org)
list_date_org_eic = list(zip([baslangic_tarihi] * org_len, [bitis_tarihi] * org_len, list_org))
list_date_org_eic = list(map(list, list_date_org_eic))
with __Pool(__mp.cpu_count()) as p:
list_df_unit = p.starmap(__eak, list_date_org_eic, chunksize=1)
list_df_unit = list(filter(lambda x: len(x) > 0, list_df_unit))
df_unit = __red(lambda left, right: __pd.merge(left, right, how="outer", on=["Tarih", "Saat"], sort=True),
list_df_unit)
return df_unit
def tum_uevcb_eak(baslangic_tarihi=__dt.datetime.today().strftime("%Y-%m-%d"),
bitis_tarihi=__dt.datetime.today().strftime("%Y-%m-%d")):
"""
İlgili tarih aralığı için Emre Amade Kapasite (EAK) girebilecek olan tüm organizasyonların uzlaştırmaya esas
veriş-çekiş birimlerinin saatlik KGUP bilgilerini vermektedir.
Parametreler
------------
baslangic_tarihi : %YYYY-%AA-%GG formatında başlangıç tarihi (Varsayılan: bugün)
bitis_tarihi : %YYYY-%AA-%GG formatında bitiş tarihi (Varsayılan: bugün)
Geri Dönüş Değeri
-----------------
KGÜP Girebilen Organizasyonların UEVCB KGUP Değerleri (Tarih, Saat, KGUP)
"""
if __dogrulama.__baslangic_bitis_tarih_dogrulama(baslangic_tarihi, bitis_tarihi):
org_uevcb = tum_organizasyonlar_veris_cekis_birimleri()
list_org_uevcb = org_uevcb[["Org EIC Kodu", "UEVÇB EIC Kodu", "UEVÇB Adı"]].to_dict("records")
list_org_uevcb_len = len(list_org_uevcb)
list_date_org_uevcb_eic = list(
zip([baslangic_tarihi] * list_org_uevcb_len, [bitis_tarihi] * list_org_uevcb_len, list_org_uevcb))
list_date_org_uevcb_eic = list(map(list, list_date_org_uevcb_eic))
with __Pool(__mp.cpu_count()) as p:
list_df_unit = p.starmap(__eak_uevcb, list_date_org_uevcb_eic, chunksize=1)
list_df_unit = list(filter(lambda x: len(x) > 0, list_df_unit))
df_unit = __red(lambda left, right: __pd.merge(left, right, how="outer", on=["Tarih", "Saat"], sort=True),
list_df_unit)
return df_unit
def kudup(baslangic_tarihi=__dt.datetime.today().strftime("%Y-%m-%d"),
bitis_tarihi=__dt.datetime.today().strftime("%Y-%m-%d"), organizasyon_id="", uevcb_id=""):
"""
İlgili tarih aralığı için gün içi piyasasının kapanışından sonra yapılan güncellemeyle kaynak bazlı Kesinleşmiş
Uzlaştırma Dönemi Üretim Planı (KUDÜP) bilgisini vermektedir.
Not: "organizasyon_eic" değeri girildiği, "uevcb_eic" değeri girilmediği taktirde organizasyona ait tüm uevcb'lerin
toplamı için kgüp bilgisini vermektedir. Her iki değer de girildiği taktirde ilgili organizasyonun ilgili uevcb'si
için kgüp bilgisini vermektedir.
Parametreler
------------
baslangic_tarihi : %YYYY-%AA-%GG formatında başlangıç tarihi (Varsayılan: bugün)
bitis_tarihi : %YYYY-%AA-%GG formatında bitiş tarihi (Varsayılan: bugün)
organizasyon_id : metin formatında organizasyon id (Varsayılan: "")
uevcb_id : metin formatında uevcb id (Varsayılan: "")
Geri Dönüş Değeri
-----------------
KUDÜP (Tarih, Saat, Doğalgaz, Barajlı, Linyit, Akarsu, İthal Kömür, Rüzgar, Fuel Oil, Jeo Termal, Taş Kömür,
Biyokütle, Nafta, Diğer, Toplam)
"""
if __dogrulama.__baslangic_bitis_tarih_dogrulama(baslangic_tarihi, bitis_tarihi):
try:
particular_url = __first_part_url + "sbfgp" + "?startDate=" + baslangic_tarihi + "&endDate=" + \
bitis_tarihi + "&organizationId=" + organizasyon_id + "&uevcbId=" + uevcb_id
json = __make_requests(particular_url)
df = __pd.DataFrame(json["body"]["dppList"])
df["Saat"] = df["tarih"].apply(lambda h: int(h[11:13]))
df["Tarih"] = __pd.to_datetime(df["tarih"].apply(lambda d: d[:10]))
df.rename(index=str,
columns={"akarsu": "Akarsu", "barajli": "Barajlı", "biokutle": "Biyokütle", "diger": "Diğer",
"dogalgaz": "Doğalgaz", "fuelOil": "Fuel Oil", "ithalKomur": "İthal Kömür",
"jeotermal": "Jeo Termal", "linyit": "Linyit", "nafta": "Nafta",
"ruzgar": "Rüzgar", "tasKomur": "Taş Kömür", "toplam": "Toplam"}, inplace=True)
df = df[["Tarih", "Saat", "Doğalgaz", "Barajlı", "Linyit", "Akarsu", "İthal Kömür", "Rüzgar",
"Fuel Oil", "Jeo Termal", "Taş Kömür", "Biyokütle", "Nafta", "Diğer", "Toplam"]]
except (KeyError, TypeError):
return __pd.DataFrame()
else:
return df
def uevm(baslangic_tarihi=__dt.datetime.today().strftime("%Y-%m-%d"),
bitis_tarihi=__dt.datetime.today().strftime("%Y-%m-%d")):
"""
İlgili tarih aralığı için saatlik Uzlaştırmaya Esas Variş Miktarı (UEVM) bilgisini vermektedir.
Parametreler
------------
baslangic_tarihi : %YYYY-%AA-%GG formatında başlangıç tarihi (Varsayılan: bugün)
bitis_tarihi : %YYYY-%AA-%GG formatında bitiş tarihi (Varsayılan: bugün)
Geri Dönüş Değeri
-----------------
Uzlaştırmaya Esas Veriş Miktarı (Tarih, Saat, UEVM)
"""
if __dogrulama.__baslangic_bitis_tarih_dogrulama(baslangic_tarihi, bitis_tarihi):
try:
particular_url = __first_part_url + "ssv-categorized" + "?startDate=" + baslangic_tarihi + "&endDate=" + \
bitis_tarihi
json = __make_requests(particular_url)
df = __pd.DataFrame(json["body"]["ssvList"])
df["Saat"] = df["date"].apply(lambda h: int(h[11:13]))
df["Tarih"] = __pd.to_datetime(df["date"].apply(lambda d: d[:10]))
df.rename(index=str,
columns={"asphaltite": "Asfaltit Kömür", "river": "Akarsu", "dam": "Barajlı",
"biomass": "Biyokütle", "naturalGas": "Doğalgaz", "fueloil": "Fuel Oil",
"importedCoal": "İthal Kömür", "geothermal": "Jeo Termal", "lignite": "Linyit",
"naphtha": "Nafta", "lng": "LNG", "wind": "Rüzgar", "stonecoal": "Taş Kömür",
"international": "Uluslararası", "total": "Toplam", "other": "Diğer"},
inplace=True)
df = df[
["Tarih", "Saat", "Doğalgaz", "Barajlı", "Linyit", "Akarsu", "<NAME>", "Rüzgar",
"Fuel Oil", "Jeo Termal", "Asfaltit Kömür", "Taş Kömür", "Biyokütle", "Nafta", "LNG", "Uluslararası",
"Diğer", "Toplam"]]
except (KeyError, TypeError):
return __pd.DataFrame()
else:
return df
def gerceklesen(baslangic_tarihi=__dt.datetime.today().strftime("%Y-%m-%d"),
bitis_tarihi=__dt.datetime.today().strftime("%Y-%m-%d"), santral_id=""):
"""
İlgili tarih aralığı için lisanslı santrallerin toplam gerçek zamanlı üretim bilgisini vermektedir.
Not: "santral_id" değeri girildiği taktirde santrale ait gerçek zamanlı üretim bilgisini vermektedir.
Girilmediği taktirde toplam gerçek zamanlı üretim bilgisini vermektedir.
Parametreler
------------
baslangic_tarihi : %YYYY-%AA-%GG formatında başlangıç tarihi (Varsayılan: bugün)
bitis_tarihi : %YYYY-%AA-%GG formatında bitiş tarihi (Varsayılan: bugün)
santral_id : metin yada tam sayı formatında santral id (Varsayılan: "")
Geri Dönüş Değeri
-----------------
Gerçek Zamanlı Üretim("Tarih", "Saat", "Doğalgaz", "Barajlı", "Linyit", "Akarsu", "<NAME>", "Rüzgar", "Güneş",
"Fuel Oil", "Jeo Termal", "Asf<NAME>", "Ta<NAME>", "Biokütle", "Nafta", "LNG", "Uluslararası",
"Toplam")
"""
if __dogrulama.__baslangic_bitis_tarih_id_dogrulama(baslangic_tarihi, bitis_tarihi, santral_id):
if santral_id == "":
return __gerceklesen(baslangic_tarihi, bitis_tarihi)
else:
return __santral_bazli_gerceklesen(baslangic_tarihi, bitis_tarihi, santral_id)
def tum_santraller_gerceklesen(baslangic_tarihi=__dt.datetime.today().strftime("%Y-%m-%d"),
bitis_tarihi=__dt.datetime.today().strftime("%Y-%m-%d")):
"""
İlgili tarih aralığı için tüm lisanslı santrallerin gerçek zamanlı üretim bilgisini vermektedir.
Parametreler
------------
baslangic_tarihi : %YYYY-%AA-%GG formatında başlangıç tarihi (Varsayılan: bugün)
bitis_tarihi : %YYYY-%AA-%GG formatında bitiş tarihi (Varsayılan: bugün)
Geri Dönüş Değeri
-----------------
Tüm lisanslı santrallerin gerçek zamanlı üretim Değerleri (Tarih, Saat, Santral Üretimleri)
"""
if __dogrulama.__baslangic_bitis_tarih_dogrulama(baslangic_tarihi, bitis_tarihi):
sant = __santraller.gercek_zamanli_uretim_yapan_santraller()
list_sant = sant[["Id", "Kısa Adı"]].to_dict("records")
list_sant_len = len(list_sant)
list_sant = list(
zip([baslangic_tarihi] * list_sant_len, [bitis_tarihi] * list_sant_len, list_sant))
list_sant = list(map(list, list_sant))
with __Pool(__mp.cpu_count()) as p:
list_df_unit = p.starmap(__gerceklesen_santral, list_sant, chunksize=1)
list_df_unit = list(filter(lambda x: len(x) > 0, list_df_unit))
df_unit = __red(lambda left, right: __pd.merge(left, right, how="outer", on=["Tarih", "Saat"], sort=True),
list_df_unit)
return df_unit
def gddk(baslangic_tarihi=__dt.datetime.today().strftime("%Y-%m-%d"),
bitis_tarihi=__dt.datetime.today().strftime("%Y-%m-%d")):
"""
İlgili tarih aralığı için geriye dönük düzeltme kalemine (GDDK) ait değerleri vermektedir.
Parametreler
------------
baslangic_tarihi : %YYYY-%AA-%GG formatında başlangıç tarihi (Varsayılan: bugün)
bitis_tarihi : %YYYY-%AA-%GG formatında bitiş tarihi (Varsayılan: bugün)
Geri Dönüş Değeri
-----------------
Dönemlik GDDK (₺)
"""
if __dogrulama.__baslangic_bitis_tarih_dogrulama(baslangic_tarihi, bitis_tarihi):
try:
particular_url = \
__first_part_url + "gddk-amount" + "?startDate=" + baslangic_tarihi + "&endDate=" + bitis_tarihi
json = __make_requests(particular_url)
df = __pd.DataFrame(json["body"]["gddkAmountList"])
df["Dönem"] = df["id"].apply(
lambda x: str(__pd.to_datetime(x["date"][:10]).month_name(locale='tr_TR.UTF-8')) + "-" + str(
__pd.to_datetime(x["date"][:10]).year))
df["Versiyon"] = df["id"].apply(
lambda x: str(__pd.to_datetime(x["version"][:10]).month_name(locale='tr_TR.UTF-8')) + "-" + str(
__pd.to_datetime(x["version"][:10]).year))
df.rename(index=str,
columns={"gddkCreditAmount": "Alacak GDDK Tutarı (TL)", "gddkDebtAmount": "Borç GDDK Tutarı (TL)",
"gddkNetAmount": "Net GDDK Tutarı (TL)"}, inplace=True)
df = df[["Dönem", "Versiyon", "Alacak GDDK Tutarı (TL)", "Borç GDDK Tutarı (TL)", "Net GDDK Tutarı (TL)"]]
except (KeyError, TypeError):
return __pd.DataFrame()
else:
return df
def __gerceklesen(baslangic_tarihi=__dt.datetime.today().strftime("%Y-%m-%d"),
bitis_tarihi=__dt.datetime.today().strftime("%Y-%m-%d")):
"""
İlgili tarih aralığı için lisanslı santrallerin toplam gerçek zamanlı üretim bilgisini vermektedir.
Parametreler
------------
baslangic_tarihi : %YYYY-%AA-%GG formatında başlangıç tarihi (Varsayılan: bugün)
bitis_tarihi : %YYYY-%AA-%GG formatında bitiş tarihi (Varsayılan: bugün)
Geri Dönüş Değeri
-----------------
Gerçek Zamanlı Üretim("Tarih", "Saat", "Doğalgaz", "Barajlı", "Linyit", "Akarsu", "İthal Kömür", "Rüzgar", "Güneş",
"Fuel Oil", "Jeo Termal", "Asfaltit Kömür", "Taş Kömür", "Biokütle", "Nafta", "LNG", "Uluslararası",
"Toplam")
"""
try:
particular_url = __first_part_url + "real-time-generation" + "?startDate=" + baslangic_tarihi + "&endDate=" \
+ bitis_tarihi
json = __make_requests(particular_url)
        df = __pd.DataFrame(json["body"]["hourlyGenerations"])
import pandas as pd
import os
from .objects import ECause, EState, trade_from_dict
from .enums import *
from decimal import ROUND_DOWN, Decimal
import logging
from logging.handlers import TimedRotatingFileHandler
import time
from .safe_operators import *
def calculate_fee(amount, fee, digit=8):
return round(safe_multiply(amount,fee), digit)
def time_scale_to_minute(interval: str):
    minutes_per_unit = {  # values are minutes per unit, since the function returns minutes
        "m": 1,
        "h": 60,
        "d": 24 * 60,
        "w": 7 * 24 * 60,
    }
    try:
        return int(interval[:-1]) * minutes_per_unit[interval[-1]]
except (ValueError, KeyError):
return None
def round_step_downward(quantity, step_size):
    # NOTE: if the step_size is '1.0', 1.2389196468651802 would be rounded to 1.2 instead of 1,
    #       because Decimal('1.0') quantizes to one decimal place; an integer-valued step must therefore be cast to int first
if step_size.is_integer(): step_size = int(step_size)
return float(Decimal(str(quantity)).quantize(Decimal(str(step_size)), rounding=ROUND_DOWN))
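# --- Hedged examples (not part of the original module) -----------------------
# Illustrates the NOTE above: an integer-valued step such as 1.0 is cast to int
# so the Decimal quantization target has no decimal places. The step sizes are
# arbitrary sample values, not real exchange filters.
def _round_step_downward_examples():
    assert round_step_downward(1.2389196468651802, 0.001) == 1.238  # truncate to step precision
    assert round_step_downward(1.2389196468651802, 1.0) == 1.0      # integer step -> whole units
    assert round_step_downward(0.0009, 0.001) == 0.0                # rounding is always downward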
def truncate(num,n):
temp = str(num)
for x in range(len(temp)):
if temp[x] == '.':
try:
return float(temp[:x+n+1])
except:
return float(temp)
return float(temp)
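# --- Hedged examples (not part of the original module) -----------------------
# truncate() keeps n digits after the decimal point without rounding.
def _truncate_examples():
    assert truncate(1.23456, 2) == 1.23
    assert truncate(7, 3) == 7.0  # no decimal point: the value passes through unchanged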
def time_scale_to_second(interval: str):
return time_scale_to_minute(interval) * 60
def time_scale_to_milisecond(interval: str):
return time_scale_to_minute(interval) * 60 * 1000
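# --- Hedged sanity examples (not part of the original module) ----------------
# Interval strings follow the "<number><m|h|d|w>" convention used above.
def _time_scale_examples():
    assert time_scale_to_minute("15m") == 15
    assert time_scale_to_minute("4h") == 240
    assert time_scale_to_second("1d") == 86400
    assert time_scale_to_milisecond("1w") == 604800000
    assert time_scale_to_minute("bad") is None  # unparsable input falls back to None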
def eval_total_capital(df_balance, live_trade_list, quote_currency, max_capital_use_ratio=1):
    # Total capital: Free QC + LTO_enter
free_qc = df_balance.loc[quote_currency,'free']
# NOTE: In-trade balance is calculated only by considering the LTOs of the Ikarus
# Using only the df_balance requires live updates and evaluation of each asset in terms of QC
# NOTE: If state of a TO is:
# 'closed': then the amount that is used by this TO is reflected back to main capital (df_balance in backtest (by lto_update))
    #              : these LTOs need to be omitted
    #   'enter_expire': then it is marked to be handled by its strategy but the balance is still locked in the LTO
in_trade_qc = eval_total_capital_in_lto(live_trade_list)
total_qc = safe_sum(free_qc, in_trade_qc)
return safe_multiply(total_qc, max_capital_use_ratio)
def eval_total_capital_in_lto(trade_list):
in_trade_qc = 0
for trade in trade_list:
        # Omit the LTOs that are closed, because the amount they used has already been returned to df_balance (by the broker, or by lto_update in the test engine)
if trade.status != EState.CLOSED:
# NOTE: It is assumed that each object may only have 1 TYPE of exit or enter
in_trade_qc = safe_sum(in_trade_qc, trade.enter.amount)
return in_trade_qc
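# --- Hedged sketch (not part of the original module) -------------------------
# Shows the capital accounting above with a toy balance frame and a dummy trade
# object; the real Ikarus trade/balance objects are richer than this, and the
# result assumes safe_sum/safe_multiply behave like + and * (expected ~1125.0).
def _total_capital_sketch():
    from types import SimpleNamespace
    df_balance = pd.DataFrame({'free': [1000.0], 'locked': [250.0]}, index=['USDT'])
    open_trade = SimpleNamespace(status='open',  # anything other than EState.CLOSED counts as in-trade
                                 enter=SimpleNamespace(amount=250.0))
    # (1000 free + 250 locked in the live trade) * 0.9 capital-use cap
    return eval_total_capital(df_balance, [open_trade], 'USDT', max_capital_use_ratio=0.9)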
async def get_closed_hto(config, mongocli, query={'result.cause':ECause.CLOSED}):
    # TODO: NEXT: All statistics need to be changed a bit to integrate market orders
# Read Database to get hist-trades and dump to a DataFrame
hto_list = await mongocli.do_find('hist-trades',query)
hto_closed = []
for hto in hto_list:
trade = trade_from_dict(hto)
hto_dict = {
"_id": trade._id,
"strategy": trade.strategy,
"decision_time": trade.decision_time,
"enterTime": trade.result.enter.time,
"enterPrice": trade.enter.price,
"exitTime": trade.result.exit.time,
"exitPrice": trade.exit.price,
"sellPrice": trade.result.exit.price
}
# NOTE: No trade.result.enter.price is used because in each case Limit/Market enter the price value will be used directly
hto_closed.append(hto_dict)
    df = pd.DataFrame(hto_closed)
"""
hspfbintoolbox to read HSPF binary files.
"""
from __future__ import print_function
import datetime
import warnings
import os
import sys
import struct
import mando
from mando.rst_text_formatter import RSTHelpFormatter
import pandas as pd
from tstoolbox import tsutils
code2intervalmap = {5: "yearly", 4: "monthly", 3: "daily", 2: "bivl"}
interval2codemap = {"yearly": 5, "monthly": 4, "daily": 3, "bivl": 2}
code2freqmap = {5: "A", 4: "M", 3: "D", 2: None}
_LOCAL_DOCSTRINGS = {
"hbnfilename": r"""hbnfilename: str
The HSPF binary output file. This file must have been created from
a completed model run."""
}
def tupleMatch(a, b):
"""Part of partial ordered matching.
See http://stackoverflow.com/a/4559604
"""
return len(a) == len(b) and all(
i is None or j is None or i == j for i, j in zip(a, b)
)
def tupleCombine(a, b):
"""Part of partial ordered matching.
See http://stackoverflow.com/a/4559604
"""
return tuple([i is None and j or i for i, j in zip(a, b)])
def tupleSearch(findme, haystack):
"""Partial ordered matching with 'None' as wildcard
See http://stackoverflow.com/a/4559604
"""
return [
(i, tupleCombine(findme, h))
for i, h in enumerate(haystack)
if tupleMatch(findme, h)
]
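# --- Hedged illustration (not part of the original module) -------------------
# Demonstrates the partial ordered matching helpers above; the label tuples are
# made-up examples in the (index, optype, id, group, variable, interval code)
# shape that _get_data builds internally.
def _tuple_search_example():
    haystack = [[1, 'PERLND', 101, 'PWATER', 'UZS', 3],
                [2, 'RCHRES', 5, 'HYDR', 'RO', 3]]
    needle = (None, 'PERLND', 101, 'PWATER', 'UZS', 3)  # None acts as a wildcard
    return tupleSearch(needle, haystack)
    # -> [(0, (1, 'PERLND', 101, 'PWATER', 'UZS', 3))]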
def _get_data(binfilename, interval="daily", labels=[",,,"], catalog_only=True):
"""Underlying function to read from the binary file. Used by
'extract', 'catalog', and 'dump'.
"""
testem = {
"PERLND": [
"ATEMP",
"SNOW",
"PWATER",
"SEDMNT",
"PSTEMP",
"PWTGAS",
"PQUAL",
"MSTLAY",
"PEST",
"NITR",
"PHOS",
"TRACER",
"",
],
"IMPLND": ["ATEMP", "SNOW", "IWATER", "SOLIDS", "IWTGAS", "IQUAL", ""],
"RCHRES": [
"HYDR",
"CONS",
"HTRCH",
"SEDTRN",
"GQUAL",
"OXRX",
"NUTRX",
"PLANK",
"PHCARB",
"INFLOW",
"OFLOW",
"ROFLOW",
"",
],
"BMPRAC": [""],
"": [""],
}
collect_dict = {}
lablist = []
# Normalize interval code
try:
intervalcode = interval2codemap[interval.lower()]
except AttributeError:
intervalcode = None
# Fixup and test the labels - could be in it's own function
for lindex, label in enumerate(labels):
words = [lindex] + label.split(",")
if len(words) != 5:
raise ValueError(
tsutils.error_wrapper(
"""
The label '{0}' has the wrong number of entries.
""".format(
label
)
)
)
words = [None if i == "" else i for i in words]
if words[1] is not None:
words[1] = words[1].upper()
if words[1] not in testem.keys():
raise ValueError(
tsutils.error_wrapper(
"""
Operation type must be one of 'PERLND', 'IMPLND', 'RCHRES', or 'BMPRAC',
or missing (to get all) instead of {0}.
""".format(
words[1]
)
)
)
if words[2] is not None:
try:
words[2] = int(words[2])
if words[2] < 1 or words[2] > 999:
raise ValueError()
except (ValueError, TypeError):
raise ValueError(
tsutils.error_wrapper(
"""
The land use element must be an integer from 1 to 999 inclusive,
instead of {0}.
""".format(
words[2]
)
)
)
if words[3] is not None:
words[3] = words[3].upper()
if words[3] not in testem[words[1]]:
raise ValueError(
tsutils.error_wrapper(
"""
The {0} operation type only allows the variable groups:
{1},
instead you gave {2}.
""".format(
words[1], testem[words[1]][:-1], words[3]
)
)
)
words.append(intervalcode)
lablist.append(words)
with open(binfilename, "rb") as fl:
mindate = datetime.datetime.max
maxdate = datetime.datetime.min
labeltest = {}
vnames = {}
ndates = {}
rectype = 0
fl.read(1)
while True:
try:
reclen1, reclen2, reclen3, reclen = struct.unpack("4B", fl.read(4))
except struct.error:
# End of file.
break
rectype, optype, lue, section = struct.unpack("I8sI8s", fl.read(24))
rectype = int(rectype)
lue = int(lue)
optype = optype.strip()
section = section.strip()
slen = 0
if rectype == 0:
reclen1 = int(reclen1 / 4)
reclen2 = reclen2 * 64 + reclen1
reclen3 = reclen3 * 16384 + reclen2
reclen = reclen * 4194304 + reclen3 - 24
while slen < reclen:
length = struct.unpack("I", fl.read(4))[0]
slen = slen + length + 4
variable_name = struct.unpack(
"{0}s".format(length), fl.read(length)
)[0]
vnames.setdefault((lue, section), []).append(variable_name)
elif rectype == 1:
# Data record
numvals = len(vnames[(lue, section)])
(_, level, year, month, day, hour, minute) = struct.unpack(
"7I", fl.read(28)
)
vals = struct.unpack("{0}f".format(numvals), fl.read(4 * numvals))
if hour == 24:
ndate = (
datetime.datetime(year, month, day)
+ datetime.timedelta(hours=24)
+ datetime.timedelta(minutes=minute)
)
else:
ndate = datetime.datetime(year, month, day, hour, minute)
for i, vname in enumerate(vnames[(lue, section)]):
tmpkey = (
None,
optype.decode("ascii"),
int(lue),
section.decode("ascii"),
vname.decode("ascii"),
level,
)
if catalog_only is False:
res = tupleSearch(tmpkey, lablist)
if res:
nres = (res[0][0],) + res[0][1][1:]
labeltest[nres[0]] = 1
collect_dict.setdefault(nres, []).append(vals[i])
ndates.setdefault(level, {})[ndate] = 1
else:
mindate = min(mindate, ndate)
maxdate = max(maxdate, ndate)
pdoffset = code2freqmap[level]
collect_dict[tmpkey[1:]] = (
pd.Period(mindate, freq=pdoffset),
pd.Period(maxdate, freq=pdoffset),
)
else:
fl.seek(-31, 1)
# The following should be 1 or 2, but I don't know how to calculate
# it, so I just use that the 'rectype' must be 0 or 1, and if not
# rewind the correct amount.
fl.read(2)
if not collect_dict:
raise ValueError(
tsutils.error_wrapper(
"""
The label specifications below matched no records in the binary file.
{lablist}
""".format(
**locals()
)
)
)
if catalog_only is False:
not_in_file = []
for loopcnt in list(range(len(lablist))):
if loopcnt not in labeltest.keys():
not_in_file.append(labels[loopcnt])
if not_in_file:
warnings.warn(
tsutils.error_wrapper(
"""
The specification{0} {1}
matched no records in the binary file.
""".format(
"s"[len(not_in_file) == 1 :], not_in_file
)
)
)
return ndates, collect_dict
@mando.command("extract", formatter_class=RSTHelpFormatter, doctype="numpy")
@tsutils.doc(tsutils.merge_dicts(tsutils.docstrings, _LOCAL_DOCSTRINGS))
def extract_cli(hbnfilename, interval, *labels, **kwds):
r"""Prints out data to the screen from a HSPF binary output file.
Parameters
----------
{hbnfilename}
interval: str
One of 'yearly', 'monthly', 'daily', or 'BIVL'. The 'BIVL' option is
a sub-daily interval defined in the UCI file. Typically 'BIVL' is used
for hourly output, but can be set to any value that evenly divides into
a day.
labels: str
The remaining arguments uniquely identify a time-series in the
binary file. The format is 'OPERATIONTYPE,ID,VARIABLE_GROUP,VARIABLE'.
For example: 'PERLND,101,PWATER,UZS IMPLND,101,IWATER,RETS'
Leaving a section without an entry will wildcard that
specification. To get all the PWATER variables for PERLND 101 the
label would read:
'PERLND,101,PWATER,'
To get TAET for all PERLNDs:
'PERLND,,,TAET'
Note that there are spaces ONLY between label specifications.
    OPERATIONTYPE can be PERLND, IMPLND, RCHRES, and BMPRAC.
ID is specified in the UCI file.
VARIABLE_GROUP depends on OPERATIONTYPE where::
if OPERATIONTYPE is PERLND then VARIABLEGROUP can be one of
'ATEMP', 'SNOW', 'PWATER', 'SEDMNT', 'PSTEMP', 'PWTGAS',
'PQUAL', 'MSTLAY', 'PEST', 'NITR', 'PHOS', 'TRACER'
if OPERATIONTYPE is IMPLND then VARIABLEGROUP can be one of
'ATEMP', 'SNOW', 'IWATER', 'SOLIDS', 'IWTGAS', 'IQUAL'
if OPERATIONTYPE is RCHRES then VARIABLEGROUP can be one of
'HYDR', 'CONS', 'HTRCH', 'SEDTRN', 'GQUAL', 'OXRX', 'NUTRX',
'PLANK', 'PHCARB', 'INFLOW', 'OFLOW', 'ROFLOW'
if OPERATIONTYPE is BMPRAC then there is no VARIABLEGROUP and you
have to leave VARIABLEGROUP as a wild card. For example,
'BMPRAC,875,,RMVOL'.
kwds:
        Currently the allowable keywords are 'time_stamp' and
'sorted'.
time_stamp:
[optional, default is 'begin']
        Defines where within the interval the time stamp is placed. If set to
'begin', the time stamp is at the beginning of the interval. If set to
any other string, the reported time stamp will represent the end of the
interval. Place after ALL labels.
sorted:
[optional, default is False]
Should ALL columns be sorted? Place after ALL labels."""
tsutils._printiso(extract(hbnfilename, interval, *labels, **kwds))
def extract(hbnfilename, interval, *labels, **kwds):
r"""Returns a DataFrame from a HSPF binary output file."""
try:
time_stamp = kwds.pop("time_stamp")
except KeyError:
time_stamp = "begin"
if time_stamp not in ["begin", "end"]:
raise ValueError(
tsutils.error_wrapper(
"""
The "time_stamp" optional keyword must be either
"begin" or "end". You gave {0}.
""".format(
time_stamp
)
)
)
try:
sortall = bool(kwds.pop("sorted"))
except KeyError:
sortall = False
if not (sortall is True or sortall is False):
raise ValueError(
tsutils.error_wrapper(
"""
The "sorted" optional keyword must be either
True or False. You gave {0}.
""".format(
sortall
)
)
)
if len(kwds) > 0:
raise ValueError(
tsutils.error_wrapper(
"""
The extract command only accepts optional keywords 'time_stamp' and
'sorted'. You gave {0}.
""".format(
list(kwds.keys())
)
)
)
interval = interval.lower()
if interval not in ["bivl", "daily", "monthly", "yearly"]:
raise ValueError(
tsutils.error_wrapper(
"""
The "interval" argument must be one of "bivl",
"daily", "monthly", or "yearly". You supplied
"{0}".
""".format(
interval
)
)
)
index, data = _get_data(hbnfilename, interval, labels, catalog_only=False)
index = index[interval2codemap[interval]]
index = sorted(index.keys())
skeys = list(data.keys())
if sortall is True:
skeys.sort(key=lambda tup: tup[1:])
else:
skeys.sort()
result = pd.DataFrame(
pd.concat(
[pd.Series(data[i], index=index) for i in skeys], sort=False, axis=1
).reindex(pd.Index(index))
)
columns = ["{0}_{1}_{2}_{3}".format(i[1], i[2], i[4], i[5]) for i in skeys]
result.columns = columns
if time_stamp == "begin":
result = tsutils.asbestfreq(result)
result = result.tshift(-1)
result.index.name = "Datetime"
return result
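# --- Hedged usage sketch (not part of the original module) -------------------
# The file name and label below are hypothetical; any .hbn file written by a
# completed HSPF run with a PERLND 101 PWATER section would work the same way.
def _extract_usage_sketch():
    df = extract("hspf_run.hbn", "daily", "PERLND,101,PWATER,UZS",
                 time_stamp="begin", sorted=True)
    # columns are named OPTYPE_LUE_VARIABLE_LEVELCODE, e.g. "PERLND_101_UZS_3"
    return df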
@mando.command("catalog", formatter_class=RSTHelpFormatter, doctype="numpy")
@tsutils.doc(tsutils.merge_dicts(tsutils.docstrings, _LOCAL_DOCSTRINGS))
def catalog_cli(hbnfilename, tablefmt="simple", header="default"):
"""
Prints out a catalog of data sets in the binary file.
The first four items of each line can be used as labels with the 'extract'
command to identify time-series in the binary file.
Parameters
----------
{hbnfilename}
{tablefmt}
{header}
"""
if header == "default":
header = ["LUE", "LC", "GROUP", "VAR", "TC", "START", "END", "TC"]
tsutils._printiso(catalog(hbnfilename), tablefmt=tablefmt, headers=header)
def catalog(hbnfilename):
"""
Prints out a catalog of data sets in the binary file.
"""
# PERLND 905 PWATER SURS 5 1951 2001 yearly
# PERLND 905 PWATER TAET 5 1951 2001 yearly
catlog = _get_data(hbnfilename, None, [",,,"], catalog_only=True)[1]
catkeys = sorted(catlog.keys())
result = []
for cat in catkeys:
result.append(cat + catlog[cat] + (code2intervalmap[cat[-1]],))
return result
@mando.command("dump", formatter_class=RSTHelpFormatter, doctype="numpy")
@tsutils.doc(tsutils.merge_dicts(tsutils.docstrings, _LOCAL_DOCSTRINGS))
def dump_cli(hbnfilename, time_stamp="begin"):
"""
Prints out ALL data from a HSPF binary output file.
Parameters
----------
{hbnfilename}
time_stamp
[optional, default is 'begin']
        For each interval, defines the location of the time stamp. If set
        to 'begin', the time stamp is at the beginning of the interval.
        If set to any other string, the reported time stamp will
        represent the end of the interval. Default is 'begin'.
"""
tsutils._printiso(dump(hbnfilename, time_stamp=time_stamp))
def dump(hbnfilename, time_stamp="begin"):
"""
Prints out ALL data from a HSPF binary output file.
"""
if time_stamp not in ["begin", "end"]:
raise ValueError(
tsutils.error_wrapper(
"""
The "time_stamp" optional keyword must be either
"begin" or "end". You gave {0}.
""".format(
time_stamp
)
)
)
index, data = _get_data(hbnfilename, None, [",,,"], catalog_only=False)
skeys = sorted(data.keys())
result = pd.DataFrame(
pd.concat(
            [pd.Series(data[i], index=index)
import numpy as np
import numpy.testing as npt
import pandas as pd
import pandas.testing as pdt
import pytest
def test_col():
from anaphora import Col
# attribute access
df = pd.DataFrame({'y': [1,2,3], 'x': [4,5,6]})
actual = Col('y').values(df)
expected = df['y'].values
assert isinstance(actual, np.ndarray)
npt.assert_array_equal(actual, expected)
# attribute chaining (!!)
df = pd.DataFrame({'y': [1,2,3], 'x': [4,5,6]})
actual = Col('y').values(df).dtype
expected = df['y'].values.dtype
npt.assert_array_equal(actual, expected)
# method chaining
df = pd.DataFrame({'y': [1,2,3], 'x': [4,5,6]})
actual = Col('y').map({1: '1', 2: '2'}).astype('category')(df)
expected = df['y'].map({1: '1', 2: '2'}).astype('category')
pdt.assert_series_equal(actual, expected)
# magic method chaining
df = pd.DataFrame({'y': [1,2,3], 'x': [4,5,6]})
actual = ((Col('y') + 3) * 10)(df)
expected = (df['y'] + 3) * 10
pdt.assert_series_equal(actual, expected)
# loc, scalar output
df = pd.DataFrame({'y': [1,2,3], 'x': [4,5,6]}, index=list('abc'))
actual = Col('x').loc['c'](df)
expected = 6
assert int(actual) == expected
# loc, vector output
df = pd.DataFrame({'y': [1,2,3], 'x': [4,5,6]}, index=list('abc'))
actual = Col('x').loc[['a','c']](df)
expected = pd.Series([4,6], index=['a','c'], name='x')
pdt.assert_series_equal(actual, expected)
def test_with_column():
from anaphora import Col, with_column
# replace a column
df = pd.DataFrame({'y': [1,2,3], 'x': [4,5,6]}, index=list('abc'))
actual = with_column(df, 'x', Col() * 10)
expected = df.copy()
expected['x'] = df['x'] * 10
pdt.assert_frame_equal(actual, expected)
# add a column
df = pd.DataFrame({'y': [1,2,3], 'x': [4,5,6]}, index=list('abc'))
actual = with_column(df, 'z', Col('x') * 10)
expected = df.copy()
expected['z'] = df['x'] * 10
pdt.assert_frame_equal(actual, expected)
# subset with scalar loc
df = pd.DataFrame({'y': [1,2,3], 'x': [4,5,6]}, index=list('abc'))
actual = with_column(df, 'z', Col('y') * 10, loc='b')
expected = df.copy()
expected.loc['b', 'z'] = df.loc['b', 'y'] * 10
pdt.assert_frame_equal(actual, expected)
# subset with scalar iloc
df = pd.DataFrame({'y': [1,2,3], 'x': [4,5,6]}, index=list('abc'))
actual = with_column(df, 'z', Col('y') * 10, iloc=1)
expected = df.copy()
expected.loc[expected.index[1], 'z'] = df['y'].iloc[1] * 10
pdt.assert_frame_equal(actual, expected)
# subset with vector loc
df = pd.DataFrame({'y': [1,2,3], 'x': [4,5,6]}, index=list('abc'))
actual = with_column(df, 'z', Col('y') * 10, loc=['a', 'b'])
expected = df.copy()
expected.loc[['a', 'b'], 'z'] = df.loc[['a', 'b'], 'y'] * 10
pdt.assert_frame_equal(actual, expected)
# subset with vector iloc
df = pd.DataFrame({'y': [1,2,3], 'x': [4,5,6]}, index=list('abc'))
actual = with_column(df, 'z', Col('y') * 10, iloc=[1, 2])
expected = df.copy()
expected.loc[expected.index[[1, 2]], 'z'] = df['y'].iloc[[1,2]] * 10
pdt.assert_frame_equal(actual, expected)
# no-name shortcut
df = pd.DataFrame({'y': [1,2,3], 'x': [4,5,6]}, index=list('abc'))
    # assert_frame_equal raises on mismatch and returns None, so it must not be wrapped in `assert`
    pd.testing.assert_frame_equal(
        with_column(df, 'y', Col('y') * 10, iloc=[1, 2]),
        with_column(df, 'y', Col() * 10, iloc=[1, 2])
    )
with pytest.raises(KeyError) as exc_info:
with_column(df, 'z', Col() * 10, iloc=[1, 2])
assert str(exc_info.value) == 'z'
# don't mutate original
df = pd.DataFrame({'y': [1,2,3], 'x': [4,5,6]}, index=list('abc'))
    df2 = with_column(df, 'y', Col('y') * 10, iloc=[1, 2])  # no trailing comma: keep df2 a DataFrame, not a tuple
with pytest.raises(AssertionError):
pd.testing.assert_frame_equal(df, df2)
def test_mutate():
from anaphora import Col, mutate, mutate_sequential
df = pd.DataFrame({'y': [1,2,3], 'x': [4,5,6]})
expected = pd.DataFrame({'y': [0,1,2], 'x': [5,6,7]})
actual = mutate(df, y=Col('x')-1, z=Col('y')+1)
df = pd.DataFrame({'y': [1,2,3], 'x': [4,5,6]})
    expected = pd.DataFrame({'y': [0,1,2], 'x': [1,2,3]})
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 26 12:59:14 2019
@author: akurnizk
"""
# import utm
import csv
import math
# import flopy
import sys,os
import calendar
import dateutil
import numpy as np
import pandas as pd
import matplotlib as mpl
import seaborn as sns; sns.set()
mpl.rc('xtick', labelsize=22)
mpl.rc('ytick', labelsize=22)
mpl.rcParams['pdf.fonttype'] = 42
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
# import flopy.utils.binaryfile as bf
cgw_code_dir = 'E:\Python KMB - CGW' # Location of BitBucket folder containing cgw folder
sys.path.insert(0,cgw_code_dir)
from matplotlib import pylab
from scipy.io import loadmat
# from shapely.geometry import Point
from datetime import datetime, time, timedelta
# from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib.ticker import (MultipleLocator, FormatStrFormatter, AutoMinorLocator)
# Assign name and create modflow model object
work_dir = os.path.join('E:\Herring Models\Seasonal')
data_dir = os.path.join('E:\Data')
mean_sea_level = 0.843 # Datum in meters at closest NOAA station (8447435), Chatham, Lydia Cove MA
# https://tidesandcurrents.noaa.gov/datums.html?units=1&epoch=0&id=8447435&name=Chatham%2C+Lydia+Cove&state=MA
# use this value for the right boundary
# use data from sonic recorder on ocean side of dike for left boundary
#%% Loading Information from HR Dike Sensors
with open(os.path.join(data_dir,"Surface Water Level Data","USGS 011058798 Herring R at Chequessett Neck Rd","Gage height_ft.txt")) as f:
reader = csv.reader(f, delimiter="\t")
HR_dike_all_levels = list(reader)
HR_dike_oceanside_levels = []
HR_dike_HRside_levels = []
for line in range(len(HR_dike_all_levels)-31):
HR_dike_oceanside_levels.append([HR_dike_all_levels[line+31][2],HR_dike_all_levels[line+31][4]])
HR_dike_HRside_levels.append([HR_dike_all_levels[line+31][2],HR_dike_all_levels[line+31][6]])
HR_dike_oceanside_levels = np.array(HR_dike_oceanside_levels)
HR_dike_HRside_levels = np.array(HR_dike_HRside_levels)
#%% Dike Levels
"""
Ocean side of dike
"""
# date2num returns Number of days (fraction part represents hours, minutes, seconds, ms) since 0001-01-01 00:00:00 UTC, plus one.
x_oceanside, y_oceanside = HR_dike_oceanside_levels.T
dates_oceanside = [dateutil.parser.parse(x) for x in x_oceanside]
x_oceanside_datenum = mdates.date2num(dates_oceanside)
y_oceanside[np.where(y_oceanside == '')] = np.nan
y_oceanside = y_oceanside.astype(np.float)*0.3048 # feet to meters
pylab.plot(x_oceanside_datenum, y_oceanside, 'o', markersize=1)
idx_oceanside = np.isfinite(x_oceanside_datenum) & np.isfinite(y_oceanside)
z_oceanside = np.polyfit(x_oceanside_datenum[idx_oceanside], y_oceanside[idx_oceanside], 1)
p_oceanside = np.poly1d(z_oceanside)
polyX_oceanside = np.linspace(x_oceanside_datenum.min(), x_oceanside_datenum.max(), 100)
pylab.plot(polyX_oceanside,p_oceanside(polyX_oceanside),"c", label='Mean Sea Level')
# the line equation:
print("y=%.6fx+(%.6f)"%(z_oceanside[0],z_oceanside[1]))
# Show X-axis major tick marks as dates
loc= mdates.AutoDateLocator()
plt.gca().xaxis.set_major_locator(loc)
plt.gca().xaxis.set_major_formatter(mdates.AutoDateFormatter(loc))
plt.gcf().autofmt_xdate()
plt.xlabel('Date', fontsize=18)
plt.ylabel('Elevation (m)', fontsize=16)
plt.legend()
pylab.show()
for i in range(len(y_oceanside)):
if ~np.isnan(y_oceanside[i]):
print("Index value for starting date of oceanside data = ",i)
date_oceanside_start = x_oceanside[i]
break
for i in reversed(range(len(y_oceanside))):
if ~np.isnan(y_oceanside[i]):
print("Index value for ending date of oceanside data = ",i)
date_oceanside_end = x_oceanside[i]
break
sealev_june2015 = z_oceanside[0]*x_oceanside_datenum.min()+z_oceanside[1]
sealev_today = z_oceanside[0]*x_oceanside_datenum.max()+z_oceanside[1]
slr_june2015tojuly2019 = sealev_today-sealev_june2015
slr_oneyear = slr_june2015tojuly2019/1484*365
"""
Herring River side of dike
"""
# date2num returns Number of days (fraction part represents hours, minutes, seconds, ms) since 0001-01-01 00:00:00 UTC, plus one.
x_HRside, y_HRside = HR_dike_HRside_levels.T
dates_HRside = [dateutil.parser.parse(x) for x in x_HRside]
x_HRside_datenum = mdates.date2num(dates_HRside)
y_HRside[np.where(y_HRside == '')] = np.nan
y_HRside[np.where(y_HRside == 'Eqp')] = np.nan # remove equipment failures
y_HRside = y_HRside.astype(np.float)*0.3048 # feet to meters
pylab.plot(x_HRside_datenum, y_HRside, '+', markersize=1)
idx_HRside = np.isfinite(x_HRside_datenum) & np.isfinite(y_HRside)
z_HRside = np.polyfit(x_HRside_datenum[idx_HRside], y_HRside[idx_HRside], 1)
p_HRside = np.poly1d(z_HRside)
polyX_HRside = np.linspace(x_HRside_datenum.min(), x_HRside_datenum.max(), 100)
pylab.plot(polyX_HRside,p_HRside(polyX_HRside),"r", label='Mean River Level')
# the line equation:
print("y=%.6fx+(%.6f)"%(z_HRside[0],z_HRside[1]))
# Show X-axis major tick marks as dates
loc= mdates.AutoDateLocator()
plt.gca().xaxis.set_major_locator(loc)
plt.gca().xaxis.set_major_formatter(mdates.AutoDateFormatter(loc))
plt.gcf().autofmt_xdate()
plt.legend()
pylab.show()
for i in range(len(y_HRside)):
if ~np.isnan(y_HRside[i]):
print(i)
date_HRside_start = x_HRside[i]
break
for i in reversed(range(len(y_HRside))):
if ~np.isnan(y_HRside[i]):
print(i)
date_HRside_end = x_HRside[i]
break
rivlev_june2015 = z_HRside[0]*x_HRside_datenum.min()+z_HRside[1]
rivlev_today = z_HRside[0]*x_HRside_datenum.max()+z_HRside[1]
rlr_june2015tojuly2019 = rivlev_today-rivlev_june2015
rlr_oneyear = rlr_june2015tojuly2019/1484*365
difinmeans_june2015 = sealev_june2015 - rivlev_june2015
difinmeans_july2019 = sealev_today - rivlev_today
#%% Dike Discharges
"""
Discharge through dike
Measurements are taken every 5 minutes
Filtering takes a 1 hour average
"""
with open(os.path.join(data_dir,"Surface Water Level Data","USGS 011058798 Herring R at Cheques<NAME> Rd","Discharge_cfs_Discharge.txt")) as f:
reader = csv.reader(f, delimiter="\t")
HR_dike_all_discharge = list(reader)
HR_dike_discharge = []
HR_dike_discharge_filtered = []
for line in range(len(HR_dike_all_discharge)-30):
HR_dike_discharge.append([HR_dike_all_discharge[line+30][2],HR_dike_all_discharge[line+30][4]])
HR_dike_discharge_filtered.append([HR_dike_all_discharge[line+30][2],HR_dike_all_discharge[line+30][6]])
HR_dike_discharge = np.array(HR_dike_discharge)
HR_dike_discharge_filtered = np.array(HR_dike_discharge_filtered)
x_discharge, y_discharge = HR_dike_discharge.T
x_discharge_filtered, y_discharge_filtered = HR_dike_discharge_filtered.T
dates_discharge = [dateutil.parser.parse(x) for x in x_discharge]
dates_discharge_filtered = [dateutil.parser.parse(x) for x in x_discharge_filtered]
x_discharge_datenum = mdates.date2num(dates_discharge)
x_discharge_filtered_datenum = mdates.date2num(dates_discharge_filtered)
y_discharge[np.where(y_discharge == '')] = np.nan
y_discharge_filtered[np.where(y_discharge_filtered == '')] = np.nan
y_discharge = y_discharge.astype(np.float)*0.028316847 # cfs to cms
y_discharge_filtered = y_discharge_filtered.astype(np.float)*0.028316847 # cfs to cms
# Plotting
plt.figure()
pylab.plot(x_discharge_datenum, y_discharge, '+', markersize=1)
pylab.plot(x_discharge_filtered_datenum, y_discharge_filtered, 'o', markersize=1)
# Trendline (eliminates no-data points from consideration)
idx_discharge = np.isfinite(x_discharge_datenum) & np.isfinite(y_discharge)
idx_discharge_filtered = np.isfinite(x_discharge_filtered_datenum) & np.isfinite(y_discharge_filtered)
z_discharge = np.polyfit(x_discharge_datenum[idx_discharge], y_discharge[idx_discharge], 1)
z_discharge_filtered = np.polyfit(x_discharge_filtered_datenum[idx_discharge_filtered], y_discharge_filtered[idx_discharge_filtered], 1)
p_discharge = np.poly1d(z_discharge)
p_discharge_filtered = np.poly1d(z_discharge_filtered)
polyX_discharge = np.linspace(x_discharge_datenum.min(), x_discharge_datenum.max(), 100)
polyX_discharge_filtered = np.linspace(x_discharge_filtered_datenum.min(), x_discharge_filtered_datenum.max(), 100)
pylab.plot(polyX_discharge, p_discharge(polyX_discharge),"g", label='Mean Unfiltered Discharge (cms)')
pylab.plot(polyX_discharge_filtered, p_discharge_filtered(polyX_discharge_filtered), "m", label='Mean Filtered Discharge (cms)')
# the line equations:
print("Unfiltered, "+("y=%.6fx+(%.6f)"%(z_discharge[0],z_discharge[1])))
print("Filtered, "+("y=%.6fx+(%.6f)"%(z_discharge_filtered[0],z_discharge_filtered[1])))
# Show X-axis major tick marks as dates
loc= mdates.AutoDateLocator()
plt.gca().xaxis.set_major_locator(loc)
plt.gca().xaxis.set_major_formatter(mdates.AutoDateFormatter(loc))
plt.gcf().autofmt_xdate()
plt.gca().set_ylim([-0.4,0.2]) # set the y-axis limits
plt.xlabel('Date', fontsize=18)
plt.ylabel('Discharge (cms), Elevation (m)', fontsize=16)
# set x bounds to discharge times for sea and river level polyfits
polyX_HRside = np.linspace(x_discharge_datenum.min(), x_discharge_datenum.max(), 100)
polyX_oceanside = np.linspace(x_discharge_datenum.min(), x_discharge_datenum.max(), 100)
pylab.plot(polyX_discharge,p_HRside(polyX_discharge),"r", label='Mean River Level (m)')
pylab.plot(polyX_discharge,p_oceanside(polyX_discharge),"c", label='Mean Sea Level (m)')
plt.legend()
pylab.show()
for i in range(len(y_discharge)):
if ~np.isnan(y_discharge[i]):
date_discharge_start = x_discharge[i]
print(i, date_discharge_start)
break
for i in reversed(range(len(y_discharge))):
if ~np.isnan(y_discharge[i]):
date_discharge_end = x_discharge[i]
print(i, date_discharge_end)
break
# Max and min of trendlines
discharge_june2015 = z_discharge[0]*x_discharge_datenum.min()+z_discharge[1]
discharge_oct2017 = z_discharge[0]*x_discharge_datenum.max()+z_discharge[1]
change_in_discharge_june2015tooct2017 = discharge_june2015-discharge_oct2017
change_in_discharge_oneyear = change_in_discharge_june2015tooct2017/(x_discharge_datenum[-1]-x_discharge_datenum[0])*365
print("Unfiltered discharge is decreasing by ", ("%.3f"%(change_in_discharge_oneyear)), "cms per year.")
print("Unfiltered discharge goes from an average of ", ("%.3f"%(discharge_june2015)), "cms in June 2015 (into ocean)")
print("to ", ("%.3f"%(discharge_oct2017)), "cms in October 2017 (into river).")
discharge_filtered_june2015 = z_discharge_filtered[0]*x_discharge_filtered_datenum.min()+z_discharge_filtered[1]
discharge_filtered_oct2017 = z_discharge_filtered[0]*x_discharge_filtered_datenum.max()+z_discharge_filtered[1]
change_in_filt_discharge_june2015tooct2017 = discharge_filtered_june2015-discharge_filtered_oct2017
change_in_filt_discharge_oneyear = change_in_filt_discharge_june2015tooct2017/(x_discharge_filtered_datenum[-1]-x_discharge_filtered_datenum[0])*365
print("Filtered discharge is decreasing by ", ("%.3f"%(change_in_filt_discharge_oneyear)), "cms per year.")
print("Filtered discharge goes from an average of ", ("%.3f"%(discharge_filtered_june2015)), "cms in June 2015 (into ocean)")
print("to ", ("%.3f"%(discharge_filtered_oct2017)), "cms in October 2017 (into river).")
#%% Mean High and Low Dike Levels, Hourly
"""
Mean High and Low Tides at Dike (to determine changes in amplitude and amplitude decay from ocean to river)
"""
# Need to remove nan vals and reduce measurement frequency
nan_indices_oceanside = []
for i in range(len(y_oceanside)):
if np.isnan(y_oceanside[i]):
nan_indices_oceanside.append(i)
nan_indices_HRside = []
for i in range(len(y_HRside)):
if np.isnan(y_HRside[i]):
nan_indices_HRside.append(i)
y_oceanside_nonans = y_oceanside.tolist()
x_oceanside_datenum_nonans = x_oceanside_datenum.tolist()
for index in sorted(nan_indices_oceanside, reverse=True):
del y_oceanside_nonans[index]
del x_oceanside_datenum_nonans[index]
y_oceanside_nonans = np.array(y_oceanside_nonans)
x_oceanside_datenum_nonans = np.array(x_oceanside_datenum_nonans)
y_HRside_nonans = y_HRside.tolist()
x_HRside_datenum_nonans = x_HRside_datenum.tolist()
for index in sorted(nan_indices_HRside, reverse=True):
del y_HRside_nonans[index]
del x_HRside_datenum_nonans[index]
y_HRside_nonans = np.array(y_HRside_nonans)
x_HRside_datenum_nonans = np.array(x_HRside_datenum_nonans)
# convert numbered datetime back to standard (allows determination of minutes)
dates_oceanside_nonans = mdates.num2date(x_oceanside_datenum_nonans)
dates_HRside_nonans = mdates.num2date(x_HRside_datenum_nonans)
hourly_indices_oceanside = []
for i in range(len(dates_oceanside_nonans)):
if dates_oceanside_nonans[i].minute == 0: # minute is only zero on the hour
hourly_indices_oceanside.append(i)
hourly_indices_HRside = []
for i in range(len(dates_HRside_nonans)):
if dates_HRside_nonans[i].minute == 0: # minute is only zero on the hour
hourly_indices_HRside.append(i)
y_oceanside_hourly = []
x_oceanside_datenum_hourly = []
for index in sorted(hourly_indices_oceanside):
y_oceanside_hourly.append(y_oceanside_nonans[index])
x_oceanside_datenum_hourly.append(x_oceanside_datenum_nonans[index])
y_oceanside_hourly = np.array(y_oceanside_hourly)
x_oceanside_datenum_hourly = np.array(x_oceanside_datenum_hourly)
y_HRside_hourly = []
x_HRside_datenum_hourly = []
for index in sorted(hourly_indices_HRside):
y_HRside_hourly.append(y_HRside_nonans[index])
x_HRside_datenum_hourly.append(x_HRside_datenum_nonans[index])
y_HRside_hourly = np.array(y_HRside_hourly)
x_HRside_datenum_hourly = np.array(x_HRside_datenum_hourly)
# plot hourly levels
pylab.plot(x_oceanside_datenum_hourly, y_oceanside_hourly, 'o', markersize=1)
pylab.plot(x_HRside_datenum_hourly, y_HRside_hourly, 'o', markersize=1)
idx_oceanside_hourly = np.isfinite(x_oceanside_datenum_hourly) & np.isfinite(y_oceanside_hourly)
idx_HRside_hourly = np.isfinite(x_HRside_datenum_hourly) & np.isfinite(y_HRside_hourly)
z_oceanside_hourly = np.polyfit(x_oceanside_datenum_hourly[idx_oceanside_hourly], y_oceanside_hourly[idx_oceanside_hourly], 1)
z_HRside_hourly = np.polyfit(x_HRside_datenum_hourly[idx_HRside_hourly], y_HRside_hourly[idx_HRside_hourly], 1)
p_oceanside_hourly = np.poly1d(z_oceanside_hourly)
p_HRside_hourly = np.poly1d(z_HRside_hourly)
polyX_oceanside_hourly = np.linspace(x_oceanside_datenum_hourly.min(), x_oceanside_datenum_hourly.max(), 100)
polyX_HRside_hourly = np.linspace(x_HRside_datenum_hourly.min(), x_HRside_datenum_hourly.max(), 100)
pylab.plot(polyX_oceanside_hourly,p_oceanside_hourly(polyX_oceanside_hourly),"c", label='Mean Sea Level')
pylab.plot(polyX_HRside_hourly,p_HRside_hourly(polyX_HRside_hourly),"r", label='Mean River Level')
# the line equation:
print("y=%.6fx+(%.6f)"%(z_oceanside_hourly[0],z_oceanside_hourly[1]))
print("y=%.6fx+(%.6f)"%(z_HRside_hourly[0],z_HRside_hourly[1]))
# Show X-axis major tick marks as dates
loc= mdates.AutoDateLocator()
plt.gca().xaxis.set_major_locator(loc)
plt.gca().xaxis.set_major_formatter(mdates.AutoDateFormatter(loc))
plt.gcf().autofmt_xdate()
plt.xlabel('Date', fontsize=18)
plt.ylabel('Elevation (m)', fontsize=16)
plt.legend()
pylab.show()
# Concatenate dates and levels
HR_dike_oceanside_levels_hourly = np.vstack((x_oceanside_datenum_hourly, y_oceanside_hourly)).T
HR_dike_HRside_levels_hourly = np.vstack((x_HRside_datenum_hourly, y_HRside_hourly)).T
# max and min vals for tides
HR_dike_oceanside_maxlevels = []
HR_dike_oceanside_minlevels = []
for i in range(len(HR_dike_oceanside_levels_hourly)-2): # length of oceanside and HRside level arrays are the same
if (HR_dike_oceanside_levels_hourly[i+1][1] > HR_dike_oceanside_levels_hourly[i][1]) & (HR_dike_oceanside_levels_hourly[i+2][1] < HR_dike_oceanside_levels_hourly[i+1][1]) & (HR_dike_oceanside_levels_hourly[i+1][1] > p_oceanside(polyX_oceanside).mean()):
HR_dike_oceanside_maxlevels.append([HR_dike_oceanside_levels_hourly[i+1][0], HR_dike_oceanside_levels_hourly[i+1][1]]) # high tides
if (HR_dike_oceanside_levels_hourly[i+1][1] < HR_dike_oceanside_levels_hourly[i][1]) & (HR_dike_oceanside_levels_hourly[i+2][1] > HR_dike_oceanside_levels_hourly[i+1][1]) & (HR_dike_oceanside_levels_hourly[i+1][1] < p_oceanside(polyX_oceanside).mean()):
HR_dike_oceanside_minlevels.append([HR_dike_oceanside_levels_hourly[i+1][0], HR_dike_oceanside_levels_hourly[i+1][1]])
HR_dike_oceanside_maxlevels = np.array(HR_dike_oceanside_maxlevels)
HR_dike_oceanside_minlevels = np.array(HR_dike_oceanside_minlevels)
HR_dike_HRside_maxlevels = []
HR_dike_HRside_minlevels = []
for i in range(len(HR_dike_HRside_levels_hourly)-2): # length of oceanside and HRside level arrays are the same
if (HR_dike_HRside_levels_hourly[i+1][1] > HR_dike_HRside_levels_hourly[i][1]) & (HR_dike_HRside_levels_hourly[i+2][1] < HR_dike_HRside_levels_hourly[i+1][1]) & (HR_dike_HRside_levels_hourly[i+1][1] > p_HRside(polyX_HRside).mean()):
HR_dike_HRside_maxlevels.append([HR_dike_HRside_levels_hourly[i+1][0], HR_dike_HRside_levels_hourly[i+1][1]]) # high tides
if (HR_dike_HRside_levels_hourly[i+1][1] < HR_dike_HRside_levels_hourly[i][1]) & (HR_dike_HRside_levels_hourly[i+2][1] > HR_dike_HRside_levels_hourly[i+1][1]) & (HR_dike_HRside_levels_hourly[i+1][1] < p_HRside(polyX_HRside).mean()):
HR_dike_HRside_minlevels.append([HR_dike_HRside_levels_hourly[i+1][0], HR_dike_HRside_levels_hourly[i+1][1]])
HR_dike_HRside_maxlevels = np.array(HR_dike_HRside_maxlevels)
HR_dike_HRside_minlevels = np.array(HR_dike_HRside_minlevels)
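# --- Hedged sketch (not part of the original analysis) -----------------------
# Compact illustration of the local-extrema picking used above, on a synthetic
# hourly tide; the real series additionally screens candidates against the
# long-term mean (p_oceanside/p_HRside) to reject wiggles between true tides.
def toy_tide_extrema():
    t = np.arange(72)                            # 72 hourly samples
    stage = 1.2*np.sin(2*np.pi*t/12.42)          # ~M2 period in hours, arbitrary amplitude
    mid = stage[1:-1]
    highs = np.where((mid > stage[:-2]) & (mid > stage[2:]))[0] + 1
    lows = np.where((mid < stage[:-2]) & (mid < stage[2:]))[0] + 1
    return t[highs], stage[highs], t[lows], stage[lows]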
#%% Mean High and Low Dike Levels, Oceanside and HRside
"""
Ocean side of dike mins and maxes (hourly time steps)
"""
x_oceanside_datenum_maxlevels, y_oceanside_maxlevels = HR_dike_oceanside_maxlevels.T
x_oceanside_datenum_minlevels, y_oceanside_minlevels = HR_dike_oceanside_minlevels.T
plt.figure()
pylab.plot(x_oceanside_datenum_maxlevels, y_oceanside_maxlevels, 'o', markersize=1)
pylab.plot(x_oceanside_datenum_minlevels, y_oceanside_minlevels, 'o', markersize=1)
idx_oceanside_max = np.isfinite(x_oceanside_datenum_maxlevels) & np.isfinite(y_oceanside_maxlevels)
idx_oceanside_min = np.isfinite(x_oceanside_datenum_minlevels) & np.isfinite(y_oceanside_minlevels)
z_oceanside_max = np.polyfit(x_oceanside_datenum_maxlevels[idx_oceanside_max], y_oceanside_maxlevels[idx_oceanside_max], 1)
z_oceanside_min = np.polyfit(x_oceanside_datenum_minlevels[idx_oceanside_min], y_oceanside_minlevels[idx_oceanside_min], 1)
p_oceanside_max = np.poly1d(z_oceanside_max)
p_oceanside_min = np.poly1d(z_oceanside_min)
polyX_oceanside_max = np.linspace(x_oceanside_datenum_maxlevels.min(), x_oceanside_datenum_maxlevels.max(), 100)
polyX_oceanside_min = np.linspace(x_oceanside_datenum_minlevels.min(), x_oceanside_datenum_minlevels.max(), 100)
pylab.plot(polyX_oceanside_max,p_oceanside_max(polyX_oceanside_max),"c", label='Mean High Sea Level')
pylab.plot(polyX_oceanside_min,p_oceanside_min(polyX_oceanside_min),"m", label='Mean Low Sea Level')
# the line equation:
print("y=%.6fx+(%.6f)"%(z_oceanside_max[0],z_oceanside_max[1]))
print("y=%.6fx+(%.6f)"%(z_oceanside_min[0],z_oceanside_min[1]))
# Show X-axis major tick marks as dates
loc= mdates.AutoDateLocator()
plt.gca().xaxis.set_major_locator(loc)
plt.gca().xaxis.set_major_formatter(mdates.AutoDateFormatter(loc))
plt.gcf().autofmt_xdate()
plt.xlabel('Date', fontsize=18)
plt.ylabel('Elevation (m)', fontsize=16)
plt.legend()
pylab.show()
# Max and min of trendlines
sealev_oct2017_max = z_oceanside_max[0]*x_oceanside_datenum_maxlevels.min()+z_oceanside_max[1]
sealev_oct2017_min = z_oceanside_min[0]*x_oceanside_datenum_minlevels.min()+z_oceanside_min[1]
sealev_june2019_max = z_oceanside_max[0]*x_oceanside_datenum_maxlevels.max()+z_oceanside_max[1]
sealev_june2019_min = z_oceanside_min[0]*x_oceanside_datenum_minlevels.max()+z_oceanside_min[1]
slrhigh_oct2017tojune2019 = sealev_june2019_max-sealev_oct2017_max
slrlow_oct2017tojune2019 = sealev_june2019_min-sealev_oct2017_min
slrhigh_oneyear = slrhigh_oct2017tojune2019/(x_oceanside_datenum_maxlevels[-1]-x_oceanside_datenum_maxlevels[0])*365
slrlow_oneyear = slrlow_oct2017tojune2019/(x_oceanside_datenum_minlevels[-1]-x_oceanside_datenum_minlevels[0])*365
print("Maximum mean sea level on the ocean side of the dike is increasing by ", ("%.3f"%(slrhigh_oneyear)), "m per year.")
print("Maximum mean sea level at that location goes from an average of ", ("%.3f"%(sealev_oct2017_max)), "m in October 2017")
print("to ", ("%.3f"%(sealev_june2019_max)), "m in June 2019.")
print("Minimum mean sea level on the ocean side of the dike is increasing by ", ("%.3f"%(slrlow_oneyear)), "m per year.")
print("Minimum mean sea level at that location goes from an average of ", ("%.3f"%(sealev_oct2017_min)), "m in October 2017")
print("to ", ("%.3f"%(sealev_june2019_min)), "m in June 2019. This mean is primarily influenced by the Herring River.")
# Amplitudes and ranges
amp_oceanside = p_oceanside_max(polyX_oceanside_max) - p_oceanside(polyX_oceanside)
amp_oceanside_avg = amp_oceanside.mean()
range_oceanside = p_oceanside_max(polyX_oceanside_max) - p_oceanside_min(polyX_oceanside_min)
range_oceanside_avg = range_oceanside.mean()
print("Average tidal range between October 2017 and June 2019 on the ocean side of the dike is ", ("%.3f"%(range_oceanside_avg)), " m.")
"""
HR side of dike mins and maxes (hourly time steps)
"""
x_HRside_datenum_maxlevels, y_HRside_maxlevels = HR_dike_HRside_maxlevels.T
x_HRside_datenum_minlevels, y_HRside_minlevels = HR_dike_HRside_minlevels.T
pylab.plot(x_HRside_datenum_maxlevels, y_HRside_maxlevels, 'o', markersize=1)
pylab.plot(x_HRside_datenum_minlevels, y_HRside_minlevels, 'o', markersize=1)
idx_HRside_max = np.isfinite(x_HRside_datenum_maxlevels) & np.isfinite(y_HRside_maxlevels)
idx_HRside_min = np.isfinite(x_HRside_datenum_minlevels) & np.isfinite(y_HRside_minlevels)
z_HRside_max = np.polyfit(x_HRside_datenum_maxlevels[idx_HRside_max], y_HRside_maxlevels[idx_HRside_max], 1)
z_HRside_min = np.polyfit(x_HRside_datenum_minlevels[idx_HRside_min], y_HRside_minlevels[idx_HRside_min], 1)
p_HRside_max = np.poly1d(z_HRside_max)
p_HRside_min = np.poly1d(z_HRside_min)
polyX_HRside_max = np.linspace(x_HRside_datenum_maxlevels.min(), x_HRside_datenum_maxlevels.max(), 100)
polyX_HRside_min = np.linspace(x_HRside_datenum_minlevels.min(), x_HRside_datenum_minlevels.max(), 100)
pylab.plot(polyX_HRside_max,p_HRside_max(polyX_HRside_max),"g", label='Mean High River Level')
pylab.plot(polyX_HRside_min,p_HRside_min(polyX_HRside_min),"r", label='Mean Low River Level')
# the line equation:
print("y=%.6fx+(%.6f)"%(z_HRside_max[0],z_HRside_max[1]))
print("y=%.6fx+(%.6f)"%(z_HRside_min[0],z_HRside_min[1]))
# Show X-axis major tick marks as dates
loc= mdates.AutoDateLocator()
plt.gca().xaxis.set_major_locator(loc)
plt.gca().xaxis.set_major_formatter(mdates.AutoDateFormatter(loc))
plt.gcf().autofmt_xdate()
plt.xlabel('Date', fontsize=18)
plt.ylabel('Elevation (m)', fontsize=16)
plt.legend()
pylab.show()
print("There are ", y_oceanside_maxlevels.size, "data points for maximum tidal stage on the ocean side of the dike.")
print("There are ", y_oceanside_minlevels.size, "data points for minimum tidal stage on the ocean side of the dike.")
print("There are ", y_HRside_maxlevels.size, "data points for maximum stage on the river side of the dike.")
print("There are ", y_HRside_minlevels.size, "data points for minimum stage on the river side of the dike.")
# Max and min of trendlines
rivlev_june2015_max = z_HRside_max[0]*x_HRside_datenum_maxlevels.min()+z_HRside_max[1]
rivlev_june2015_min = z_HRside_min[0]*x_HRside_datenum_minlevels.min()+z_HRside_min[1]
rivlev_july2019_max = z_HRside_max[0]*x_HRside_datenum_maxlevels.max()+z_HRside_max[1]
rivlev_july2019_min = z_HRside_min[0]*x_HRside_datenum_minlevels.max()+z_HRside_min[1]
rlrhigh_june2015tojuly2019 = rivlev_july2019_max-rivlev_june2015_max
rlrlow_june2015tojuly2019 = rivlev_july2019_min-rivlev_june2015_min
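# note: because the fits are linear, the one-year rates computed below equal 365 times the fitted slopes (z_HRside_max[0] and z_HRside_min[0])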
rlrhigh_oneyear = rlrhigh_june2015tojuly2019/(x_HRside_datenum_maxlevels[-1]-x_HRside_datenum_maxlevels[0])*365
rlrlow_oneyear = rlrlow_june2015tojuly2019/(x_HRside_datenum_minlevels[-1]-x_HRside_datenum_minlevels[0])*365
print("Maximum mean river level on the river side of the dike is increasing by ", ("%.3f"%(rlrhigh_oneyear)), "m per year.")
print("Maximum mean river level at that location goes from an average of ", ("%.3f"%(rivlev_june2015_max)), "m in June 2015")
print("to ", ("%.3f"%(rivlev_july2019_max)), "m in July 2019.")
print("Minimum mean river level on the river side of the dike is increasing by ", ("%.3f"%(rlrlow_oneyear)), "m per year.")
print("Minimum mean river level at that location goes from an average of ", ("%.3f"%(rivlev_june2015_min)), "m in June 2015")
print("to ", ("%.3f"%(rivlev_july2019_min)), "m in July 2019.")
# Amplitudes and ranges
amp_HRside = p_HRside_max(polyX_HRside_max) - p_HRside(polyX_HRside)
amp_HRside_avg = amp_HRside.mean()
range_HRside = p_HRside_max(polyX_HRside_max) - p_HRside_min(polyX_HRside_min)
range_HRside_avg = range_HRside.mean()
print("Average tidal range between June 2015 and July 2019 on the river side of the dike is ", ("%.3f"%(range_HRside_avg)), " m.")
range_HRside_june2015 = p_HRside_max(polyX_HRside_max).min() - p_HRside_min(polyX_HRside_min).min()
range_HRside_july2019 = p_HRside_max(polyX_HRside_max).max() - p_HRside_min(polyX_HRside_min).max()
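# note: the ocean-side trendlines are evaluated over the river-side date range below (extrapolated before October 2017) so both ranges span June 2015 - July 2019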
range_oceanside_june2015 = p_oceanside_max(polyX_HRside_max).min() - p_oceanside_min(polyX_HRside_min).min()
range_oceanside_july2019 = p_oceanside_max(polyX_HRside_max).max() - p_oceanside_min(polyX_HRside_min).max()
#%% NPS CTD Sensor Data
with open(os.path.join(data_dir,"Surface Water Level Data","NPS CTD Sensors","Water_Elevation,_NAVD88-File_Import-07-17-2019_17-29.csv")) as f:
reader = csv.reader(f, delimiter=",")
NPS_CTD_all_levels = list(reader)
HighToss_levels = []
MillCreek_levels = []
CNRUS_levels = []
DogLeg_levels = []
OldSaw_levels = []
for line in range(len(NPS_CTD_all_levels)-3):
HighToss_levels.append([NPS_CTD_all_levels[line+3][0],NPS_CTD_all_levels[line+3][2]])
MillCreek_levels.append([NPS_CTD_all_levels[line+3][0],NPS_CTD_all_levels[line+3][3]])
CNRUS_levels.append([NPS_CTD_all_levels[line+3][0],NPS_CTD_all_levels[line+3][4]])
DogLeg_levels.append([NPS_CTD_all_levels[line+3][0],NPS_CTD_all_levels[line+3][5]])
OldSaw_levels.append([NPS_CTD_all_levels[line+3][0],NPS_CTD_all_levels[line+3][6]])
HighToss_levels = np.array(HighToss_levels)
MillCreek_levels = np.array(MillCreek_levels)
CNRUS_levels = np.array(CNRUS_levels)
DogLeg_levels = np.array(DogLeg_levels)
OldSaw_levels = np.array(OldSaw_levels)
#%% NPS CTD CNR U/S Salinity
with open(os.path.join(data_dir,"Salinity and Conductance Data","CNRUS_ctd_salinity.csv")) as f:
reader = csv.reader(f, delimiter=",")
NPS_CTD_all_salinity = list(reader)
CNRUS_salinity = []
for line in range(len(NPS_CTD_all_salinity)-1):
CNRUS_salinity.append([NPS_CTD_all_salinity[line+1][0],NPS_CTD_all_salinity[line+1][1]])
CNRUS_salinity = np.array(CNRUS_salinity)
x_CNRUS_sal, y_CNRUS_sal = CNRUS_salinity.T
dates_CNRUS_sal = [dateutil.parser.parse(x) for x in x_CNRUS_sal]
x_CNRUS_sal_datenum = mdates.date2num(dates_CNRUS_sal)
y_CNRUS_sal[np.where(y_CNRUS_sal == '')] = np.nan
y_CNRUS_sal = y_CNRUS_sal.astype(float)
idx_CNRUS_sal = np.isfinite(x_CNRUS_sal_datenum) & np.isfinite(y_CNRUS_sal)
z_CNRUS_sal = np.polyfit(x_CNRUS_sal_datenum[idx_CNRUS_sal], y_CNRUS_sal[idx_CNRUS_sal], 1)
p_CNRUS_sal = np.poly1d(z_CNRUS_sal)
polyX_CNRUS_sal = np.linspace(x_CNRUS_sal_datenum.min(), x_CNRUS_sal_datenum.max(), 100)
nan_indices_CNRUS_sal = []
for i in range(len(y_CNRUS_sal)):
if np.isnan(y_CNRUS_sal[i]):
nan_indices_CNRUS_sal.append(i)
y_CNRUS_sal_nonans = y_CNRUS_sal.tolist()
x_CNRUS_sal_datenum_nonans = x_CNRUS_sal_datenum.tolist()
for index in sorted(nan_indices_CNRUS_sal, reverse=True):
del y_CNRUS_sal_nonans[index]
del x_CNRUS_sal_datenum_nonans[index]
y_CNRUS_sal_nonans = np.array(y_CNRUS_sal_nonans)
x_CNRUS_sal_datenum_nonans = np.array(x_CNRUS_sal_datenum_nonans)
dates_CNRUS_sal_nonans = mdates.num2date(x_CNRUS_sal_datenum_nonans)
hourly_indices_CNRUS_sal = []
for i in range(len(dates_CNRUS_sal_nonans)):
if dates_CNRUS_sal_nonans[i].minute == 0:
hourly_indices_CNRUS_sal.append(i)
y_CNRUS_sal_hourly = []
x_CNRUS_sal_datenum_hourly = []
for index in sorted(hourly_indices_CNRUS_sal):
y_CNRUS_sal_hourly.append(y_CNRUS_sal_nonans[index])
x_CNRUS_sal_datenum_hourly.append(x_CNRUS_sal_datenum_nonans[index])
y_CNRUS_sal_hourly = np.array(y_CNRUS_sal_hourly)
x_CNRUS_sal_datenum_hourly = np.array(x_CNRUS_sal_datenum_hourly)
HR_dike_CNRUS_sal_levels_hourly = np.vstack((x_CNRUS_sal_datenum_hourly, y_CNRUS_sal_hourly)).T
# max and min vals for tides
HR_dike_CNRUS_sal_maxlevels = []
HR_dike_CNRUS_sal_minlevels = []
for i in range(len(HR_dike_CNRUS_sal_levels_hourly)-2):
if (HR_dike_CNRUS_sal_levels_hourly[i+1][1] > HR_dike_CNRUS_sal_levels_hourly[i][1]) & (HR_dike_CNRUS_sal_levels_hourly[i+2][1] < HR_dike_CNRUS_sal_levels_hourly[i+1][1]) & (HR_dike_CNRUS_sal_levels_hourly[i+1][1] > p_CNRUS_sal(polyX_CNRUS_sal).mean()):
HR_dike_CNRUS_sal_maxlevels.append([HR_dike_CNRUS_sal_levels_hourly[i+1][0], HR_dike_CNRUS_sal_levels_hourly[i+1][1]]) # high tides
if (HR_dike_CNRUS_sal_levels_hourly[i+1][1] < HR_dike_CNRUS_sal_levels_hourly[i][1]) & (HR_dike_CNRUS_sal_levels_hourly[i+2][1] > HR_dike_CNRUS_sal_levels_hourly[i+1][1]) & (HR_dike_CNRUS_sal_levels_hourly[i+1][1] < p_CNRUS_sal(polyX_CNRUS_sal).mean()):
HR_dike_CNRUS_sal_minlevels.append([HR_dike_CNRUS_sal_levels_hourly[i+1][0], HR_dike_CNRUS_sal_levels_hourly[i+1][1]])
HR_dike_CNRUS_sal_maxlevels = np.array(HR_dike_CNRUS_sal_maxlevels)
HR_dike_CNRUS_sal_minlevels = np.array(HR_dike_CNRUS_sal_minlevels)
x_CNRUS_sal_datenum_maxlevels, y_CNRUS_sal_maxlevels = HR_dike_CNRUS_sal_maxlevels.T
x_CNRUS_sal_datenum_minlevels, y_CNRUS_sal_minlevels = HR_dike_CNRUS_sal_minlevels.T
# plots
plt.figure()
pylab.plot(x_CNRUS_sal_datenum_maxlevels, y_CNRUS_sal_maxlevels, 'go', markersize=1)
pylab.plot(x_CNRUS_sal_datenum_minlevels, y_CNRUS_sal_minlevels, 'mo', markersize=1)
# trendlines
idx_CNRUS_sal_max = np.isfinite(x_CNRUS_sal_datenum_maxlevels) & np.isfinite(y_CNRUS_sal_maxlevels)
idx_CNRUS_sal_min = np.isfinite(x_CNRUS_sal_datenum_minlevels) & np.isfinite(y_CNRUS_sal_minlevels)
z_CNRUS_sal_max = np.polyfit(x_CNRUS_sal_datenum_maxlevels[idx_CNRUS_sal_max], y_CNRUS_sal_maxlevels[idx_CNRUS_sal_max], 1)
z_CNRUS_sal_min = np.polyfit(x_CNRUS_sal_datenum_minlevels[idx_CNRUS_sal_min], y_CNRUS_sal_minlevels[idx_CNRUS_sal_min], 1)
p_CNRUS_sal_max = np.poly1d(z_CNRUS_sal_max)
p_CNRUS_sal_min = np.poly1d(z_CNRUS_sal_min)
# plotted trendlines
polyX_CNRUS_sal_max = np.linspace(x_CNRUS_sal_datenum_maxlevels.min(), x_CNRUS_sal_datenum_maxlevels.max(), 100)
polyX_CNRUS_sal_min = np.linspace(x_CNRUS_sal_datenum_minlevels.min(), x_CNRUS_sal_datenum_minlevels.max(), 100)
pylab.plot(polyX_CNRUS_sal_max,p_CNRUS_sal_max(polyX_CNRUS_sal_max),"lightgreen", label='High Tide')
pylab.plot(polyX_CNRUS_sal_min,p_CNRUS_sal_min(polyX_CNRUS_sal_min),"mediumpurple", label='Low Tide')
# the line equation:
print("y=%.6fx+(%.6f)"%(z_CNRUS_sal_max[0],z_CNRUS_sal_max[1]))
print("y=%.6fx+(%.6f)"%(z_CNRUS_sal_min[0],z_CNRUS_sal_min[1]))
# Show X-axis major tick marks as dates
loc= mdates.AutoDateLocator()
plt.gca().xaxis.set_major_locator(loc)
plt.gca().xaxis.set_major_formatter(mdates.AutoDateFormatter(loc))
plt.gcf().autofmt_xdate()
plt.xlabel(r'Date $\left[YYYY-MM\right]$', fontsize=26)
plt.ylabel('Salinity at CNR U/S (ppt)', fontsize=26)
plt.legend(loc='best', fontsize=22)
pylab.show()
#%% Plots of NPS CTD Sensor Data
"""
NPS CTD Data, Plotted: Old Saw -> CNR U/S -> Mill Creek -> Dog Leg -> High Toss
"""
x_HighToss, y_HighToss = HighToss_levels.T
x_MillCreek, y_MillCreek = MillCreek_levels.T
x_CNRUS, y_CNRUS = CNRUS_levels.T
x_DogLeg, y_DogLeg = DogLeg_levels.T
x_OldSaw, y_OldSaw = OldSaw_levels.T
# parse dates and convert to number format, replace blanks with nans
dates_HighToss = [dateutil.parser.parse(x) for x in x_HighToss]
x_HighToss_datenum = mdates.date2num(dates_HighToss)
y_HighToss[np.where(y_HighToss == '')] = np.nan
y_HighToss = y_HighToss.astype(float)
dates_MillCreek = [dateutil.parser.parse(x) for x in x_MillCreek]
x_MillCreek_datenum = mdates.date2num(dates_MillCreek)
y_MillCreek[np.where(y_MillCreek == '')] = np.nan
y_MillCreek = y_MillCreek.astype(float)
dates_CNRUS = [dateutil.parser.parse(x) for x in x_CNRUS]
x_CNRUS_datenum = mdates.date2num(dates_CNRUS)
y_CNRUS[np.where(y_CNRUS == '')] = np.nan
y_CNRUS = y_CNRUS.astype(float)
dates_DogLeg = [dateutil.parser.parse(x) for x in x_DogLeg]
x_DogLeg_datenum = mdates.date2num(dates_DogLeg)
y_DogLeg[np.where(y_DogLeg == '')] = np.nan
y_DogLeg = y_DogLeg.astype(float)
dates_OldSaw = [dateutil.parser.parse(x) for x in x_OldSaw]
x_OldSaw_datenum = mdates.date2num(dates_OldSaw)
y_OldSaw[np.where(y_OldSaw == '')] = np.nan
y_OldSaw = y_OldSaw.astype(float)
# plot all (default order = blue, orange, green, red, purple)
pylab.plot(x_HighToss_datenum, y_HighToss, 'o', markersize=1)
pylab.plot(x_MillCreek_datenum, y_MillCreek, 'o', markersize=1)
pylab.plot(x_CNRUS_datenum, y_CNRUS, 'o', markersize=1)
pylab.plot(x_DogLeg_datenum, y_DogLeg, 'o', markersize=1)
pylab.plot(x_OldSaw_datenum, y_OldSaw, 'o', markersize=1)
idx_HighToss = np.isfinite(x_HighToss_datenum) & np.isfinite(y_HighToss)
z_HighToss = np.polyfit(x_HighToss_datenum[idx_HighToss], y_HighToss[idx_HighToss], 1)
p_HighToss = np.poly1d(z_HighToss)
idx_MillCreek = np.isfinite(x_MillCreek_datenum) & np.isfinite(y_MillCreek)
z_MillCreek = np.polyfit(x_MillCreek_datenum[idx_MillCreek], y_MillCreek[idx_MillCreek], 1)
p_MillCreek = np.poly1d(z_MillCreek)
idx_CNRUS = np.isfinite(x_CNRUS_datenum) & np.isfinite(y_CNRUS)
z_CNRUS = np.polyfit(x_CNRUS_datenum[idx_CNRUS], y_CNRUS[idx_CNRUS], 1)
p_CNRUS = np.poly1d(z_CNRUS)
idx_DogLeg = np.isfinite(x_DogLeg_datenum) & np.isfinite(y_DogLeg)
z_DogLeg = np.polyfit(x_DogLeg_datenum[idx_DogLeg], y_DogLeg[idx_DogLeg], 1)
p_DogLeg = np.poly1d(z_DogLeg)
idx_OldSaw = np.isfinite(x_OldSaw_datenum) & np.isfinite(y_OldSaw)
z_OldSaw = np.polyfit(x_OldSaw_datenum[idx_OldSaw], y_OldSaw[idx_OldSaw], 1)
p_OldSaw = np.poly1d(z_OldSaw)
polyX_HighToss = np.linspace(x_HighToss_datenum.min(), x_HighToss_datenum.max(), 100)
pylab.plot(polyX_HighToss,p_HighToss(polyX_HighToss),"c", label='Mean High Toss Level')
polyX_MillCreek = np.linspace(x_MillCreek_datenum.min(), x_MillCreek_datenum.max(), 100)
pylab.plot(polyX_MillCreek,p_MillCreek(polyX_MillCreek),"y", label='Mean Mill Creek Level')
polyX_CNRUS = np.linspace(x_CNRUS_datenum.min(), x_CNRUS_datenum.max(), 100)
pylab.plot(polyX_CNRUS,p_CNRUS(polyX_CNRUS),"lime", label='Mean HR near-dike Level')
polyX_DogLeg = np.linspace(x_DogLeg_datenum.min(), x_DogLeg_datenum.max(), 100)
pylab.plot(polyX_DogLeg,p_DogLeg(polyX_DogLeg),"salmon", label='Mean Dog Leg Level')
polyX_OldSaw = np.linspace(x_OldSaw_datenum.min(), x_OldSaw_datenum.max(), 100)
pylab.plot(polyX_OldSaw,p_OldSaw(polyX_OldSaw),"m", label='Mean Old Saw (Wellfleet) Sea Level')
# the line equation:
print("y=%.6fx+(%.6f)"%(z_HighToss[0],z_HighToss[1]))
print("y=%.6fx+(%.6f)"%(z_MillCreek[0],z_MillCreek[1]))
print("y=%.6fx+(%.6f)"%(z_CNRUS[0],z_CNRUS[1]))
print("y=%.6fx+(%.6f)"%(z_DogLeg[0],z_DogLeg[1]))
print("y=%.6fx+(%.6f)"%(z_OldSaw[0],z_OldSaw[1]))
# Show X-axis major tick marks as dates
loc= mdates.AutoDateLocator()
plt.gca().xaxis.set_major_locator(loc)
plt.gca().xaxis.set_major_formatter(mdates.AutoDateFormatter(loc))
plt.gcf().autofmt_xdate()
plt.xlabel('Date', fontsize=18)
plt.ylabel('Elevation (m)', fontsize=16)
plt.legend()
pylab.show()
#%% CTD Data Means
# Need to remove nan vals and reduce measurement frequency for oceanside data
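# The NaN removal and hourly down-sampling below is repeated for each sensor.
# A minimal helper sketch of that pattern (illustrative only, not called by this script):
# it assumes x_datenum is a matplotlib date-number array and y is a float array with
# NaNs marking missing readings, and returns the hourly (x, y) arrays.
def drop_nans_and_keep_hourly(x_datenum, y):
    finite = np.isfinite(y)
    x_clean, y_clean = x_datenum[finite], y[finite]
    # keep only readings that fall exactly on the hour
    on_the_hour = np.array([d.minute == 0 for d in mdates.num2date(x_clean)], dtype=bool)
    return x_clean[on_the_hour], y_clean[on_the_hour]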
nan_indices_HighToss = []
nan_indices_MillCreek = []
nan_indices_CNRUS = []
nan_indices_DogLeg = []
nan_indices_OldSaw = []
for i in range(len(y_HighToss)): # HighToss, MillCreek, CNRUS, DogLeg, and OldSaw all the same length
if np.isnan(y_HighToss[i]):
nan_indices_HighToss.append(i)
if np.isnan(y_MillCreek[i]):
nan_indices_MillCreek.append(i)
if np.isnan(y_CNRUS[i]):
nan_indices_CNRUS.append(i)
if np.isnan(y_DogLeg[i]):
nan_indices_DogLeg.append(i)
if np.isnan(y_OldSaw[i]):
nan_indices_OldSaw.append(i)
y_HighToss_nonans = y_HighToss.tolist()
x_HighToss_datenum_nonans = x_HighToss_datenum.tolist()
for index in sorted(nan_indices_HighToss, reverse=True):
del y_HighToss_nonans[index]
del x_HighToss_datenum_nonans[index]
y_HighToss_nonans = np.array(y_HighToss_nonans)
x_HighToss_datenum_nonans = np.array(x_HighToss_datenum_nonans)
y_MillCreek_nonans = y_MillCreek.tolist()
x_MillCreek_datenum_nonans = x_MillCreek_datenum.tolist()
for index in sorted(nan_indices_MillCreek, reverse=True):
del y_MillCreek_nonans[index]
del x_MillCreek_datenum_nonans[index]
y_MillCreek_nonans = np.array(y_MillCreek_nonans)
x_MillCreek_datenum_nonans = np.array(x_MillCreek_datenum_nonans)
y_CNRUS_nonans = y_CNRUS.tolist()
x_CNRUS_datenum_nonans = x_CNRUS_datenum.tolist()
for index in sorted(nan_indices_CNRUS, reverse=True):
del y_CNRUS_nonans[index]
del x_CNRUS_datenum_nonans[index]
y_CNRUS_nonans = np.array(y_CNRUS_nonans)
x_CNRUS_datenum_nonans = np.array(x_CNRUS_datenum_nonans)
y_DogLeg_nonans = y_DogLeg.tolist()
x_DogLeg_datenum_nonans = x_DogLeg_datenum.tolist()
for index in sorted(nan_indices_DogLeg, reverse=True):
del y_DogLeg_nonans[index]
del x_DogLeg_datenum_nonans[index]
y_DogLeg_nonans = np.array(y_DogLeg_nonans)
x_DogLeg_datenum_nonans = np.array(x_DogLeg_datenum_nonans)
y_OldSaw_nonans = y_OldSaw.tolist()
x_OldSaw_datenum_nonans = x_OldSaw_datenum.tolist()
for index in sorted(nan_indices_OldSaw, reverse=True):
del y_OldSaw_nonans[index]
del x_OldSaw_datenum_nonans[index]
y_OldSaw_nonans = np.array(y_OldSaw_nonans)
x_OldSaw_datenum_nonans = np.array(x_OldSaw_datenum_nonans)
# convert numbered datetime back to standard
dates_HighToss_nonans = mdates.num2date(x_HighToss_datenum_nonans)
dates_MillCreek_nonans = mdates.num2date(x_MillCreek_datenum_nonans)
dates_CNRUS_nonans = mdates.num2date(x_CNRUS_datenum_nonans)
dates_DogLeg_nonans = mdates.num2date(x_DogLeg_datenum_nonans)
dates_OldSaw_nonans = mdates.num2date(x_OldSaw_datenum_nonans)
# convert to hourly time intervals
# High Toss
hourly_indices_HighToss = []
for i in range(len(dates_HighToss_nonans)):
if dates_HighToss_nonans[i].minute == 0:
hourly_indices_HighToss.append(i)
y_HighToss_hourly = []
x_HighToss_datenum_hourly = []
for index in sorted(hourly_indices_HighToss):
y_HighToss_hourly.append(y_HighToss_nonans[index])
x_HighToss_datenum_hourly.append(x_HighToss_datenum_nonans[index])
y_HighToss_hourly = np.array(y_HighToss_hourly)
x_HighToss_datenum_hourly = np.array(x_HighToss_datenum_hourly)
# Mill Creek
hourly_indices_MillCreek = []
for i in range(len(dates_MillCreek_nonans)):
if dates_MillCreek_nonans[i].minute == 0:
hourly_indices_MillCreek.append(i)
y_MillCreek_hourly = []
x_MillCreek_datenum_hourly = []
for index in sorted(hourly_indices_MillCreek):
y_MillCreek_hourly.append(y_MillCreek_nonans[index])
x_MillCreek_datenum_hourly.append(x_MillCreek_datenum_nonans[index])
y_MillCreek_hourly = np.array(y_MillCreek_hourly)
x_MillCreek_datenum_hourly = np.array(x_MillCreek_datenum_hourly)
# CNRUS
hourly_indices_CNRUS = []
for i in range(len(dates_CNRUS_nonans)):
if dates_CNRUS_nonans[i].minute == 0:
hourly_indices_CNRUS.append(i)
y_CNRUS_hourly = []
x_CNRUS_datenum_hourly = []
for index in sorted(hourly_indices_CNRUS):
y_CNRUS_hourly.append(y_CNRUS_nonans[index])
x_CNRUS_datenum_hourly.append(x_CNRUS_datenum_nonans[index])
y_CNRUS_hourly = np.array(y_CNRUS_hourly)
x_CNRUS_datenum_hourly = np.array(x_CNRUS_datenum_hourly)
# Dog Leg
hourly_indices_DogLeg = []
for i in range(len(dates_DogLeg_nonans)):
if dates_DogLeg_nonans[i].minute == 0:
hourly_indices_DogLeg.append(i)
y_DogLeg_hourly = []
x_DogLeg_datenum_hourly = []
for index in sorted(hourly_indices_DogLeg):
y_DogLeg_hourly.append(y_DogLeg_nonans[index])
x_DogLeg_datenum_hourly.append(x_DogLeg_datenum_nonans[index])
y_DogLeg_hourly = np.array(y_DogLeg_hourly)
x_DogLeg_datenum_hourly = np.array(x_DogLeg_datenum_hourly)
# Old Saw
hourly_indices_OldSaw = []
for i in range(len(dates_OldSaw_nonans)):
if dates_OldSaw_nonans[i].minute == 0:
hourly_indices_OldSaw.append(i)
y_OldSaw_hourly = []
x_OldSaw_datenum_hourly = []
for index in sorted(hourly_indices_OldSaw):
y_OldSaw_hourly.append(y_OldSaw_nonans[index])
x_OldSaw_datenum_hourly.append(x_OldSaw_datenum_nonans[index])
y_OldSaw_hourly = np.array(y_OldSaw_hourly)
x_OldSaw_datenum_hourly = np.array(x_OldSaw_datenum_hourly)
# plot hourly
plt.figure()
pylab.plot(x_HighToss_datenum_hourly, y_HighToss_hourly, 'o', markersize=1)
pylab.plot(x_MillCreek_datenum_hourly, y_MillCreek_hourly, 'o', markersize=1)
pylab.plot(x_CNRUS_datenum_hourly, y_CNRUS_hourly, 'o', markersize=1)
pylab.plot(x_DogLeg_datenum_hourly, y_DogLeg_hourly, 'o', markersize=1)
pylab.plot(x_OldSaw_datenum_hourly, y_OldSaw_hourly, 'o', markersize=1)
# High Toss Trendline
idx_HighToss_hourly = np.isfinite(x_HighToss_datenum_hourly) & np.isfinite(y_HighToss_hourly)
z_HighToss_hourly = np.polyfit(x_HighToss_datenum_hourly[idx_HighToss_hourly], y_HighToss_hourly[idx_HighToss_hourly], 1)
p_HighToss_hourly = np.poly1d(z_HighToss_hourly)
# Mill Creek Trendline
idx_MillCreek_hourly = np.isfinite(x_MillCreek_datenum_hourly) & np.isfinite(y_MillCreek_hourly)
z_MillCreek_hourly = np.polyfit(x_MillCreek_datenum_hourly[idx_MillCreek_hourly], y_MillCreek_hourly[idx_MillCreek_hourly], 1)
p_MillCreek_hourly = np.poly1d(z_MillCreek_hourly)
# CNRUS Trendline
idx_CNRUS_hourly = np.isfinite(x_CNRUS_datenum_hourly) & np.isfinite(y_CNRUS_hourly)
z_CNRUS_hourly = np.polyfit(x_CNRUS_datenum_hourly[idx_CNRUS_hourly], y_CNRUS_hourly[idx_CNRUS_hourly], 1)
p_CNRUS_hourly = np.poly1d(z_CNRUS_hourly)
# Dog Leg Trendline
idx_DogLeg_hourly = np.isfinite(x_DogLeg_datenum_hourly) & np.isfinite(y_DogLeg_hourly)
z_DogLeg_hourly = np.polyfit(x_DogLeg_datenum_hourly[idx_DogLeg_hourly], y_DogLeg_hourly[idx_DogLeg_hourly], 1)
p_DogLeg_hourly = np.poly1d(z_DogLeg_hourly)
# Old Saw Trendline
idx_OldSaw_hourly = np.isfinite(x_OldSaw_datenum_hourly) & np.isfinite(y_OldSaw_hourly)
z_OldSaw_hourly = np.polyfit(x_OldSaw_datenum_hourly[idx_OldSaw_hourly], y_OldSaw_hourly[idx_OldSaw_hourly], 1)
p_OldSaw_hourly = np.poly1d(z_OldSaw_hourly)
# Trendlines plotted
polyX_HighToss_hourly = np.linspace(x_HighToss_datenum_hourly.min(), x_HighToss_datenum_hourly.max(), 100)
polyX_MillCreek_hourly = np.linspace(x_MillCreek_datenum_hourly.min(), x_MillCreek_datenum_hourly.max(), 100)
polyX_CNRUS_hourly = np.linspace(x_CNRUS_datenum_hourly.min(), x_CNRUS_datenum_hourly.max(), 100)
polyX_DogLeg_hourly = np.linspace(x_DogLeg_datenum_hourly.min(), x_DogLeg_datenum_hourly.max(), 100)
polyX_OldSaw_hourly = np.linspace(x_OldSaw_datenum_hourly.min(), x_OldSaw_datenum_hourly.max(), 100)
pylab.plot(polyX_HighToss_hourly,p_HighToss_hourly(polyX_HighToss_hourly),"c", label='Mean Hourly High Toss Level')
pylab.plot(polyX_MillCreek_hourly,p_MillCreek_hourly(polyX_MillCreek_hourly),"y", label='Mean Hourly Mill Creek Level')
pylab.plot(polyX_CNRUS_hourly,p_CNRUS_hourly(polyX_CNRUS_hourly),"lime", label='Mean Hourly CNRUS Level')
pylab.plot(polyX_DogLeg_hourly,p_DogLeg_hourly(polyX_DogLeg_hourly),"salmon", label='Mean Hourly Dog Leg Level')
pylab.plot(polyX_OldSaw_hourly,p_OldSaw_hourly(polyX_OldSaw_hourly),"m", label='Mean Hourly Old Saw Level')
# the line equation:
print("y=%.6fx+(%.6f)"%(z_HighToss_hourly[0],z_HighToss_hourly[1]))
print("y=%.6fx+(%.6f)"%(z_MillCreek_hourly[0],z_MillCreek_hourly[1]))
print("y=%.6fx+(%.6f)"%(z_CNRUS_hourly[0],z_CNRUS_hourly[1]))
print("y=%.6fx+(%.6f)"%(z_DogLeg_hourly[0],z_DogLeg_hourly[1]))
print("y=%.6fx+(%.6f)"%(z_OldSaw_hourly[0],z_OldSaw_hourly[1]))
# Show X-axis major tick marks as dates
loc= mdates.AutoDateLocator()
plt.gca().xaxis.set_major_locator(loc)
plt.gca().xaxis.set_major_formatter(mdates.AutoDateFormatter(loc))
plt.gcf().autofmt_xdate()
plt.xlabel('Date', fontsize=18)
plt.ylabel('Elevation (m)', fontsize=16)
plt.legend()
pylab.show()
#%% CTD Data Highs and Lows
"""
Mean Highs and Lows (to determine changes in amplitude and amplitude decay from ocean to river)
"""
# Concatenate dates and levels
HR_dike_HighToss_levels_hourly = np.vstack((x_HighToss_datenum_hourly, y_HighToss_hourly)).T
HR_dike_MillCreek_levels_hourly = np.vstack((x_MillCreek_datenum_hourly, y_MillCreek_hourly)).T
HR_dike_CNRUS_levels_hourly = np.vstack((x_CNRUS_datenum_hourly, y_CNRUS_hourly)).T
HR_dike_DogLeg_levels_hourly = np.vstack((x_DogLeg_datenum_hourly, y_DogLeg_hourly)).T
HR_dike_OldSaw_levels_hourly = np.vstack((x_OldSaw_datenum_hourly, y_OldSaw_hourly)).T
# max and min vals for tides
HR_dike_HighToss_maxlevels = []
HR_dike_HighToss_minlevels = []
for i in range(len(HR_dike_HighToss_levels_hourly)-2):
if (HR_dike_HighToss_levels_hourly[i+1][1] > HR_dike_HighToss_levels_hourly[i][1]) & (HR_dike_HighToss_levels_hourly[i+2][1] < HR_dike_HighToss_levels_hourly[i+1][1]) & (HR_dike_HighToss_levels_hourly[i+1][1] > p_HighToss(polyX_HighToss).mean()):
HR_dike_HighToss_maxlevels.append([HR_dike_HighToss_levels_hourly[i+1][0], HR_dike_HighToss_levels_hourly[i+1][1]]) # high tides
if (HR_dike_HighToss_levels_hourly[i+1][1] < HR_dike_HighToss_levels_hourly[i][1]) & (HR_dike_HighToss_levels_hourly[i+2][1] > HR_dike_HighToss_levels_hourly[i+1][1]) & (HR_dike_HighToss_levels_hourly[i+1][1] < p_HighToss(polyX_HighToss).mean()):
HR_dike_HighToss_minlevels.append([HR_dike_HighToss_levels_hourly[i+1][0], HR_dike_HighToss_levels_hourly[i+1][1]])
HR_dike_HighToss_maxlevels = np.array(HR_dike_HighToss_maxlevels)
HR_dike_HighToss_minlevels = np.array(HR_dike_HighToss_minlevels)
HR_dike_MillCreek_maxlevels = [] # these are seasonal, not tidal
HR_dike_MillCreek_minlevels = [] # these are seasonal, not tidal
for i in range(len(HR_dike_MillCreek_levels_hourly)-2):
if (HR_dike_MillCreek_levels_hourly[i+1][1] > HR_dike_MillCreek_levels_hourly[i][1]) & (HR_dike_MillCreek_levels_hourly[i+2][1] < HR_dike_MillCreek_levels_hourly[i+1][1]) & (HR_dike_MillCreek_levels_hourly[i+1][1] > p_MillCreek(polyX_MillCreek).mean()):
HR_dike_MillCreek_maxlevels.append([HR_dike_MillCreek_levels_hourly[i+1][0], HR_dike_MillCreek_levels_hourly[i+1][1]]) # high tides
if (HR_dike_MillCreek_levels_hourly[i+1][1] < HR_dike_MillCreek_levels_hourly[i][1]) & (HR_dike_MillCreek_levels_hourly[i+2][1] > HR_dike_MillCreek_levels_hourly[i+1][1]) & (HR_dike_MillCreek_levels_hourly[i+1][1] < p_MillCreek(polyX_MillCreek).mean()):
HR_dike_MillCreek_minlevels.append([HR_dike_MillCreek_levels_hourly[i+1][0], HR_dike_MillCreek_levels_hourly[i+1][1]])
HR_dike_MillCreek_maxlevels = np.array(HR_dike_MillCreek_maxlevels) # these are seasonal, not tidal
HR_dike_MillCreek_minlevels = np.array(HR_dike_MillCreek_minlevels) # these are seasonal, not tidal
HR_dike_CNRUS_maxlevels = []
HR_dike_CNRUS_minlevels = []
for i in range(len(HR_dike_CNRUS_levels_hourly)-2):
if (HR_dike_CNRUS_levels_hourly[i+1][1] > HR_dike_CNRUS_levels_hourly[i][1]) & (HR_dike_CNRUS_levels_hourly[i+2][1] < HR_dike_CNRUS_levels_hourly[i+1][1]) & (HR_dike_CNRUS_levels_hourly[i+1][1] > p_CNRUS(polyX_CNRUS).mean()):
HR_dike_CNRUS_maxlevels.append([HR_dike_CNRUS_levels_hourly[i+1][0], HR_dike_CNRUS_levels_hourly[i+1][1]]) # high tides
if (HR_dike_CNRUS_levels_hourly[i+1][1] < HR_dike_CNRUS_levels_hourly[i][1]) & (HR_dike_CNRUS_levels_hourly[i+2][1] > HR_dike_CNRUS_levels_hourly[i+1][1]) & (HR_dike_CNRUS_levels_hourly[i+1][1] < p_CNRUS(polyX_CNRUS).mean()):
HR_dike_CNRUS_minlevels.append([HR_dike_CNRUS_levels_hourly[i+1][0], HR_dike_CNRUS_levels_hourly[i+1][1]])
HR_dike_CNRUS_maxlevels = np.array(HR_dike_CNRUS_maxlevels)
HR_dike_CNRUS_minlevels = np.array(HR_dike_CNRUS_minlevels)
HR_dike_DogLeg_maxlevels = []
HR_dike_DogLeg_minlevels = []
for i in range(len(HR_dike_DogLeg_levels_hourly)-2):
if (HR_dike_DogLeg_levels_hourly[i+1][1] > HR_dike_DogLeg_levels_hourly[i][1]) & (HR_dike_DogLeg_levels_hourly[i+2][1] < HR_dike_DogLeg_levels_hourly[i+1][1]) & (HR_dike_DogLeg_levels_hourly[i+1][1] > p_DogLeg(polyX_DogLeg).mean()):
HR_dike_DogLeg_maxlevels.append([HR_dike_DogLeg_levels_hourly[i+1][0], HR_dike_DogLeg_levels_hourly[i+1][1]]) # high tides
if (HR_dike_DogLeg_levels_hourly[i+1][1] < HR_dike_DogLeg_levels_hourly[i][1]) & (HR_dike_DogLeg_levels_hourly[i+2][1] > HR_dike_DogLeg_levels_hourly[i+1][1]) & (HR_dike_DogLeg_levels_hourly[i+1][1] < p_DogLeg(polyX_DogLeg).mean()):
HR_dike_DogLeg_minlevels.append([HR_dike_DogLeg_levels_hourly[i+1][0], HR_dike_DogLeg_levels_hourly[i+1][1]])
HR_dike_DogLeg_maxlevels = np.array(HR_dike_DogLeg_maxlevels)
HR_dike_DogLeg_minlevels = np.array(HR_dike_DogLeg_minlevels)
HR_dike_OldSaw_maxlevels = []
HR_dike_OldSaw_minlevels = []
for i in range(len(HR_dike_OldSaw_levels_hourly)-2):
if (HR_dike_OldSaw_levels_hourly[i+1][1] > HR_dike_OldSaw_levels_hourly[i][1]) & (HR_dike_OldSaw_levels_hourly[i+2][1] < HR_dike_OldSaw_levels_hourly[i+1][1]) & (HR_dike_OldSaw_levels_hourly[i+1][1] > p_OldSaw(polyX_OldSaw).mean()):
HR_dike_OldSaw_maxlevels.append([HR_dike_OldSaw_levels_hourly[i+1][0], HR_dike_OldSaw_levels_hourly[i+1][1]]) # high tides
if (HR_dike_OldSaw_levels_hourly[i+1][1] < HR_dike_OldSaw_levels_hourly[i][1]) & (HR_dike_OldSaw_levels_hourly[i+2][1] > HR_dike_OldSaw_levels_hourly[i+1][1]) & (HR_dike_OldSaw_levels_hourly[i+1][1] < p_OldSaw(polyX_OldSaw).mean()):
HR_dike_OldSaw_minlevels.append([HR_dike_OldSaw_levels_hourly[i+1][0], HR_dike_OldSaw_levels_hourly[i+1][1]])
HR_dike_OldSaw_maxlevels = np.array(HR_dike_OldSaw_maxlevels)
HR_dike_OldSaw_minlevels = np.array(HR_dike_OldSaw_minlevels)
"""
Misc sensor mins and maxes (hourly time steps)
"""
x_HighToss_datenum_maxlevels, y_HighToss_maxlevels = HR_dike_HighToss_maxlevels.T
x_HighToss_datenum_minlevels, y_HighToss_minlevels = HR_dike_HighToss_minlevels.T
x_MillCreek_datenum_maxlevels, y_MillCreek_maxlevels = HR_dike_MillCreek_maxlevels.T # these are seasonal, not tidal
x_MillCreek_datenum_minlevels, y_MillCreek_minlevels = HR_dike_MillCreek_minlevels.T # these are seasonal, not tidal
x_CNRUS_datenum_maxlevels, y_CNRUS_maxlevels = HR_dike_CNRUS_maxlevels.T
x_CNRUS_datenum_minlevels, y_CNRUS_minlevels = HR_dike_CNRUS_minlevels.T
x_DogLeg_datenum_maxlevels, y_DogLeg_maxlevels = HR_dike_DogLeg_maxlevels.T
x_DogLeg_datenum_minlevels, y_DogLeg_minlevels = HR_dike_DogLeg_minlevels.T
x_OldSaw_datenum_maxlevels, y_OldSaw_maxlevels = HR_dike_OldSaw_maxlevels.T
x_OldSaw_datenum_minlevels, y_OldSaw_minlevels = HR_dike_OldSaw_minlevels.T
# plots
plt.figure()
pylab.plot(x_HighToss_datenum_maxlevels, y_HighToss_maxlevels, 'o', markersize=1)
pylab.plot(x_HighToss_datenum_minlevels, y_HighToss_minlevels, 'o', markersize=1)
pylab.plot(x_MillCreek_datenum_maxlevels, y_MillCreek_maxlevels, 'o', markersize=1) # might give seasonal range?
pylab.plot(x_MillCreek_datenum_minlevels, y_MillCreek_minlevels, 'o', markersize=1) # might give seasonal range?
pylab.plot(x_CNRUS_datenum_maxlevels, y_CNRUS_maxlevels, 'o', markersize=1)
pylab.plot(x_CNRUS_datenum_minlevels, y_CNRUS_minlevels, 'o', markersize=1)
pylab.plot(x_DogLeg_datenum_maxlevels, y_DogLeg_maxlevels, 'o', markersize=1)
pylab.plot(x_DogLeg_datenum_minlevels, y_DogLeg_minlevels, 'o', markersize=1)
pylab.plot(x_OldSaw_datenum_maxlevels, y_OldSaw_maxlevels, 'o', markersize=1)
pylab.plot(x_OldSaw_datenum_minlevels, y_OldSaw_minlevels, 'o', markersize=1)
# trendlines
idx_HighToss_max = np.isfinite(x_HighToss_datenum_maxlevels) & np.isfinite(y_HighToss_maxlevels)
idx_HighToss_min = np.isfinite(x_HighToss_datenum_minlevels) & np.isfinite(y_HighToss_minlevels)
z_HighToss_max = np.polyfit(x_HighToss_datenum_maxlevels[idx_HighToss_max], y_HighToss_maxlevels[idx_HighToss_max], 1)
z_HighToss_min = np.polyfit(x_HighToss_datenum_minlevels[idx_HighToss_min], y_HighToss_minlevels[idx_HighToss_min], 1)
p_HighToss_max = np.poly1d(z_HighToss_max)
p_HighToss_min = np.poly1d(z_HighToss_min)
idx_MillCreek_max = np.isfinite(x_MillCreek_datenum_maxlevels) & np.isfinite(y_MillCreek_maxlevels) # seasonal?
idx_MillCreek_min = np.isfinite(x_MillCreek_datenum_minlevels) & np.isfinite(y_MillCreek_minlevels) # seasonal?
z_MillCreek_max = np.polyfit(x_MillCreek_datenum_maxlevels[idx_MillCreek_max], y_MillCreek_maxlevels[idx_MillCreek_max], 1) # seasonal?
z_MillCreek_min = np.polyfit(x_MillCreek_datenum_minlevels[idx_MillCreek_min], y_MillCreek_minlevels[idx_MillCreek_min], 1) # seasonal?
p_MillCreek_max = np.poly1d(z_MillCreek_max) # seasonal?
p_MillCreek_min = np.poly1d(z_MillCreek_min) # seasonal?
idx_CNRUS_max = np.isfinite(x_CNRUS_datenum_maxlevels) & np.isfinite(y_CNRUS_maxlevels)
idx_CNRUS_min = np.isfinite(x_CNRUS_datenum_minlevels) & np.isfinite(y_CNRUS_minlevels)
z_CNRUS_max = np.polyfit(x_CNRUS_datenum_maxlevels[idx_CNRUS_max], y_CNRUS_maxlevels[idx_CNRUS_max], 1)
z_CNRUS_min = np.polyfit(x_CNRUS_datenum_minlevels[idx_CNRUS_min], y_CNRUS_minlevels[idx_CNRUS_min], 1)
p_CNRUS_max = np.poly1d(z_CNRUS_max)
p_CNRUS_min = np.poly1d(z_CNRUS_min)
idx_DogLeg_max = np.isfinite(x_DogLeg_datenum_maxlevels) & np.isfinite(y_DogLeg_maxlevels)
idx_DogLeg_min = np.isfinite(x_DogLeg_datenum_minlevels) & np.isfinite(y_DogLeg_minlevels)
z_DogLeg_max = np.polyfit(x_DogLeg_datenum_maxlevels[idx_DogLeg_max], y_DogLeg_maxlevels[idx_DogLeg_max], 1)
z_DogLeg_min = np.polyfit(x_DogLeg_datenum_minlevels[idx_DogLeg_min], y_DogLeg_minlevels[idx_DogLeg_min], 1)
p_DogLeg_max = np.poly1d(z_DogLeg_max)
p_DogLeg_min = np.poly1d(z_DogLeg_min)
idx_OldSaw_max = np.isfinite(x_OldSaw_datenum_maxlevels) & np.isfinite(y_OldSaw_maxlevels)
idx_OldSaw_min = np.isfinite(x_OldSaw_datenum_minlevels) & np.isfinite(y_OldSaw_minlevels)
z_OldSaw_max = np.polyfit(x_OldSaw_datenum_maxlevels[idx_OldSaw_max], y_OldSaw_maxlevels[idx_OldSaw_max], 1)
z_OldSaw_min = np.polyfit(x_OldSaw_datenum_minlevels[idx_OldSaw_min], y_OldSaw_minlevels[idx_OldSaw_min], 1)
p_OldSaw_max = np.poly1d(z_OldSaw_max)
p_OldSaw_min = np.poly1d(z_OldSaw_min)
# plotted trendlines
polyX_HighToss_max = np.linspace(x_HighToss_datenum_maxlevels.min(), x_HighToss_datenum_maxlevels.max(), 100)
polyX_HighToss_min = np.linspace(x_HighToss_datenum_minlevels.min(), x_HighToss_datenum_minlevels.max(), 100)
polyX_MillCreek_max = np.linspace(x_MillCreek_datenum_maxlevels.min(), x_MillCreek_datenum_maxlevels.max(), 100)
polyX_MillCreek_min = np.linspace(x_MillCreek_datenum_minlevels.min(), x_MillCreek_datenum_minlevels.max(), 100)
polyX_CNRUS_max = np.linspace(x_CNRUS_datenum_maxlevels.min(), x_CNRUS_datenum_maxlevels.max(), 100)
polyX_CNRUS_min = np.linspace(x_CNRUS_datenum_minlevels.min(), x_CNRUS_datenum_minlevels.max(), 100)
polyX_DogLeg_max = np.linspace(x_DogLeg_datenum_maxlevels.min(), x_DogLeg_datenum_maxlevels.max(), 100)
polyX_DogLeg_min = np.linspace(x_DogLeg_datenum_minlevels.min(), x_DogLeg_datenum_minlevels.max(), 100)
polyX_OldSaw_max = np.linspace(x_OldSaw_datenum_maxlevels.min(), x_OldSaw_datenum_maxlevels.max(), 100)
polyX_OldSaw_min = np.linspace(x_OldSaw_datenum_minlevels.min(), x_OldSaw_datenum_minlevels.max(), 100)
pylab.plot(polyX_HighToss_max,p_HighToss_max(polyX_HighToss_max),"blue", label='Mean High HT Level')
pylab.plot(polyX_HighToss_min,p_HighToss_min(polyX_HighToss_min),"orange", label='Mean Low HT Level')
pylab.plot(polyX_MillCreek_max,p_MillCreek_max(polyX_MillCreek_max),"green", label='Mean High MC Level') # seasonal?
pylab.plot(polyX_MillCreek_min,p_MillCreek_min(polyX_MillCreek_min),"red", label='Mean Low MC Level') # seasonal?
pylab.plot(polyX_CNRUS_max,p_CNRUS_max(polyX_CNRUS_max),"purple", label='Mean High CNR Level')
pylab.plot(polyX_CNRUS_min,p_CNRUS_min(polyX_CNRUS_min),"brown", label='Mean Low CNR Level')
pylab.plot(polyX_DogLeg_max,p_DogLeg_max(polyX_DogLeg_max),"pink", label='Mean High DL Level')
pylab.plot(polyX_DogLeg_min,p_DogLeg_min(polyX_DogLeg_min),"grey", label='Mean Low DL Level')
pylab.plot(polyX_OldSaw_max,p_OldSaw_max(polyX_OldSaw_max),"yellow", label='Mean High OS Level')
pylab.plot(polyX_OldSaw_min,p_OldSaw_min(polyX_OldSaw_min),"cyan", label='Mean Low OS Level')
# the line equation:
print("y=%.6fx+(%.6f)"%(z_HighToss_max[0],z_HighToss_max[1]))
print("y=%.6fx+(%.6f)"%(z_HighToss_min[0],z_HighToss_min[1]))
print("y=%.6fx+(%.6f)"%(z_MillCreek_max[0],z_MillCreek_max[1]))
print("y=%.6fx+(%.6f)"%(z_MillCreek_min[0],z_MillCreek_min[1]))
print("y=%.6fx+(%.6f)"%(z_CNRUS_max[0],z_CNRUS_max[1]))
print("y=%.6fx+(%.6f)"%(z_CNRUS_min[0],z_CNRUS_min[1]))
print("y=%.6fx+(%.6f)"%(z_DogLeg_max[0],z_DogLeg_max[1]))
print("y=%.6fx+(%.6f)"%(z_DogLeg_min[0],z_DogLeg_min[1]))
print("y=%.6fx+(%.6f)"%(z_OldSaw_max[0],z_OldSaw_max[1]))
print("y=%.6fx+(%.6f)"%(z_OldSaw_min[0],z_OldSaw_min[1]))
# Show X-axis major tick marks as dates
loc= mdates.AutoDateLocator()
plt.gca().xaxis.set_major_locator(loc)
plt.gca().xaxis.set_major_formatter(mdates.AutoDateFormatter(loc))
plt.gcf().autofmt_xdate()
plt.xlabel('Date', fontsize=18)
plt.ylabel('Elevation (m)', fontsize=16)
plt.legend()
pylab.show()
# Max and min of trendlines
# High Toss
HTlev_july2017_max = z_HighToss_max[0]*x_HighToss_datenum_maxlevels.min()+z_HighToss_max[1]
HTlev_july2017_min = z_HighToss_min[0]*x_HighToss_datenum_minlevels.min()+z_HighToss_min[1]
HTlev_july2019_max = z_HighToss_max[0]*x_HighToss_datenum_maxlevels.max()+z_HighToss_max[1]
HTlev_july2019_min = z_HighToss_min[0]*x_HighToss_datenum_minlevels.max()+z_HighToss_min[1]
HTrlrhigh_july2017tojuly2019 = HTlev_july2019_max-HTlev_july2017_max
HTrlrlow_july2017tojuly2019 = HTlev_july2019_min-HTlev_july2017_min
HTrlrhigh_oneyear = HTrlrhigh_july2017tojuly2019/(x_HighToss_datenum_maxlevels[-1]-x_HighToss_datenum_maxlevels[0])*365
HTrlrlow_oneyear = HTrlrlow_july2017tojuly2019/(x_HighToss_datenum_minlevels[-1]-x_HighToss_datenum_minlevels[0])*365
print("Maximum mean river level at the High Toss sensor is increasing by ", ("%.3f"%(HTrlrhigh_oneyear)), "m per year.")
print("Maximum mean river level at that location goes from an average of ", ("%.3f"%(HTlev_july2017_max)), "m in July 2017")
print("to ", ("%.3f"%(HTlev_july2019_max)), "m in July 2019.")
print("Minimum mean river level at the High Toss sensor is increasing by ", ("%.3f"%(HTrlrlow_oneyear)), "m per year.")
print("Minimum mean river level at that location goes from an average of ", ("%.3f"%(HTlev_july2017_min)), "m in July 2017")
print("to ", ("%.3f"%(HTlev_july2019_min)), "m in July 2019.")
# Mill Creek Seasonal
MClev_july2017_max = z_MillCreek_max[0]*x_MillCreek_datenum_maxlevels.min()+z_MillCreek_max[1]
MClev_july2017_min = z_MillCreek_min[0]*x_MillCreek_datenum_minlevels.min()+z_MillCreek_min[1]
MClev_july2019_max = z_MillCreek_max[0]*x_MillCreek_datenum_maxlevels.max()+z_MillCreek_max[1]
MClev_july2019_min = z_MillCreek_min[0]*x_MillCreek_datenum_minlevels.max()+z_MillCreek_min[1]
MCrlrhigh_july2017tojuly2019 = MClev_july2019_max-MClev_july2017_max
MCrlrlow_july2017tojuly2019 = MClev_july2019_min-MClev_july2017_min
MCrlrhigh_oneyear = MCrlrhigh_july2017tojuly2019/(x_MillCreek_datenum_maxlevels[-1]-x_MillCreek_datenum_maxlevels[0])*365
MCrlrlow_oneyear = MCrlrlow_july2017tojuly2019/(x_MillCreek_datenum_minlevels[-1]-x_MillCreek_datenum_minlevels[0])*365
print("Maximum mean Mill Creek level is increasing by ", ("%.3f"%(MCrlrhigh_oneyear)), "m per year.")
print("Maximum mean creek level at that location goes from an average of ", ("%.3f"%(MClev_july2017_max)), "m in July 2017")
print("to ", ("%.3f"%(MClev_july2019_max)), "m in July 2019.")
print("Minimum mean Mill Creek level is increasing by ", ("%.3f"%(MCrlrlow_oneyear)), "m per year.")
print("Minimum mean creek level at that location goes from an average of ", ("%.3f"%(MClev_july2017_min)), "m in July 2017")
print("to ", ("%.3f"%(MClev_july2019_min)), "m in July 2019.")
# CNR U/S
CNRlev_july2017_max = z_CNRUS_max[0]*x_CNRUS_datenum_maxlevels.min()+z_CNRUS_max[1]
CNRlev_july2017_min = z_CNRUS_min[0]*x_CNRUS_datenum_minlevels.min()+z_CNRUS_min[1]
CNRlev_july2019_max = z_CNRUS_max[0]*x_CNRUS_datenum_maxlevels.max()+z_CNRUS_max[1]
CNRlev_july2019_min = z_CNRUS_min[0]*x_CNRUS_datenum_minlevels.max()+z_CNRUS_min[1]
CNRrlrhigh_july2017tojuly2019 = CNRlev_july2019_max-CNRlev_july2017_max
CNRrlrlow_july2017tojuly2019 = CNRlev_july2019_min-CNRlev_july2017_min
CNRrlrhigh_oneyear = CNRrlrhigh_july2017tojuly2019/(x_CNRUS_datenum_maxlevels[-1]-x_CNRUS_datenum_maxlevels[0])*365
CNRrlrlow_oneyear = CNRrlrlow_july2017tojuly2019/(x_CNRUS_datenum_minlevels[-1]-x_CNRUS_datenum_minlevels[0])*365
print("Maximum mean river level at the CNR U/S sensor is increasing by ", ("%.3f"%(CNRrlrhigh_oneyear)), "m per year.")
print("Maximum mean river level at that location goes from an average of ", ("%.3f"%(CNRlev_july2017_max)), "m in July 2017")
print("to ", ("%.3f"%(CNRlev_july2019_max)), "m in July 2019.")
print("Minimum mean river level at the CNR U/S sensor is increasing by ", ("%.3f"%(CNRrlrlow_oneyear)), "m per year.")
print("Minimum mean river level at that location goes from an average of ", ("%.3f"%(CNRlev_july2017_min)), "m in July 2017")
print("to ", ("%.3f"%(CNRlev_july2019_min)), "m in July 2019.")
# Dog Leg
DLlev_july2017_max = z_DogLeg_max[0]*x_DogLeg_datenum_maxlevels.min()+z_DogLeg_max[1]
DLlev_july2017_min = z_DogLeg_min[0]*x_DogLeg_datenum_minlevels.min()+z_DogLeg_min[1]
DLlev_july2019_max = z_DogLeg_max[0]*x_DogLeg_datenum_maxlevels.max()+z_DogLeg_max[1]
DLlev_july2019_min = z_DogLeg_min[0]*x_DogLeg_datenum_minlevels.max()+z_DogLeg_min[1]
DLrlrhigh_july2017tojuly2019 = DLlev_july2019_max-DLlev_july2017_max
DLrlrlow_july2017tojuly2019 = DLlev_july2019_min-DLlev_july2017_min
DLrlrhigh_oneyear = DLrlrhigh_july2017tojuly2019/(x_DogLeg_datenum_maxlevels[-1]-x_DogLeg_datenum_maxlevels[0])*365
DLrlrlow_oneyear = DLrlrlow_july2017tojuly2019/(x_DogLeg_datenum_minlevels[-1]-x_DogLeg_datenum_minlevels[0])*365
print("Maximum mean river level at Dog Leg is increasing by ", ("%.3f"%(DLrlrhigh_oneyear)), "m per year.")
print("Maximum mean river level at that location goes from an average of ", ("%.3f"%(DLlev_july2017_max)), "m in July 2017")
print("to ", ("%.3f"%(DLlev_july2019_max)), "m in July 2019.")
print("Minimum mean river level at Dog Leg is increasing by ", ("%.3f"%(DLrlrlow_oneyear)), "m per year.")
print("Minimum mean river level at that location goes from an average of ", ("%.3f"%(DLlev_july2017_min)), "m in July 2017")
print("to ", ("%.3f"%(DLlev_july2019_min)), "m in July 2019.")
# Old Saw
OSlev_june2018_max = z_OldSaw_max[0]*x_OldSaw_datenum_maxlevels.min()+z_OldSaw_max[1]
OSlev_june2018_min = z_OldSaw_min[0]*x_OldSaw_datenum_minlevels.min()+z_OldSaw_min[1]
OSlev_dec2018_max = z_OldSaw_max[0]*x_OldSaw_datenum_maxlevels.max()+z_OldSaw_max[1]
OSlev_dec2018_min = z_OldSaw_min[0]*x_OldSaw_datenum_minlevels.max()+z_OldSaw_min[1]
OSslrhigh_june2018todec2018 = OSlev_dec2018_max-OSlev_june2018_max
OSslrlow_june2018todec2018 = OSlev_dec2018_min-OSlev_june2018_min
print("Maximum mean sea level at Old Saw changed", ("%.3f"%(OSslrhigh_june2018todec2018)), "m between June 2018 and December 2018.")
print("Minimum mean sea level at Old Saw changed", ("%.3f"%(OSslrlow_june2018todec2018)), "m between June 2018 and December 2018.")
#%% Regression plot between Salinity levels and Water Elevations, CNR U/S CTD (preceding cells must be run first)
HR_dike_CNRUS_sal_maxlevsdf = pandas.DataFrame({'Date':HR_dike_CNRUS_sal_maxlevels[:,0], 'Salinity':HR_dike_CNRUS_sal_maxlevels[:,1]})
import os
import time
import math
import numpy
import pandas
import random
import argparse
from glob import glob
from PIL import Image
import albumentations
import torch
from torch import optim, nn
from torch.optim.lr_scheduler import ReduceLROnPlateau
from sklearn.model_selection import StratifiedKFold, train_test_split
from tqdm import tqdm
from gradual_warmup_scheduler import GradualWarmupScheduler
from image_model import ImageModel
from wheat_dataset import WheatDataset
from torch.utils.data import DataLoader
from torch.utils.data.sampler import RandomSampler, SequentialSampler
from config import *
from sklearn.metrics import mean_squared_error
def files_in_path(file_extension: str, path: str, recursive: bool = True):
return glob(path + f'/**/*.{file_extension}', recursive=recursive)
def seed_all(seed=42561):
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
numpy.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
def get_score(y_trues, y_preds, config):
if config.regression:
y_true = y_trues
y_pred = y_preds
else:
y_true = numpy.argmax(y_trues, axis=1) + 1
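        # the probability-weighted sum of class indices (stages 1-7) turns the predicted probabilities into a continuous growth-stage estimate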
y_pred = sum([y_preds[:, i] * (i + 1) for i in range(7)])
rmse = math.sqrt(mean_squared_error(y_true, y_pred))
return rmse
def get_train_test_df(root_dir: str, images_dir: str) -> tuple:
train_df = pandas.read_csv(os.path.join(root_dir, 'Train.csv'))
all_files = files_in_path(path=images_dir, file_extension='jpeg')
dir_split_char = '/' if '/' in all_files[0] else '\\'
all_ids = [file.split(dir_split_char)[-1].split('.')[0] for file in all_files]
test_ids = set(all_ids) - set(train_df.UID.values.tolist())
test_df = pandas.DataFrame(test_ids, columns=['UID'])
return train_df, test_df
def create_folds_dataset(train_df: pandas.DataFrame, folds: int):
print("Creating folds dataset!")
train_df.loc[:, "kfold"] = -1
train_df = train_df.sample(frac=1, random_state=57543).reset_index(drop=True)
X = train_df.UID.values
y = train_df.growth_stage.values
    kfold = StratifiedKFold(n_splits=folds, shuffle=True, random_state=15435)  # random_state only takes effect when shuffle=True
for fold, (train_index, val_index) in enumerate(kfold.split(X, y)):
train_df.loc[val_index, "kfold"] = fold
return train_df
def prepare_data(config, label_quality: int = None, force_creation: bool = False):
root_dir = config.root_dir
images_dir = config.images_dir
num_folds = config.folds
train_folds_csv = os.path.join(root_dir, 'train_fold.csv')
test_folds_csv = os.path.join(root_dir, 'test.csv')
if force_creation or not os.path.exists(train_folds_csv) or not os.path.exists(test_folds_csv):
train_df, test_df = get_train_test_df(root_dir=root_dir, images_dir=images_dir)
print('growth_stage:\n', train_df.growth_stage.value_counts())
print('label_quality:\n', train_df.label_quality.value_counts())
train_df = create_folds_dataset(train_df=train_df, folds=num_folds)
train_df.to_csv(train_folds_csv, index=False)
test_df.to_csv(test_folds_csv, index=False)
train_df = pandas.read_csv(train_folds_csv)
test_df = pandas.read_csv(test_folds_csv)
if label_quality:
train_df = train_df[train_df.label_quality == label_quality].reset_index(drop=True)
return train_df, test_df
def get_train_test_val(fold, config, debug_mode):
train_df, test_df = prepare_data(config=config, label_quality=2, force_creation=False)
df_train = train_df[train_df.kfold != fold].reset_index(drop=True)
df_val = train_df[train_df.kfold == fold].reset_index(drop=True)
if debug_mode:
df_train = df_train[0:200]
df_val = df_val[0:40]
return df_train, df_val, test_df
def get_transforms():
transforms_train = albumentations.Compose([
# albumentations.Transpose(p=0.5),
albumentations.VerticalFlip(p=0.5),
albumentations.HorizontalFlip(p=0.5),
albumentations.Rotate(limit=180, p=0.5),
albumentations.Blur(p=0.5),
albumentations.CoarseDropout(p=0.5),
albumentations.RandomBrightnessContrast(brightness_limit=0.3, contrast_limit=0.3, p=0.5),
albumentations.ShiftScaleRotate(shift_limit=0.2, scale_limit=0.2, rotate_limit=0, p=0.5),
])
transforms_val = albumentations.Compose([])
return transforms_train, transforms_val
def resize_transform(image):
return numpy.asarray(Image.fromarray(image.astype(numpy.uint8)).resize((config.image_size, config.image_size)))
def get_test_loader(test_df, config, transforms):
test_dataset = WheatDataset(df=test_df, config=config, tile_mode=0, rand=False, transform=transforms,
resize_transform=resize_transform)
test_loader = DataLoader(test_dataset, batch_size=config.val_batch_size, sampler=SequentialSampler(test_dataset),
num_workers=config.num_workers)
return test_loader
def get_dataloaders(train_df, val_df, config):
transforms_train, transforms_val = get_transforms()
train_dataset = WheatDataset(df=train_df, config=config, tile_mode=0, rand=True, transform=transforms_train,
resize_transform=resize_transform)
val_dataset = WheatDataset(df=val_df, config=config, tile_mode=0, rand=False, transform=transforms_val,
resize_transform=resize_transform)
train_loader = DataLoader(train_dataset, batch_size=config.train_batch_size, sampler=RandomSampler(train_dataset),
num_workers=config.num_workers)
valid_loader = DataLoader(val_dataset, batch_size=config.val_batch_size, sampler=SequentialSampler(val_dataset),
num_workers=config.num_workers)
return train_loader, valid_loader
def get_val_dataloader(val_df, config, transforms):
val_dataset = WheatDataset(df=val_df, config=config, tile_mode=0, rand=False, transform=transforms,
resize_transform=resize_transform)
valid_loader = DataLoader(val_dataset, batch_size=config.val_batch_size, sampler=SequentialSampler(val_dataset),
num_workers=config.num_workers)
return valid_loader
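# Test-time augmentation: the validation and test sets are predicted once per deterministic flip and the per-transform predictions are averaged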
def perform_predictions(model, device, test_df, val_df, config, filename):
tta_transforms = [
dict(name='none', transform=albumentations.Compose([])),
dict(name='horizontal_flip', transform=albumentations.Compose([albumentations.HorizontalFlip(p=1)])),
dict(name='vertical_flip', transform=albumentations.Compose([albumentations.VerticalFlip(p=1)]))
]
y_trues = val_df.growth_stage
del val_df['growth_stage']
avg_preds = None
for transform_info in tta_transforms:
transform_name = transform_info.get('name')
transforms = transform_info.get('transform')
val_loader = get_val_dataloader(val_df, config, transforms)
preds = predict(model, val_loader, device)
avg_preds = preds if avg_preds is None else avg_preds + preds
score = get_score(y_trues=y_trues, y_preds=preds, config=config)
print(f"transform {transform_name} --> score = {score}")
avg_preds = avg_preds / len(tta_transforms)
score = get_score(y_trues=y_trues, y_preds=avg_preds, config=config)
print("TTA val score =", score)
print("Prediction test set")
avg_preds = None
for transform_info in tta_transforms:
transform_name = transform_info.get('name')
transforms = transform_info.get('transform')
test_loader = get_val_dataloader(test_df, config, transforms)
preds = predict(model, test_loader, device)
save_preds(preds=preds, filename=transform_name + "_" + filename, output_dir=config.output_dir)
avg_preds = preds if avg_preds is None else avg_preds + preds
avg_preds = avg_preds / len(tta_transforms)
if len(tta_transforms) > 1:
filename = 'tta_' + filename
save_preds(preds=avg_preds, filename=filename, output_dir=config.output_dir)
def save_preds(preds, filename, output_dir):
    sub_df = test_df[['UID']].copy()  # .copy() avoids SettingWithCopy warnings; relies on the module-level test_df
sub_df['growth_stage'] = preds
sub_df.to_csv(os.path.join(output_dir, filename), index=False)
def perform_train(model, device, train_df, val_df, config, best_file, fine_tune=False):
train_loader, valid_loader = get_dataloaders(train_df, val_df, config)
criterion = nn.MSELoss() if config.regression else nn.BCEWithLogitsLoss()
best_loss, best_acc, best_score = val_epoch(loader=valid_loader, device=device, criterion=criterion)
print(f"Initial val scores. score = {best_score} loss = {best_loss} accuracy = {best_acc}")
lr = config.init_lr / config.warmup_factor
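    # start at init_lr / warmup_factor; with gradual warmup enabled the scheduler scales the rate back up to init_lr over warmup_epo epochs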
optimizer = optim.Adam(model.parameters(), lr=lr)
if config.use_gradual_lr_warmup and not fine_tune:
scheduler_cosine = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, config.epochs - config.warmup_epo)
scheduler = GradualWarmupScheduler(optimizer, multiplier=config.warmup_factor, total_epoch=config.warmup_epo,
after_scheduler=scheduler_cosine)
else:
scheduler = ReduceLROnPlateau(optimizer, 'min', factor=config.learning_rate_factor, verbose=True, eps=1e-6,
patience=config.learning_rate_patience)
epochs = 20 if fine_tune else config.epochs + 1
print("Total epochs", epochs)
for epoch in range(1, epochs):
print(time.ctime(), 'Epoch:', epoch)
train_loss, train_acc, train_score = train_epoch(loader=train_loader, device=device, optimizer=optimizer,
criterion=criterion)
        val_loss, acc, score = val_epoch(loader=valid_loader, device=device, criterion=criterion)
        # ReduceLROnPlateau needs the monitored metric, while the warmup/cosine scheduler only needs the epoch index
        scheduler.step(val_loss if isinstance(scheduler, ReduceLROnPlateau) else epoch - 1)
content = time.ctime() + ' ' + f'Epoch {epoch}/{epochs}, lr: {optimizer.param_groups[0]["lr"]:.7f}, ' \
f'train loss: {numpy.mean(train_loss):.5f}, train acc: {train_acc:.5f} ' \
f'train score: {train_score:.5f} --> ' \
f'val loss: {numpy.mean(val_loss):.5f}, val acc: {acc:.5f} ' \
f'val score: {score:.5f}'
if val_loss < best_loss:
content += '\n\tval_loss ({:.6f} --> {:.6f}). Saving model ...'.format(best_loss, val_loss)
torch.save(model.state_dict(), os.path.join(config.output_dir, best_file))
best_loss = val_loss
else:
print(f"\tval_loss did not improve from {best_loss:.6f}")
print(content)
with open(os.path.join(config.output_dir, log_filename), 'a') as appender:
if epoch == 1:
appender.write(config_str + '\n')
appender.write(content + '\n')
print("Best Loss was", best_loss)
def train_epoch(loader, device, optimizer, criterion):
model.train()
train_loss = []
PREDS = []
TARGETS = []
bar = tqdm(loader)
for (data, target) in bar:
data, target = data.to(device), target.to(device)
loss_func = criterion
optimizer.zero_grad()
logits = model(data)
if config.regression:
logits = logits.squeeze()
loss = loss_func(logits, target)
loss.backward()
optimizer.step()
loss_np = loss.detach().cpu().numpy()
train_loss.append(loss_np)
smooth_loss = sum(train_loss[-100:]) / min(len(train_loss), 100)
bar.set_description('loss: %.5f, smth: %.5f' % (loss_np, smooth_loss))
if config.regression:
pred = logits.detach()
else:
# pred = torch.nn.functional.softmax(logits, dim=1).detach()
pred = logits.sigmoid().detach()
PREDS.append(pred)
TARGETS.append(target)
if len(PREDS[-1].shape) == 0:
            # correct for batches of size 1, where the output would be a scalar
PREDS[-1] = torch.FloatTensor([PREDS[-1]]).to(device)
PREDS = torch.cat(PREDS).cpu().numpy()
TARGETS = torch.cat(TARGETS).cpu().numpy()
if config.regression:
PREDS = PREDS.squeeze()
acc = (PREDS.round() == TARGETS).mean() * 100.
else:
acc = (numpy.argmax(PREDS, axis=1) == numpy.argmax(TARGETS, axis=1)).mean() * 100.
score = get_score(y_trues=TARGETS, y_preds=PREDS, config=config)
return train_loss, acc, score
def val_epoch(loader, device, criterion, get_output=False):
model.eval()
val_loss = []
LOGITS = []
PREDS = []
TARGETS = []
with torch.no_grad():
for (data, target) in tqdm(loader):
data, target = data.to(device), target.to(device)
logits = model(data)
if config.regression:
pred = logits
else:
pred = torch.nn.functional.softmax(logits, dim=1).detach()
LOGITS.append(logits)
PREDS.append(pred)
TARGETS.append(target)
if config.regression:
logits = logits.squeeze()
loss = criterion(logits, target)
val_loss.append(loss.detach().cpu().numpy())
val_loss = numpy.mean(val_loss)
LOGITS = torch.cat(LOGITS).cpu().numpy()
PREDS = torch.cat(PREDS).cpu().numpy()
TARGETS = torch.cat(TARGETS).cpu().numpy()
if config.regression:
PREDS = PREDS.squeeze()
acc = (PREDS.round() == TARGETS).mean() * 100.
else:
acc = (numpy.argmax(PREDS, axis=1) == numpy.argmax(TARGETS, axis=1)).mean() * 100.
score = get_score(y_trues=TARGETS, y_preds=PREDS, config=config)
if get_output:
return LOGITS
else:
return val_loss, acc, score
def predict(model, loader, device):
model.eval()
preds = []
with torch.no_grad():
for data in tqdm(loader):
data = data.to(device)
logits = model(data)
if config.regression:
preds.append(logits.detach())
else:
preds.append(torch.nn.functional.softmax(logits, dim=1).detach())
preds = torch.cat(preds).cpu().numpy()
if not config.regression:
preds = sum([preds[:, i] * (i + 1) for i in range(7)])
return preds
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--data_folder', required=True)
parser.add_argument('--output_folder', required=True)
return parser.parse_args()
args = parse_args()
DATA_DIR = args.data_folder
OUTPUT_DIR = args.output_folder
GPU_ID = 0
image_dir = os.path.join(DATA_DIR, 'Images')
seed_all(seed=354)
debug_mode = False
folds = list(range(0, 5))
device = torch.device(f'cuda:{GPU_ID}')
print(f"Creating output directory: {OUTPUT_DIR}")
os.makedirs(OUTPUT_DIR, exist_ok=True)
required_files = ['SampleSubmission.csv', 'test.csv', 'Train.csv']
for file in required_files:
if not os.path.exists(os.path.join(DATA_DIR, file)):
print(f"File {file} is missing.")
exit()
image_files = files_in_path(file_extension='jpeg', path=image_dir)
if len(image_files) < 14000:
print(f"Expected at least 14000 images in {image_dir}. Got {len(image_files)} images")
exit()
stages = [
dict(configs=[Resnet50Config, EfficientNetB0, SeResNextConfig], ensemble_filters=['horizontal', 'none', 'vertical'],
fine_tune=False, output_file='combined_test_preds.csv'),
dict(configs=[Resnet50Config, EfficientNetB0, SeResNextConfig], ensemble_filters=['horizontal', 'none'],
fine_tune=True, output_file='submission.csv')
]
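# stage 1 trains each backbone per fold and writes TTA test predictions; stage 2 fine-tunes the same models on train data plus the pseudo-labelled test set and writes the final submission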
for stage in stages:
configs = stage['configs']
fine_tune = stage['fine_tune']
ensemble_filters = stage['ensemble_filters']
for config in configs:
config.root_dir = DATA_DIR
config.output_dir = OUTPUT_DIR
config.images_dir = os.path.join(config.root_dir, 'Images')
config_str = "Config:\n\t" + '\n\t'.join("%s: %s" % item for item in vars(config).items() if '__' not in item[0])
print(config_str)
for fold in folds:
print(f"\tFold = {fold}")
train_df, val_df, test_df = get_train_test_val(config=config, debug_mode=debug_mode, fold=fold)
if fine_tune:
pseudo_test_df = pandas.read_csv('combined_test_preds.csv')
train_df = train_df.append(pseudo_test_df)
                train_df = train_df.sample(frac=1, random_state=354).reset_index(drop=True)  # assign the shuffled frame, otherwise the shuffle is a no-op
print(f"{len(train_df)} Training Samples.\n{len(val_df)} Validation Samples.\n")
best_file = f'{config.neural_backbone}_best_fold{fold}_dropout_{config.dropout}.pth'
if config.regression:
best_file = 'regression_' + best_file
log_filename = best_file.replace('.pth', '.txt')
model = ImageModel(model_name=config.neural_backbone, device=device, dropout=config.dropout, neurons=0,
num_classes=config.num_classes, extras_inputs=[], base_model_pretrained_weights=None)
model.load(directory=config.output_dir, filename=best_file)
model = model.to(device)
if fine_tune:
best_file = 'finetune_' + best_file
checkpoint_path = os.path.join(config.output_dir, best_file)
if os.path.exists(checkpoint_path):
print(f"WARNING: TRAINED CHECKPOINT ALREADY EXISTS IN {checkpoint_path}. "
f"SKIPPING TRAINING FOR THIS MODEL/FOLD")
else:
print("Training model!")
perform_train(model, device, train_df, val_df, config, best_file, fine_tune)
print(f"Predicting model {config.neural_backbone} for fold {fold}!")
model.load(directory=config.output_dir, filename=best_file)
filename = "submission_" + best_file.replace('.pth', '.csv')
perform_predictions(model, device, test_df, val_df, config, filename)
csvs = files_in_path(path=config.output_dir, file_extension='csv')
files_to_ensemble = []
for filter in ensemble_filters:
if fine_tune:
files = [csv for csv in csvs if filter in csv and 'finetu' in csv]
else:
files = [csv for csv in csvs if filter in csv]
files_to_ensemble.extend(files)
print(f"Combining {len(files_to_ensemble)} predictions. Files: {files_to_ensemble}")
    final = pandas.read_csv(files_to_ensemble[0])
# -*- coding: utf-8 -*-
"""
Library of functions for meteorology.
Meteorological function names
=============================
- cp_calc: Calculate specific heat
- Delta_calc: Calculate slope of vapour pressure curve
- es_calc: Calculate saturation vapour pressures
- ea_calc: Calculate actual vapour pressures
- gamma_calc: Calculate psychrometric constant
- L_calc: Calculate latent heat of vapourisation
- pottemp: Calculate potential temperature (1000 hPa reference pressure)
- rho_calc: Calculate air density
- sun_NR: Maximum sunshine duration [h] and extraterrestrial radiation [J/day]
- vpd_calc: Calculate vapour pressure deficits
- windvec: Calculate average wind direction and speed
Module requires and imports the math, numpy, pandas and scipy modules.
Tested for compatibility with Python 2.7.
Function descriptions
=====================
"""
import math
import numpy as np
import pandas as pd
import scipy
def _arraytest(*args):
"""
Function to convert input parameters in as lists or tuples to
arrays, while leaving single values intact.
Test function for single values or valid array parameter input
(<NAME>).
Parameters:
args (array, list, tuple, int, float): Input values for functions.
Returns:
rargs (array, int, float): Valid single value or array function input.
Examples
--------
>>> _arraytest(12.76)
12.76
>>> _arraytest([(1,2,3,4,5),(6,7,8,9)])
array([(1, 2, 3, 4, 5), (6, 7, 8, 9)], dtype=object)
>>> x=[1.2,3.6,0.8,1.7]
>>> _arraytest(x)
array([ 1.2, 3.6, 0.8, 1.7])
>>> _arraytest('This is a string')
'This is a string'
"""
rargs = []
for a in args:
if isinstance(a, (list, tuple)):
rargs.append(scipy.array(a))
else:
rargs.append(a)
if len(rargs) == 1:
return rargs[0] # no unpacking if single value, return value i/o list
return rargs
def cp_calc(airtemp=scipy.array([]), rh=scipy.array([]), airpress=scipy.array([])):
"""
Function to calculate the specific heat of air:
.. math::
c_p = 0.24 \\cdot 4185.5 \\cdot \\left(1 + 0.8 \\cdot \\frac{0.622 \\cdot e_a}{p - e_a}\\right)
where ea is the actual vapour pressure calculated from the relative
humidity and p is the ambient air pressure.
Parameters:
- airtemp: (array of) air temperature [Celsius].
- rh: (array of) relative humidity data [%].
- airpress: (array of) air pressure data [Pa].
Returns:
cp: array of saturated c_p values [J kg-1 K-1].
References
----------
<NAME>, <NAME>, <NAME> and <NAME> (1998). Crop
Evaporation Guidelines for computing crop water requirements,
FAO - Food and Agriculture Organization of the United Nations.
Irrigation and drainage paper 56, Chapter 3. Rome, Italy.
(http://www.fao.org/docrep/x0490e/x0490e07.htm)
Examples
--------
>>> cp_calc(25,60,101300)
1014.0749457208065
>>> t = [10, 20, 30]
>>> rh = [10, 20, 30]
>>> airpress = [100000, 101000, 102000]
>>> cp_calc(t,rh,airpress)
array([ 1005.13411289, 1006.84399787, 1010.83623841])
"""
# Test input array/value
airtemp, rh, airpress = _arraytest(airtemp, rh, airpress)
# calculate vapour pressures
eact = ea_calc(airtemp, rh)
# Calculate cp
cp = 0.24 * 4185.5 * (1 + 0.8 * (0.622 * eact / (airpress - eact)))
return cp # in J/kg/K
def Delta_calc(airtemp=scipy.array([])):
"""
Function to calculate the slope of the temperature - vapour pressure curve
(Delta) from air temperature T:
.. math::
\\Delta = 1000 \\cdot \\frac{e_s \\cdot 4098}{(T + 237.3)^2}
where es is the saturated vapour pressure at temperature T.
Parameters:
- airtemp: (array of) air temperature [Celsius].
Returns:
- Delta: (array of) slope of saturated vapour curve [Pa K-1].
References
----------
Technical regulations 49, World Meteorological Organisation, 1984.
Appendix A. 1-Ap-A-3.
Examples
--------
>>> Delta_calc(30.0)
243.34309166827094
>>> x = [20, 25]
>>> Delta_calc(x)
array([ 144.6658414 , 188.62504569])
"""
# Test input array/value
airtemp = _arraytest(airtemp)
# calculate saturation vapour pressure at temperature
es = es_calc(airtemp) # in kPa
# Calculate Delta
Delta = es * 4098.0 / ((airtemp + 237.3) ** 2) * 1000
return Delta # in Pa/K
def ea_calc(airtemp=scipy.array([]), rh=scipy.array([])):
"""
Function to calculate actual vapour pressure from relative humidity:
.. math::
e_a = \\frac{rh \\cdot e_s}{100}
where es is the saturated vapour pressure at temperature T.
Parameters:
- airtemp: array of measured air temperatures [Celsius].
- rh: Relative humidity [%].
Returns:
- ea: array of actual vapour pressure [Pa].
Examples
--------
>>> ea_calc(25,60)
1900.0946514729308
"""
# Test input array/value
airtemp, rh = _arraytest(airtemp, rh)
# Calculate saturation vapour pressures
    es = es_calc(airtemp) * 1000.0  # convert kPa to Pa
# Calculate actual vapour pressure
eact = rh / 100.0 * es
return eact # in Pa
def es_calc(airtemp):
"""
Function to calculate saturated vapour pressure from temperature.
Uses the Arden-Buck equations.
Parameters:
- airtemp : (data-type) measured air temperature [Celsius].
Returns:
- es : (data-type) saturated vapour pressure [kPa].
References
----------
https://en.wikipedia.org/wiki/Arden_Buck_equation
<NAME>. (1981), "New equations for computing vapor pressure and enhancement
factor", J. Appl. Meteorol., 20: 1527–1532
Buck (1996), Buck Research CR-1A User's Manual, Appendix 1. (PDF)
Examples
--------
>>> es_calc(30.0)
4.245126
>>> x = [20, 25]
>>> es_calc(x)
array([ 2.338340, 3.168531])
"""
airtemp = | pd.to_numeric(airtemp, errors="coerce") | pandas.to_numeric |
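    # NOTE: the remainder of this function is a hedged reconstruction -- the
    # original body is truncated at this point in the source. It applies the
    # Arden Buck (1996) coefficients named in the docstring; these constants
    # reproduce the doctest values above (e.g. es_calc(30.0) -> ~4.245126 kPa).
    airtemp = np.asarray(airtemp, dtype=float)
    # Buck (1996): saturation over water for T > 0 C, over ice for T <= 0 C
    es_water = 0.61121 * np.exp((18.678 - airtemp / 234.5)
                                * (airtemp / (257.14 + airtemp)))
    es_ice = 0.61115 * np.exp((23.036 - airtemp / 333.7)
                              * (airtemp / (279.82 + airtemp)))
    es = np.where(airtemp > 0.0, es_water, es_ice)
    return float(es) if es.ndim == 0 else es  # in kPa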
# The preprocessor should include scaling; having two objects makes it inconvenient
import pandas as pd
import numpy as np
from index.utils import ISO_to_Everything, geometric_mean, arithmetic_mean
from index.GreenGrowthStuff import GreenGrowthStuff
CAPPING_PRIOR_NORMALIZATION = ['EE1', 'EW2', 'ME2',
'EQ1', 'EQ2', 'EQ3', 'GE1',
'GE2', 'GE3', 'GV1',
'SE2',
'SL1'
]
class GreenGrowthIndex(GreenGrowthStuff):
"""
A class used to represent the Green Growth Index.
(See technical report for complete methods)
To improve: Clean up and standardize normalize step !
Attributes
----------
indicators : pd.DataFrame
Raw indicators used to compute index
sustainability_targets : pd.DataFrame
Sustainability targets as defined in the report
categories : pd.DataFrame
Sub categories of index
dimensions : pd.DataFrame
sub dimensions of index
GGI : pd.Series
Green growth index
"""
def __init__(self,
indicators,
sustainability_targets,
indicator_aggregation='arithmetic',
category_aggregation='geometric',
dimension_aggregation='geometric',
weighted_dim=False,
missing_ind_per_3cat=1,
missing_ind_per2cat=0,
missing_cat_per_dim=1,
fence='IQR',
ignore_indicators = []):
super(GreenGrowthIndex, self).__init__()
self.indicator_aggregation = indicator_aggregation
self.category_aggregation = category_aggregation
self.dimension_aggregation = dimension_aggregation
self.weighted_dim = weighted_dim
self.missing_ind_per_3cat = missing_ind_per_3cat
self.missing_ind_per2cat = missing_ind_per2cat
self.missing_cat_per_dim = missing_cat_per_dim
self.fence = fence
self.ignore_indicators = ignore_indicators
self.compute(indicators, sustainability_targets)
return None
def compute(self, indicators, sustainability_targets):
"""
Performs the whole pipeline to compute the Green Growth Index
Saves the intermediate steps into the class attributes
Need to extract the normalization step to be able to do it oustide of the index
Parameters
----------
indicators : pd.DataFrame
Raw indicators
sustainability_targets : pd.DataFrame
sustainability targets
indicator_aggregation: str
indicators aggregation method: {'geometric', 'arithmetic'}
category_aggregation: str
categories aggregation method: {'geometric', 'arithmetic'}
dimension_aggregation: str
dimensions aggregation method: {'geometric', 'arithmetic'}
weighted_dim: bool
weight dimensions by number of indicators when computing index
missing_ind_per_3cat: int
number of missing indicators allowed for categories defined by 3 indicators
missing_ind_per2cat: int
number of missing indicators allowed for categories defined by 2 indicators
missing_cat_per_dim: int
number of missing categories allowed per dimension
fence: str
fencing method for outlier capping: {'IQR', 'EXTQ'}
'IQR': fences defined using interquartile range
'EXTQ': fences defined by extreme quantiles
Returns
-------
GGI: pd.Series
a series containing green growth index
"""
# Filtering should be put in seperate function
filtered_indicators = self.IND_CAT_DIM[~self.IND_CAT_DIM.Indicator.isin(self.ignore_indicators)].Indicator
sustainability_targets = sustainability_targets.loc[filtered_indicators] # select only the desired indicators
indicators = indicators.copy(deep=True)[filtered_indicators] # select only the desired indicators
        filtered_CAPPING_PRIOR_NORMALIZATION = [el for el in CAPPING_PRIOR_NORMALIZATION if el in set(filtered_indicators)]  # No capping on removed indicators
preprocessor = GreenGrowthPreProcessor()
# Computing stats on the indicators (min, max, 25th percentile, 75th percentile)
statistics = preprocessor.compute_statistics(indicators, fence=self.fence)
# Taking out outliers from the indicators
indicators_fenced = preprocessor.cap_indicators(
indicators, statistics, filtered_CAPPING_PRIOR_NORMALIZATION)
# Normalizing the indicators using sustainability targets
indicators_normed = GreenGrowthScaler().normalize(indicators_fenced, sustainability_targets)
# Aggregating indicators into categories
categories = IndicatorsAggregation().compute(indicators_normed, average_method=self.indicator_aggregation, missing_ind_2=self.missing_ind_per2cat, missing_ind_3=self.missing_ind_per_3cat)
# Aggregating categories into dimensions
dimensions = CategoriesAggregation().compute(categories, average_method=self.category_aggregation, missing_cat=self.missing_cat_per_dim)
# Aggregating dimensions into the green growth index (TO CLEAN UP!)
dim_agg = DimensionsAggregation()
dim_agg.IND_CAT_DIM = self.IND_CAT_DIM.query("Indicator not in @self.ignore_indicators")
Index = dim_agg.compute(dimensions, average_method=self.dimension_aggregation, weighted=self.weighted_dim)
self.indicators = indicators
self.statistics = statistics
self.indicators_normed = indicators_normed
self.indicators_fenced = indicators_fenced
self.sustainability_targets = sustainability_targets
self.categories = categories
self.dimensions = dimensions
self.Index = pd.DataFrame(Index, columns=['Index'])
return self
def to_excel(self, path='GreenIndex.xlsx'):
'''
Export all the data into an excel file
Parameters
----------
path: str
Path to the excel file
'''
xls_info = [
('Index', self.Index),
('sustainability_targets', self.sustainability_targets),
('statistics', self.statistics),
('indicators', self.indicators),
('indicators_normed', self.indicators_normed),
('indicators_fenced', self.indicators_fenced),
('categories', self.categories),
('dimensions', self.dimensions),
]
with pd.ExcelWriter(path) as writer:
for sheet_name, df in xls_info:
if sheet_name not in ['sustainability_targets', 'statistics']:
df = ISO_to_Everything(df)
df.set_index(['Country', 'Region', 'Sub-region', 'Intermediate region', 'Development',
'IncomeLevel', 'HDI'], append=True, inplace=True)
df.to_excel(writer, sheet_name=sheet_name)
return None
def to_long(self):
'''
Format the results in a long dataframe (ISO, Variable, Aggregation, Value)
Parameters
----------
None
'''
names_dfs = [
('Index', self.Index),
('Indicator', self.indicators),
('Indicator_normed', self.indicators_normed),
('Indicator_fenced', self.indicators_fenced),
('Category', self.categories),
('Dimension', self.dimensions),
]
long_dfs = []
for name, df in names_dfs:
df = df.reset_index().melt(id_vars=['ISO'], value_name='Value', var_name='Variable')
df['Aggregation'] = name
long_dfs.append(df)
return pd.concat(long_dfs, axis=0).set_index('ISO')
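# Minimal usage sketch for GreenGrowthIndex (the input frames are assumptions:
# `indicators` holds raw indicator values indexed by ISO code and
# `sustainability_targets` is indexed by indicator code):
#
#   ggi = GreenGrowthIndex(indicators, sustainability_targets, fence='IQR')
#   ggi.to_excel('GreenIndex.xlsx')
#   long_df = ggi.to_long()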
class GreenGrowthPreProcessor():
'''Process the indicators before computing the index.
Attributes:
indicators_to_cap(List): list of indicators to cap.
'''
def __init__(self):
return None
def compute_statistics(self, indicators, fence='IQR'):
"""
Computes the meta data of the indicators table.
Parameters
----------
indicators : pd.DataFrame
Raw indicators
Returns
-------
meta_indicators: pd.DataFrame
a datafame containing for indicator:
- minimum
- maximum
- Lower fence = 25th percentile - μ x IQR
- Upper fence = 75th percentile + μ x IQR
(IQR = 75th percentile - 25th percentile, with μ = 3.0 the multiplier)
"""
# Compute stats
q_75 = indicators.quantile(0.75)
q_25 = indicators.quantile(0.25)
indicator_max = indicators.max()
indicator_min = indicators.min()
# Store in dataframe
stats = pd.concat([q_75, q_25, indicator_max, indicator_min], axis=1)
stats.columns = ['75%', '25%', 'max', 'min']
# Compute fences
if fence == 'IQR':
IQR = stats["75%"] - stats["25%"]
stats['lower fence'] = stats["25%"] - 3 * IQR
stats['upper fence'] = stats["75%"] + 3 * IQR
if fence == 'EXTQ':
stats['lower fence'] = indicators.quantile(0.02)
stats['upper fence'] = indicators.quantile(0.98)
        # TODO: handle this SE2-specific cap properly elsewhere
stats.loc['SE2', 'upper fence'] = 10
return stats
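    # Worked example of the default IQR fence (hypothetical numbers): if an
    # indicator has 25th percentile 10 and 75th percentile 30, then IQR = 20,
    # lower fence = 10 - 3*20 = -50 and upper fence = 30 + 3*20 = 90, so
    # cap_indicators() below clips that indicator to the range [-50, 90].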
def cap_indicators(self, indicators, statistics, indicators_to_cap):
"""Remove outliers by caping their values.
(indicators over the upper fence and under the lower fence are replaced by the fences)
Parameters
----------
indicators : pd.DataFrame
Raw indicators
meta_indicators: pd.DataFrame
output of compute_meta
Returns
-------
indicators: pd.DataFrame
a datafame containing for indicators without outliers
"""
indicators = indicators.copy(deep=True)
upper_fence, lower_fence = statistics["upper fence"], statistics["lower fence"]
indicators.loc[:, indicators_to_cap] = indicators[indicators_to_cap].apply(lambda x: x.mask(
x >= upper_fence[x.name], upper_fence[x.name]))
indicators.loc[:, indicators_to_cap] = indicators[indicators_to_cap].apply(lambda x: x.mask(
x <= lower_fence[x.name], lower_fence[x.name]))
return indicators
class GreenGrowthScaler():
"""Scale the Green Growth Indicators.
Attributes
----------
None
"""
def __init__(self):
return None
def scale_min_max(self, X, maximum, minimum, b, a=1):
return a + (X - minimum) / (maximum - minimum) * (b - a)
def normalize_single_target_case(self, X, Xt):
Xmax = X.max()
Xmin = X.min()
max_of_Xt_Xmax = | pd.concat((Xt, Xmax), axis=1) | pandas.concat |
import datetime as dt
import pandas as pd
import numpy as np
import re
# Begin User Input Data
report_date = dt.datetime(2020, 8, 31)
wscf_market_value = 194719540.46
aqr_market_value = 182239774.63
delaware_market_value = 151551731.17
wellington_market_value = 149215529.22
qic_cash_market_value = 677011299.30
input_directory = 'U:/'
output_directory = 'U:/'
jpm_filepath = input_directory + 'CIO/#Data/input/jpm/holdings/2020/08/Priced Positions - All.csv'
wscf_filepath = input_directory + 'CIO/#Data/input/lgs/holdings/unitprices/2020/08/wscf_holdings.xlsx'
aqr_filepath = input_directory + 'CIO/#Data/input/lgs/holdings/unitprices/2020/08/aqr_holdings.xls'
delaware_filepath = input_directory + 'CIO/#Data/input/lgs/holdings/unitprices/2020/08/delaware_holdings.xlsx'
wellington_filepath = input_directory + 'CIO/#Data/input/lgs/holdings/unitprices/2020/08/wellington_holdings.xlsx'
qic_cash_filepath = input_directory + 'CIO/#Data/input/lgs/holdings/unitprices/2020/07/qic_cash_holdings.xlsx'
tickers_filepath = input_directory + 'CIO/#Holdings/Data/input/tickers/tickers_201909.xlsx'
asx_filepath = input_directory + 'CIO/#Data/input/asx/ASX300/20200501-asx300.csv'
aeq_filepath = input_directory + 'CIO/#Holdings/Data/input/exclusions/LGS Exclusions List_December 2018_AEQ_Manager Version.xlsx'
ieq_filepath = input_directory + 'CIO/#Holdings/Data/input/exclusions/LGS Exclusions List_December 2018_IEQ_Manager Version.xlsx'
aeq_exclusions_filepath = input_directory + 'CIO/#Holdings/Data/output/exclusions/aeq_exclusions_' + str(report_date.date()) + '.csv'
ieq_exclusions_filepath = input_directory + 'CIO/#Holdings/Data/output/exclusions/ieq_exclusions_' + str(report_date.date()) + '.csv'
# End User Input Data
# Account Name to LGS Name dictionary
australian_equity_managers_dict = {
'LGS AUSTRALIAN EQUITIES - BLACKROCK': 'BlackRock',
'LGS AUSTRALIAN EQUITIES - ECP': 'ECP',
'LGS AUSTRALIAN EQUITIES DNR CAPITAL': 'DNR',
'LGS AUSTRALIAN EQUITIES - PENDAL': 'Pendal',
'LGS AUSTRALIAN EQUITIES - SSGA': 'SSGA',
'LGS AUSTRALIAN EQUITIES - UBIQUE': 'Ubique',
'LGS AUSTRALIAN EQUITIES - WSCF': 'First Sentier',
'LGS AUSTRALIAN EQUITIES REBALANCE': 'Rebalance',
'LGS AUST EQUITIES - ALPHINITY': 'Alphinity'
}
international_equity_managers_dict = {
'LGS INTERNATIONAL EQUITIES - WCM': 'WCM',
'LGS INTERNATIONAL EQUITIES - AQR': 'AQR',
'LGS INTERNATIONAL EQUITIES - HERMES': 'Hermes',
'LGS INTERNATIONAL EQUITIES - IMPAX': 'Impax',
'LGS INTERNATIONAL EQUITIES - LONGVI EW': 'Longview',
'LGS INTERNATIONAL EQUITIES - LSV': 'LSV',
'LGS INTERNATIONAL EQUITIES - MFS': 'MFS',
'LGS INTERNATIONAL EQUITIES - MACQUARIE': 'Macquarie',
'LGS INTERNATIONAL EQUITIES - WELLINGTON': 'Wellington',
'LGS GLOBAL LISTED PROPERTY - RESOLUTION': 'Resolution',
}
# Imports JPM Mandates holdings data
df_jpm = pd.read_csv(
jpm_filepath,
skiprows=[0, 1, 2, 3],
header=0,
usecols=[
'Account Number',
'Account Name',
'Security ID',
'ISIN',
'Security Name',
'Asset Type Description',
'Price Date',
'Market Price',
'Total Units',
'Total Market Value (Local)',
'Total Market Value (Base)',
'Local Currency'
],
parse_dates=['Price Date'],
infer_datetime_format=True
)
# Renames the columns into LGS column names
df_jpm = df_jpm.rename(
columns={
'Security ID': 'SEDOL',
'Asset Type Description': 'Asset Type',
'Price Date': 'Date',
'Market Price': 'Purchase Price Local',
'Total Units': 'Quantity',
'Total Market Value (Local)': 'Market Value Local',
'Total Market Value (Base)': 'Market Value AUD',
'Local Currency': 'Currency'
}
)
df_jpm['Purchase Price AUD'] = df_jpm['Market Value AUD'] / df_jpm['Quantity']
# Imports WSCF holdings data
df_wscf = pd.read_excel(
pd.ExcelFile(wscf_filepath),
sheet_name='Holdings',
skiprows=[0, 1, 2, 3, 4, 5, 6, 8],
header=0,
usecols=[
'Security SEDOL',
'Security ISIN',
'Security Name',
'Unit Holdings',
'Market Value (Local Currency)',
'Market Value (Base Currency)',
'Security Currency'
]
)
# Renames the columns into LGS column names
df_wscf = df_wscf.rename(
columns={
'Security SEDOL': 'SEDOL',
'Security ISIN': 'ISIN',
'Unit Holdings': 'Quantity',
'Market Value (Local Currency)': 'Market Value Local',
'Market Value (Base Currency)': 'Market Value AUD',
'Security Currency': 'Currency'
}
)
# Scales holdings by market value
wscf_scaling_factor = wscf_market_value/df_wscf['Market Value AUD'].sum()
df_wscf['Market Value Local'] = wscf_scaling_factor * df_wscf['Market Value Local']
df_wscf['Market Value AUD'] = wscf_scaling_factor * df_wscf['Market Value AUD']
df_wscf['Quantity'] = wscf_scaling_factor * df_wscf['Quantity']
df_wscf['Purchase Price Local'] = df_wscf['Market Value Local'] / df_wscf['Quantity']
df_wscf['Purchase Price AUD'] = df_wscf['Market Value AUD'] / df_wscf['Quantity']
df_wscf['Account Number'] = 'WSCF'
df_wscf['Account Name'] = 'LGS AUSTRALIAN EQUITIES - WSCF'
df_wscf['Date'] = report_date
df_wscf['Asset Type'] = np.nan
# Imports AQR holdings data
df_aqr = pd.read_excel(
pd.ExcelFile(aqr_filepath),
sheet_name='Holdings',
skiprows=[0, 1, 2, 3, 4, 5, 6, 7],
header=0,
usecols=[
'Sedol',
'Isin',
'Investment Description',
'Asset Type',
'Price Local',
'Base Price',
'Quantity',
'MV Local',
'MV Base',
'Ccy'
]
)
# Renames the columns into LGS column names
df_aqr = df_aqr.rename(
columns={
'Sedol': 'SEDOL',
'Isin': 'ISIN',
'Investment Description': 'Security Name',
'Price Local': 'Purchase Price Local',
'Base Price': 'Purchase Price AUD',
'MV Local': 'Market Value Local',
'MV Base': 'Market Value AUD',
'Ccy': 'Currency'
}
)
# Scales holdings by market value
aqr_scaling_factor = aqr_market_value/df_aqr['Market Value AUD'].sum()
df_aqr['Market Value Local'] = aqr_scaling_factor * df_aqr['Market Value Local']
df_aqr['Market Value AUD'] = aqr_scaling_factor * df_aqr['Market Value AUD']
df_aqr['Quantity'] = aqr_scaling_factor * df_aqr['Quantity']
df_aqr['Account Number'] = 'AQR'
df_aqr['Account Name'] = 'LGS INTERNATIONAL EQUITIES - AQR'
df_aqr['Date'] = report_date
# Imports Delaware holdings data
df_delaware = pd.read_excel(
pd.ExcelFile(delaware_filepath),
sheet_name='EM SICAV holdings 7-31-2020',
header=0,
usecols=[
'Security SEDOL',
'Security ISIN',
'Security Description (Short)',
'Position Date',
'Shares/Par',
'Trading Currency',
'Traded Market Value (Local)',
'Traded Market Value (AUD)'
]
)
# Renames the columns into LGS column names
df_delaware = df_delaware.rename(
columns={
'Security SEDOL': 'SEDOL',
'Security ISIN': 'ISIN',
'Security Description (Short)': 'Security Name',
'Position Date': 'Date',
'Shares/Par': 'Quantity',
'Trading Currency': 'Currency',
'Traded Market Value (Local)': 'Market Value Local',
'Traded Market Value (AUD)': 'Market Value AUD'
}
)
# Scales holdings by market value
delaware_scaling_factor = delaware_market_value/df_delaware['Market Value AUD'].sum()
df_delaware['Market Value Local'] = delaware_scaling_factor * df_delaware['Market Value Local']
df_delaware['Market Value AUD'] = delaware_scaling_factor * df_delaware['Market Value AUD']
df_delaware['Quantity'] = delaware_scaling_factor * df_delaware['Quantity']
df_delaware['Purchase Price Local'] = df_delaware['Market Value Local'] / df_delaware['Quantity']
df_delaware['Purchase Price AUD'] = df_delaware['Market Value AUD'] / df_delaware['Quantity']
df_delaware['Account Number'] = 'MACQUARIE'
df_delaware['Account Name'] = 'LGS INTERNATIONAL EQUITIES - MACQUARIE'
df_delaware['Date'] = report_date
# Imports Wellington holdings data
df_wellington = pd.read_excel(
| pd.ExcelFile(wellington_filepath) | pandas.ExcelFile |
import os
import pytz
from collections import namedtuple
from datetime import datetime, timedelta
import requests
from dotenv import load_dotenv
import pandas as pd
from tensorflow.keras.models import load_model
import numpy as np
# Named tuple for aid in the data parse
fields = ['date', 'open', 'close', 'high', 'low', 'vols']
TickerData = namedtuple('TickerData', fields)
def last_close():
est = pytz.timezone('US/Eastern')
utc = pytz.utc
# TIME_FORMAT = '%H:%M:%S'
# DATE_FORMAT = '%Y-%m-%d'
est_time_now = datetime.now(tz=utc).astimezone(est)
est_date = est_time_now.replace(hour=0, minute=0, second=0, microsecond=0)
market_open = est_date + timedelta(hours=9.5)
market_close = est_date + timedelta(hours=16)
if est_time_now > market_open and est_time_now < market_close:
# print('Stock Market Is Open')
last_record_date = est_date + timedelta(days=-1)
else:
# print('Stock Market Is Closed')
if est_time_now < market_open:
last_record_date = est_date + timedelta(days=-1)
else:
last_record_date = est_date
return last_record_date
def get_stock_data(stock_symbol, start_date, end_date):
project_dir = os.getcwd()
env_file = os.path.join(project_dir, '.env')
load_dotenv(dotenv_path=env_file,verbose=True)
TIINGO_API_KEY = os.getenv("TIINGO_API_KEY")
assert TIINGO_API_KEY
"""
Make an REST API call to the tiingo API to get historic stock data
Parameters
----------
stock_symbol : str
US stock market symbol
start_date : str
yyyy-mm-dd formated date that begins time series
end_date : str
yyyy-mm-dd formated date that ends the time series
returns
-------
response : request.response
The response object to be parsed
"""
base_url = f'https://api.tiingo.com/tiingo/daily/{stock_symbol}/prices?'
payload = {
'token':TIINGO_API_KEY,
'startDate':start_date,
'endDate':end_date
}
response = requests.get(base_url, params=payload)
return response
def parse_json(response):
"""
Parameters
----------
response : requests.response object
The response object to be parsed
Returns
-------
records : list
list of named tuples that represent the ticker data
"""
json_response = response.json()
records = []
for json_object in json_response:
d = json_object['date']
o = json_object['open']
c = json_object['close']
h = json_object['high']
l = json_object['low']
v = json_object['volume']
ticker_data = TickerData(d, o, c, h, l, v)
records.append(ticker_data)
return records
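# Usage sketch for the two helpers above (symbol and dates are placeholders;
# a valid TIINGO_API_KEY must be available in the project's .env file):
#
#   resp = get_stock_data('SPY', '2020-01-01', '2020-06-30')
#   records = parse_json(resp)
#   df = pd.DataFrame(records)   # columns: date, open, close, high, low, vols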
def model_path(debug=False):
project_dir = os.getcwd()
models_dir = os.path.join(project_dir,'models')
model_path = os.path.join(models_dir,'lstm_forecast.h5')
if debug:
print(model_path)
try:
assert os.path.exists(model_path)
except AssertionError as e:
print('----'*20)
print('INVALID FILE PATH FOR MODEL ---> {}'.format(model_path))
print('----'*20)
model_path = None
return model_path
def market_predict():
est = pytz.timezone('US/Eastern')
ticker = 'SPY'
end_date = last_close().astimezone(est)
start_date = end_date + timedelta(days=-175)
# print(start_date.strftime(r'%Y-%m-%d'))
# print(end_date.strftime(r'%Y-%m-%d'))
response = get_stock_data(
ticker,
start_date.strftime(r'%Y-%m-%d'),
end_date.strftime(r'%Y-%m-%d'))
records = parse_json(response)
df = pd.DataFrame(records)
# ---------------Fix the date to be UTC equivalent of EST Stock Market Close
utc = pytz.utc
est = pytz.timezone('US/Eastern')
date_format='%Y-%m-%d'
# Convert datestring to datetime tz-naive
df['date'] = pd.to_datetime(df['date'], format=date_format, exact=False).dt.tz_localize(None)
# add 16 hours to tz-naive datetime
df['date'] = df['date'] + | pd.DateOffset(hours=16) | pandas.DateOffset |
import os
import sys
import logging
import inspect
import pandas as pd
import numpy as np
from sklearn.externals import joblib
from drain import util, metrics
from drain.step import Step, Call
class FitPredict(Step):
"""
Step which can fit a scikit-learn estimator and make predictions.
"""
def __init__(self, inputs,
return_estimator=False,
return_feature_importances=True,
return_predictions=True,
prefit=False,
predict_train=False):
"""
Args:
return_estimator: whether to return the fitted estimator object
return_feature_importances: whether to return a DataFrame of feature importances
prefit: whether the estimator input is already fitted
predict_train: whether to make predictions on training set
"""
Step.__init__(self, inputs=inputs, return_estimator=return_estimator,
return_feature_importances=return_feature_importances,
return_predictions=return_predictions, prefit=prefit,
predict_train=predict_train)
def run(self, estimator, X, y=None, train=None, test=None, aux=None, sample_weight=None,
feature_importances=None):
if not self.prefit:
if y is None:
raise ValueError("Need outcome data y for predictions")
if train is not None:
X_train, y_train = X[train], y[train]
else:
X_train, y_train = X, y
y_missing = y_train.isnull()
y_missing_count = y_missing.sum()
if y_missing.sum() > 0:
logging.info('Dropping %s training examples with missing outcomes'
% y_missing_count)
y_train = y_train[~y_missing]
X_train = X_train[~y_missing]
y_train = y_train.astype(bool)
logging.info('Fitting with %s examples, %s features' % X_train.shape)
if 'sample_weight' in inspect.getargspec(estimator.fit).args and\
sample_weight is not None:
logging.info('Using sample weight')
sample_weight = sample_weight.loc[y_train.index]
estimator.fit(X_train, y_train, sample_weight=sample_weight)
else:
estimator.fit(X_train, y_train)
result = {}
if self.return_estimator:
result['estimator'] = estimator
if self.return_feature_importances:
result['feature_importances'] = feature_importance(estimator, X)
if self.return_predictions:
if test is not None and not self.predict_train:
X_test, y_test = X[test], y[test]
else:
X_test, y_test = X, y
logging.info('Predicting %s examples' % len(X_test))
if y_test is not None:
y = | pd.DataFrame({'test': y_test}) | pandas.DataFrame |
"""
Test output formatting for Series/DataFrame, including to_string & reprs
"""
from datetime import datetime
from io import StringIO
import itertools
from operator import methodcaller
import os
from pathlib import Path
import re
from shutil import get_terminal_size
import sys
import textwrap
import dateutil
import numpy as np
import pytest
import pytz
from pandas.compat import (
IS64,
is_platform_windows,
)
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
NaT,
Series,
Timestamp,
date_range,
get_option,
option_context,
read_csv,
reset_option,
set_option,
)
import pandas._testing as tm
import pandas.io.formats.format as fmt
import pandas.io.formats.printing as printing
use_32bit_repr = is_platform_windows() or not IS64
@pytest.fixture(params=["string", "pathlike", "buffer"])
def filepath_or_buffer_id(request):
"""
A fixture yielding test ids for filepath_or_buffer testing.
"""
return request.param
@pytest.fixture
def filepath_or_buffer(filepath_or_buffer_id, tmp_path):
"""
A fixture yielding a string representing a filepath, a path-like object
and a StringIO buffer. Also checks that buffer is not closed.
"""
if filepath_or_buffer_id == "buffer":
buf = StringIO()
yield buf
assert not buf.closed
else:
assert isinstance(tmp_path, Path)
if filepath_or_buffer_id == "pathlike":
yield tmp_path / "foo"
else:
yield str(tmp_path / "foo")
@pytest.fixture
def assert_filepath_or_buffer_equals(
filepath_or_buffer, filepath_or_buffer_id, encoding
):
"""
Assertion helper for checking filepath_or_buffer.
"""
def _assert_filepath_or_buffer_equals(expected):
if filepath_or_buffer_id == "string":
with open(filepath_or_buffer, encoding=encoding) as f:
result = f.read()
elif filepath_or_buffer_id == "pathlike":
result = filepath_or_buffer.read_text(encoding=encoding)
elif filepath_or_buffer_id == "buffer":
result = filepath_or_buffer.getvalue()
assert result == expected
return _assert_filepath_or_buffer_equals
def curpath():
pth, _ = os.path.split(os.path.abspath(__file__))
return pth
def has_info_repr(df):
r = repr(df)
c1 = r.split("\n")[0].startswith("<class")
c2 = r.split("\n")[0].startswith(r"<class") # _repr_html_
return c1 or c2
def has_non_verbose_info_repr(df):
has_info = has_info_repr(df)
r = repr(df)
# 1. <class>
# 2. Index
# 3. Columns
# 4. dtype
# 5. memory usage
# 6. trailing newline
nv = len(r.split("\n")) == 6
return has_info and nv
def has_horizontally_truncated_repr(df):
try: # Check header row
fst_line = np.array(repr(df).splitlines()[0].split())
cand_col = np.where(fst_line == "...")[0][0]
except IndexError:
return False
# Make sure each row has this ... in the same place
r = repr(df)
for ix, l in enumerate(r.splitlines()):
if not r.split()[cand_col] == "...":
return False
return True
def has_vertically_truncated_repr(df):
r = repr(df)
only_dot_row = False
for row in r.splitlines():
if re.match(r"^[\.\ ]+$", row):
only_dot_row = True
return only_dot_row
def has_truncated_repr(df):
return has_horizontally_truncated_repr(df) or has_vertically_truncated_repr(df)
def has_doubly_truncated_repr(df):
return has_horizontally_truncated_repr(df) and has_vertically_truncated_repr(df)
def has_expanded_repr(df):
r = repr(df)
for line in r.split("\n"):
if line.endswith("\\"):
return True
return False
@pytest.mark.filterwarnings("ignore::FutureWarning:.*format")
class TestDataFrameFormatting:
def test_eng_float_formatter(self, float_frame):
df = float_frame
df.loc[5] = 0
fmt.set_eng_float_format()
repr(df)
fmt.set_eng_float_format(use_eng_prefix=True)
repr(df)
fmt.set_eng_float_format(accuracy=0)
repr(df)
tm.reset_display_options()
def test_show_null_counts(self):
df = DataFrame(1, columns=range(10), index=range(10))
df.iloc[1, 1] = np.nan
def check(show_counts, result):
buf = StringIO()
df.info(buf=buf, show_counts=show_counts)
assert ("non-null" in buf.getvalue()) is result
with option_context(
"display.max_info_rows", 20, "display.max_info_columns", 20
):
check(None, True)
check(True, True)
check(False, False)
with option_context("display.max_info_rows", 5, "display.max_info_columns", 5):
check(None, False)
check(True, False)
check(False, False)
# GH37999
with tm.assert_produces_warning(
FutureWarning, match="null_counts is deprecated.+"
):
buf = StringIO()
df.info(buf=buf, null_counts=True)
assert "non-null" in buf.getvalue()
# GH37999
with pytest.raises(ValueError, match=r"null_counts used with show_counts.+"):
df.info(null_counts=True, show_counts=True)
def test_repr_truncation(self):
max_len = 20
with option_context("display.max_colwidth", max_len):
df = DataFrame(
{
"A": np.random.randn(10),
"B": [
tm.rands(np.random.randint(max_len - 1, max_len + 1))
for i in range(10)
],
}
)
r = repr(df)
r = r[r.find("\n") + 1 :]
adj = fmt.get_adjustment()
for line, value in zip(r.split("\n"), df["B"]):
if adj.len(value) + 1 > max_len:
assert "..." in line
else:
assert "..." not in line
with option_context("display.max_colwidth", 999999):
assert "..." not in repr(df)
with option_context("display.max_colwidth", max_len + 2):
assert "..." not in repr(df)
def test_repr_deprecation_negative_int(self):
# TODO(2.0): remove in future version after deprecation cycle
# Non-regression test for:
# https://github.com/pandas-dev/pandas/issues/31532
width = get_option("display.max_colwidth")
with tm.assert_produces_warning(FutureWarning):
set_option("display.max_colwidth", -1)
set_option("display.max_colwidth", width)
def test_repr_chop_threshold(self):
df = DataFrame([[0.1, 0.5], [0.5, -0.1]])
reset_option("display.chop_threshold") # default None
assert repr(df) == " 0 1\n0 0.1 0.5\n1 0.5 -0.1"
with option_context("display.chop_threshold", 0.2):
assert repr(df) == " 0 1\n0 0.0 0.5\n1 0.5 0.0"
with option_context("display.chop_threshold", 0.6):
assert repr(df) == " 0 1\n0 0.0 0.0\n1 0.0 0.0"
with option_context("display.chop_threshold", None):
assert repr(df) == " 0 1\n0 0.1 0.5\n1 0.5 -0.1"
def test_repr_chop_threshold_column_below(self):
# GH 6839: validation case
df = DataFrame([[10, 20, 30, 40], [8e-10, -1e-11, 2e-9, -2e-11]]).T
with option_context("display.chop_threshold", 0):
assert repr(df) == (
" 0 1\n"
"0 10.0 8.000000e-10\n"
"1 20.0 -1.000000e-11\n"
"2 30.0 2.000000e-09\n"
"3 40.0 -2.000000e-11"
)
with option_context("display.chop_threshold", 1e-8):
assert repr(df) == (
" 0 1\n"
"0 10.0 0.000000e+00\n"
"1 20.0 0.000000e+00\n"
"2 30.0 0.000000e+00\n"
"3 40.0 0.000000e+00"
)
with option_context("display.chop_threshold", 5e-11):
assert repr(df) == (
" 0 1\n"
"0 10.0 8.000000e-10\n"
"1 20.0 0.000000e+00\n"
"2 30.0 2.000000e-09\n"
"3 40.0 0.000000e+00"
)
def test_repr_obeys_max_seq_limit(self):
with option_context("display.max_seq_items", 2000):
assert len(printing.pprint_thing(list(range(1000)))) > 1000
with option_context("display.max_seq_items", 5):
assert len(printing.pprint_thing(list(range(1000)))) < 100
with option_context("display.max_seq_items", 1):
assert len(printing.pprint_thing(list(range(1000)))) < 9
def test_repr_set(self):
assert printing.pprint_thing({1}) == "{1}"
def test_repr_is_valid_construction_code(self):
# for the case of Index, where the repr is traditional rather than
# stylized
idx = Index(["a", "b"])
res = eval("pd." + repr(idx))
tm.assert_series_equal(Series(res), Series(idx))
def test_repr_should_return_str(self):
# https://docs.python.org/3/reference/datamodel.html#object.__repr__
# "...The return value must be a string object."
# (str on py2.x, str (unicode) on py3)
data = [8, 5, 3, 5]
index1 = ["\u03c3", "\u03c4", "\u03c5", "\u03c6"]
cols = ["\u03c8"]
df = DataFrame(data, columns=cols, index=index1)
assert type(df.__repr__()) == str # both py2 / 3
def test_repr_no_backslash(self):
with option_context("mode.sim_interactive", True):
df = DataFrame(np.random.randn(10, 4))
assert "\\" not in repr(df)
def test_expand_frame_repr(self):
df_small = DataFrame("hello", index=[0], columns=[0])
df_wide = DataFrame("hello", index=[0], columns=range(10))
df_tall = DataFrame("hello", index=range(30), columns=range(5))
with option_context("mode.sim_interactive", True):
with option_context(
"display.max_columns",
10,
"display.width",
20,
"display.max_rows",
20,
"display.show_dimensions",
True,
):
with option_context("display.expand_frame_repr", True):
assert not has_truncated_repr(df_small)
assert not has_expanded_repr(df_small)
assert not has_truncated_repr(df_wide)
assert has_expanded_repr(df_wide)
assert has_vertically_truncated_repr(df_tall)
assert has_expanded_repr(df_tall)
with option_context("display.expand_frame_repr", False):
assert not has_truncated_repr(df_small)
assert not has_expanded_repr(df_small)
assert not has_horizontally_truncated_repr(df_wide)
assert not has_expanded_repr(df_wide)
assert has_vertically_truncated_repr(df_tall)
assert not has_expanded_repr(df_tall)
def test_repr_non_interactive(self):
# in non interactive mode, there can be no dependency on the
# result of terminal auto size detection
df = DataFrame("hello", index=range(1000), columns=range(5))
with option_context(
"mode.sim_interactive", False, "display.width", 0, "display.max_rows", 5000
):
assert not has_truncated_repr(df)
assert not has_expanded_repr(df)
def test_repr_truncates_terminal_size(self, monkeypatch):
# see gh-21180
terminal_size = (118, 96)
monkeypatch.setattr(
"pandas.io.formats.format.get_terminal_size", lambda: terminal_size
)
index = range(5)
columns = MultiIndex.from_tuples(
[
("This is a long title with > 37 chars.", "cat"),
("This is a loooooonger title with > 43 chars.", "dog"),
]
)
df = DataFrame(1, index=index, columns=columns)
result = repr(df)
h1, h2 = result.split("\n")[:2]
assert "long" in h1
assert "loooooonger" in h1
assert "cat" in h2
assert "dog" in h2
# regular columns
df2 = DataFrame({"A" * 41: [1, 2], "B" * 41: [1, 2]})
result = repr(df2)
assert df2.columns[0] in result.split("\n")[0]
def test_repr_truncates_terminal_size_full(self, monkeypatch):
# GH 22984 ensure entire window is filled
terminal_size = (80, 24)
df = DataFrame(np.random.rand(1, 7))
monkeypatch.setattr(
"pandas.io.formats.format.get_terminal_size", lambda: terminal_size
)
assert "..." not in str(df)
def test_repr_truncation_column_size(self):
# dataframe with last column very wide -> check it is not used to
# determine size of truncation (...) column
df = DataFrame(
{
"a": [108480, 30830],
"b": [12345, 12345],
"c": [12345, 12345],
"d": [12345, 12345],
"e": ["a" * 50] * 2,
}
)
assert "..." in str(df)
assert " ... " not in str(df)
def test_repr_max_columns_max_rows(self):
term_width, term_height = get_terminal_size()
if term_width < 10 or term_height < 10:
pytest.skip(f"terminal size too small, {term_width} x {term_height}")
def mkframe(n):
index = [f"{i:05d}" for i in range(n)]
return DataFrame(0, index, index)
df6 = mkframe(6)
df10 = mkframe(10)
with option_context("mode.sim_interactive", True):
with option_context("display.width", term_width * 2):
with option_context("display.max_rows", 5, "display.max_columns", 5):
assert not has_expanded_repr(mkframe(4))
assert not has_expanded_repr(mkframe(5))
assert not has_expanded_repr(df6)
assert has_doubly_truncated_repr(df6)
with option_context("display.max_rows", 20, "display.max_columns", 10):
# Out off max_columns boundary, but no extending
# since not exceeding width
assert not has_expanded_repr(df6)
assert not has_truncated_repr(df6)
with option_context("display.max_rows", 9, "display.max_columns", 10):
# out vertical bounds can not result in expanded repr
assert not has_expanded_repr(df10)
assert has_vertically_truncated_repr(df10)
# width=None in terminal, auto detection
with option_context(
"display.max_columns",
100,
"display.max_rows",
term_width * 20,
"display.width",
None,
):
df = mkframe((term_width // 7) - 2)
assert not has_expanded_repr(df)
df = mkframe((term_width // 7) + 2)
printing.pprint_thing(df._repr_fits_horizontal_())
assert has_expanded_repr(df)
def test_repr_min_rows(self):
df = DataFrame({"a": range(20)})
# default setting no truncation even if above min_rows
assert ".." not in repr(df)
assert ".." not in df._repr_html_()
df = DataFrame({"a": range(61)})
# default of max_rows 60 triggers truncation if above
assert ".." in repr(df)
assert ".." in df._repr_html_()
with option_context("display.max_rows", 10, "display.min_rows", 4):
# truncated after first two rows
assert ".." in repr(df)
assert "2 " not in repr(df)
assert "..." in df._repr_html_()
assert "<td>2</td>" not in df._repr_html_()
with option_context("display.max_rows", 12, "display.min_rows", None):
# when set to None, follow value of max_rows
assert "5 5" in repr(df)
assert "<td>5</td>" in df._repr_html_()
with option_context("display.max_rows", 10, "display.min_rows", 12):
# when set value higher as max_rows, use the minimum
assert "5 5" not in repr(df)
assert "<td>5</td>" not in df._repr_html_()
with option_context("display.max_rows", None, "display.min_rows", 12):
# max_rows of None -> never truncate
assert ".." not in repr(df)
assert ".." not in df._repr_html_()
def test_str_max_colwidth(self):
# GH 7856
df = DataFrame(
[
{
"a": "foo",
"b": "bar",
"c": "uncomfortably long line with lots of stuff",
"d": 1,
},
{"a": "foo", "b": "bar", "c": "stuff", "d": 1},
]
)
df.set_index(["a", "b", "c"])
assert str(df) == (
" a b c d\n"
"0 foo bar uncomfortably long line with lots of stuff 1\n"
"1 foo bar stuff 1"
)
with option_context("max_colwidth", 20):
assert str(df) == (
" a b c d\n"
"0 foo bar uncomfortably lo... 1\n"
"1 foo bar stuff 1"
)
def test_auto_detect(self):
term_width, term_height = get_terminal_size()
fac = 1.05 # Arbitrary large factor to exceed term width
cols = range(int(term_width * fac))
index = range(10)
df = DataFrame(index=index, columns=cols)
with | option_context("mode.sim_interactive", True) | pandas.option_context |
import pandas as pd
import numpy as np
import json
from graphviz import Digraph
import matplotlib.pyplot as plt
class Report:
def __init__(self, scenario, file_id):
self.scenario = scenario
self.file_id = file_id
def get_path_to_hash(self):
scenario = self.scenario
return '/home/jovyan/test_data/'+scenario+'/hd2/status/hash.json'
def get_hash_data(self):
path_to_hash = self.get_path_to_hash()
with open(path_to_hash) as json_file:
data = json.load(json_file)
return data
def get_files(self):
data = self.get_hash_data()
files = [elem['file_name'] for elem in data]
return files
def get_path_to_report(self):
scenario = self.scenario
file_id = self.file_id
data = self.get_hash_data()
folder_hash = data[file_id]['original_hash']
return '/home/jovyan/test_data/'+scenario+'/hd2/data/'+folder_hash+'/report.json'
def get_dict_from_report(self):
path_to_report = self.get_path_to_report()
df0 = pd.read_json(path_to_report)
df0 = df0.reset_index()
return df0.iloc[3]['gw:GWallInfo']
def get_df_from_report(self):
info = self.get_dict_from_report()
return pd.DataFrame.from_dict(info)
def print_document_summary(self):
info = self.get_dict_from_report()
d = info['gw:DocumentSummary']
for key in d:
d[key] = [d[key]]
document_summary = pd.DataFrame.from_dict(d)
document_summary.rename(columns={'gw:TotalSizeInBytes':'Total Size In Bytes',
'gw:FileType':'File Type',
'gw:Version':'Version'}, inplace=True)
print('Total Size In Bytes :', document_summary['Total Size In Bytes'].iloc[0])
print('File Type :', document_summary['File Type'].iloc[0])
print('Version :', document_summary['Version'].iloc[0])
def print_extracted_items(self):
info = self.get_dict_from_report()
d = info['gw:ExtractedItems']
for key in d:
d[key] = [d[key]]
extracted_items = pd.DataFrame.from_dict(d)
extracted_items.rename(columns={'@itemCount':'Item Count'}, inplace=True)
print("Item Count :", extracted_items['Item Count'].iloc[0])
def content_management_policy_df(self):
info = self.get_dict_from_report()
d = info['gw:ContentManagementPolicy']['gw:Camera']
df0 = pd.DataFrame.from_dict(d)
data = info['gw:ContentManagementPolicy']['gw:Camera'][0]['gw:ContentSwitch']
if len(data) == 2:
for key in data:
data[key] = [data[key]]
df = pd.DataFrame.from_dict(data)
df['@cameraName'] = df0.iloc[0]['@cameraName']
df = df[['@cameraName', 'gw:ContentName', 'gw:ContentValue']]
for i in range(1, len(df0)):
data = info['gw:ContentManagementPolicy']['gw:Camera'][i]['gw:ContentSwitch']
if len(data) == 2:
for key in data:
data[key] = [data[key]]
df1 = pd.DataFrame.from_dict(data)
df1['@cameraName'] = df0.iloc[i]['@cameraName']
df1 = df1[['@cameraName', 'gw:ContentName', 'gw:ContentValue']]
df = pd.concat([df, df1], ignore_index=True)
df.rename(columns={'@cameraName':'Camera Name',
'gw:ContentName':'Content Name',
'gw:ContentValue':'Content Value'}, inplace=True)
return df
def camera_graph(self, camera_value):
content_management_policy = self.content_management_policy_df()
gra = Digraph()
# root node
elem = camera_value
gra.node(elem, shape='box')
df0 = content_management_policy[content_management_policy['Camera Name']==elem]
content_name = list(df0['Content Name'].unique())
with gra.subgraph() as i:
i.attr(rank='same')
for elem2 in content_name:
i.node(elem2, shape='box')
for elem2 in content_name:
df00 = df0[df0['Content Name']==elem2]
k = int(df00.index[0])
text = df00.iloc[0]['Content Value']
gra.node(str(k), text, shape='box')
gra.edge(elem2, str(k))
for elem3 in df0['Content Name']:
gra.edge(elem, elem3)
return gra
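    # Usage sketch (scenario name, file id and camera name below are
    # placeholders, not values taken from the test data):
    #
    #   report = Report('scenario_1', 0)
    #   report.print_document_summary()
    #   gra = report.camera_graph('pdfCamera')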
def get_num_of_groups(self, text=False):
info = self.get_dict_from_report()
num_groups = info['gw:ContentGroups']['@groupCount']
if text:
print("There are " + num_groups + " groups")
else:
return num_groups
def content_groups_df(self):
info = self.get_dict_from_report()
d = info['gw:ContentGroups']['gw:ContentGroup'][0]['gw:ContentItems']['gw:ContentItem']
df = pd.DataFrame.from_dict(d)
df['gw:BriefDescription'] = info['gw:ContentGroups']['gw:ContentGroup'][0]['gw:BriefDescription']
df = df[['gw:BriefDescription', 'gw:TechnicalDescription', 'gw:InstanceCount', 'gw:TotalSizeInBytes', 'gw:AverageSizeInBytes', 'gw:MinSizeInBytes', 'gw:MaxSizeInBytes']]
num_groups = self.get_num_of_groups()
for i in range(1, int(num_groups)):
            d = info['gw:ContentGroups']['gw:ContentGroup'][i]['gw:ContentItems']['gw:ContentItem']
            df1 = pd.DataFrame.from_dict(d)
df1['gw:BriefDescription'] = info['gw:ContentGroups']['gw:ContentGroup'][i]['gw:BriefDescription']
df1 = df1[['gw:BriefDescription', 'gw:TechnicalDescription', 'gw:InstanceCount', 'gw:TotalSizeInBytes', 'gw:AverageSizeInBytes', 'gw:MinSizeInBytes', 'gw:MaxSizeInBytes']]
df = | pd.concat([df, df1], ignore_index=True) | pandas.concat |
""" Tests for attmap equality comparison """
import copy
import numpy as np
from pandas import DataFrame as DF, Series
import pytest
from attmap import AttMap, OrdAttMap, PathExAttMap, AttMapEcho
from .conftest import ALL_ATTMAPS
from .helpers import get_att_map
__author__ = "<NAME>"
__email__ = "<EMAIL>"
@pytest.fixture(scope="function")
def basic_data():
""" Provide a test case with a couple of key-value pairs to work with. """
return {"a": 1, "b": 2}
@pytest.mark.parametrize("attmap_type", ALL_ATTMAPS)
@pytest.mark.parametrize(["s1_data", "s2_data"], [
({"c": 3}, {"d": 4}), ({}, {"c": 3}), ({"d": 4}, {})])
def test_series_labels_mismatch_is_not_equal(
basic_data, s1_data, s2_data, attmap_type):
""" Maps with differently-labeled Series as values cannot be equal. """
d1 = copy.copy(basic_data)
d1.update(s1_data)
d2 = copy.copy(basic_data)
d2.update(s2_data)
assert list(d1.keys()) != list(d2.keys())
key = "s"
m1 = get_att_map(attmap_type, {key: Series(d1)})
m2 = get_att_map(attmap_type, {key: Series(d2)})
assert m1 != m2
@pytest.mark.parametrize("attmap_type", ALL_ATTMAPS)
@pytest.mark.parametrize(["obj1", "obj2", "expected"], [
(np.array([1, 2, 3]), np.array([1, 2, 4]), False),
(np.array(["a", "b", "c"]), np.array(["a", "b", "c"]), True),
( | Series({"x": 0, "y": 0}) | pandas.Series |
import datetime
import pandas as pd
from pandas import DataFrame, Series
from pandas.api.extensions import ExtensionArray, ExtensionDtype
from pandas.api.extensions import register_extension_dtype
from qapandas.base import QAPandasBase
from enum import Enum
import numpy as np
class QACode(Enum):
orig = 0
auto = 1
manu = 2
gapf = 3
class QADtype(ExtensionDtype):
type = QACode
name = "qacode"
na_value = np.nan
@classmethod
def construct_from_string(cls, string):
if string == cls.name:
return cls()
else:
raise TypeError(
"Cannot construct a '{}' from '{}'".format(cls.__name__, string)
)
@classmethod
def construct_array_type(cls):
return QAArray
| register_extension_dtype(QADtype) | pandas.api.extensions.register_extension_dtype |
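# Usage sketch (assumes the QAArray ExtensionArray returned by
# construct_array_type() is defined further below in this module):
#
#   s = pd.Series([QACode.orig, QACode.gapf], dtype="qacode")
#   s.dtype   # resolved to QADtype() through construct_from_string("qacode")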
from typing import Callable, Iterator, List, Optional, Tuple, Union, Any, Iterable
from scipy.linalg import solve as spsolve, norm
import warnings
from numpy import dot, einsum, log, exp, zeros, arange, multiply, ndarray
import numpy as np
import pandas as pd
from autograd import elementwise_grad
# from lifelines.utils import StepSizer
import time
from autograd import numpy as anp
from lifelines.utils import concordance_index
def _get_efron_values_single(
X: pd.DataFrame,
T: pd.Series,
E: pd.Series,
weights: pd.Series,
entries: None,
beta: np.ndarray
) -> Tuple[np.ndarray, np.ndarray, float]:
"""
Calculates the first and second order vector differentials, with respect to beta.
Note that X, T, E are assumed to be sorted on T!
A good explanation for Efron. Consider three of five subjects who fail at the time.
As it is not known a priori that who is the first to fail, so one-third of
(φ1 + φ2 + φ3) is adjusted from sum_j^{5} φj after one fails. Similarly two-third
of (φ1 + φ2 + φ3) is adjusted after first two individuals fail, etc.
From https://cran.r-project.org/web/packages/survival/survival.pdf:
"Setting all weights to 2 for instance will give the same coefficient estimate but halve the variance. When
the Efron approximation for ties (default) is employed replication of the data will not give exactly the same coefficients as the
weights option, and in this case the weighted fit is arguably the correct one."
Parameters
----------
X: array
(n,d) numpy array of observations.
T: array
(n) numpy array representing observed durations.
E: array
(n) numpy array representing death events.
weights: array
(n) an array representing weights per observation.
beta: array
(1, d) numpy array of coefficients.
Returns
-------
hessian:
(d, d) numpy array,
gradient:
(1, d) numpy array
log_likelihood: float
"""
X = X.values
T = T.values
E = E.values
weights = weights.values
n, d = X.shape # n: samples; d: variables
hessian = zeros((d, d))
gradient = zeros((d,))
log_lik = 0
# Init risk and tie sums to zero
x_death_sum = zeros((d,))
risk_phi, tie_phi = 0, 0
risk_phi_x, tie_phi_x = zeros((d,)), zeros((d,))
risk_phi_x_x, tie_phi_x_x = zeros((d, d)), zeros((d, d))
# Init number of ties and weights
weight_count = 0.0
tied_death_counts = 0
scores = weights * exp(dot(X, beta))
phi_x_is = scores[:, None] * X
phi_x_x_i = np.empty((d, d))
# Iterate backwards to utilize recursive relationship
for i in range(n - 1, -1, -1): # i = n-1, n-2, n-3, ..., 3, 2, 1, 0
# Doing it like this to preserve shape
ti = T[i]
ei = E[i]
xi = X[i]
w = weights[i]
# Calculate phi values
phi_i = scores[i]
phi_x_i = phi_x_is[i]
# https://stackoverflow.com/a/51481295/1895939
phi_x_x_i = multiply.outer(xi, phi_x_i)
# Calculate sums of Risk set
risk_phi = risk_phi + phi_i
risk_phi_x = risk_phi_x + phi_x_i
risk_phi_x_x = risk_phi_x_x + phi_x_x_i
# Calculate sums of Ties, if this is an event
if ei:
x_death_sum = x_death_sum + w * xi
tie_phi = tie_phi + phi_i
tie_phi_x = tie_phi_x + phi_x_i
tie_phi_x_x = tie_phi_x_x + phi_x_x_i
# Keep track of count
tied_death_counts += 1
weight_count += w
if i > 0 and T[i - 1] == ti:
# There are more ties/members of the risk set
continue
elif tied_death_counts == 0:
# Only censored with current time, move on
continue
# There was at least one event and no more ties remain. Time to sum.
# This code is near identical to the _batch algorithm below. In fact, see _batch for comments.
weighted_average = weight_count / tied_death_counts
if tied_death_counts > 1:
increasing_proportion = arange(tied_death_counts) / tied_death_counts
denom = 1.0 / (risk_phi - increasing_proportion * tie_phi)
numer = risk_phi_x - multiply.outer(increasing_proportion, tie_phi_x)
a1 = einsum("ab,i->ab", risk_phi_x_x, denom) - einsum("ab,i->ab", tie_phi_x_x, increasing_proportion * denom)
else:
denom = 1.0 / np.array([risk_phi])
numer = risk_phi_x
a1 = risk_phi_x_x * denom
summand = numer * denom[:, None]
a2 = summand.T.dot(summand)
gradient = gradient + x_death_sum - weighted_average * summand.sum(0)
log_lik = log_lik + dot(x_death_sum, beta) + weighted_average * log(denom).sum()
hessian = hessian + weighted_average * (a2 - a1)
# reset tie values
tied_death_counts = 0
weight_count = 0.0
x_death_sum = zeros((d,))
tie_phi = 0
tie_phi_x = zeros((d,))
tie_phi_x_x = zeros((d, d))
return hessian, gradient, log_lik
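# Minimal usage sketch for the Efron routine above (synthetic, pre-sorted
# data; shapes, column names and values are illustrative only):
#
#   X = pd.DataFrame({'age': [1., 2., 0., 3., 1.], 'dose': [0., 1., 1., 0., 2.]})
#   T = pd.Series([1., 2., 2., 3., 4.])    # durations, sorted ascending (ties allowed)
#   E = pd.Series([1, 1, 0, 1, 1])         # 1 = event observed, 0 = censored
#   w = pd.Series(np.ones(5))
#   hessian, gradient, log_lik = _get_efron_values_single(X, T, E, w, None, np.zeros(2))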
def newton_rhapson_for_efron_model(
X: np.ndarray,
T: np.ndarray,
E: np.ndarray,
weights: Optional[pd.Series] = None,
entries: Optional[pd.Series] = None,
initial_point: Optional[np.ndarray] = None,
step_size: Optional[float] = None,
l1_ratio: Optional[float] = 1,
penalizer: Optional[float] = None,
precision: float = 1e-07,
show_progress: bool = False,
max_steps: int = 500
): # pylint: disable=too-many-statements,too-many-branches
"""
Newton Rhaphson algorithm for fitting CPH model.
Note
----
The data is assumed to be sorted on T!
Parameters
----------
X: (n,d) Pandas DataFrame of observations.
T: (n) Pandas Series representing observed durations.
E: (n) Pandas Series representing death events.
weights: (n) an iterable representing weights per observation.
initial_point: (d,) numpy array of initial starting point for
NR algorithm. Default 0.
step_size: float, optional
> 0.001 to determine a starting step size in NR algorithm.
precision: float, optional
the convergence halts if the norm of delta between
successive positions is less than epsilon.
show_progress: bool, optional
since the fitter is iterative, show convergence
diagnostics.
max_steps: int, optional
the maximum number of iterations of the Newton-Rhaphson algorithm.
Returns
-------
beta: (1,d) numpy array.
"""
CONVERGENCE_DOCS = "Please see the following tips in the lifelines documentation: https://lifelines.readthedocs.io/en/latest/Examples.html#problems-with-convergence-in-the-cox-proportional-hazard-model"
n, d = X.shape # n: samples, d: variables
idx = sorted(range(len(T)), key=T.__getitem__) # order_ascending
ridx = sorted(range(len(T)), key=idx.__getitem__)
X = X[idx,:]
T = T[idx]
E = E[idx]
X = pd.DataFrame(X)
T = | pd.Series(T) | pandas.Series |
# -*- coding: utf-8 -*-
'''
Utility functions for time-series processing
TODO
----
Refactor into a class to simplify passing arguments between function calls
'''
import numpy as np
import pandas as pd
from dramkit.gentools import con_count, isnull
from dramkit.logtools.utils_logger import logger_show
#%%
def fillna_ma(series, ma=None, ma_min=1):
'''
    | Fill missing values in series with a trailing moving average of window ma
    | ma sets the number of periods averaged when filling; ma_min sets the minimum number of periods
    | If ma is None, the window is determined from the longest run of consecutive missing records
'''
if series.name is None:
series.name = 'series'
col = series.name
df = pd.DataFrame(series)
if isnull(ma):
tmp = con_count(series, lambda x: True if isnull(x) else False)
ma = 2 * tmp.max()
ma = max(ma, ma_min*2)
df[col+'_ma'] = df[col].rolling(ma, ma_min).mean()
df[col] = df[[col, col+'_ma']].apply(lambda x:
x[col] if not isnull(x[col]) else \
(x[col+'_ma'] if not isnull(x[col+'_ma']) else x[col]), axis=1)
return df[col]
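# Example (hypothetical series with two consecutive gaps; with ma=None the
# window is inferred from the longest missing run):
#
#   s = pd.Series([1.0, 2.0, np.nan, np.nan, 5.0, 6.0], name='x')
#   fillna_ma(s)   # both gaps are filled with the trailing moving-average value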
#%%
def get_directional_accuracy_1step(y_true, y_pred):
'''
    | Directional accuracy for one-step-ahead forecasts
    | y_true and y_pred are pd.Series
'''
df = pd.DataFrame({'y_true': y_true, 'y_pred': y_pred})
df['y_true_last'] = df['y_true'].shift(1)
df['d_true'] = df['y_true'] - df['y_true_last']
df['d_true'] = df['d_true'].apply(lambda x: 1 if x > 0 \
else (-1 if x < 0 else 0))
df['d_pred'] = df['y_pred'] - df['y_true_last']
df['d_pred'] = df['d_pred'].apply(lambda x: 1 if x > 0 \
else (-1 if x < 0 else 0))
return (df['d_true'] == df['d_pred']).sum() / df.shape[0]
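# Worked example (hypothetical values):
#
#   y_true = pd.Series([1.0, 2.0, 1.0, 3.0])
#   y_pred = pd.Series([0.0, 1.5, 2.5, 2.0])
#   get_directional_accuracy_1step(y_true, y_pred)   # -> 0.75
#
# Row 0 counts as a trivial match (both directions are coded 0 after the shift);
# rows 1 and 3 match while row 2 misses, giving 3 of 4 correct.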
#%%
# Case: 2-D X, 1-D ycol, one-step-ahead forecasting
def genXy_X2d_y1d1step(df, ycol, Xcols_series_lag=None, Xcols_other=None,
gap=0):
'''
    Construct the input X and output y for time-series forecasting; applies to the 2-D X, 1-D ycol, one-step-ahead case
TODO
----
    Check and confirm the behaviour when gap is -1
Parameters
----------
df : pd.DataFrame
        Dataset containing the target (dependent) column and all feature (independent) columns
ycol : str
        Name of the target (dependent variable) series
Xcols_series_lag : list, None
        | Sequential feature column names and their lag lengths, e.g.:
        | [(xcol1, lag1), (xcol2, lag2), ...]
        | If None, defaults to [(ycol, 3)]
Xcols_other : list, None
        List of non-sequential feature column names
.. note::
            Sequential features are treated as not available for the same period as the predicted target, while non-sequential features are treated as available for that period
gap : int
        Number of periods to jump ahead (e.g. use data up to today to predict the value gap days later)
Returns
-------
X : np.array
        2-D np.array with one row per sample and one column per feature
y : np.array
        1-D np.array whose length equals the number of samples
'''
y_series = np.array(df[ycol])
if Xcols_series_lag is None:
Xcols_series_lag = [(ycol, 3)]
if Xcols_other is not None:
X_other = np.array(df[Xcols_other])
X, y =[], []
y_lag = max(Xcols_series_lag, key=lambda x: x[1])[1]
for k in range(len(y_series)-y_lag-gap):
X_part1 = []
for Xcol_series, lag in Xcols_series_lag:
x_tmp = np.array(df[Xcol_series])[k+y_lag-lag: k+y_lag]
X_part1.append(x_tmp.reshape(1, -1))
X_part1 = np.concatenate(X_part1, axis=1)
if Xcols_other is not None:
X_part2 = X_other[k+y_lag+gap].reshape(1, -1)
X.append(np.concatenate((X_part1, X_part2), axis=1))
else:
X.append(X_part1)
y.append(y_series[k+y_lag+gap])
X, y = np.concatenate(X, axis=0), np.array(y)
return X, y
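# Worked example (hypothetical data): with df = pd.DataFrame({'y': [1, 2, 3, 4, 5, 6]}),
# Xcols_series_lag=[('y', 2)], Xcols_other=None and gap=0, each sample uses the
# two previous values of y to predict the next one:
#
#   X, y = genXy_X2d_y1d1step(df, 'y', [('y', 2)], None, gap=0)
#   # X == [[1, 2], [2, 3], [3, 4], [4, 5]],  y == [3, 4, 5, 6]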
def predict_X2d_y1d1step(model, func_pred, df_pre, ycol, Xcols_series_lag,
Xcols_other, gap, **kwargs):
'''
    | Build samples from df_pre using the trained model and predict (non-rolling prediction)
    | Applies to the 2-D X, 1-D ycol, one-step-ahead case
Parameters
----------
model :
        Trained prediction model
func_pred : function
        func_pred is the prediction function; it takes the required arguments model and X plus optional keyword arguments **kwargs
df_pre : pd.DataFrame
        Data used for prediction, where ``ycol`` is the column to be predicted
Xcols_series_lag, Xcols_other, gap :
        Same meaning as the corresponding parameters of :func:`genXy_X2d_y1d1step`
    :returns: `pd.DataFrame` - returns a pd.DataFrame containing the original data plus the prediction column ycol+'_pre'
'''
df_pre = df_pre.copy()
df_pre[ycol+'_pre'] = np.nan
X, _ = genXy_X2d_y1d1step(df_pre, ycol, Xcols_series_lag=Xcols_series_lag,
Xcols_other=Xcols_other, gap=gap)
y_pre = func_pred(model, X, **kwargs)
df_pre.loc[df_pre.index[-len(y_pre):], ycol+'_pre'] = y_pre
return df_pre
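# Hedged sketch of a func_pred argument: it only needs to map (model, X) to
# predictions, so a scikit-learn style regressor could be wrapped like this
# (the model type is an assumption, not prescribed by this module):
def _demo_func_pred(model, X, **kwargs):
    return model.predict(X)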
def valid_test_predict_X2d_y1d1step(model, func_pred, df_train, df_valid,
df_test, ycol, Xcols_series_lag,
Xcols_other, gap, **kwargs):
'''
    | Predict on the validation and test sets of a time series with a trained model (non-rolling prediction, i.e. earlier predictions are not fed back as inputs)
    | For 2-D X and a 1-D, one-step-ahead target ycol

    Parameters
    ----------
    model, func_pred, ycol, Xcols_series_lag, Xcols_other, gap, **kwargs :
        same meaning as the corresponding parameters of :func:`predict_X2d_y1d1step`
    df_train : pd.DataFrame
        data used for training
    df_valid : pd.DataFrame
        validation-set data to be predicted
    df_test : pd.DataFrame
        test-set data to be predicted

    Note
    ----
    | df_valid and df_test must have the same columns as df_train
    | df_valid and df_test may each be None or a pd.DataFrame
    | df_train, df_valid and df_test must form a contiguous time series
    | when df_test is not empty, the ycol column of df_valid must contain true values

    Returns
    -------
    valid_pre : pd.DataFrame
        validation-set predictions, including the ycol+'_pre' column
    test_pre : pd.DataFrame
        test-set predictions, including the ycol+'_pre' column
'''
def _is_effect(df):
if df is None or df.shape[0] == 0:
return False
elif df.shape[0] > 0:
return True
if not _is_effect(df_valid) and not _is_effect(df_test):
return None, None
y_lag = max(Xcols_series_lag, key=lambda x: x[1])[1]
    # check whether df_train (and df_valid) contain enough samples
if _is_effect(df_valid) and df_train.shape[0] < y_lag+gap:
        raise ValueError(
            'Given the input structure, the training data df_train must contain at least {} records!'.format(y_lag+gap))
if not _is_effect(df_valid) and _is_effect(df_test) and \
df_train.shape[0] < y_lag+gap:
        raise ValueError(
            'Given the input structure, the training data df_train must contain at least {} records!'.format(y_lag+gap))
    # build the df used for validation-set prediction
if _is_effect(df_valid):
valid_pre = pd.concat((df_train.iloc[-y_lag-gap:, :], df_valid),
axis=0).reindex(columns=df_valid.columns)
else:
valid_pre = None
    # build the df used for test-set prediction
if _is_effect(df_test) and _is_effect(df_valid):
tmp = pd.concat((df_train, df_valid), axis=0)
test_pre = | pd.concat((tmp.iloc[-y_lag-gap:, :], df_test), axis=0) | pandas.concat |
# Copyright 2019 Nokia
# Licensed under the BSD 3 Clause Clear license
# SPDX-License-Identifier: BSD-3-Clause-Clear
import pandas as pd
import numpy as np
from datetime import datetime
import math
# increments = 0
# search_range = 0
# P7_NUM = 0
# current_date = 0
# qq_plot_start = 5
# qq_plot_end = 100
# qq_plot_increment = 5
# qq_plot_limit = 0.3
def run_edpm(feature_data, defect_data, P7, inc, date, srange, qq_start, qq_end, qq_increment, qq_limit):
global P7_NUM, increments, current_date, search_range, qq_plot_start, qq_plot_end, qq_plot_increment, qq_plot_limit
#feature_data.to_csv("feature_data.csv")
#defect_data.to_csv("defect_data.csv")
P7_NUM = P7
increments = inc
current_date = date
search_range = srange
qq_plot_start = qq_start
qq_plot_end = qq_end
qq_plot_increment = qq_increment
qq_plot_limit = qq_limit
# print('P7_NUM =', P7,
# 'increments =', inc,
# 'current_date =', date,
# 'search_range =' ,srange)
defects_start_date = defect_data['Date_Ending'].values[0]
features_start_date = feature_data['Month_Ending'].values[0]
defects_end_date = defect_data['Date_Ending'].values[-1]
features_end_date = feature_data['Month_Ending'].values[-1]
defect_data['X'] = 1+(defect_data['Date_Ending'] - defects_start_date).dt.days.astype(int)
feature_data['X'] = 1+(feature_data['Month_Ending'] - features_start_date).dt.days.astype(int)
feature_data.reset_index(inplace=True)
feature_new_days = list(range(feature_data['X'].values[0], feature_data['X'].values[-1], increments))
defect_new_days = list(range(defect_data['X'].values[0], defect_data['X'].values[-1], increments))
gap = int(((defects_start_date - features_start_date).astype('timedelta64[D]').astype(int))/increments)
#print(feature_data)
#print(defect_data)
#exit()
feature_new_data = perform_interpolation(feature_new_days, feature_data['X'].values, feature_data['Sub-feature_Arrival'].values)
defect_new_data = perform_interpolation(defect_new_days, defect_data['X'].values, defect_data['Created'].values)
resolved_new_data = perform_interpolation(defect_new_days, defect_data['X'].values, defect_data['Resolved'].values)
#print(feature_new_days)
#print(final_index)
#print("XXXXXXX")
#exit()
final_data = get_data(feature_new_days, defect_new_days, feature_new_data, defect_new_data, resolved_new_data)
final_data['WEEK'] = final_data.index.values + 1
#print(final_data)
#final_data.to_csv("data_new.csv")
#print("m: ", gap)
#print("p7: ", P7_NUM)
#print("increments: ", increments)
a, b, c = create_qq_plot(final_data['FEATURES'].values, final_data['ARRIVALS'].values)
final_data['WEEK_(X_NEW)'] = a + b * final_data['WEEK']
final_data['ARRIVALS_(Y_NEW)'] = c * final_data['FEATURES']
ssq = get_ssq(final_data['ARRIVALS'].values, final_data['WEEK_(X_NEW)'].values, final_data['ARRIVALS_(Y_NEW)'].values)
#print("SSQ:", ssq)
#print(final_data)
#exit()
N_p = current_date/(P7_NUM)
F_p = int(N_p*len(final_data['FEATURES'].dropna().values))
start_week = max(0, (F_p - search_range))
end_week = min((F_p + search_range), (len(final_data['FEATURES'].dropna().values)))
evaluation = []
for index in range(start_week, end_week):
feature_data = final_data['FEATURES'].values[:index]
arrivals = final_data['ARRIVALS'].values
week_data = np.asarray([i+1 for i in range(len(feature_data))])
#print(week_data)
a, b, c = create_qq_plot(feature_data, arrivals)
x_new = a + b * week_data
y_new = c * feature_data
#print("x_new: ", len(x_new))
#print("y_new: ", len(y_new))
#print("week_data: ", len(week_data))
#print("arrivals: ", len(arrivals))
#exit()
ssq = get_ssq(arrivals, x_new, y_new)
evaluation.append([index, a, b, c, ssq])
df = pd.DataFrame(evaluation, columns=['index', 'intercept', 'slope', 'ratio', 'ssq'])
#df.to_csv('SSQ_CHECK.csv')
best_index = df.loc[df['ssq'].idxmin()]
best_index['gap'] = gap
best_index = best_index.round(2)
result = best_index.to_dict()
result['defects_start_date'] = pd.Timestamp(defects_start_date)
result['features_start_date'] = pd.Timestamp(features_start_date)
#best_index['defects_start_date'] = defects_start_date
#best_index['features_start_date'] = features_start_date
#print(final_data)
#print(current_date)
#time_from_P7 = P7_NUM - current_date
#print(time_from_P7)
#print(final_data['FEATURES'].values)
feature_data = final_data['FEATURES'].dropna().values[int(best_index['index']):]
#predict_range = np.asarray([i+1 for i in range(current_date, P7_NUM)])
#print(len(feature_data))
#print(len(predict_range))
#exit()
#print(final_data)
#print(best_index)
#x_new = best_index['intercept'] + best_index['slope'] * predict_range
#print(x_new)
#exit()
#required_range = [i for i in predict_range if i > np.min(x_new) and i < np.max(x_new)]
#print(required_range)
y_new = best_index['ratio'] * feature_data
x_new = [current_date+i for i in range(len(y_new))]
#print(current_date)
#print(feature_data)
#print(y_new)
#y_new = perform_interpolation(required_range, x_new, y_new)
#x_new = required_range
df = pd.DataFrame({'y_new': y_new, 'x_new': x_new})
#print(df)
#exit()
final_data = final_data.merge(df, left_on='WEEK', right_on='x_new', how='outer')
#print(final_data)
#print(result)
#final_data.to_csv("FINAl_DATA.csv")
#print(len(final_data))
#print(len(pd.date_range(start=defects_start_date, periods=len(df), freq=str(increments)+'D')))
final_data['defect_dates'] = pd.date_range(start=defects_start_date, periods=len(final_data), freq=str(increments)+'D')
final_data['feature_dates'] = pd.date_range(start=features_start_date, periods=len(final_data), freq=str(increments)+'D')
result['dates'] = list(final_data['defect_dates'].append(final_data['feature_dates']).sort_values().drop_duplicates().astype(str).values)
final_data['defect_dates'] = final_data['defect_dates'].astype(str)
final_data['feature_dates'] = final_data['feature_dates'].astype(str)
#print(final_data)
#exit()
#exit()
#result['dates'] = list(set(list(final_data['defect_dates']) + list(final_data['feature_dates'])))
result['predictions'] = final_data[['defect_dates', 'y_new']].rename(columns={'defect_dates': 'date', 'y_new':'value'}).dropna().to_dict(orient='records')
result['features'] = final_data[['feature_dates', 'FEATURES']].rename(columns={'feature_dates': 'date', 'FEATURES':'value'}).dropna().to_dict(orient='records')
result['actual'] = final_data[['defect_dates', 'ARRIVALS']].rename(columns={'defect_dates': 'date', 'ARRIVALS':'value'}).dropna().to_dict(orient='records')
#print(features)
#exit()
#print(final_data)
#print(best_index)
#print(defects_start_date)
#print(features_start_date)
#exit()
#p7_week = P7_NUM
#P7_Prediction = perform_interpolation([p7_week], x_new, y_new)[0]
#print(P7_Prediction)
return result
#print(final_data)
#final_data.to_csv("FINAl_DATA.csv")
def get_ssq(arrivals, x_new, y_new):
df1 = pd.DataFrame({'WEEK':[i+1 for i in range(len(arrivals))], 'ARRIVALS':arrivals})
min_week = int(math.ceil(np.min(x_new)))
max_week = int(math.floor(np.max(x_new)))
week_range = [i for i in range(min_week, max_week+1)]
#x_new = x_new[:len()]
#print("k: ", len(week_range))
#print(week_range)
#print(x_new)
#print("l: ", len(x_new))
#print("m: ", len(y_new))
new_values = perform_interpolation(week_range, x_new, y_new, roundoff=False)
#print(new_values)
#print(len(new_values))
df2 = pd.DataFrame({'D2':week_range, 'ARRIVALS_(Y_NEW)':new_values})
df = df1.merge(df2, how='outer', left_on='WEEK', right_on='D2')
df['ERROR'] = (df['ARRIVALS'] - df['ARRIVALS_(Y_NEW)'])**2
p = df.count()['ERROR']
#print("p: ", p)
ssq = round(math.sqrt(df['ERROR'].sum()/(p-2)), 3)
del df['D2']
return ssq
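# Hedged sanity check (illustrative, assuming perform_interpolation does plain linear
# interpolation): with perfectly aligned weeks and a perfect fit the error term
# vanishes and get_ssq returns 0.0.
def _demo_get_ssq():
    arrivals = np.array([1.0, 2.0, 3.0, 4.0])
    x_new = np.array([1.0, 2.0, 3.0, 4.0])
    y_new = arrivals.copy()
    return get_ssq(arrivals, x_new, y_new)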
# def determine_ssq(final_data):
# #final_data['ARRIVALS_NEW'] = c * final_data['ARRIVALS']
# #print(len())
# min_week = int(math.ceil(final_data['WEEK_(X_NEW)'].min()))
# max_week = final_data['WEEK'].max()
# week_range = [i for i in range(min_week, max_week+1)]
# #print(max_week, min_week, len(week_range), week_range)
# row_data = []
# if len(week_range) < len(final_data):
# diff = len(final_data) - len(week_range)
# row_data = [None for i in range(diff)]
# row_data += perform_interpolation(week_range, final_data['WEEK_(X_NEW)'].values, final_data['ARRIVALS_(Y_NEW)'].values, roundoff=False)
# #print(row_data)
# if len(row_data) < len(final_data):
# diff = len(final_data) - len(row_data)
# nones = [None for i in range(diff)]
# row_data += nones
# #print(len(row_data))
# #print(len(final_data))
# #exit()
# final_data['SHIFTED_Y'] = row_data
# final_data['(Y_ACT-Y_PRED)^2'] = final_data['ARRIVALS'] - final_data['SHIFTED_Y']
# final_data['(Y_ACT-Y_PRED)^2'] = final_data['(Y_ACT-Y_PRED)^2']**2
# p = final_data.count()['(Y_ACT-Y_PRED)^2']
# print("p: ", p)
# ssq = round(math.sqrt(final_data['(Y_ACT-Y_PRED)^2'].sum()/(p-2)), 3)
# #print(final_data)
# #print("SSQ: ", ssq)
# return ssq, final_data
def create_qq_plot(feature_data, arrival_data):
# qq_plot_start = 5
# qq_plot_end = 100
# qq_plot_increment = 5
# qq_plot_limit = 0.3
max_feature = np.nanmax(feature_data)
max_defect = np.nanmax(arrival_data)
FEATURES_CDF = (feature_data/max_feature).round(5)
ARRIVALS_CDF = (arrival_data/max_defect).round(5)
w = [(i/100) for i in range(qq_plot_start,qq_plot_end,qq_plot_increment) if ((i/100) > np.nanmin(FEATURES_CDF)) and ((i/100) > np.nanmin(ARRIVALS_CDF))]
#print("w: ", w)
#prinr("W: ", w)
#print("CDF: ", FEATURES_CDF)
Q_features = perform_interpolation(w, FEATURES_CDF, [i+1 for i in range(len(feature_data))], roundoff=False)
Q_arrivals = perform_interpolation(w, ARRIVALS_CDF, [i+1 for i in range(len(arrival_data))], roundoff=False)
#print(Q_arrivals)
#print(Q_features)
#exit()
arrivals_95pct = perform_interpolation([0.95], ARRIVALS_CDF, arrival_data, roundoff=False)[0]
features_95pct = perform_interpolation([0.95], FEATURES_CDF, feature_data, roundoff=False)[0]
c = arrivals_95pct/features_95pct #ratio
QQ = | pd.DataFrame([[i] for i in w], columns=['p']) | pandas.DataFrame |
import streamlit as st
import altair as alt
import pandas as pd
import numpy as np
import requests
import matplotlib.pyplot as plt
import plotly.express as px
from pathlib import Path
from functools import lru_cache
import statsmodels.formula.api as smf
from datetime import datetime
import pandasdmx as pdmx
plt.style.use(
"https://github.com/aeturrell/coding-for-economists/raw/main/plot_style.txt"
)
@st.cache
def prep_gdp_output_codes():
hdf = pd.read_excel(Path("data", "uk_gdp_output_hierarchy.xlsx"), header=None)
hdf = hdf.dropna(how="all", axis=1)
for i in range(3):
hdf.iloc[i, :] = hdf.iloc[i, :].fillna(method="ffill")
hdf = hdf.T
hdf["total"] = hdf[3].str.contains("Total")
hdf = hdf.query("total==False")
hdf = hdf.drop("total", axis=1)
for col in range(5):
hdf[col] = hdf[col].str.lstrip().str.rstrip()
hdf = hdf.rename(columns={4: "section", 5: "code"})
return hdf
def get_uk_regional_gdp():
# current year
latest_year = datetime.now().year - 1
# Tell pdmx we want OECD data
oecd = pdmx.Request("OECD")
# Set out everything about the request in the format specified by the OECD API
data = oecd.data(
resource_id="REGION_ECONOM",
key="1+2.UKC.SNA_2008.GDP.REG+CURR_PR.ALL.2017+2018+2019+2020/all?",
).to_pandas()
# example that works:
"https://stats.oecd.org/restsdmx/sdmx.ashx/GetData/REGION_ECONOM/1+2.GBR+UKC+UKC11+UKC12.SNA_2008.GDP.REG+CURR_PR+USD_PPP+REAL_PR+REAL_PPP+PC+PC_CURR_PR+PC_USD_PPP+PC_REAL_PR+PC_REAL_PPP.ALL.2001+2002+2003+2004+2005+2006+2007+2008+2009+2010+2011+2012+2013+2014+2015+2016+2017+2018+2019+2020/all?"
df = pd.DataFrame(data).reset_index()
    return df
@st.cache
def ons_blue_book_data(code):
data = grab_ONS_time_series_data("BB", code)
xf = pd.DataFrame(pd.json_normalize(data["years"]))
xf = xf[["year", "value"]]
xf["year"] = xf["year"].astype(int)
xf["value"] = xf["value"].astype(float)
xf["title"] = data["description"]["title"]
xf["code"] = code
xf = pd.DataFrame(xf.loc[xf["year"].argmax(), :]).T
return xf
@st.cache
@lru_cache(maxsize=32)
def ons_get_gdp_output_with_breakdown():
df = prep_gdp_output_codes()
xf = pd.DataFrame()
for code in df["code"].unique():
xf = pd.concat([xf, ons_blue_book_data(code)], axis=0)
df = pd.merge(df, xf, on=["code"], how="inner")
    # for later treemap use, only use highest level name if hierarchy has
# missing levels
df.loc[(df[1] == df[2]) & (df[3] == df[2]) & (df[3] == df[0]), [3, 2, 1]] = None
df.loc[(df[1] == df[2]) & (df[3] == df[2]), [3, 2]] = None
df.loc[(df[1] == df[2]), [2]] = None
# now, any nones with non-none children must be swapped
df.loc[(df[2].isnull()) & (~df[3].isnull()), [2, 3]] = df.loc[
(df[2].isnull()) & (~df[3].isnull()), [3, 2]
].values
df.loc[(df[0] == df[1]), [1]] = df.loc[(df[0] == df[1]), [2]].values
df.loc[(df[1] == df[2]), [2]] = df.loc[(df[1] == df[2]), [3]].values
# another round of this
df.loc[(df[1] == df[2]) & (df[3] == df[2]) & (df[3] == df[0]), [3, 2, 1]] = None
df.loc[(df[1] == df[2]) & (df[3] == df[2]), [3, 2]] = None
df.loc[(df[1] == df[2]), [2]] = None
df.loc[(df[3] == df[2]), [3]] = None
return df
@st.cache
def grab_ONS_time_series_data(dataset_id, timeseries_id):
"""
This function grabs specified time series from the ONS API.
"""
api_endpoint = "https://api.ons.gov.uk/"
api_params = {"dataset": dataset_id, "timeseries": timeseries_id}
url = (
api_endpoint
+ "/".join(
[x + "/" + y for x, y in zip(api_params.keys(), api_params.values())][::-1]
)
+ "/data"
)
return requests.get(url).json()
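# Hedged usage sketch: the helper simply builds a URL of the form
# https://api.ons.gov.uk/timeseries/<timeseries_id>/dataset/<dataset_id>/data
# and returns the parsed JSON. "QNA"/"L2KQ" are taken from plot_indices_of_output
# below; wrapped in a function because the call hits the live ONS API.
def _demo_grab_ons_series():
    return grab_ONS_time_series_data("QNA", "L2KQ")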
def ons_clean_qna_data(data):
if data["quarters"] != []:
df = pd.DataFrame(pd.json_normalize(data["quarters"]))
df["date"] = (
pd.to_datetime(
df["date"].apply(lambda x: x[:4] + "-" + str(int(x[-1]) * 3)),
format="%Y-%m",
)
+ pd.tseries.offsets.MonthEnd()
)
else:
df = pd.DataFrame(pd.json_normalize(data["months"]))
df["date"] = (
pd.to_datetime(df["date"], format="%Y %b") + pd.tseries.offsets.MonthEnd()
)
df = df.drop([x for x in df.columns if x not in ["date", "value"]], axis=1)
return df
@lru_cache(maxsize=32)
def ons_qna_data(dataset_id, timeseries_id):
data = grab_ONS_time_series_data(dataset_id, timeseries_id)
desc_text = data["description"]["title"]
df = ons_clean_qna_data(data)
return df, desc_text
def visualize_line(df, x_axis, y_axis, scale, widths, ylabel, title):
height = 350
graph = (
alt.Chart(df)
.mark_line(strokeWidth=4)
.encode(
x=x_axis + ":T",
y=alt.Y(y_axis + ":Q", scale=scale, title=ylabel),
tooltip=[y_axis],
)
.properties(width=widths, title=title, height=height)
.interactive()
)
st.write(graph)
def plot_indices_of_output():
# Grab the three UK time series
indices_dicts = {"Production": "L2KQ", "Construction": "L2N8", "Services": "L2NC"}
df = pd.DataFrame()
for key, value in indices_dicts.items():
xf, x_text = ons_qna_data("QNA", value)
xf["Name"] = key
df = pd.concat([df, xf], axis=0)
graph = (
alt.Chart(df)
.mark_line(strokeWidth=4)
.encode(
x=alt.X("date:T"),
y="value:Q",
color=alt.Color("Name:N", legend=None),
tooltip=["value"],
)
.properties(
width=200,
height=200,
)
.facet(column="Name:N")
.interactive()
)
st.write(graph)
def plot_labour_market_indicators():
# The labour market. TODO change to monthly LMS (series codes are same)
indices_dicts_lms = {
"Employment": "LF24",
"Unemployment": "MGSX",
"Inactivity": "LF2S",
}
df_lms = pd.DataFrame()
for key, value in indices_dicts_lms.items():
xf, x_text = ons_qna_data("LMS", value)
xf["Name"] = key
df_lms = pd.concat([df_lms, xf], axis=0)
graph_lms = (
alt.Chart(df_lms)
.mark_line(strokeWidth=4)
.encode(
x=alt.X("date:T", title=""),
y=alt.Y("value:Q", title="%"),
color="Name:N",
tooltip=["value"],
)
.properties(
title="Labour market indicators",
width=600,
)
.interactive()
)
st.write(graph_lms)
def plot_beveridge_curve():
indices_dicts_lms = {"Vacancies": "AP2Y", "Unemployment": "MGSX", "Active": "LF2K"}
df = pd.DataFrame()
for key, value in indices_dicts_lms.items():
xf, x_text = ons_qna_data("LMS", value)
xf["Name"] = key
df = | pd.concat([df, xf], axis=0) | pandas.concat |
"""
SIR 3S Logfile Utilities (short: Lx)
"""
__version__='192.168.3.11.dev1'
import os
import sys
import logging
logger = logging.getLogger(__name__)
import argparse
import unittest
import doctest
import nbformat
from nbconvert.preprocessors import ExecutePreprocessor
from nbconvert.preprocessors.execute import CellExecutionError
import timeit
import xml.etree.ElementTree as ET
import re
import struct
import collections
import zipfile
import py7zr
import pandas as pd
import h5py
import subprocess
import csv
import glob
import warnings
#warnings.simplefilter(action='ignore', category=PerformanceWarning)
# pd.set_option("max_rows", None)
# pd.set_option("max_columns", None)
# pd.reset_option('max_rows')
# ...
class LxError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
def fTCCast(x):
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
#logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
v=x
try:
if x in ['true','True']:
v=1
elif x in ['false','False','']:
v=0
else:
try:
v = float(x)
except Exception as e:
#logStrTmp="{:s}{!s:s}: Konvertierung zu float schlaegt fehl! - Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,x,sys.exc_info()[-1].tb_lineno,type(e),str(e))
#logger.debug(logStrTmp)
try:
v = pd.to_numeric(x,errors='raise',downcast='float')
#logStrTmp="{:s}{!s:s}: Konvertierung mit pd.to_numeric liefert: {!s:s}".format(logStr,x,v)
#logger.debug(logStrTmp)
except Exception as e:
#logStrTmp="{:s}{!s:s}: Konvertierung zu float mit pd.to_numeric schlaegt auch fehl! - Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,x,sys.exc_info()[-1].tb_lineno,type(e),str(e))
#logger.debug(logStrTmp)
#x='2021-04-20 10:56:12.000'
#t = pd.Timestamp(x)
#t # Timestamp('2021-04-20 10:56:12')
#i=int(t.to_datetime64())/1000000000
#i # 1618916172.0
#pd.to_datetime(i,unit='s',errors='coerce'): Timestamp('2021-04-20 10:56:12')
try:
t = pd.Timestamp(x)
i=int(t.to_datetime64())/1000000000
v=pd.to_numeric(i,errors='raise',downcast='float')
except Exception as e:
logStrTmp="{:s}{!s:s}: Konvertierung zu float (mit pd.to_numeric) schlaegt (auch nach Annahme vaulue=Zeitstring) fehl! - Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,x,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.debug(logStrTmp)
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
finally:
#logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return v
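# Hedged sanity check (illustrative): 'true'/'false' map to 1/0, numeric strings to
# floats, and timestamp strings to epoch seconds, mirroring the comments above.
def _demo_fTCCast():
    return fTCCast('true'), fTCCast('3.14'), fTCCast('2021-04-20 10:56:12.000')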
def getTCsOPCDerivative(TCsOPC,col,shiftSize,windowSize,fct=None):
"""
returns a df
index: ProcessTime
cols:
col
dt
dValue
dValueDt
dValueDtRollingMean
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
mDf=pd.DataFrame()
try:
s=TCsOPC[col].dropna()
mDf=pd.DataFrame(s)
dt=mDf.index.to_series().diff(periods=shiftSize)
mDf['dt']=dt
mDf['dValue']=mDf[col].diff(periods=shiftSize)
mDf=mDf.iloc[shiftSize:]
mDf['dValueDt']=mDf.apply(lambda row: row['dValue']/row['dt'].total_seconds(),axis=1)
if fct != None:
mDf['dValueDt']=mDf['dValueDt'].apply(fct)
mDf['dValueDtRollingMean']=mDf['dValueDt'].rolling(window=windowSize).mean()
mDf=mDf.iloc[windowSize-1:]
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return mDf
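# Hedged usage sketch (TCsOPC is assumed to be an OPC time-curve frame as produced by
# AppLog.getTCsFromDf below; the column choice and window sizes are illustrative):
def _demo_getTCsOPCDerivative(TCsOPC):
    col = TCsOPC.columns[0]
    return getTCsOPCDerivative(TCsOPC, col, shiftSize=1, windowSize=60)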
logFilenamePattern='([0-9]+)(_)+([0-9]+)(\.log)' # group(3) ist Postfix und Nr.
logFilenameHeadPattern='([0-9,_]+)(\.log)' # group(1) ist Head und H5-Key
# not all IDs are captured by the regex pID
# those are post-processed with pID2, getDfFromODIHelper and in getDfFromODI
pID=re.compile('(?P<Prae>IMDI\.)?(?P<A>[a-z,A-Z,0-9,_]+)\.(?P<B>[a-z,A-Z,0-9,_]+)\.(?P<C1>[a-z,A-Z,0-9]+)_(?P<C2>[a-z,A-Z,0-9]+)_(?P<C3>[a-z,A-Z,0-9]+)_(?P<C4>[a-z,A-Z,0-9]+)_(?P<C5>[a-z,A-Z,0-9]+)(?P<C6>_[a-z,A-Z,0-9]+)?(?P<C7>_[a-z,A-Z,0-9]+)?\.(?P<D>[a-z,A-Z,0-9,_]+)\.(?P<E>[a-z,A-Z,0-9,_]+)(?P<Post>\.[a-z,A-Z,0-9,_]+)?')
pID2='(?P<Prae>IMDI\.)?(?P<A>[a-z,A-Z,0-9,_]+)(?P<Post>\.[a-z,A-Z,0-9,_]+)?'
def getDfFromODIHelper(row,col,colCheck,pID2=pID2):
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
#logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
if not pd.isnull(row[colCheck]):
res= row[col]
resStr='ColCheckOk'
elif pd.isnull(row[col]):
res=re.search(pID2,row['ID']).group(col)
if res != None:
resStr='ColNowOk'
else:
resStr='ColStillNotOk'
else:
res = row[col]
resStr='ColWasOk'
except:
res = row[col]
resStr='ERROR'
finally:
if resStr not in ['ColCheckOk','ColNowOk']:
logger.debug("{:s}col: {:s} resStr: {:s} row['ID']: {:s} res: {:s}".format(logStr,col, resStr,row['ID'],str(res)))
#logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return res
def getDfFromODI(ODIFile,pID=pID):
"""
returns a defined df from ODIFile
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
dfID=None
try:
df=pd.read_csv(ODIFile,delimiter=';')
s = pd.Series(df['ID'].unique())
dfID=s.str.extract(pID.pattern,expand=True)
dfID['ID']=s
dfC=dfID['C1']+'_'+dfID['C2']+'_'+dfID['C3']+'_'+dfID['C4']+'_'+dfID['C5']+'_'+dfID['C6']#+'_'+dfID['C7']
dfID.loc[:,'C']=dfC.values
dfID['C']=dfID.apply(lambda row: row['C']+'_'+row['C7'] if not pd.isnull(row['C7']) else row['C'],axis=1)
dfID=dfID[['ID','Prae','A','B','C','C1','C2','C3','C4','C5','C6','C7','D','E','Post']]
for col in ['Prae','Post','A']:
dfID[col]=dfID.apply(lambda row: getDfFromODIHelper(row,col,'A'),axis=1)
dfID.sort_values(by=['ID'], axis=0,ignore_index=True,inplace=True)
dfID.set_index('ID',verify_integrity=True,inplace=True)
dfID.loc['Objects.3S_XYZ_PUMPE.3S_XYZ_GSI_01.Out.EIN','Post']='.EIN'
dfID.loc['Objects.3S_XYZ_PUMPE.3S_XYZ_GSI_01.Out.EIN','A']='Objects'
dfID.loc['Objects.3S_XYZ_PUMPE.3S_XYZ_GSI_01.Out.EIN','B']='3S_XYZ_PUMPE'
dfID.loc['Objects.3S_XYZ_PUMPE.3S_XYZ_GSI_01.Out.EIN','C']='3S_XYZ_GSI_01'
dfID.loc['Objects.3S_XYZ_PUMPE.3S_XYZ_GSI_01.Out.EIN','D']='Out'
#dfID.loc['Objects.3S_XYZ_PUMPE.3S_XYZ_GSI_01.Out.EIN',:]
dfID.loc['Objects.3S_XYZ_RSCHIEBER.3S_XYZ_PCV_01.Out.SOLLW','Post']='.SOLLW'
dfID.loc['Objects.3S_XYZ_RSCHIEBER.3S_XYZ_PCV_01.Out.SOLLW','A']='Objects'
dfID.loc['Objects.3S_XYZ_RSCHIEBER.3S_XYZ_PCV_01.Out.SOLLW','B']='3S_XYZ_RSCHIEBER'
dfID.loc['Objects.3S_XYZ_RSCHIEBER.3S_XYZ_PCV_01.Out.SOLLW','C']='3S_XYZ_PCV_01'
dfID.loc['Objects.3S_XYZ_RSCHIEBER.3S_XYZ_PCV_01.Out.SOLLW','D']='Out'
#dfID.loc['Objects.3S_XYZ_RSCHIEBER.3S_XYZ_PCV_01.Out.SOLLW',:]
dfID['yUnit']=dfID.apply(lambda row: getDfFromODIHelperyUnit(row),axis=1)
dfID['yDesc']=dfID.apply(lambda row: getDfFromODIHelperyDesc(row),axis=1)
dfID=dfID[['yUnit','yDesc','Prae','A','B','C','C1','C2','C3','C4','C5','C6','C7','D','E','Post']]
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return dfID
def addInitvalueToDfFromODI(INITFile,dfID):
"""
returns dfID extended with new Cols Initvalue and NumOfInits
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
dfIDext=dfID
try:
df=pd.read_csv(INITFile,delimiter=';',header=None,names=['ID','Value'])#,index_col=0)
dfGrped=df.groupby(by=['ID'])['Value'].agg(['count','min','max','mean','last'])
dfIDext=dfID.merge(dfGrped,left_index=True,right_index=True,how='left').filter(items=dfID.columns.to_list()+['last','count']).rename(columns={'last':'Initvalue','count':'NumOfInits'})
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return dfIDext
def fODIMatch(dfODI,TYPE=None,OBJTYPE=None,NAME1=None,NAME2=None):
df=dfODI
if TYPE != None:
df=df[df['TYPE']==TYPE]
if OBJTYPE != None:
df=df[df['OBJTYPE']==OBJTYPE]
if NAME1 != None:
df=df[df['NAME1']==NAME1]
if NAME2 != None:
df=df[df['NAME2']==NAME2]
return df
def fODIFindAllSchieberSteuerungsIDs(dfODI,NAME1=None,NAME2=None): # dfODI: pd.read_csv(ODI,delimiter=';')
df=fODIMatch(dfODI,TYPE='OL_2',OBJTYPE='VENT',NAME1=NAME1,NAME2=NAME2)
return sorted(list(df['ID'].unique())+[ID for ID in df['REF_ID'].unique() if not pd.isnull(ID)])
def fODIFindAllZeilenWithIDs(dfODI,IDs):
return dfODI[dfODI['ID'].isin(IDs) | dfODI['REF_ID'].isin(IDs)]
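# Hedged usage sketch: collect all valve control IDs from an ODI frame and then pull
# every ODI row that refers to them (dfODI is assumed to be pd.read_csv(ODI, delimiter=';')):
def _demo_fODISchieberRows(dfODI):
    ids = fODIFindAllSchieberSteuerungsIDs(dfODI)
    return fODIFindAllZeilenWithIDs(dfODI, ids)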
def getDfFromODIHelperyUnit(row):
"""
returns Unit
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
#logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
unit=None
try:
if row['E'] in ['AL_S','SB_S']:
unit='[-]'
elif row['E'] in ['LR_AV','LP_AV','QD_AV','SD_AV','AM_AV','FZ_AV','MZ_AV','NG_AV']:
unit='[Nm³/h]'
elif row['E'] in ['AC_AV','LR_AV']:
unit='[mm/s²]'
else:
unit='TBD in Lx'
except:
unit='ERROR'
finally:
#logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return unit
def getDfFromODIHelperyDesc(row):
"""
returns Desc
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
#logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
desc=None
try:
if row['E'] in ['AL_S','SB_S']:
desc='Status'
elif row['E'] in ['LR_AV','LP_AV','QD_AV','SD_AV','AM_AV','FZ_AV','MZ_AV','NG_AV']:
desc='Fluss'
elif row['E'] in ['AC_AV','LR_AV']:
desc='Beschleunigung'
else:
desc='TBD in Lx'
except:
desc='ERROR'
finally:
#logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return desc
def getDfIDUniqueCols(dfID):
"""
returns df with uniques
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
#logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
dfIDUniqueCols=pd.DataFrame()
try:
# Spalte mit der groessten Anzahl von Auspraegungen feststellen
lenMax=0
colMax=''
# ueber alle Spalten
for idx,col in enumerate(dfID):
s=pd.Series(dfID[col].unique())
if len(s) > lenMax:
lenMax=len(s)
colMax=col
s=pd.Series(dfID[colMax].unique(),name=colMax)
s.sort_values(inplace=True)
s=pd.Series(s.values,name=colMax)
dfIDUniqueCols=pd.DataFrame(s)
# ueber alle weiteren Spalten
for idx,col in enumerate([col for col in dfID.columns if col != colMax]):
# s unique erzeugen
s=pd.Series(dfID[col].unique(),name=col)
# s sortieren
s.sort_values(inplace=True)
s=pd.Series(s.values,name=col)
dfIDUniqueCols=pd.concat([dfIDUniqueCols,s],axis=1)
dfIDUniqueCols=dfIDUniqueCols[dfID.columns]
except:
logger.error("{0:s}".format(logStr))
finally:
#logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return dfIDUniqueCols
def getIDsFromID(ID='Objects.3S_XYZ_SEG_INFO.3S_L_6_KED_39_EL1.In.AL_S',dfID=None,matchCols=['B','C1','C2','C3','C4','C5','D'],any=False):
"""
returns IDs matching ID
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
#logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
IDsMatching=[]
s=dfID.loc[ID,:]
for ID,row in dfID.iterrows():
match=True
for col in [col for col in row.index.values if col in matchCols]:
#if str(row[col])!=str(s[col]):
if row[col]!=s[col]:
match=False
break
else:
if any:
break
if match:
IDsMatching.append(ID)
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
#except:
# logger.error("{0:s}".format(logStr))
finally:
#logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return sorted(IDsMatching)
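# Hedged usage sketch: collect all channels belonging to the same segment as the
# default AL_S channel named in the signature above (dfID as returned by getDfFromODI):
def _demo_getIDsFromID(dfID):
    return getIDsFromID(ID='Objects.3S_XYZ_SEG_INFO.3S_L_6_KED_39_EL1.In.AL_S',
                        dfID=dfID, matchCols=['B','C1','C2','C3','C4','C5','D'])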
def getLDSResVecDf(
ID # ResVec-Defining-Channel; i.e. for Segs Objects.3S_XYZ_SEG_INFO.3S_L_6_EL1_39_TUD.In.AL_S / i.e. for Drks Objects.3S_XYZ_DRUCK.3S_6_EL1_39_PTI_02_E.In.AL_S
,dfID
,TCsLDSResDf
,matchCols # i.e. ['B','C1','C2','C3','C4','C5','C6','D'] for Segs; i.e. ['B','C','D'] for Drks
):
"""
returns a df with LDSResChannels as columns (AL_S, ...); derived by Filtering columns from TCsLDSResDf and renaming them
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
dfResVec=pd.DataFrame()
try:
IDs=getIDsFromID(ID=ID,dfID=dfID,matchCols=matchCols)
dfFiltered=TCsLDSResDf.filter(items=IDs)
colDct={}
for col in dfFiltered.columns:
m=re.search(pID,col)
colDct[col]=m.group('E')
dfResVec=dfFiltered.rename(columns=colDct)
except:
logger.error("{0:s}".format(logStr))
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return dfResVec
def fGetFirstAndLastValidIdx(df):
"""
returns (tFirst,tLast)
"""
for idx,col in enumerate(df.columns):
tF=df[col].first_valid_index()
tL=df[col].last_valid_index()
if idx==0:
tFirst=tF
tLast=tL
else:
if tF < tFirst:
tFirst=tF
if tL > tLast:
tLast=tL
return (tFirst,tLast)
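# Hedged usage sketch (illustrative frame): leading/trailing NaNs per column are
# skipped, so the result spans the earliest and latest valid timestamps overall.
def _demo_fGetFirstAndLastValidIdx():
    idx = pd.date_range('2021-04-20', periods=4, freq='min')
    nan = float('nan')
    df = pd.DataFrame({'a': [nan, 1.0, 2.0, nan], 'b': [nan, nan, 3.0, 4.0]}, index=idx)
    return fGetFirstAndLastValidIdx(df)  # -> (idx[1], idx[3])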
def fGetIDSets(
dfID
,divNr #'7'
,pipelineNrLst #['43','44']
        ,fctIn=None # function of the ID that returns False if the ID should not be part of the set after all
):
    # returns dict: key: name of an ID set; value: the corresponding IDs
IDSets={}
IDs=[]
for ID in sorted(dfID.index.unique()):
m=re.search(pID,ID)
if m != None:
C1= m.group('C1')
C2= m.group('C2')
C3= m.group('C3')
C4= m.group('C4')
C5= m.group('C5')
if C1 in [divNr] and C3 in pipelineNrLst: # u.a. SEG ErgVecs
IDs.append(ID)
elif C2 in [divNr] and C4 in pipelineNrLst:
IDs.append(ID)
elif C3 in [divNr] and C5 in pipelineNrLst: # FT, PTI, etc.
IDs.append(ID)
if fctIn != None:
IDs=[ID for ID in IDs if fctIn(ID)]
IDSets['IDs']=IDs
IDsAlarm=[ID for ID in IDs if re.search(pID,ID).group('E') == 'AL_S']
IDSets['IDsAlarm']=IDsAlarm
IDsAlarmSEG=[ID for ID in IDsAlarm if re.search(pID,ID).group('C5') != 'PTI']
IDSets['IDsAlarmSEG']=IDsAlarmSEG
IDsAlarmDruck=[ID for ID in IDsAlarm if re.search(pID,ID).group('C5') == 'PTI']
IDSets['IDsAlarmDruck']=IDsAlarmDruck
IDsStat=[ID for ID in IDs if re.search(pID,ID).group('E') == 'STAT_S']
IDSets['IDsStat']=IDsStat
IDsStatSEG=[ID for ID in IDsStat if re.search(pID,ID).group('C5') != 'PTI']
IDSets['IDsStatSEG']=IDsStatSEG
IDsStatDruck=[ID for ID in IDsStat if re.search(pID,ID).group('C5') == 'PTI']
IDSets['IDsStatDruck']=IDsStatDruck
###
IDsSb=[ID for ID in IDs if re.search(pID,ID).group('E') == 'SB_S']
IDSets['IDsSb']=IDsSb
IDsSbSEG=[ID for ID in IDsSb if re.search(pID,ID).group('C5') != 'PTI']
IDSets['IDsSbSEG']=IDsSbSEG
IDsSbDruck=[ID for ID in IDsSb if re.search(pID,ID).group('C5') == 'PTI']
IDSets['IDsSbDruck']=IDsSbDruck
###
IDsZHK=[ID for ID in IDs if re.search(pID,ID).group('E') == 'ZHKNR_S']
IDSets['IDsZHK']=IDsZHK
IDsZHKSEG=[ID for ID in IDsZHK if re.search(pID,ID).group('C5') != 'PTI']
IDSets['IDsZHKSEG']=IDsZHKSEG
IDsZHKDruck=[ID for ID in IDsZHK if re.search(pID,ID).group('C5') == 'PTI']
IDSets['IDsZHKDruck']=IDsZHKDruck
IDsFT=[ID for ID in IDs if re.search(pID,ID).group('C4') == 'FT']
IDSets['IDsFT']=IDsFT
IDsPT=[ID for ID in IDs if re.search(pID,ID).group('C4') == 'PTI']
IDSets['IDsPT']=IDsPT
IDsPT_BCIND=[ID for ID in IDs if re.search(pID,ID).group('C5') == 'PTI' and re.search(pID,ID).group('E') == 'BCIND_S' ]
IDSets['IDsPT_BCIND']=IDsPT_BCIND
### Schieber
IDsZUST=[ID for ID in IDs if re.search(pID,ID).group('E') == 'ZUST']
IDsZUST=sorted(IDsZUST,key=lambda x: re.match(pID,x).group('C5'))
IDSets['IDsZUST']=IDsZUST
IDs_3S_XYZ_ESCHIEBER=[ID for ID in IDs if re.search(pID,ID).group('B') == '3S_FBG_ESCHIEBER']
IDs_3S_XYZ_ESCHIEBER=sorted(IDs_3S_XYZ_ESCHIEBER,key=lambda x: re.match(pID,x).group('C6'))
IDSets['IDs_3S_XYZ_ESCHIEBER']=IDs_3S_XYZ_ESCHIEBER
IDs_XYZ_ESCHIEBER=[ID for ID in IDs if re.search(pID,ID).group('B') == 'FBG_ESCHIEBER']
IDs_XYZ_ESCHIEBER=sorted(IDs_XYZ_ESCHIEBER,key=lambda x: re.match(pID,x).group('C5')) #
IDSets['IDs_XYZ_ESCHIEBER']=IDs_XYZ_ESCHIEBER
IDs_XYZ_ESCHIEBER_Ohne_ZUST=[ID for ID in IDs_XYZ_ESCHIEBER if re.search(pID,ID).group('E') != 'ZUST']
IDs_XYZ_ESCHIEBER_Ohne_ZUST=sorted(IDs_XYZ_ESCHIEBER_Ohne_ZUST,key=lambda x: re.match(pID,x).group('C5'))
IDSets['IDs_XYZ_ESCHIEBER_Ohne_ZUST']=IDs_XYZ_ESCHIEBER_Ohne_ZUST
IDsSchieberAlle=IDsZUST+IDs_XYZ_ESCHIEBER_Ohne_ZUST+IDs_3S_XYZ_ESCHIEBER
IDSets['IDsSchieberAlle']=IDsSchieberAlle
IDsSchieberAlleOhneLAEUFT=[ID for ID in IDsSchieberAlle if re.search('LAEUFT$',ID) == None]
IDsSchieberAlleOhneLAEUFT=[ID for ID in IDsSchieberAlleOhneLAEUFT if re.search('LAEUFT_NICHT$',ID) == None]
IDSets['IDsSchieberAlleOhneLAEUFT']=IDsSchieberAlleOhneLAEUFT
return IDSets
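# Hedged usage sketch: the division and pipeline numbers mirror the examples in the
# signature comments above; dfID is assumed to come from getDfFromODI.
def _demo_fGetIDSets(dfID):
    idSets = fGetIDSets(dfID, divNr='7', pipelineNrLst=['43', '44'])
    return sorted(idSets.keys())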
h5KeySep='/'
def fValueFct(x):
return pd.to_numeric(x,errors='ignore',downcast='float')
class AppLog():
"""
SIR 3S App Log (SQC Log)
Maintains a H5-File.
Existing H5-File will be deleted (if not initialized with h5File=...).
H5-Keys are:
* init
* lookUpDf
* lookUpDfZips (if initialized with zip7Files=...)
* Logfilenames praefixed by Log without extension
Attributes:
* h5File
* lookUpDf
zipName
logName
FirstTime (ScenTime - not #LogTime)
LastTime (ScenTime - mot #LogTime)
* lookUpDfZips
"""
    TCsdfOPCFill=False # if True, the NaNs in TCsdfOPC are filled; default: False
@classmethod
def getTCsFromDf(cls,df,dfID=pd.DataFrame(),TCsdfOPCFill=TCsdfOPCFill):
"""
returns several TC-dfs from df
        dfs are processed in the same way as in extractTCsToH5s; see there
Args:
* df: a df with Log-Data
* columns: ['ID','ProcessTime','ScenTime','SubSystem','Value','Direction']
* dfID
* index: ID
                * only required if the IDs shall be split into Res1 and Res2
* TCsdfOPCFill: if True (default): fill NaNs in this df
Time curve dfs: cols:
* Time (TCsdfOPC: ProcessTime, other: ScenTime)
* ID
* Value
Time curve dfs:
* TCsdfOPC
* TCsSirCalc
* TCsLDSIn
* TCsLDSRes (dfID empty) or TCsLDSRes1, TCsLDSRes2
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
TCsdfOPC=pd.DataFrame()
TCsdfSirCalc=pd.DataFrame()
TCsdfLDSIn=pd.DataFrame()
if not dfID.empty:
TCsdfLDSRes1=pd.DataFrame()
TCsdfLDSRes2=pd.DataFrame()
else:
TCsdfLDSRes=pd.DataFrame()
if not dfID.empty:
df=df.merge(dfID,how='left',left_on='ID',right_index=True,suffixes=('','_r'))
logger.debug("{0:s}{1:s}".format(logStr,'TCsdfOPC ...'))
TCsdfOPC=df[(df['SubSystem'].str.contains('^OPC'))
### & ~(df['Value'].isnull()) # ueberfluessig, wenn df dies bereits erfuellt
][['ProcessTime','ID','Value']].pivot_table(index='ProcessTime', columns='ID', values='Value',aggfunc='last')
if TCsdfOPCFill:
for col in TCsdfOPC.columns:
TCsdfOPC[col]=TCsdfOPC[col].fillna(method='ffill')
TCsdfOPC[col]=TCsdfOPC[col].fillna(method='bfill')
logger.debug("{0:s}{1:s}".format(logStr,'TCsdfSirCalc ...'))
TCsdfSirCalc=df[(df['SubSystem'].str.contains('^SirCalc')) | (df['SubSystem'].str.contains('^RTTM')) ][['ScenTime','ID','Value']].pivot_table(index='ScenTime', columns='ID', values='Value',aggfunc='last')
logger.debug("{0:s}{1:s}".format(logStr,'TCsdfLDSIn ...'))
TCsdfLDSIn=df[(df['SubSystem'].str.contains('^LDS')) & (df['Direction'].str.contains('^<-'))][['ScenTime','ID','Value']].pivot_table(index='ScenTime', columns='ID', values='Value',aggfunc='last')
if not dfID.empty:
logger.debug("{0:s}{1:s}".format(logStr,'TCsdfLDSRes1 ...'))
TCsdfLDSRes1=df[(df['SubSystem'].str.contains('^LDS')) & (df['Direction'].str.contains('^->')) & (df['B'].str.contains('^3S_FBG_SEG_INFO'))][['ScenTime','ID','Value']].pivot_table(index='ScenTime', columns='ID', values='Value',aggfunc='last')
logger.debug("{0:s}{1:s}".format(logStr,'TCsdfLDSRes2 ...'))
TCsdfLDSRes2=df[(df['SubSystem'].str.contains('^LDS')) & (df['Direction'].str.contains('^->')) & (df['B'].str.contains('^3S_FBG_DRUCK'))][['ScenTime','ID','Value']].pivot_table(index='ScenTime', columns='ID', values='Value',aggfunc='last')
else:
logger.debug("{0:s}{1:s}".format(logStr,'TCsdfLDSRes ...'))
TCsdfLDSRes=df[(df['SubSystem'].str.contains('^LDS')) & (df['Direction'].str.contains('^->'))][['ScenTime','ID','Value']].pivot_table(index='ScenTime', columns='ID', values='Value',aggfunc='last')
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
if not dfID.empty:
return TCsdfOPC,TCsdfSirCalc,TCsdfLDSIn,TCsdfLDSRes1,TCsdfLDSRes2
else:
return TCsdfOPC,TCsdfSirCalc,TCsdfLDSIn,TCsdfLDSRes
def __init__(self,logFile=None,zip7File=None,h5File=None,h5FileName=None,readWithDictReader=False,nRows=None,readWindowsLog=False):
"""
(re-)initialize
        logFile:
            is read and stored in the H5 file
            addZip7File(zip7File) reads all logs of a zip file and stores them in the H5 file
        zipFile:
            the 1st logFile is read and stored in the H5 file
            addZip7File(zip7File) reads all logs of a zip file and stores them in the H5 file
            initializing with a zipFile is identical to initializing with a logFile if that logFile is the 1st logFile of the zip
        after addZip7File(zip7File) - repeated for several zips if necessary:
            data can be read with self.get(...) (returns 1 df)
            data can be read with self.getTCs(...) (returns several dfs in TC form)
            data can be read with self.getTCsSpecified(...) (returns 1 df in TC form)
            data in TC form can be extracted into separate H5 files with self.extractTCsToH5s(...)
            the TCs can be read back with self.getTCsFromH5s(...)
            === addZip7File(zip7File) - repeated if necessary - and extractTCsToH5s(...) are part of a 7Zip preprocessing step that precedes the actual analysis ===
        h5File:
            the lookUp dfs are read from the H5 file
            the TC-H5 filenames belonging to the H5 file are set
            the TC-H5 files are not checked for existence, let alone read
"""
logStr = "{0:s}.{1:s}: ".format(self.__class__.__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
self.lookUpDf=pd.DataFrame()
self.lookUpDfZips=pd.DataFrame()
try:
if logFile != None and zip7File != None and h5File != None:
logger.debug("{0:s}{1:s}".format(logStr,'3 Files (logFile and zip7File and h5File) specified.'))
elif logFile != None and zip7File != None:
logger.debug("{0:s}{1:s}".format(logStr,'2 Files (logFile and zip7File) specified.'))
elif logFile != None and h5File != None:
logger.debug("{0:s}{1:s}".format(logStr,'2 Files (logFile and h5File) specified.'))
elif h5File != None and zip7File != None:
logger.debug("{0:s}{1:s}".format(logStr,'2 Files (h5File and zip7File) specified.'))
elif logFile != None:
self.__initlogFile(logFile,h5FileName=h5FileName,readWithDictReader=readWithDictReader,readWindowsLog=readWindowsLog)
elif zip7File != None:
self.__initzip7File(zip7File,h5FileName=h5FileName,readWithDictReader=readWithDictReader,readWindowsLog=readWindowsLog)
elif h5File != None:
self.__initWithH5File(h5File)
else:
logger.debug("{0:s}{1:s}".format(logStr,'No File (logFile XOR zip7File XOR h5File) specified.'))
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
def __initlogFile(self,logFile,h5FileName=None,readWithDictReader=False,readWindowsLog=False):
"""
(re-)initialize with logFile
"""
logStr = "{0:s}.{1:s}: ".format(self.__class__.__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
# wenn logFile nicht existiert ...
if not os.path.exists(logFile):
logger.debug("{0:s}logFile {1:s} not existing.".format(logStr,logFile))
else:
df = self.__processALogFile(logFile=logFile,readWithDictReader=readWithDictReader,readWindowsLog=readWindowsLog)
self.__initH5File(logFile,df,h5FileName=h5FileName)
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
def __initH5File(self,h5File,df,h5FileName=None):
"""
creates self.h5File and writes 'init'-Key Logfile df to it
Args:
* h5File: name of logFile or zip7File; the Dir is the Dir of the H5-File
* df
* h5FileName: the H5-FileName without Dir and Extension; if None (default), "Log ab ..." is used
"""
logStr = "{0:s}.{1:s}: ".format(self.__class__.__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
(h5FileHead,h5FileTail)=os.path.split(h5File)
# H5-File
if h5FileName==None:
h5FileTail="Log ab {0:s}.h5".format(str(df['#LogTime'].min())).replace(':',' ').replace('-',' ')
else:
h5FileTail=h5FileName+'.h5'
self.h5File=os.path.join(h5FileHead,h5FileTail)
# wenn H5 existiert wird es geloescht
if os.path.exists(self.h5File):
os.remove(self.h5File)
logger.debug("{0:s}Existing H5-File {1:s} deleted.".format(logStr,h5FileTail))
# init-Logfile schreiben
self.__toH5('init',df)
logger.debug("{0:s}'init'-Key Logfile done.".format(logStr))
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
def __initWithH5File(self,h5File,useRawHdfAPI=False):
"""
self.h5File=h5File
self.lookUpDf
self.lookUpDfZips
        the lookUp dfs are read from the H5 file
        the TC-H5 filenames belonging to the H5 file are set if those H5 files exist
        the TC-H5 files are not read
        the CVD filename belonging to the H5 file is set if that H5 file exists
        the CVD H5 file is not read
"""
logStr = "{0:s}.{1:s}: ".format(self.__class__.__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
# H5 existiert
if os.path.exists(h5File):
self.h5File=h5File
# Keys available
with pd.HDFStore(self.h5File) as h5Store:
h5Keys=sorted(h5Store.keys())
logger.debug("{0:s}h5Keys available: {1:s}".format(logStr,str(h5Keys)))
h5KeysStripped=[item.replace(h5KeySep,'') for item in h5Keys]
if useRawHdfAPI:
with pd.HDFStore(self.h5File) as h5Store:
if 'lookUpDf' in h5KeysStripped:
self.lookUpDf=h5Store['lookUpDf']
if 'lookUpDfZips' in h5KeysStripped:
self.lookUpDfZips=h5Store['lookUpDfZips']
else:
if 'lookUpDf' in h5KeysStripped:
self.lookUpDf=pd.read_hdf(self.h5File, key='lookUpDf')
if 'lookUpDfZips' in h5KeysStripped:
self.lookUpDfZips=pd.read_hdf(self.h5File, key='lookUpDfZips')
else:
logStrFinal="{0:s}h5File {1:s} not existing.".format(logStr,h5File)
logger.debug(logStrFinal)
raise LxError(logStrFinal)
#TC-H5s
(name,ext)=os.path.splitext(self.h5File)
TCPost='_TC'
h5FileOPC=name+TCPost+'OPC'+ext
h5FileSirCalc=name+TCPost+'SirCalc'+ext
h5FileLDSIn=name+TCPost+'LDSIn'+ext
h5FileLDSRes1=name+TCPost+'LDSRes1'+ext
h5FileLDSRes2=name+TCPost+'LDSRes2'+ext
h5FileLDSRes=name+TCPost+'LDSRes'+ext
if os.path.exists(h5FileOPC):
self.h5FileOPC=h5FileOPC
logger.debug("{0:s}Existing H5-File {1:s}.".format(logStr,self.h5FileOPC))
if os.path.exists(h5FileSirCalc):
self.h5FileSirCalc=h5FileSirCalc
logger.debug("{0:s}Existing H5-File {1:s}.".format(logStr,self.h5FileSirCalc))
if os.path.exists(h5FileLDSIn):
self.h5FileLDSIn=h5FileLDSIn
logger.debug("{0:s}Existing H5-File {1:s}.".format(logStr,self.h5FileLDSIn))
if os.path.exists(h5FileLDSRes):
self.h5FileLDSRes=h5FileLDSRes
logger.debug("{0:s}Existing H5-File {1:s}.".format(logStr,self.h5FileLDSRes))
if os.path.exists(h5FileLDSRes1):
self.h5FileLDSRes1=h5FileLDSRes1
logger.debug("{0:s}Existing H5-File {1:s}.".format(logStr,self.h5FileLDSRes1))
if os.path.exists(h5FileLDSRes2):
self.h5FileLDSRes2=h5FileLDSRes2
logger.debug("{0:s}Existing H5-File {1:s}.".format(logStr,self.h5FileLDSRes2))
h5FileCVD=name+'_'+'CVD'+ext
if os.path.exists(h5FileCVD):
self.h5FileCVD=h5FileCVD
logger.debug("{0:s}Existing H5-File {1:s}.".format(logStr,self.h5FileCVD))
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
def getInitDf(self,useRawHdfAPI=False):
"""
returns InitDf from H5-File
"""
logStr = "{0:s}.{1:s}: ".format(self.__class__.__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
df=pd.DataFrame()
# H5 existiert
if os.path.exists(self.h5File):
# Keys available
with pd.HDFStore(self.h5File) as h5Store:
h5Keys=sorted(h5Store.keys())
logger.debug("{0:s}h5Keys available: {1:s}".format(logStr,str(h5Keys)))
h5KeysStripped=[item.replace(h5KeySep,'') for item in h5Keys]
if useRawHdfAPI:
with pd.HDFStore(self.h5File) as h5Store:
if 'init' in h5KeysStripped:
df=h5Store['init']
else:
if 'init' in h5KeysStripped:
df=pd.read_hdf(self.h5File, key='init')
else:
logStrFinal="{0:s}h5File {1:s} not existing.".format(logStr,h5File)
logger.debug(logStrFinal)
raise LxError(logStrFinal)
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return df
def __initzip7File(self,zip7File,h5FileName=None,nRows=None,readWithDictReader=False,readWindowsLog=False):
"""
(re-)initialize with zip7File
"""
logStr = "{0:s}.{1:s}: ".format(self.__class__.__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
# wenn zip7File nicht existiert ...
if not os.path.exists(zip7File):
logStrFinal="{0:s}zip7File {1:s} not existing.".format(logStr,zip7File)
logger.debug(logStrFinal)
raise LxError(logStrFinal)
else:
(zip7FileHead, zip7FileTail)=os.path.split(zip7File)
zipFileDirname=os.path.dirname(zip7File)
logger.debug("{0:s}zipFileDirname: {1:s}".format(logStr,zipFileDirname))
aDfRead=False
with py7zr.SevenZipFile(zip7File, 'r') as zip7FileObj:
allLogFiles = zip7FileObj.getnames()
logger.debug("{0:s}{1:s}: len(getnames()): {2:d}.".format(logStr,zip7FileTail,len(allLogFiles)))
logger.debug("{0:s}getnames(): {1:s}.".format(logStr,str(allLogFiles)))
extDirLstTBDeleted=[]
extDirLstExistingLogged=[]
for idx,logFileNameInZip in enumerate(allLogFiles):
logger.debug("{0:s}idx: {1:d} logFileNameInZip: {2:s}".format(logStr,idx,logFileNameInZip))
# die Datei die 7Zip bei extract erzeugen wird
logFile=os.path.join(zipFileDirname,logFileNameInZip)
(logFileHead, logFileTail)=os.path.split(logFile) # logFileHead == dirname()
logger.debug("{0:s}idx: {1:d} logFileHead: {2:s} logFileTail: {3:s}".format(logStr,idx,logFileHead,logFileTail))
(name, ext)=os.path.splitext(logFile)
logger.debug("{0:s}idx: {1:d} name: {2:s} ext: {3:s}".format(logStr,idx,name,ext))
if logFileHead!='': # logFileHead == dirname()
if os.path.exists(logFileHead) and logFileHead not in extDirLstExistingLogged:
logger.debug("{0:s}idx: {1:d} Verz. logFileHead: {2:s} existiert bereits.".format(logStr,idx,logFileHead))
extDirLstExistingLogged.append(logFileHead)
elif not os.path.exists(logFileHead):
logger.debug("{0:s}idx: {1:d} Verz. logFileHead: {2:s} existiert noch nicht.".format(logStr,idx,logFileHead))
extDirLstTBDeleted.append(logFileHead)
# kein Logfile zu prozessieren ...
if ext == '':
continue
# Logfile prozessieren ...
if os.path.exists(logFile):
isFile = os.path.isfile(logFile)
if isFile:
logger.debug("{0:s}idx: {1:d} Log: {2:s} existiert bereits. Wird durch Extrakt ueberschrieben werden.".format(logStr,idx,logFileTail))
logFileTBDeleted=False
else:
logFileTBDeleted=False
else:
logger.debug("{0:s}idx: {1:d} Log: {2:s} existiert nicht. Wird extrahiert, dann prozessiert und dann wieder geloescht.".format(logStr,idx,logFileTail))
logFileTBDeleted=True
# extrahieren
zip7FileObj.extract(path=zipFileDirname,targets=logFileNameInZip)
if os.path.exists(logFile):
pass
else:
logger.warning("{0:s}idx: {1:d} Log: {2:s} NOT extracted?! Continue with next Name in 7Zip.".format(logStr,idx,logFileTail))
# nichts zu prozessieren ...
continue
# ...
if os.path.isfile(logFile):
df = self.__processALogFile(logFile=logFile,nRows=nRows,readWithDictReader=readWithDictReader,readWindowsLog=readWindowsLog)
if df is None:
logger.warning("{0:s}idx: {1:d} Log: {2:s} NOT processed?! Continue with next Name in 7Zip.".format(logStr,idx,logFileTail))
# nichts zu prozessieren ...
continue
else:
aDfRead=True
# ...
# gleich wieder loeschen
if os.path.exists(logFile) and logFileTBDeleted:
if os.path.isfile(logFile):
os.remove(logFile)
logger.debug("{0:s}idx: {1:d} Log: {2:s} wieder geloescht.".format(logStr,idx,logFileTail))
# wir wollen nur das 1. File lesen ...
if aDfRead:
break;
for dirName in extDirLstTBDeleted:
if os.path.exists(dirName):
if os.path.isdir(dirName):
(dirNameHead, dirNameTail)=os.path.split(dirName)
if len(os.listdir(dirName)) == 0:
os.rmdir(dirName)
logger.debug("{0:s}dirName: {1:s} existierte nicht und wurde wieder geloescht.".format(logStr,dirNameTail))
else:
logger.info("{0:s}dirName: {1:s} existiert mit nicht leerem Inhalt?!".format(logStr,dirNameTail))
self.__initH5File(zip7File,df,h5FileName=h5FileName)
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
def __toH5(self,key,df,useRawHdfAPI=False,updLookUpDf=False,logName='',zipName='',noDfStorage=False):
"""
write df with key to H5-File (if not noDfStorage)
Args:
* updLookUpDf: if True, self.lookUpDf is updated with
* zipName (the Zip of logFile)
* logName (the name of the logFile i.e. 20201113_0000004.log)
* FirstTime (the first ScenTime in df)
* LastTime (the last ScenTime in df)
            self.lookUpDf is not written to H5
"""
logStr = "{0:s}.{1:s}: ".format(self.__class__.__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
(h5FileHead,h5FileTail)=os.path.split(self.h5File)
if not noDfStorage:
if useRawHdfAPI:
with pd.HDFStore(self.h5File) as h5Store:
try:
h5Store.put(key,df)
except Exception as e:
logger.error("{0:s}Writing df with h5Key={1:s} to {2:s} FAILED!".format(logStr,key,h5FileTail))
raise e
else:
df.to_hdf(self.h5File, key=key)
logger.debug("{0:s}Writing df with h5Key={1:s} to {2:s} done.".format(logStr,key,h5FileTail))
if updLookUpDf:
s=df['ScenTime']#['#LogTime']
FirstTime=s.iloc[0]
LastTime=s.iloc[-1]
if self.lookUpDf.empty:
data={ 'zipName': [zipName]
,'logName': [logName]
,'FirstTime' : [FirstTime]
,'LastTime' : [LastTime]
}
self.lookUpDf = pd.DataFrame (data, columns = ['zipName','logName','FirstTime','LastTime'])
self.lookUpDf['zipName']=self.lookUpDf['zipName'].astype(str)
self.lookUpDf['logName']=self.lookUpDf['logName'].astype(str)
else:
data={ 'zipName': zipName
,'logName': logName
,'FirstTime' : FirstTime
,'LastTime' : LastTime
}
self.lookUpDf=self.lookUpDf.append(data,ignore_index=True)
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
def __processALogFile(self,logFile=None,delimiter='\t',nRows=None,readWithDictReader=False,fValueFct=fValueFct,readWindowsLog=False):
"""
process logFile
Args:
* logFile: logFile to be processed
* nRows: number of logFile rows to be processed; default: None (:= all rows are processed); if readWithDictReader: last row is also processed
* readWithDictReader: if True, csv.DictReader is used; default: None (:= pd.read_csv is used)
Returns:
* df: logFile processed to df
* converted:
* #LogTime: to datetime
* ProcessTime: to datetime
* Value: to float64
* ID,Direction,SubSystem,LogLevel,State,Remark: to str
* new:
* ScenTime datetime
"""
logStr = "{0:s}.{1:s}: ".format(self.__class__.__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
df=None
try:
with open(logFile,'r') as f:
pass
(logFileHead,logFileTail)=os.path.split(logFile)
if readWithDictReader:
restkey='+'
with open(logFile,"r") as csvFile: # 1. Zeile enthaelt die Ueberschrift
reader = csv.DictReader(csvFile,delimiter=delimiter,restkey=restkey)
logger.debug("{0:s}{1:s} csv.DictReader reader processed.".format(logStr,logFileTail))
# If a row has more fields than fieldnames, the remaining data is put in a list and stored with the fieldname specified by restkey.
colNames=reader.fieldnames
dcts = [dct for dct in reader] # alle Zeilen lesen
logger.debug("{0:s}{1:s} csv.DictReader-Ergebnis processed.".format(logStr,logFileTail))
if nRows!=None:
dcts=dcts[0:nRows]+[dcts[-1]]
# nur die Spaltennamen werden als row-Spalten erzeugt
rows = [[dct[colName] for colName in colNames] for dct in dcts]
logger.debug("{0:s}{1:s} rows processed.".format(logStr,logFileTail))
# die "ueberfluessigen" Spalten an die letzte Spalte dranhaengen
for i, dct in enumerate(dcts):
if restkey in dct:
restValue=dct[restkey]
restValueStr = delimiter.join(restValue)
newValue=rows[i][-1]+delimiter+restValueStr
#logger.debug("{0:s}{1:s} restValueStr: {2:s} - Zeile {3:10d}: {4:s} - neuer Wert letzte Spalte: {5:s}.".format(logStr,logFileTail,restValueStr,i,str(rows[i]),newValue))
rows[i][-1]=rows[i][-1]+newValue
logger.debug("{0:s}{1:s} restkey processed.".format(logStr,logFileTail))
index=range(len(rows))
df = pd.DataFrame(rows,columns=colNames,index=index)
else:
if nRows==None:
df=pd.read_csv(logFile,delimiter=delimiter,error_bad_lines=False,warn_bad_lines=True,low_memory=False)
else:
df=pd.read_csv(logFile,delimiter=delimiter,error_bad_lines=False,warn_bad_lines=True,low_memory=False,nrows=nRows)
logger.debug("{0:s}{1:s} pd.DataFrame processed.".format(logStr,logFileTail))
#logger.debug("{0:s}df: {1:s}".format(logStr,str(df)))
#LogTime
df['#LogTime']=pd.to_datetime(df['#LogTime'],unit='ms',errors='coerce') # NaT
#ProcessTime
df['ProcessTime']=pd.to_datetime(df['ProcessTime'],unit='ms',errors='coerce') # NaT
logger.debug("{0:s}{1:s} col ProcessTime processed.".format(logStr,logFileTail))
#Value
df['Value']=df.Value.str.replace(',', '.') # Exception: Line: 1137: <class 'AttributeError'>: Can only use .str accessor with string values!
df['Value']=fValueFct(df['Value'].values) # df['ValueProcessed'].apply(fValueFct)
logger.debug("{0:s}{1:s} col Value processed.".format(logStr,logFileTail))
#Strings
for col in ['ID','Direction','SubSystem','LogLevel','State','Remark']:
df[col]=df[col].astype(str)
logger.debug("{0:s}{1:s} String-cols processed.".format(logStr,logFileTail))
#1618249551621 STD CVD 1615442324000 p-p BEGIN_OF_NEW_CONTROL_VOLUME 6-10-SV1-RB~6-10-BID-RB NULL NULL # String in beiden Faellen (Linux und Windows) gleich?
#1618249551621 STD CVD <- 156 CV_ID
##ScenTime
## SubSystem Direction ProcessTime ID Value State Remark
## Linux ---
## 1615029280000 INF SQC Starting cycle for 2021-03-06 12:14:38.000
## 1615029280000 STD LDS MCL 1615029278000 Main cycle loop 06.03.2021 12:14:38.000 (ScenTime: Tag und Zeit in Klartext; Spalte ProcessTime ScenTime!)
## Windows ---
## 1618256150711 STD SQC 1615457121000 Main cycle loop 11:05:21.000 (ScenTime-Zeit in Klartext; Spalte ProcessTime ScenTime!)
dfScenTime=df[df['ID']=='Main cycle loop'][['ProcessTime']]
dfScenTime.rename(columns={'ProcessTime':'ScenTime'},inplace=True)
df=df.join(dfScenTime)
df['ScenTime']=df['ScenTime'].fillna(method='ffill')
df['ScenTime']=df['ScenTime'].fillna(method='bfill')
if df['ScenTime'].isnull().values.all():
logger.debug("{0:s}Keine Zeile mit ID=='Main cycle loop' gefunden. ScenTime zu #LogTime gesetzt.".format(logStr))
df['ScenTime']=df['#LogTime'] # if no row with ID=='Main cycle loop' was found, ScenTime is set to #LogTime
# finalize
df=df[['#LogTime','LogLevel','SubSystem','Direction','ProcessTime','ID','Value','ScenTime','State','Remark']]
logger.debug("{0:s}{1:s} processed with nRows: {2:s} (None if all).".format(logStr,logFileTail,str(nRows)))
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return df
def rebuildLookUpDfZips(self,zip7Files,readWithDictReader=True,readWindowsLog=False):
"""
(re-)initialize with zip7Files
only persistent outcome is lookUpDfZips (Attribute and H5-Persistence)
lookUpdf is changed but not H5-stored
(Re-)Init with AppLog(h5File=...) after using rebuildLookUpDfZips to obtain old lookUpdf
main Usage of rebuildLookUpDfZips is to determine which zip7Files to add by i.e.:
zip7FilesToAdd=lx.lookUpDfZips[~(lx.lookUpDfZips['LastTime']<timeStartAusschnitt) & ~(lx.lookUpDfZips['FirstTime']>timeEndAusschnitt)].index.to_list()
"""
#noDfStorage=False
logStr = "{0:s}.{1:s}: ".format(self.__class__.__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
#self.__initzip7File(zip7File=zip7Files[0],h5FileName=h5FileName,nRows=1,readWithDictReader=True)
for zip7File in zip7Files:
logger.info("{0:s}addZip7File: {1:s}".format(logStr,zip7File))
self.addZip7File(zip7File,firstsAndLastsLogsOnly=True,nRows=1,readWithDictReader=readWithDictReader,noDfStorage=True,readWindowsLog=readWindowsLog)
logger.debug("{0:s}lookUpDf: {1:s}".format(logStr,self.lookUpDf.to_string()))
df=self.lookUpDf.groupby(by='zipName').agg(['min', 'max'])
logger.debug("{0:s}df: {1:s}".format(logStr,df.to_string()))
minTime=df.loc[:,('FirstTime','min')]
maxTime=df.loc[:,('LastTime','max')]
minFileNr=df.loc[:,('logName','min')].apply(lambda x: int(re.search(logFilenamePattern,x).group(3)))
maxFileNr=df.loc[:,('logName','max')].apply(lambda x: int(re.search(logFilenamePattern,x).group(3)))
s=(maxTime-minTime)/(maxFileNr-minFileNr)
lookUpDfZips=s.to_frame().rename(columns={0:'TimespanPerLog'})
lookUpDfZips['NumOfFiles']=maxFileNr-minFileNr
lookUpDfZips['FirstTime']=minTime
lookUpDfZips['LastTime']=maxTime
lookUpDfZips['minFileNr']=minFileNr
lookUpDfZips['maxFileNr']=maxFileNr
lookUpDfZips=lookUpDfZips[['FirstTime','LastTime','TimespanPerLog','NumOfFiles','minFileNr','maxFileNr']]
# write lookUpDfZips
self.lookUpDfZips=lookUpDfZips
self.__toH5('lookUpDfZips',self.lookUpDfZips)
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
def addZip7File(self,zip7File,firstsAndLastsLogsOnly=False,nRows=None,readWithDictReader=False,noDfStorage=False,readWindowsLog=False):
"""
add zip7File
Args:
* zipFile: zipFile which LogFiles shall be added
* Args for internal Usage:
* firstsAndLastsLogsOnly (True dann)
* nRows (1 dann)
* readWithDictReader (True dann)
d.h. es werden nur die ersten und letzten Logs pro Zip angelesen und dort auch nur die 1. und letzte Zeile und das mit DictReader
"""
logStr = "{0:s}.{1:s}: ".format(self.__class__.__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
# if zip7File does not exist ...
if not os.path.exists(zip7File):
logStrFinal="{0:s}zip7File {1:s} not existing.".format(logStr,zip7File)
logger.debug(logStrFinal)
raise LxError(logStrFinal)
else:
(zip7FileHead, zip7FileTail)=os.path.split(zip7File)
logger.debug("{0:s}zip7FileHead (leer wenn zip7 im selben Verz.): {1:s} zip7FileTail: {2:s}.".format(logStr,zip7FileHead,zip7FileTail))
logger.info("{0:s}zip7File: {1:s} ...".format(logStr,zip7File))
tmpDir=os.path.dirname(zip7File)
tmpDirContent=glob.glob(tmpDir)
with py7zr.SevenZipFile(zip7File, 'r') as zip7FileObj:
allLogFiles = zip7FileObj.getnames()
allLogFilesLen=len(allLogFiles)
logger.debug("{0:s}{1:s}: len(getnames()): {2:d}.".format(logStr,zip7FileTail,allLogFilesLen))
extDirLstTBDeleted=[]
extDirLstExistingLogged=[]
for idx,logFileNameInZip in enumerate(allLogFiles):
if firstsAndLastsLogsOnly:
if idx not in [0,1,allLogFilesLen-2,allLogFilesLen-1]:
#logger.debug("{0:s}idx: {1:d} item: {2:s} NOT processed ...".format(logStr,idx,logFileNameInZip))
continue
logger.info("{0:s}idx: {1:d} item: {2:s} ...".format(logStr,idx,logFileNameInZip))
# the file that 7Zip will create on extract
logFile=os.path.join(tmpDir,logFileNameInZip)
(logFileHead, logFileTail)=os.path.split(logFile)
# logFileNameInZip may denote a directory rather than a file
(name, ext)=os.path.splitext(logFileNameInZip)
if ext == '':
# Verzeichnis!
extDir=os.path.join(tmpDir,logFileNameInZip)
(extDirHead, extDirTail)=os.path.split(extDir)
if os.path.exists(extDir) and extDir in tmpDirContent:
logger.debug("{0:s}idx: {1:d} extDir: {2:s} existiert(e) bereits.".format(logStr,idx,extDirTail))
extDirLstExistingLogged.append(extDir)
elif os.path.exists(extDir) and extDir not in tmpDirContent:
logger.debug("{0:s}idx: {1:d} extDir: {2:s} existiert(e) noch nicht.".format(logStr,idx,extDirTail))
extDirLstTBDeleted.append(extDir)
elif not os.path.exists(extDir) and extDir not in tmpDirContent:
logger.debug("{0:s}idx: {1:d} extDir: {2:s} existiert(e) noch nicht.".format(logStr,idx,extDirTail))
extDirLstTBDeleted.append(extDir)
# no log file to process ...
continue
# logFileNameInZip denotes a file
if os.path.exists(logFile):
isFile = os.path.isfile(logFile)
if isFile:
logger.debug("{0:s}idx: {1:d} Log: {2:s} existiert bereits. Wird durch Extrakt ueberschrieben werden.".format(logStr,idx,logFileTail))
logFileTBDeleted=False
else:
logFileTBDeleted=False
else:
logger.debug("{0:s}idx: {1:d} Log: {2:s} existiert nicht. Wird extrahiert, dann prozessiert und dann wieder geloescht.".format(logStr,idx,logFileTail))
logFileTBDeleted=True
# extrahieren
logger.debug("{0:s}Log: {1:s} wird extrahiert ... ".format(logStr,logFileTail))
import lzma
try:
zip7FileObj.extract(path=tmpDir,targets=logFileNameInZip)
except lzma.LZMAError:
logger.warning("{0:s}Log: {1:s} nicht erfolgreich extrahiert - continue ... ".format(logStr,logFileTail))
continue
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
logger.debug("{0:s}Log: {1:s} wurde extrahiert. ".format(logStr,logFileTail))
if os.path.exists(logFile):
pass
else:
logger.warning("{0:s}idx: {1:d} Log: {2:s} NOT extracted?! Continue with next Name in 7Zip.".format(logStr,idx,logFileTail))
# nichts zu prozessieren ...
continue
# ...
if os.path.isfile(logFile):
df = self.__processALogFile(logFile=logFile,nRows=nRows,readWithDictReader=readWithDictReader,readWindowsLog=readWindowsLog)
if df is None:
logger.warning("{0:s}idx: {1:d} Log: {2:s} NOT processed?! Continue with next Name in 7Zip.".format(logStr,idx,logFileTail))
# nichts zu prozessieren ...
continue
# ...
# delete it again right away
if os.path.exists(logFile) and logFileTBDeleted:
if os.path.isfile(logFile):
os.remove(logFile)
logger.debug("{0:s}idx: {1:d} Log: {2:s} wieder geloescht.".format(logStr,idx,logFileTail))
# ...
(name, ext)=os.path.splitext(logFileTail)
key='Log'+name
if zip7FileHead != '':
zipName=os.path.join(os.path.relpath(zip7FileHead),zip7FileTail)
else:
zipName=zip7FileTail
# write df
self.__toH5(key,df,updLookUpDf=True,logName=logFileTail,zipName=zipName,noDfStorage=noDfStorage)#os.path.join(os.path.relpath(zip7FileHead),zip7FileTail))
# then write lookUpDf right away ...
self.__toH5('lookUpDf',self.lookUpDf,noDfStorage=noDfStorage)
for dirName in extDirLstTBDeleted:
if os.path.exists(dirName):
if os.path.isdir(dirName):
(dirNameHead, dirNameTail)=os.path.split(dirName)
if len(os.listdir(dirName)) == 0:
os.rmdir(dirName)
logger.debug("{0:s}dirName: {1:s} existierte nicht und wurde wieder geloescht.".format(logStr,dirNameTail))
else:
logger.info("{0:s}dirName: {1:s} existiert mit nicht leerem Inhalt?!".format(logStr,dirNameTail))
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
def getTotalLogTime(self):
"""
Returns Tuple: firstTime,lastTime,tdTotalGross,tdTotal,tdBetweenFilesTotal # gross log time, net log time, sum of all gaps between 2 consecutive log files (should equal gross - net)
"""
logStr = "{0:s}.{1:s}: ".format(self.__class__.__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
# Inhalt der Logs
tdTotal=pd.Timedelta('0 Seconds')
tdBetweenFilesTotal=pd.Timedelta('0 Seconds')
for idx,(index,row) in enumerate(self.lookUpDf.iterrows()):
if idx > 0:
tdBetweenFiles=row["FirstTime"]-lastTime
tdBetweenFilesTotal=tdBetweenFilesTotal+tdBetweenFiles
if tdBetweenFiles > pd.Timedelta('0 second'):
if tdBetweenFiles > pd.Timedelta('1 second'):
logger.info("{:s}Zeitdifferenz: {!s:s} zwischen {:s} ({:s}) und {:s} ({:s})".format(logStr,
str(tdBetweenFiles).replace('days','Tage')
,lastFile,lastZip
,row["logName"],row["zipName"]
))
pass
if tdBetweenFiles < pd.Timedelta('0 second'):
if tdBetweenFiles < -pd.Timedelta('1 second'):
pass
logger.info("{:s}Zeitueberlappung > 1s: {!s:s} zwischen {:s} ({:s}) und {:s} ({:s})".format(logStr,
str(tdBetweenFiles).replace('days','Tage')
,lastFile,lastZip
,row["logName"],row["zipName"]
))
td=row["LastTime"]-row["FirstTime"]
if type(td) == pd.Timedelta:
tdTotal=tdTotal+td
else:
print(index)# Fehler!
lastTime=row["LastTime"]
lastFile=row["logName"]
lastZip=row["zipName"]
firstTime=self.lookUpDf.iloc[0]["FirstTime"]
lastTime=self.lookUpDf.iloc[-1]["LastTime"]
tdTotalGross=lastTime-firstTime
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return firstTime,lastTime,tdTotalGross,tdTotal,tdBetweenFilesTotal
def extractTCsToH5s(self,dfID=pd.DataFrame(),timeStart=None,timeEnd=None,TCsdfOPCFill=TCsdfOPCFill):
"""
extracts TC-Data (and CVD-Data) from H5 to separate H5-Files (postfixes: _TCxxx.h5 and _CVD.h5)
TCsdfOPCFill: if True, the NULLs in TCsdfOPC are filled; default: False
if timeStart != None: data is appended to existing .h5s; otherwise they are overwritten
"""
logStr = "{0:s}.{1:s}: ".format(self.__class__.__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
# create the _TCxxx.h5 files (OPC, SirCalc, LDSIn, LDSRes1, LDSRes2 (,LDSRes)) and _CVD.h5
# over all dfs in H5 (taking timeStart and timeEnd into account)
# read
# determine the TC subset: 'ID','ProcessTime','ScenTime','SubSystem','Value','Direction'
# rows with 'Value' isnull() are NOT read
# i.e. with a logfile semantic that resets a value via NULL rows, the value in a stop-plot output would stay at the last non-NULL value ...
# ... for now ...
# build the subsets: ['TCsdfOPC','TCsdfSirCalc','TCsdfLDSIn','TCsdfLDSRes1','TCsdfLDSRes2' (,'TCsdfLDSRes')]
# ... NULLs (NaNs) arise from the pivot with index = time: not every time (the superset) has a value for every ID
# store
(name,ext)=os.path.splitext(self.h5File)
TCPost='_TC'
self.h5FileOPC=name+TCPost+'OPC'+ext
self.h5FileSirCalc=name+TCPost+'SirCalc'+ext
self.h5FileLDSIn=name+TCPost+'LDSIn'+ext
if not dfID.empty:
# Attribute
self.h5FileLDSRes1=name+TCPost+'LDSRes1'+ext
self.h5FileLDSRes2=name+TCPost+'LDSRes2'+ext
# Komplement wird geloescht
h5FileLDSRes=name+TCPost+'LDSRes'+ext
try:
# wenn TC-H5 existiert wird es geloescht
if os.path.exists(h5FileLDSRes):
os.remove(h5FileLDSRes)
logger.debug("{0:s}Existing H5-File {1:s} deleted.".format(logStr,h5FileLDSRes))
del self.h5FileLDSRes
except:
pass
else:
# Attribut
self.h5FileLDSRes=name+TCPost+'LDSRes'+ext
# Komplemente werden geloescht
h5FileLDSRes1=name+TCPost+'LDSRes1'+ext
h5FileLDSRes2=name+TCPost+'LDSRes2'+ext
try:
# wenn TC-H5 existiert wird es geloescht
if os.path.exists(h5FileLDSRes1):
os.remove(h5FileLDSRes1)
logger.debug("{0:s}Existing H5-File {1:s} deleted.".format(logStr,h5FileLDSRes1))
# wenn TC-H5 existiert wird es geloescht
if os.path.exists(h5FileLDSRes2):
os.remove(h5FileLDSRes2)
logger.debug("{0:s}Existing H5-File {1:s} deleted.".format(logStr,h5FileLDSRes2))
del self.h5FileLDSRes1
del self.h5FileLDSRes2
except:
pass
self.h5FileCVD=name+'_'+'CVD'+ext
h5Keys,h5KeysPost=self.__getH5Keys(timeStart=timeStart,timeEnd=timeEnd)
h5KeysOPC=['TCsOPC'+x for x in h5KeysPost]
h5KeysSirCalc=['TCsSirCalc'+x for x in h5KeysPost]
h5KeysLDSIn=['TCsLDSIn'+x for x in h5KeysPost]
h5KeysLDSRes1=['TCsLDSRes1'+x for x in h5KeysPost]
h5KeysLDSRes2=['TCsLDSRes2'+x for x in h5KeysPost]
h5KeysLDSRes=['TCsLDSRes'+x for x in h5KeysPost]
h5KeysCVD=['CVDRes'+x for x in h5KeysPost]
h5KeysAll=zip(h5Keys,h5KeysOPC,h5KeysSirCalc,h5KeysLDSIn,h5KeysLDSRes1,h5KeysLDSRes2,h5KeysLDSRes,h5KeysCVD)
for idx,(h5Key,h5KeyOPC,h5KeySirCalc,h5KeyLDSIn,h5KeyLDSRes1,h5KeyLDSRes2,h5KeyLDSRes,h5KeyCVD) in enumerate(h5KeysAll):
#H5-Write-Modus
if idx==0:
if timeStart!=None:
mode='a'
else:
mode='w'
else:
mode='a'
logger.info("{0:s}Get (read_hdf) df with h5Key: {1:s} ...".format(logStr,h5Key))
df=pd.read_hdf(self.h5File, key=h5Key)
# CVD -------------------------------------------------------------------------------------------------
dfCVD=df[df['SubSystem']=='CVD']
df=df[['ID','ProcessTime','ScenTime','SubSystem','Value','Direction']]
df['Value']=df['Value'].apply(lambda x: fTCCast(x))
df=df[~(df['Value'].isnull())]
if not dfID.empty:
TCsdfOPC,TCsdfSirCalc,TCsdfLDSIn,TCsdfLDSRes1,TCsdfLDSRes2=self.getTCsFromDf(df,dfID=dfID,TCsdfOPCFill=TCsdfOPCFill)
else:
TCsdfOPC,TCsdfSirCalc,TCsdfLDSIn,TCsdfLDSRes=self.getTCsFromDf(df,dfID=dfID,TCsdfOPCFill=TCsdfOPCFill)
logger.debug("{0:s}{1:s}".format(logStr,'Write ...'))
TCsdfOPC.to_hdf(self.h5FileOPC,h5KeyOPC, mode=mode)
TCsdfSirCalc.to_hdf(self.h5FileSirCalc,h5KeySirCalc, mode=mode)
TCsdfLDSIn.to_hdf(self.h5FileLDSIn,h5KeyLDSIn, mode=mode)
if not dfID.empty:
TCsdfLDSRes1.to_hdf(self.h5FileLDSRes1,h5KeyLDSRes1, mode=mode)
TCsdfLDSRes2.to_hdf(self.h5FileLDSRes2,h5KeyLDSRes2, mode=mode)
else:
TCsdfLDSRes.to_hdf(self.h5FileLDSRes,h5KeyLDSRes, mode=mode)
# ---
dfCVD.to_hdf(self.h5FileCVD,h5KeyCVD, mode=mode)
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return
def shrinkH5File(self):
"""
the dfs are deleted in the H5-File
extractTCsToH5s ### MUST ### have been run before
after shrinkH5File the actual data is no longer available in the master H5
"""
logStr = "{0:s}.{1:s}: ".format(self.__class__.__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
# H5 existiert
if os.path.exists(self.h5File):
# Keys available
with pd.HDFStore(self.h5File) as h5Store:
h5Keys=sorted(h5Store.keys()) # /Log20201216_0000001
logger.debug("{0:s}h5Keys available: {1:s}".format(logStr,str(h5Keys)))
for key in h5Keys:
if re.match('(^/Log)',key):
logger.debug("{0:s}key removed: {1:s}".format(logStr,str(key)))
h5Store.remove(key.replace(h5KeySep,''))
else:
logger.debug("{0:s}key NOT removed: {1:s}".format(logStr,str(key)))
with pd.HDFStore(self.h5File) as h5Store:
pass
shrinkCmd="ptrepack --chunkshape=auto --propindexes --complib=blosc "+self.h5File+" "+self.h5File+".Shrinked"
logger.debug("{0:s}shrinkCmd: {1:s}".format(logStr,shrinkCmd))
if os.path.exists(self.h5File+".Shrinked"):
os.remove(self.h5File+".Shrinked")
os.system(shrinkCmd)
os.remove(self.h5File)
os.rename(self.h5File+".Shrinked",self.h5File)
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
def get(self,timeStart=None,timeEnd=None,filter_fct=None,filterAfter=True,useRawHdfAPI=False):
"""
returns df with filter_fct applied
"""
logStr = "{0:s}.{1:s}: ".format(self.__class__.__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
dfRet=None
try:
dfLst=[]
dfLookUpTimes=self.lookUpDf
if timeStart!=None:
dfLookUpTimes=dfLookUpTimes[dfLookUpTimes['LastTime']>=timeStart] # endet nach dem Anfang oder EndeFile ist Anfang
if timeEnd!=None:
dfLookUpTimes=dfLookUpTimes[dfLookUpTimes['FirstTime']<=timeEnd] # beginnt vor dem Ende oder AnfangFile ist Ende
dfLookUpTimesIdx=dfLookUpTimes.set_index('logName')
dfLookUpTimesIdx=dfLookUpTimesIdx.filter(regex=r'\.log$',axis=0) # keep only .log entries
h5Keys=['Log'+re.search(logFilenameHeadPattern,logFile).group(1) for logFile in dfLookUpTimesIdx.index]
logger.debug("{0:s}h5Keys used: {1:s}".format(logStr,str(h5Keys)))
if useRawHdfAPI:
with pd.HDFStore(self.h5File) as h5Store:
for h5Key in h5Keys:
logger.debug("{0:s}Get (pd.HDFStore) df with h5Key: {1:s} ...".format(logStr,h5Key))
df=h5Store[h5Key]
if not filterAfter and filter_fct != None:
logger.debug("{0:s}Apply Filter ...".format(logStr))
df=pd.DataFrame(df[df.apply(filter_fct,axis=1)].values,columns=df.columns)
dfLst.append(df)
else:
for h5Key in h5Keys:
logger.debug("{0:s}Get (read_hdf) df with h5Key: {1:s} ...".format(logStr,h5Key))
df=pd.read_hdf(self.h5File, key=h5Key)
if not filterAfter and filter_fct != None:
logger.debug("{0:s}Apply Filter ...".format(logStr))
df=pd.DataFrame(df[df.apply(filter_fct,axis=1)].values,columns=df.columns)
dfLst.append(df)
logger.debug("{0:s}{1:s}".format(logStr,'Extraction finished. Concat ...'))
dfRet=pd.concat(dfLst)
del dfLst
if filterAfter and filter_fct != None:
logger.debug("{0:s}Apply Filter ...".format(logStr))
dfRet=pd.DataFrame(dfRet[dfRet.apply(filter_fct,axis=1)].values,columns=dfRet.columns)
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return dfRet
def getFromZips(self,timeStart=None,timeEnd=None,filter_fct=None,filterAfter=True,readWithDictReader=False,readWindowsLog=False):
"""
returns df from Zips
the data is read from the zips: extract the log, parse it, delete it again
the initialization must have been done with AppLog(zip7Files=...) since only then self.lookUpDfZips exists
"""
logStr = "{0:s}.{1:s}: ".format(self.__class__.__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
dfRet=None
try:
dfLst=[]
timeStart=pd.Timestamp(timeStart)
timeEnd=pd.Timestamp(timeEnd)
# zips that need to be processed
dfLookUpZips=self.lookUpDfZips
if timeStart!=None:
dfLookUpZips=dfLookUpZips[dfLookUpZips['LastTime']>=timeStart] # endet nach dem Anfang oder EndeFile ist Anfang
if timeEnd!=None:
dfLookUpZips=dfLookUpZips[dfLookUpZips['FirstTime']<=timeEnd] # beginnt vor dem Ende oder AnfangFile ist Ende
for index, row in dfLookUpZips.iterrows():
zip7File=index
(zip7FileHead, zip7FileTail)=os.path.split(zip7File)
dTime=timeStart-row['FirstTime']
nStart = int(dTime.total_seconds()/row['TimespanPerLog'].total_seconds())
dTime=timeEnd-timeStart
nDelta = int(dTime.total_seconds()/row['TimespanPerLog'].total_seconds())+1
nEnd=nStart+nDelta
logger.debug("{0:s}zip7File: {1:s}: Start: {2:d}/{3:07d} End: {4:d}/{5:07d}".format(logStr,zip7FileTail
,nStart,nStart+row['minFileNr']
,nStart+nDelta,nStart+row['minFileNr']+nDelta))
try:
# wenn zip7File nicht existiert ...
if not os.path.exists(zip7File):
logStrFinal="{0:s}zip7File {1:s} not existing.".format(logStr,zip7File)
logger.debug(logStrFinal)
raise LxError(logStrFinal)
tmpDir=os.path.dirname(zip7File)
tmpDirContent=glob.glob(tmpDir)
with py7zr.SevenZipFile(zip7File, 'r') as zip7FileObj:
allLogFiles = zip7FileObj.getnames()
allLogFilesLen=len(allLogFiles)
logger.debug("{0:s}{1:s}: len(getnames()): {2:d}.".format(logStr,zip7FileTail,allLogFilesLen))
extDirLstTBDeleted=[]
extDirLstExistingLogged=[]
idxEff=0
for idx,logFileNameInZip in enumerate(allLogFiles):
if idx < nStart-idxEff or idx > nEnd+idxEff:
continue
logger.debug("{0:s}idx: {1:d} item: {2:s} ...".format(logStr,idx,logFileNameInZip))
# die Datei die 7Zip bei extract erzeugen wird
logFile=os.path.join(tmpDir,logFileNameInZip)
(logFileHead, logFileTail)=os.path.split(logFile)
# evtl. bezeichnet logFileNameInZip keine Datei sondern ein Verzeichnis
(name, ext)=os.path.splitext(logFileNameInZip)
if ext == '':
# Verzeichnis!
extDir=os.path.join(tmpDir,logFileNameInZip)
(extDirHead, extDirTail)=os.path.split(extDir)
if os.path.exists(extDir) and extDir in tmpDirContent:
logger.debug("{0:s}idx: {1:d} extDir: {2:s} existiert(e) bereits.".format(logStr,idx,extDirTail))
extDirLstExistingLogged.append(extDir)
elif os.path.exists(extDir) and extDir not in tmpDirContent:
logger.debug("{0:s}idx: {1:d} extDir: {2:s} existiert(e) noch nicht.".format(logStr,idx,extDirTail))
extDirLstTBDeleted.append(extDir)
elif not os.path.exists(extDir) and extDir not in tmpDirContent:
logger.debug("{0:s}idx: {1:d} extDir: {2:s} existiert(e) noch nicht.".format(logStr,idx,extDirTail))
extDirLstTBDeleted.append(extDir)
# kein Logfile zu prozessieren ...
idxEff+=1
continue
# logFileNameInZip bezeichnet eine Datei
if os.path.exists(logFile):
isFile = os.path.isfile(logFile)
if isFile:
logger.debug("{0:s}idx: {1:d} Log: {2:s} existiert bereits. Wird durch Extrakt ueberschrieben werden.".format(logStr,idx,logFileTail))
logFileTBDeleted=False
else:
logFileTBDeleted=False
else:
logger.debug("{0:s}idx: {1:d} Log: {2:s} existiert nicht. Wird extrahiert, dann prozessiert und dann wieder geloescht.".format(logStr,idx,logFileTail))
logFileTBDeleted=True
# extrahieren
zip7FileObj.extract(path=tmpDir,targets=logFileNameInZip)
if os.path.exists(logFile):
pass
else:
logger.warning("{0:s}idx: {1:d} Log: {2:s} NOT extracted?! Continue with next Name in 7Zip.".format(logStr,idx,logFileTail))
# nichts zu prozessieren ...
continue
# ...
if os.path.isfile(logFile):
df = self.__processALogFile(logFile=logFile,readWithDictReader=readWithDictReader,readWindowsLog=readWindowsLog)
if df is None:
logger.warning("{0:s}idx: {1:d} Log: {2:s} NOT processed?! Continue with next Name in 7Zip.".format(logStr,idx,logFileTail))
# nichts zu prozessieren ...
continue
else:
if not filterAfter and filter_fct != None:
logger.debug("{0:s}Apply Filter ...".format(logStr))
df=pd.DataFrame(df[df.apply(filter_fct,axis=1)].values,columns=df.columns)
dfLst.append(df)
# ...
# gleich wieder loeschen
if os.path.exists(logFile) and logFileTBDeleted:
if os.path.isfile(logFile):
os.remove(logFile)
logger.debug("{0:s}idx: {1:d} Log: {2:s} wieder geloescht.".format(logStr,idx,logFileTail))
for dirName in extDirLstTBDeleted:
if os.path.exists(dirName):
if os.path.isdir(dirName):
(dirNameHead, dirNameTail)=os.path.split(dirName)
if len(os.listdir(dirName)) == 0:
os.rmdir(dirName)
logger.debug("{0:s}dirName: {1:s} existierte nicht und wurde wieder geloescht.".format(logStr,dirNameTail))
else:
logger.info("{0:s}dirName: {1:s} existiert mit nicht leerem Inhalt?!".format(logStr,dirNameTail))
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
logger.debug("{0:s}{1:s}".format(logStr,'Extraction finished. Concat ...'))
dfRet=pd.concat(dfLst)
del dfLst
if filterAfter and filter_fct != None:
logger.debug("{0:s}Apply Filter ...".format(logStr))
dfRet=pd.DataFrame(dfRet[dfRet.apply(filter_fct,axis=1)].values,columns=dfRet.columns)
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return dfRet
def getTCs(self,dfID=pd.DataFrame(),timeStart=None,timeEnd=None,TCsdfOPCFill=TCsdfOPCFill,persistent=False,overwrite=True):
"""
returns TCs-dfs
processing of the dfs follows extractTCsToH5s; see there
"""
logStr = "{0:s}.{1:s}: ".format(self.__class__.__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
TCKeys=['TCsdfOPC','TCsdfSirCalc','TCsdfLDSIn','TCsdfLDSRes1','TCsdfLDSRes2a','TCsdfLDSRes2b','TCsdfLDSRes2c']
if persistent:
with pd.HDFStore(self.h5File) as h5Store:
h5Keys=sorted(h5Store.keys())
#logger.debug("{0:s}h5Keys available: {1:s}".format(logStr,str(h5Keys)))
h5KeysStripped=[item.replace(h5KeySep,'') for item in h5Keys]
if set(TCKeys) & set(h5KeysStripped) == set(TCKeys):
if not overwrite:
logger.debug("{0:s}persistent: TCKeys {1:s} existieren alle bereits - return aus H5-File ...".format(logStr,str(TCKeys)))
TCsdfOPC=pd.read_hdf(self.h5File,key='TCsdfOPC')
TCsdfSirCalc=pd.read_hdf(self.h5File,key='TCsdfSirCalc')
TCsdfLDSIn=pd.read_hdf(self.h5File,key='TCsdfLDSIn')
TCsdfLDSRes1=pd.read_hdf(self.h5File,key='TCsdfLDSRes1')
TCsdfLDSRes2a=pd.read_hdf(self.h5File,key='TCsdfLDSRes2a')
TCsdfLDSRes2b=pd.read_hdf(self.h5File,key='TCsdfLDSRes2b')
TCsdfLDSRes2c=pd.read_hdf(self.h5File,key='TCsdfLDSRes2c')
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return TCsdfOPC,TCsdfSirCalc,TCsdfLDSIn,TCsdfLDSRes1,TCsdfLDSRes2a,TCsdfLDSRes2b,TCsdfLDSRes2c
else:
logger.debug("{0:s}persistent: TCKeys {1:s} existieren alle bereits - sollen aber ueberschrieben werden ...".format(logStr,str(TCKeys)))
else:
logger.debug("{0:s}persistent: TCKeys {1:s} existieren nicht (alle) ...".format(logStr,str(TCKeys)))
dfLookUpTimes=self.lookUpDf
if timeStart!=None:
dfLookUpTimes=dfLookUpTimes[dfLookUpTimes['LastTime']>=timeStart] # endet nach dem Anfang oder EndeFile ist Anfang
if timeEnd!=None:
dfLookUpTimes=dfLookUpTimes[dfLookUpTimes['FirstTime']<=timeEnd] # beginnt vor dem Ende oder AnfangFile ist Ende
dfLookUpTimesIdx=dfLookUpTimes.set_index('logName')
dfLookUpTimesIdx=dfLookUpTimesIdx.filter(regex=r'\.log$',axis=0) # keep only .log entries
h5Keys=['Log'+re.search(logFilenameHeadPattern,logFile).group(1) for logFile in dfLookUpTimesIdx.index]
logger.debug("{0:s}h5Keys used: {1:s}".format(logStr,str(h5Keys)))
dfLst=[]
for h5Key in h5Keys:
logger.debug("{0:s}Get (read_hdf) df with h5Key: {1:s} ...".format(logStr,h5Key))
dfSingle=pd.read_hdf(self.h5File, key=h5Key)
dfSingle=dfSingle[['ID','ProcessTime','ScenTime','SubSystem','Value','Direction']]
dfSingle=dfSingle[~(dfSingle['Value'].isnull())]
dfLst.append(dfSingle)
logger.debug("{0:s}{1:s}".format(logStr,'Extraction finished. Concat ...'))
df=pd.concat(dfLst)
del dfLst
logger.debug("{0:s}{1:s}".format(logStr,'Concat finished. Filter & Pivot ...'))
if not dfID.empty:
TCsdfOPC,TCsdfSirCalc,TCsdfLDSIn,TCsdfLDSRes1,TCsdfLDSRes2=self.getTCsFromDf(df,dfID=dfID,TCsdfOPCFill=TCsdfOPCFill)
else:
TCsdfOPC,TCsdfSirCalc,TCsdfLDSIn,TCsdfLDSRes=self.getTCsFromDf(df,dfID=dfID,TCsdfOPCFill=TCsdfOPCFill)
if persistent:
logger.debug("{0:s}peristent: TCKeys {1:s} nach H5-File ...".format(logStr,str(TCKeys)))
TCsdfOPC.to_hdf(self.h5File,key='TCsdfOPC')
TCsdfSirCalc.to_hdf(self.h5File,key='TCsdfSirCalc')
TCsdfLDSIn.to_hdf(self.h5File,key='TCsdfLDSIn')
TCsdfLDSRes1.to_hdf(self.h5File,key='TCsdfLDSRes1')
TCsdfLDSRes2a.to_hdf(self.h5File,key='TCsdfLDSRes2a')
TCsdfLDSRes2b.to_hdf(self.h5File,key='TCsdfLDSRes2b')
TCsdfLDSRes2c.to_hdf(self.h5File,key='TCsdfLDSRes2c')
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
if not dfID.empty:
return TCsdfOPC,TCsdfSirCalc,TCsdfLDSIn,TCsdfLDSRes1,TCsdfLDSRes2#a,TCsdfLDSRes2b,TCsdfLDSRes2c
else:
return TCsdfOPC,TCsdfSirCalc,TCsdfLDSIn,TCsdfLDSRes
def getTCsFromH5s(self,timeStart=None,timeEnd=None, LDSResOnly=False, LDSResColsSpecified=None, LDSResTypeSpecified=None, timeShiftPair=None):
"""
returns several TC-dfs from TC-H5s:
TCsdfOPC,TCsdfSirCalc,TCsdfLDSIn,TCsdfLDSRes1,TCsdfLDSRes2
or
TCsdfOPC,TCsdfSirCalc,TCsdfLDSIn,TCsdfLDSRes
LDSResOnly:
TCsdfLDSRes1,TCsdfLDSRes2
or
TCsdfLDSRes
LDSResColsSpecified:
return in LDSRes df(s) only the specified cols
all cols are returned otherwise
LDSResTypeSpecified:
return TCsdfLDSRes1 (SEG) for 'SEG' or TCsdfLDSRes2 (Druck) for 'Druck'
both are returned otherwise
timeShiftPair: (period,freq): i.e. (1,'H'); if not None the index is shifted
"""
logStr = "{0:s}.{1:s}: ".format(self.__class__.__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
try:
self.h5FileLDSRes1
Res2=True
except:
Res2=False
TCsdfOPC=pd.DataFrame()
TCsdfSirCalc=pd.DataFrame()
TCsdfLDSIn=pd.DataFrame()
if Res2:
TCsdfLDSRes1=pd.DataFrame()
TCsdfLDSRes2=pd.DataFrame()
else:
TCsdfLDSRes=pd.DataFrame()
dfLookUpTimes=self.lookUpDf
if timeStart!=None:
dfLookUpTimes=dfLookUpTimes[dfLookUpTimes['LastTime']>=timeStart] # endet nach dem Anfang oder EndeFile ist Anfang
if timeEnd!=None:
dfLookUpTimes=dfLookUpTimes[dfLookUpTimes['FirstTime']<=timeEnd] # beginnt vor dem Ende oder AnfangFile ist Ende
dfLookUpTimesIdx=dfLookUpTimes.set_index('logName')
dfLookUpTimesIdx=dfLookUpTimesIdx.filter(regex=r'\.log$',axis=0) # keep only .log entries
h5Keys=['Log'+re.search(logFilenameHeadPattern,logFile).group(1) for logFile in dfLookUpTimesIdx.index]
logger.debug("{0:s}h5Keys used: {1:s}".format(logStr,str(h5Keys)))
h5KeysOPC=['TCsOPC'+re.search(logFilenameHeadPattern,logFile).group(1) for logFile in dfLookUpTimesIdx.index]
h5KeysSirCalc=['TCsSirCalc'+re.search(logFilenameHeadPattern,logFile).group(1) for logFile in dfLookUpTimesIdx.index]
h5KeysLDSIn=['TCsLDSIn'+re.search(logFilenameHeadPattern,logFile).group(1) for logFile in dfLookUpTimesIdx.index]
h5KeysLDSRes1=['TCsLDSRes1'+re.search(logFilenameHeadPattern,logFile).group(1) for logFile in dfLookUpTimesIdx.index]
h5KeysLDSRes2=['TCsLDSRes2'+re.search(logFilenameHeadPattern,logFile).group(1) for logFile in dfLookUpTimesIdx.index]
h5KeysLDSRes=['TCsLDSRes'+re.search(logFilenameHeadPattern,logFile).group(1) for logFile in dfLookUpTimesIdx.index]
h5KeysAll=zip(h5Keys,h5KeysOPC,h5KeysSirCalc,h5KeysLDSIn,h5KeysLDSRes1,h5KeysLDSRes2,h5KeysLDSRes)
for idx,(h5Key,h5KeyOPC,h5KeySirCalc,h5KeyLDSIn,h5KeyLDSRes1,h5KeyLDSRes2,h5KeyLDSRes) in enumerate(h5KeysAll):
if not LDSResOnly:
#logger.debug("{0:s}{1:s}".format(logStr,'TCsdfOPC ...'))
TCsdfOPC=pd.read_hdf(self.h5FileOPC,h5KeyOPC)
#logger.debug("{0:s}{1:s}".format(logStr,'TCsdfSirCalc ...'))
TCsdfSirCalc=pd.read_hdf(self.h5FileSirCalc,h5KeySirCalc)
#logger.debug("{0:s}{1:s}".format(logStr,'TCsdfLDSIn ...'))
TCsdfLDSIn=pd.read_hdf(self.h5FileLDSIn,h5KeyLDSIn)
if Res2:
if LDSResTypeSpecified == None or LDSResTypeSpecified=='SEG':
#logger.debug("{0:s}{1:s}".format(logStr,'TCsdfLDSRes1 ...'))
TCsdfLDSRes1=pd.read_hdf(self.h5FileLDSRes1,h5KeyLDSRes1)
if LDSResTypeSpecified == None or LDSResTypeSpecified=='Druck':
#logger.debug("{0:s}{1:s}".format(logStr,'TCsdfLDSRes2 ...'))
TCsdfLDSRes2=pd.read_hdf(self.h5FileLDSRes2,h5KeyLDSRes2)
else:
#logger.debug("{0:s}{1:s}".format(logStr,'TCsdfLDSRes ...'))
TCsdfLDSRes=pd.read_hdf(self.h5FileLDSRes,h5KeyLDSRes)
if LDSResColsSpecified != None:
if Res2:
if LDSResTypeSpecified == None or LDSResTypeSpecified=='SEG':
#logger.debug("{0:s}{1:s} {2:s}".format(logStr,'TCsdfLDSRes1 Filter ...',str(LDSResColsSpecified)))
TCsdfLDSRes1=TCsdfLDSRes1.filter(items=LDSResColsSpecified)
if LDSResTypeSpecified == None or LDSResTypeSpecified=='Druck':
#logger.debug("{0:s}{1:s}".format(logStr,'TCsdfLDSRes2 Filter ...'))
TCsdfLDSRes2=TCsdfLDSRes2.filter(items=LDSResColsSpecified)
else:
#logger.debug("{0:s}{1:s}".format(logStr,'TCsdfLDSRes Filter ...'))
TCsdfLDSRes=TCsdfLDSRes.filter(items=LDSResColsSpecified)
if idx==0:
if not LDSResOnly:
TCsdfOPCLst=[]
TCsdfSirCalcLst=[]
TCsdfLDSInLst=[]
if Res2:
if LDSResTypeSpecified == None or LDSResTypeSpecified=='SEG':
TCsdfLDSRes1Lst=[]
if LDSResTypeSpecified == None or LDSResTypeSpecified=='Druck':
TCsdfLDSRes2Lst=[]
else:
TCsdfLDSResLst=[]
#logger.debug("{0:s}Append ...".format(logStr))
if not LDSResOnly:
TCsdfOPCLst.append(TCsdfOPC)
TCsdfSirCalcLst.append(TCsdfSirCalc)
TCsdfLDSInLst.append(TCsdfLDSIn)
if Res2:
if LDSResTypeSpecified == None or LDSResTypeSpecified=='SEG':
TCsdfLDSRes1Lst.append(TCsdfLDSRes1)
if LDSResTypeSpecified == None or LDSResTypeSpecified=='Druck':
TCsdfLDSRes2Lst.append(TCsdfLDSRes2)
else:
TCsdfLDSResLst.append(TCsdfLDSRes)
logger.debug("{0:s}Concat ...".format(logStr))
if not LDSResOnly:
TCsdfOPC=pd.concat(TCsdfOPCLst)
TCsdfSirCalc=pd.concat(TCsdfSirCalcLst)
TCsdfLDSIn=pd.concat(TCsdfLDSInLst)
if timeShiftPair != None:
(period,freq)=timeShiftPair
logger.debug("{0:s}timeShift TCsdfOPC,TCsdfSirCalc,TCsdfLDSIn by {1:d} {2:s} ...".format(logStr,period,freq))
for df in TCsdfOPC,TCsdfSirCalc,TCsdfLDSIn:
df.index=df.index.shift(period,freq=freq)
if Res2:
if LDSResTypeSpecified == None or LDSResTypeSpecified=='SEG':
TCsdfLDSRes1=pd.concat(TCsdfLDSRes1Lst)
if LDSResTypeSpecified == None or LDSResTypeSpecified=='Druck':
TCsdfLDSRes2=pd.concat(TCsdfLDSRes2Lst)
if timeShiftPair != None:
(period,freq)=timeShiftPair
if LDSResTypeSpecified == None or LDSResTypeSpecified=='SEG':
#for df in TCsdfLDSRes1:
logger.debug("{:s}timeShift LDSRes1 by {:d} {:s} Ist: {!s:s} {!s:s} ...".format(logStr,period,freq,TCsdfLDSRes1.index[0],TCsdfLDSRes1.index[-1]))
TCsdfLDSRes1.index=TCsdfLDSRes1.index.shift(period,freq=freq)
if LDSResTypeSpecified == None or LDSResTypeSpecified=='Druck':
#for df in TCsdfLDSRes2:
logger.debug("{:s}timeShift LDSRes2 by {:d} {:s} Ist: {!s:s} {!s:s} ...".format(logStr,period,freq,TCsdfLDSRes2.index[0],TCsdfLDSRes2.index[-1]))
TCsdfLDSRes2.index=TCsdfLDSRes2.index.shift(period,freq=freq)
else:
TCsdfLDSRes=pd.concat(TCsdfLDSResLst)
if timeShiftPair != None:
(period,freq)=timeShiftPair
logger.debug("{0:s}timeShift LDSRes by {1:d} {2:s} ...".format(logStr,period,freq))
TCsdfLDSRes.index=TCsdfLDSRes.index.shift(period,freq=freq)
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
if not LDSResOnly:
if Res2:
return TCsdfOPC,TCsdfSirCalc,TCsdfLDSIn,TCsdfLDSRes1,TCsdfLDSRes2
else:
return TCsdfOPC,TCsdfSirCalc,TCsdfLDSIn,TCsdfLDSRes
else:
if Res2:
if LDSResTypeSpecified == None:
return TCsdfLDSRes1,TCsdfLDSRes2
elif LDSResTypeSpecified=='SEG':
return TCsdfLDSRes1
elif LDSResTypeSpecified=='Druck':
return TCsdfLDSRes2
else:
return TCsdfLDSRes
def __getH5Keys(self,timeStart=None,timeEnd=None):
"""
returns h5Keys (keys for the log files in h5File), h5KeysPost (key postfixes for the dfs in all other h5 files)
"""
logStr = "{0:s}.{1:s}: ".format(self.__class__.__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
dfLookUpTimes=self.lookUpDf
if timeStart!=None:
dfLookUpTimes=dfLookUpTimes[dfLookUpTimes['LastTime']>=timeStart] # endet nach dem Anfang oder EndeFile ist Anfang
if timeEnd!=None:
dfLookUpTimes=dfLookUpTimes[dfLookUpTimes['FirstTime']<=timeEnd] # beginnt vor dem Ende oder AnfangFile ist Ende
dfLookUpTimesIdx=dfLookUpTimes.set_index('logName')
#dfLookUpTimesIdx.filter(regex='\.log$',axis=0)
h5Keys=['Log'+re.search(logFilenameHeadPattern,logFile).group(1) for logFile in dfLookUpTimesIdx.index]
logger.debug("{0:s}h5Keys: {1:s}".format(logStr,str(h5Keys)))
h5KeysPost=[re.search(logFilenameHeadPattern,logFile).group(1) for logFile in dfLookUpTimesIdx.index]
logger.debug("{0:s}h5KeysPost: {1:s}".format(logStr,str(h5KeysPost)))
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return h5Keys,h5KeysPost
def getCVDFromH5(self,timeStart=None,timeEnd=None,timeDelta=None,returnDfCVDataOnly=False):
"""
returns dfCVD, dfCVDataOnly
dfCVD: all rows with Subsystem CVD
dfCVDataOnly: CVs from dfCVD
timeDelta: i.e. pd.Timedelta('1 Hour'); if not None ScenTime is shifted by + timeDelta
returns dfCVDataOnly only, if returnDfCVDataOnly
"""
logStr = "{0:s}.{1:s}: ".format(self.__class__.__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
dfCVD= | pd.DataFrame() | pandas.DataFrame |
import os
import csv
import argparse
import pandas as pd
from transformers import T5Tokenizer
from torch.utils.data import Dataset
class StsbDataset(Dataset):
def __init__(self, hparams):
self.hparams = argparse.Namespace(**hparams)
self.max_seq_len = self.hparams.max_seq_length
self.max_target_len = self.hparams.max_seq_length
self.source_column = "source_column"
self.tokenizer = T5Tokenizer.from_pretrained(self.hparams.tokenizer_name_or_path)
self.inputs = []
self.tokenized_targets = []
self.targets = []
def __len__(self):
return len(self.inputs)
def __getitem__(self, index):
source_ids = self.inputs[index]["input_ids"].squeeze()
src_mask = self.inputs[index]["attention_mask"].squeeze()
target_ids = self.tokenized_targets[index]["input_ids"].squeeze()
target_mask = self.tokenized_targets[index]["attention_mask"].squeeze()
target = self.targets[index]
return {"source_ids": source_ids, "source_mask": src_mask, "target_ids": target_ids, "target_mask": target_mask,
'target': target}
def _build(self):
if self.hparams.bucket_mode == 0.2:
self.data['targets'] = self.data[self.target_column].apply(lambda x: round(x * 5) / 5)
if self.hparams.bucket_mode == 0.1:
self.data['targets'] = self.data[self.target_column].apply(lambda x: round(x, 1))
if self.hparams.bucket_mode == 1.0:
self.data['targets'] = self.data[self.target_column].apply(lambda x: round(x))
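# Illustration of the bucketing above for a raw score of 3.47 (value made up):
# bucket_mode 0.2 -> round(3.47*5)/5 = 3.4; bucket_mode 0.1 -> round(3.47,1) = 3.5; bucket_mode 1.0 -> round(3.47) = 3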
for idx in range(len(self.data)):
input_, target = self.data.loc[idx, self.source_column], self.data.loc[idx, 'targets']
text_target = str(target)
# tokenize inputs
tokenized_inputs = self.tokenizer.batch_encode_plus(
[input_], max_length=self.max_seq_len, padding='max_length', return_tensors="pt", truncation=True
)
# tokenize targets
tokenized_targets = self.tokenizer.batch_encode_plus(
[text_target], max_length=self.max_target_len, padding='max_length', return_tensors="pt",
truncation=True
)
self.inputs.append(tokenized_inputs)
self.tokenized_targets.append(tokenized_targets)
self.targets.append(target)
class SvDataset(StsbDataset):
def __init__(self, type_path, hparams):
super().__init__(hparams)
self.path = os.path.join(self.hparams.data_dir, type_path + '.tsv')
self.target_column = 'score'
self.data = pd.read_csv(self.path, sep='\t')
self.data['source_column'] = 's1:' + self.data['sentence1'] + ' s2: ' + self.data['sentence2'] + \
' </s>'
if self.hparams.debug:
self.data = self.data[:30]
print(f'The shape of the {type_path} dataset is {self.data.shape}')
self._build()
class EnDataset(StsbDataset):
def __init__(self, type_path, hparams):
super().__init__(hparams)
self.path = os.path.join(self.hparams.data_dir, type_path + '.csv')
self.target_column = 4
if self.hparams.debias:
data = pd.read_csv(
self.path, delimiter='\t', header=None, quoting=csv.QUOTE_NONE,
usecols=range(7))
df = data.copy()
df = df.drop([6], axis=1)
df['gendered'] = df[5].apply(
lambda x: self.replace_with(x, 'nurse', pronoun=False) if self.is_gendered(x) else False)
df['gender'] = df[5].apply(lambda x: 'man' if x[:5] == 'A man' else 'woman')
df = df[df.gendered != False]
women = df[df['gender'] == 'woman']
men = df[df['gender'] == 'man']
men2 = women.copy()
men2[5] = men2[5].apply(lambda x: self.replace_with(x, 'man', pronoun=False))
women2 = men.copy()
women2[5] = women2[5].apply(lambda x: self.replace_with(x, 'woman', pronoun=False))
women_df = pd.concat([women, women2])
men_df = pd.concat([men, men2])
neutral = pd.concat([women_df, men_df])
neutral = neutral.drop(['gender'], axis=1)
neutral.rename(columns={'gendered': 6}, inplace=True)
self.data = | pd.concat([data, neutral]) | pandas.concat |
# -*- coding: utf-8 -*-
import multiprocessing
import numpy as np
import pandas as pd
from numpy.linalg import svd
from sklearn.preprocessing import StandardScaler
from statsmodels.formula.api import ols
from statsmodels.stats.anova import anova_lm
from wpca import WPCA
import os.path
import sys
import patsy
from lifelines import CoxPHFitter
from typing import *
import porch.cache as cache
def porch_single_process(expression_df: pd.DataFrame,
geneset_df: pd.DataFrame,
gene_column: str = "gene",
set_column: str = "pathway",
annot_column: str = "reactome_name",
) -> Tuple[pd.DataFrame, Dict[str,Dict[str,float]], List]:
"""
Calculates pathway activities from the expression values of analytes,
with a grouping given by a pathway definition.
This call is functionally equivalent to the porch function, with the difference that it does not use
parallel processing. The function is mostly intended for debugging purposes.
Args:
expression_df (pd.DataFrame): The DataFrame of the expression values we analyse. These values are logtransformed and subsequently standardized before analysis
geneset_df (pd.DataFrame): The DataFrame of the pathway definitions.
gene_column (str): The name of the column within geneset_df containing names of analytes.
set_column (str): The name of the column within geneset_df containing names of pathways.
annot_column (str): The name of the column within geneset_df containing annotation of pathways.
Returns:
tuple(pd.DataFrame, Dict[str,Dict[str,float]], list): tuple containing:
- **activity_df** (*pd.DataFrame*): A pandas DataFrame activity_df, containing the pathway activity values for each sample and pathway.
- **eigen_samples** (*Dict[str,Dict[str,float]]*): A dictionary of the pathways' eigen samples, i.e. representative patterns of analyte expression values.
- **untested** (*list*): a list of the pathways that could not be decomposed, due to shortage of data in expression_df.
"""
# expression_df = expression_df[phenotype_df.columns]
results_df = pd.DataFrame()
set_df = geneset_df[[gene_column, set_column,annot_column]]
set_of_all_genes = set(expression_df.index)
setnames, set_annots, set_sizes, untested, activities, eigen_samples = [], [], [], [], [], {}
for setname, geneset in set_df.groupby([set_column]):
genes = list(set(geneset[gene_column].tolist()) & set_of_all_genes)
annot = (geneset[annot_column].iloc[0] if len(geneset.index)>0 else "Default")
setname, set_annot, set_size, activity, eigen_sample_dict = porch_proc(setname, annot, genes, expression_df)
if activity is None:
untested += [setname]
else:
setnames += [setname]
set_annots += [set_annot]
set_sizes += [set_size]
activities += [activity]
eigen_samples[setname] = eigen_sample_dict
activity_df = pd.DataFrame(data=activities, columns=expression_df.columns, index=setnames)
activity_df["annotation"] = set_annots
activity_df["set_size"] = set_sizes
return activity_df, eigen_samples, untested
def porch(expression_df: pd.DataFrame,
geneset_df: pd.DataFrame,
gene_column: str = "gene",
set_column: str = "pathway",
annot_column: str = "reactome_name",
) -> Tuple[pd.DataFrame, Dict[str,Dict[str,float]], List]:
"""
Calculates pathway activities from the expression values of analytes,
with a grouping given by a pathway definition.
Args:
expression_df (pd.DataFrame): The DataFrame of the expression values we analyse. These values are log-transformed and subsequently standardized before analysis
geneset_df (pd.DataFrame): The DataFrame of the pathway definitions.
gene_column (str): The name of the column within geneset_df containing names of analytes.
set_column (str): The name of the column within geneset_df containing id:s of pathways.
annot_column (str): The name of the column within geneset_df containing annotation of pathways.
Returns:
Tuple(pd.DataFrame, Dict[str,Dict[str,float]], list): tuple containing:
- **activity_df** (*pd.DataFrame*): A pandas DataFrames activity_df, containing the pathway activity values for each sample and pathway.
- **eigen_samples** (*Dict[str,Dict[str,float]]*): A dictionary of the pathways' eigen samples, i.e. representative patterns of analyte expression values
- **untested** (*list*): a list of the pathway that were not possible to decompose, due to shortage of data in expression_df.
"""
set_df = geneset_df[[gene_column, set_column,annot_column]]
set_of_all_genes = set(expression_df.index)
call_args = []
for setname, geneset in set_df.groupby([set_column]):
genes = list(set(geneset[gene_column].tolist()) & set_of_all_genes)
annot = (geneset[annot_column].iloc[0] if len(geneset.index)>0 else "Default")
call_args += [(setname, annot, genes, expression_df)]
print("Processing with {} parallel processes".format(os.cpu_count()), file=sys.stderr)
setnames, set_annots, set_sizes, untested, activities, eigen_samples = [], [], [], [], [], {}
with multiprocessing.Pool() as executor:
for setname, set_annot, set_size, activity, eigen_sample_dict in executor.starmap(porch_proc, call_args):
if activity is None:
untested += [setname]
else:
setnames += [setname]
set_annots += [set_annot]
set_sizes += [set_size]
activities += [activity]
eigen_samples[setname] = eigen_sample_dict
activity_df = pd.DataFrame(data=activities, columns=expression_df.columns, index=setnames)
activity_df["annotation"] = set_annots
activity_df["set_size"] = set_sizes
return activity_df, eigen_samples, untested
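# Minimal usage sketch (not part of the original API; the gene and pathway names below are made up):
# builds a tiny synthetic expression matrix and one pathway definition and runs the single-process
# variant, which avoids the multiprocessing pool.
def _example_porch_usage():
    rng = np.random.default_rng(0)
    samples = ["s{}".format(i) for i in range(1, 7)]
    expression_df = pd.DataFrame(rng.lognormal(mean=2.0, sigma=0.5, size=(4, 6)),
                                 index=["g1", "g2", "g3", "g4"], columns=samples)
    geneset_df = pd.DataFrame({"gene": ["g1", "g2", "g3", "g4"],
                               "pathway": ["P1"] * 4,
                               "reactome_name": ["Example pathway"] * 4})
    activity_df, eigen_samples, untested = porch_single_process(
        expression_df, geneset_df,
        gene_column="gene", set_column="pathway", annot_column="reactome_name")
    return activity_df, eigen_samples, untested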
def porch_proc(setname, set_annotation, genes, expression_df,keep_feature_stdv=True):
""" Core processing node of porch. Takes the analysis from expression values to significance testing. """
# print("Decomposing " + setname, file=sys.stderr)
expr = expression_df.loc[genes]
expr.dropna(axis=0, how='any', inplace=True)
expr = expr.loc[~(expr<=0.0).any(axis=1)]
if expr.shape[0]>2:
try:
standardizer = StandardScaler(with_std=keep_feature_stdv)
log_data = np.log(expr.values.T.astype(float))
standard_log_data = standardizer.fit_transform(log_data).T
eigen_genes, eigen_samples = decomposition_method(standard_log_data)
eigen_sample_dict = dict(zip(expr.index,eigen_samples))
return setname, set_annotation, len(genes), eigen_genes, eigen_sample_dict
except ValueError:
pass
# print("Not enough data to evaluate " + setname, file=sys.stderr)
return setname, set_annotation, len(genes), None, None
def porch_reactome(expression_df: pd.DataFrame,
organism: str = "HSA",
gene_anot: str = "Ensembl") -> Tuple[pd.DataFrame, List]:
"""
Download the Reactome database and subsequently call porch.
Args:
expression_df (pd.DataFrame): The DataFrame of the expression values we analyse. These values are log-transformed and subsequently standardized before analysis
organism (str): The three letter reactome abriviation of organism, e.g. HSA or MMU
gene_anot (str): Reactome name of row annotation, e.g. Ensembl or ChEBI
Returns:
tuple(pd.DataFrame, list): tuple containing:
- **activity_df** (*pd.DataFrame*): A pandas DataFrames activity_df, containing the pathway activity values for each sample and pathway.
- **untested** (*list*): a list of the pathway that were not possible to decompose, due to shortage of data in expression_df.
"""
reactome_df = get_reactome_df(organism, gene_anot)
# return porch_single_process(expression_df, reactome_df, "gene", "reactome_id")
return porch(expression_df, reactome_df,
"gene", "reactome_id", "reactome_name")
def porch_multi_reactome(expression_df,list_of_expression_annotations):
""" Download the Reactome database and subsequently call porch. """
reactome_df = None
for organism, gene_anot in list_of_expression_annotations:
r_df = get_reactome_df(organism, gene_anot)
if reactome_df is None:
reactome_df = r_df
else:
reactome_df.append(r_df)
return porch_single_process(expression_df, reactome_df, "gene", "reactome_id", "reactome_name")
# return porch(expression_df, reactome_df,
# "gene", "reactome_id")
def wpca_decomposition(data):
weights = 0. + np.isfinite(data)
kwds = {'weights': weights}
pca = WPCA(n_components=1).fit(data, **kwds)
eigen_samples = pca.transform(data)[:,0]
eigen_genes = pca.components_[0,:]
return eigen_genes, eigen_samples
def svd_decomposition(data):
U, S, Vt = svd(data, full_matrices=False)
eigen_genes = (Vt.T)[:,0]
eigen_samples = U[:,0]
return eigen_genes, eigen_samples
decomposition_method = svd_decomposition # plain SVD is used by default; switch to wpca_decomposition to handle missing values via weights
#decomposition_method = wpca_decomposition
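# Small shape sanity check (an illustration, not original code): for a genes-by-samples matrix the first
# right singular vector yields one value per sample (the "eigengene"/activity) and the first left
# singular vector one loading per analyte (the "eigensample").
def _example_svd_shapes():
    rng = np.random.default_rng(0)
    data = rng.normal(size=(5, 8))      # 5 analytes x 8 samples
    eigen_genes, eigen_samples = svd_decomposition(data)
    assert eigen_genes.shape == (8,)    # per-sample activity
    assert eigen_samples.shape == (5,)  # per-analyte loading
    return eigen_genes, eigen_samples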
def linear_model(test,activity_df,phenotype_df):
"""
Applies a linear model, test, that is a function of variables in phenotype_df,
to each row in the activity_df.
Args:
activity_df (pd.DataFrame): The DataFrame of the pathway activity values we analyse.
phenotype_df (pd.DataFrame): The DataFrame containing any sample oriented variables that are included in the model.
test (str): linear model that should be tested. The model should contain the variable Pathway, that will be replaced with each pathway's activity.
Returns:
results_df
A pandas DataFrames results_df, containing the output of the significance tests
"""
expression_df = activity_df.copy()
phenotype_df = phenotype_df[[ col for col in phenotype_df.columns if col in expression_df.columns]]
expression_df = expression_df[phenotype_df.columns]
significance_df = expression_df.apply(applicable_linear_model,
axis=1, result_type='reduce',
args=(test,phenotype_df))
significance_df["annotation"] = activity_df["annotation"]
significance_df["set_size"] = activity_df["set_size"]
return significance_df
def applicable_linear_model(row,test,phenotype_df):
phenotype_df.loc["Pathway"] = row.values
lm = ols(test, phenotype_df.T).fit()
try:
pvals = anova_lm(lm)["PR(>F)"].T.iloc[:-1]
#pvals.rename(row.name)
except ValueError:
pvals = None
return pvals
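# Hypothetical usage sketch of linear_model (the sample names, group labels and formula are assumptions):
# phenotype_df carries samples as columns and phenotype variables as rows; the literal name 'Pathway' in the
# formula is substituted row by row with each pathway's activity before fitting the OLS model.
def _example_linear_model():
    samples = ["s1", "s2", "s3", "s4", "s5", "s6"]
    activity_df = pd.DataFrame([[0.1, 0.3, 0.2, 1.1, 1.3, 1.2]],
                               index=["R-HSA-EXAMPLE"], columns=samples)
    activity_df["annotation"] = ["Example pathway"]
    activity_df["set_size"] = [10]
    phenotype_df = pd.DataFrame([[0, 0, 0, 1, 1, 1]], index=["Group"], columns=samples)
    return linear_model("Pathway ~ C(Group)", activity_df, phenotype_df)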
reactome_fn = "2Reactome_All_Levels.txt"
#reactome_fn = "UniProt2Reactome_All_Levels.txt"
reactome_url = "https://reactome.org/download/current/"
def get_reactome_df(organism = "HSA", gene_anot = "Ensembl"):
fn = gene_anot + reactome_fn
path = os.path.join(cache.get_cache_path(),fn)
url = reactome_url + fn
reactome_df = pd.read_csv(cache.download_file(path, url),
sep='\t',
header=None,
usecols=[0,1,3],
names=["gene","reactome_id","reactome_name"])
organism = "R-" + organism
reactome_df = reactome_df[reactome_df["reactome_id"].str.startswith(organism) ]
return reactome_df
def read_triqler(file_name):
"""Code for reading a protein.tsv file from triqler"""
pid_col, first_dat_col = 2, 7
proteins, data = [], []
with open(file_name) as infile:
header = infile.readline().split('\t')
last_dat_col = len(header) - 1
col_names = [w.split(':')[2] for w in header[first_dat_col:last_dat_col]]
phen_values = [[int(w.split(':')[0]) for w in header[first_dat_col:last_dat_col]]]
for line in infile.readlines():
words = line.split('\t')
proteins += [words[pid_col]]
data += [[np.exp2(float (w)) for w in words[first_dat_col:last_dat_col]]]
values_df = pd.DataFrame(index=proteins, columns=col_names, data=data)
phenotype_df = pd.DataFrame(index=["SampleGroup"], columns=col_names, data=phen_values)
return values_df, phenotype_df
def survival(row, phenotype_df, duration_col = 'T', event_col = 'E', other_cols = []):
"""
duration_col: survival time
event_col: whether an event (death or other) has ocured or not. 0 for no, 1 for yes
other_cols: other variables to consider in the regression
"""
phenotype_df = phenotype_df.T
phenotype_df = phenotype_df.join(row.astype(float))
phenotype_df[duration_col] = phenotype_df[duration_col].astype(float)
phenotype_df[event_col] = phenotype_df[event_col].astype(int)
# The following lines deal with char conflicts in patsy formulas
duration_col = duration_col.replace(' ','_').replace('.','_').replace('-','_')
event_col = event_col.replace(' ','_').replace('.','_').replace('-','_')
other_cols = [x.replace(' ','_').replace('.','_').replace('-','_') for x in other_cols]
row.name = row.name.replace(' ','_').replace('.','_').replace('-','_')
phenotype_df.columns = [x.replace(' ','_').replace('.','_').replace('-','_') for x in phenotype_df.columns]
formula = row.name + ' + ' + duration_col + ' + ' + event_col
if not not other_cols:
other_cols = [x.replace(' ','_').replace('.','_') for x in other_cols]
formula = formula + ' + ' + ' + '.join(other_cols)
X = patsy.dmatrix(formula_like = formula, data = phenotype_df, return_type = 'dataframe')
X = X.drop(['Intercept'], axis = 1)
cph = CoxPHFitter()
cph.fit(X, duration_col = duration_col, event_col = event_col)
result = cph.summary.loc[row.name]
return result
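# Hypothetical call sketch (column names are assumptions): per-pathway Cox regression via DataFrame.apply,
# with the survival time in 'OS_months', the event flag in 'OS_event' and age as an additional covariate:
#   results = activity_df[phenotype_df.columns].apply(
#       survival, axis=1, phenotype_df=phenotype_df,
#       duration_col='OS_months', event_col='OS_event', other_cols=['age'])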
def porch_single_process_npcs(n, expression_df: pd.DataFrame,
geneset_df: pd.DataFrame,
gene_column: str = "gene",
set_column: str = "pathway",
annot_column: str = "reactome_name",
) -> Tuple[pd.DataFrame, Dict[str,Dict[str,float]], List]:
"""
Calculates pathway activities from the expression values of analytes,
with a grouping given by a pathway definition.
This call is functional equivalent to the porch function, with the difference that it is not using
parallel processing. The function is mostly intended for debugging purposes.
Args:
expression_df (pd.DataFrame): The DataFrame of the expression values we analyse. These values are logtransformed and subsequently standardized before analysis
geneset_df (pd.DataFrame): The DataFrame of the pathway definitions.
gene_column (str): The name of the column within geneset_df containing names of analytes.
set_column (str): The name of the column within geneset_df containing names of pathways.
annot_column (str): The name of the column within geneset_df containing annotation of pathways.
Returns:
tuple(pd.DataFrame, list): tuple containing:
- **activity_df** (*pd.DataFrame*): A pandas DataFrames activity_df, containing the pathway activity values for each sample and pathway.
- **untested** (*list*): a list of the pathway that were not possible to decompose, due to shortage of data in expression_df.
"""
# expression_df = expression_df[phenotype_df.columns]
results_df = pd.DataFrame()
set_df = geneset_df[[gene_column, set_column,annot_column]]
set_of_all_genes = set(expression_df.index)
setnames, set_annots, set_sizes, untested, activities, eigen_samples = [], [], [], [], [], {}
for setname, geneset in set_df.groupby([set_column]):
genes = list(set(geneset[gene_column].tolist()) & set_of_all_genes)
annot = (geneset[annot_column].iloc[0] if len(geneset.index)>0 else "Default")
setname, set_annot, set_size, activity, eigen_sample_dict = porch_proc_npcs(n, setname, annot, genes, expression_df)
if activity is None:
untested += [setname]
else:
setnames += [setname]
set_annots += [set_annot]
set_sizes += [set_size]
activities += [activity]
eigen_samples[setname] = eigen_sample_dict
activity_df = | pd.DataFrame(data=activities, columns=expression_df.columns, index=setnames) | pandas.DataFrame |
#!/usr/bin/env python3
"""
Cryptoledger is a tool to keep track of cryptocurrency transactions
* Imports *.csv transaction lists exported from exchanges (bittrex and gdax supported) and consolidates them in a standardize *.csv file without duplicates.
* Calculate a portfolio on any given dates using the transaction list
* Estimate its value in specified currency using cryptocompare API
* Plot a portfolio pie chart, daily valuation and daily return for a given period
* Create a corresponding PDF report formatted with a Latex template
"""
import sys # system specific package (e.g. provide argv for command line arguments)
import argparse # command line parser
import csv # read and write csv file
import matplotlib.pyplot as plt # for plotting graph
import seaborn as sns # for plotting graphs, nicer theme for matplotlib + wrapper for stat related plot
import datetime as dt # to convert strings from csv to date and time
import calendar # to conver date time to unix timestamp
import warnings
import os # to check if a file exist
import numpy as np # handle multi-dimensional arrays
import pandas as pd # data manipulation and analysis
from pandas.util import hash_pandas_object
import requests # to get json files from cryptocompare API
import jinja2 # to use a latex template
from jinja2 import Template
import shutil # file operations (e.g. copy)
class Transaction:
"""Class representing a transaction (BUY|SELL|WITHDRAWAL|DEPOSIT), converts exchanges raw csv to internal 'native' format"""
fieldnames = ['uid', 'base_c', 'quote_c', 'action', 'qty', 'rate', 'amount', 'commission', 'timestamp', 'exchange'] # short fieldnames for internal use
fieldnames_exp = ['UID', 'Base currency', 'Quote currency', 'Action', 'Quantity (#base)', 'Rate (Quote currency)', 'Amount (Quote currency)', 'Commission (Quote currency)', 'Timestamp', 'Exchange'] # explicit fieldnames for export
def __init__(self, uid='', base_c='', quote_c='', action='', qty=0.0, rate=0.0, amount=0.0, commission=0.0, timestamp=0, exchange=''):
"""initialize transaction with default value"""
self.uid = uid # uniqueID (keep original coin transfer ID or exchange trade id for Traceability)
self.base_c = base_c # base currency (e.g. alts)
self.quote_c = quote_c # quote currency (e.g. BTC, EUR)
self.action = action # action (BUY|SELL|WITHDRAW|DEPOSIT)
self.qty = qty # qty (of base)
self.rate = rate # rate (quote currency/base currency)
self.amount = amount # amount (in quote currency, excluding commission)
self.commission= commission # commission (in quote currency)
self.timestamp = timestamp # timestamp (of exectued time)
self.exchange = exchange # name of exchange
def __str__(self):
"""returns text representation of a transaction"""
return self.fieldnames_exp[0]+" = "+self.uid+"\n"+ \
self.fieldnames_exp[1]+" = "+self.base_c+"\n"+ \
self.fieldnames_exp[2]+" = "+self.quote_c+"\n"+ \
self.fieldnames_exp[3]+" = "+self.action+"\n"+ \
self.fieldnames_exp[4]+" = "+str(self.qty)+"\n"+ \
self.fieldnames_exp[5]+" = "+str(self.rate)+"\n"+ \
self.fieldnames_exp[6]+" = "+str(self.amount)+"\n"+ \
self.fieldnames_exp[7]+" = "+str(self.commission)+"\n"+ \
self.fieldnames_exp[8]+" = "+ dt.datetime.utcfromtimestamp(self.timestamp).strftime('%Y-%m-%d %H:%M:%S') +"\n"+ \
self.fieldnames_exp[9]+" = "+self.exchange+"\n"
def to_dict(self):
"""Returns a transaction in a dict of strings with formated numbers"""
t = dt.datetime.utcfromtimestamp(self.timestamp).strftime('%Y-%m-%d %H:%M:%S')
row = {}
row['uid'] = self.uid
row['base_c'] = self.base_c
row['quote_c'] = self.quote_c
row['action'] = self.action
row['qty'] = '{:.8g}'.format(self.qty)
row['rate'] = '{:.8g}'.format(self.rate)
row['amount'] = '{:.8g}'.format(self.amount)
row['commission'] = '{:.8g}'.format(self.commission)
row['timestamp'] = t
row['exchange'] = self.exchange
return row
def __eq__(self, other):
"""delegate __eq__ to tuple"""
return self.to_tuple() == other.to_tuple()
def __hash__(self):
"""delegate __hash__ to tuple"""
return hash(self.to_tuple())
def to_tuple(self):
"""return attributes defining a transaction as a list"""
return (self.uid, self.base_c, self.quote_c, self.action, self.qty, self.rate, self.amount, self.commission, self.timestamp, self.exchange)
def is_valid(self): # TODO: test other conditions and issue error/warning printing available info
"""test if a transaction is valid (mandatory fields depending on action, verify qty*rate == amount, etc.)"""
# compare with a small relative tolerance instead of exact float equality
return abs(self.qty * self.rate - self.amount) <= 1e-8 * max(abs(self.amount), 1.0)
@classmethod
def from_dict(cls, dic, d_format='native'):
"""create a transaction from a line of CSV file read by DictReader."""
if d_format == 'native': # i.e. from csv database saved by this program
#~ return cls(*dic.values()) # '*' expand the content of the list as arguments, assumes the order is the same as defined in init function; need to normalize types for sort and hash operations
return cls(dic['uid'], dic['base_c'], dic['quote_c'], dic['action'], float(dic['qty']), float(dic['rate']), float(dic['amount']), float(dic['commission']), int(dic['timestamp']), dic['exchange'])
elif d_format == 'bittrex':
# extract base and quote currencies
(q_c,b_c) = dic['Exchange'].split('-') # assumes there is only one '-'
# check for valid action
act = str()
if 'SELL' in dic['Type'].upper():
act = 'SELL'
elif 'BUY' in dic['Type'].upper():
act = 'BUY'
else:
act = 'unknown action: ' + dic['Type'] # note that withdrawals and deposits do not appear in the bittrex csv export, they need to be added manually
#convert datetime string to timestamp
date_conv = dt.datetime.strptime(dic['Closed'],'%m/%d/%Y %I:%M:%S %p') # bittrex datetime format, e.g. 12/5/2017 1:46:32 PM, assumes UTC time zone
date_timestamp = calendar.timegm(date_conv.utctimetuple()) # assumes UTC time; mktime would use local time instead
return cls(uid=dic['OrderUuid'], base_c=b_c, quote_c=q_c, action=act, qty=float(dic['Quantity']), rate=float(dic['Limit']), amount=float(dic['Price']), commission=float(dic['CommissionPaid']), timestamp=date_timestamp, exchange='bittrex')
elif d_format == 'gdax':
# extract base and quote currencies
(b_c,q_c) = dic['product'].split('-') # assumes there is only one '-'
#convert datetime string to timestamp
date_conv = dt.datetime.strptime(dic['created at'][:-5],'%Y-%m-%dT%H:%M:%S') # gdax datetime format, e.g. 2017-12-05T12:41:32.165Z, assumes always Z (UTC time zone), ignore microseconds
date_timestamp = calendar.timegm(date_conv.utctimetuple()) # assumes UTC time; mktime would use local time instead
#calculate amount excluding fees (standardized data)
qty = float(dic['size'])
rate = float(dic['price'])
amount=qty*rate
return cls(uid=dic['trade id'], base_c=b_c, quote_c=q_c, action=dic['side'], qty=qty, rate=rate, amount=amount , commission=float(dic['fee']), timestamp=date_timestamp, exchange='gdax')
else:
warnings.warn('Unable to init transaction from dict; Unsupported exchange: ' + str(d_format), UserWarning)
return None
@classmethod
def from_input(cls):
"""create a transaction from manual user input, return None if canceled or invalid"""
# could be improved by validating inputs one by one, a better way to input the timestamp, prefilling depending on action, etc.
user_input = []
tr = None
while True:
user_input.clear()
print('Manual transaction input', 'timestamp format: %Y-%m-%d %H:%M:%S','', sep='\n')
try:
for line in cls.fieldnames_exp:
user_input.append(input(line+': '))
date_conv = dt.datetime.strptime(user_input[8],'%Y-%m-%d %H:%M:%S')
date_timestamp = calendar.timegm(date_conv.utctimetuple())
tr = cls(user_input[0], user_input[1], user_input[2], user_input[3], float(user_input[4]), float(user_input[5]), float(user_input[6]), float(user_input[7]), date_timestamp, user_input[9])
print('\nNew transaction:',tr,sep='\n')
except ValueError:
print('Invalid input.')
if input('type "y" to try again: ') != 'y':
return None
else:
if input("Enter to confirm, type 'c' to cancel: ") == 'c':
return None
else:
return tr
return None
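# Illustrative sketch (not part of the original tool): constructing a Transaction
# directly and inspecting it. All values below are made up for demonstration.
def _example_transaction():
    tr = Transaction(uid='abc123', base_c='LTC', quote_c='BTC', action='BUY',
                     qty=2.0, rate=0.01, amount=0.02, commission=0.00005,
                     timestamp=1512480392, exchange='bittrex')
    return tr.to_dict()  # formatted dict of strings, e.g. for display or export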
class Ledger:
"""Class representing a ledger containing a list of transactions. Add transactions from external csv, remove duplicates and sort"""
def __init__(self, csv_db_path ):
"""Initialize the ledger with transactions in csv given in argument if it exist"""
self.transactions = [] # list of transactions
self.csv_db_path = csv_db_path
if os.path.isfile(self.csv_db_path): # load db if it exists
self.append(self.csv_db_path, 'native')
def __str__(self):
"""returns text representation of a ledger"""
text = 'list of transactions:'
for line in self.transactions:
text += '\n' + str(line)
return text
def append(self, csv_path, csv_format):
"""read transactions from a CSV file, add them to current list and remove duplicates"""
with open(csv_path,'r',newline='\n', encoding='utf-8') as csv_update:
csv_update = (line.replace('\0','') for line in csv_update)# use inline generator to replace NULL values (assumes NULL values are not due to incorrect encoding)
csv_reader = csv.DictReader(csv_update, Transaction.fieldnames if csv_format=='native' else None)
if csv_format == 'native':
next(csv_reader) # skip the first line when using native format because headers are given separately
for line in csv_reader:
self.transactions.append( Transaction.from_dict(line, csv_format) )
self.remove_duplicates()
def manual_append(self):
"""Manually append new transaction"""
tr = Transaction.from_input()
if tr is not None:
self.transactions.append(tr)
self.remove_duplicates()
def save(self):
with open(self.csv_db_path,'w', encoding='utf-8') as csv_ledger:
csv_writer = csv.writer(csv_ledger,delimiter=',')
csv_writer.writerow(Transaction.fieldnames_exp) # write header line with expanded names
for line in self.transactions:
csv_writer.writerow( line.to_tuple() )
def to_list(self, first_date, last_date):
"""Returns a list of transactions between the first and last date, formatted as a list of dict"""
table = []
for l in self.transactions:
if first_date <= dt.datetime.utcfromtimestamp(l.timestamp).date() <= last_date:
table.append(l.to_dict())
return table
def remove_duplicates(self):
"""Remove duplicates from transaction list and resort according to timestamp"""
self.transactions = list(set(self.transactions))
self.transactions = sorted(self.transactions, key=lambda k: k.timestamp, reverse=False) # sort list of transactions by timestamp
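# Illustrative sketch (not part of the original tool): a typical Ledger round trip.
# The file names below are hypothetical.
def _example_ledger_workflow():
    ledger = Ledger('ledger.csv')                    # loads the native csv db if it exists
    ledger.append('bittrex_export.csv', 'bittrex')   # merge an exchange export, dedup and sort
    ledger.save()                                    # write back in the native csv format
    return ledger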
class Portfolio:
"""Class calculating a portfolio as function of the date from a ledger (list of transactions)"""
def __init__(self, ledger, eval_symbol='EUR'):
""" initialize portfolio with ledger pass in argument."""
self.ledger = ledger
self.snapshot = None # portfolio snapshot on last date
self.p_raw = pd.DataFrame() # portfolio with quantity of each coin
self.calculate(ledger)
self.eval_symbol=eval_symbol # currency in which to evaluate the portfolio
self.p_eval = pd.DataFrame() # portfolio evaluated in eval_symbol currency
self.evaluate()
def calculate(self, ledger):
"""Calculate portfolio from ledger"""
if len(ledger.transactions) == 0:
return
#populate portfolio by processing the transactions in ledger
for l in ledger.transactions:
day = pd.to_datetime(l.timestamp,unit='s', utc=True, origin='unix' ).date() # convert timestamp to datetime and keep the date part
# if new day, copy last line (if it exists) in dataframe with new day as index
if day not in self.p_raw.index and len(self.p_raw) > 0:
self.p_raw = self.p_raw.append( pd.DataFrame(data=self.p_raw.tail(1).values, index=[day], columns=self.p_raw.columns) )
# if base currency is not in portfolio yet, initialize to zero
if l.base_c not in self.p_raw.columns:
self.p_raw.at[day,l.base_c] = 0.0
# if base currency at current index doesn't exist, initialize to zero (should not happen if new lines are copies of the previous ones)
elif pd.isna(self.p_raw.at[day,l.base_c]):
self.p_raw.at[day,l.base_c] = 0.0
# same for quote currency
if l.quote_c not in self.p_raw.columns:
self.p_raw.at[day,l.quote_c] = 0.0
elif pd.isna(self.p_raw.at[day,l.quote_c]):
from __future__ import print_function
import os
import datetime
import sys
import pandas as pd
import numpy as np
import requests
import copy
# import pytz
import seaborn as sns
from urllib.parse import quote
import monetio.obs.obs_util as obs_util
"""
NAME: cems_api.py
PGRMMER: <NAME> ORG: ARL
This code written at the NOAA air resources laboratory
Python 3
#################################################################
The key and url for the epa api should be stored in a file called
.epaapirc in the $HOME directory.
The contents should be
key: apikey
url: https://api.epa.gov/FACT/1.0/
TO DO
-----
Date is in local time (not daylight savings)
Need to convert to UTC. This will require an extra package or api.
Classes:
----------
EpaApiObject - Base class
EmissionsCall
FacilitiesData
MonitoringPlan
Emissions
CEMS
Functions:
----------
addquarter
get_datelist
findquarter
sendrequest
getkey
"""
def test_end(endtime, current):
# if endtime None return True
if isinstance(endtime, pd._libs.tslibs.nattype.NaTType):
return True
elif not endtime:
return True
# if endtime greater than current return true
elif endtime >= current:
return True
# if endtime less than current time return false
elif endtime < current:
return False
else:
return True
def get_filename(fname, prompt):
"""
determines if file exists. If prompt is True then will prompt for
new filename if file does not exist.
"""
if fname:
done = False
iii = 0
while not done:
if iii > 2:
done = True
iii += 1
if os.path.isfile(fname):
done = True
elif prompt:
istr = "\n" + fname + " is not a valid name for Facilities Data \n"
istr += "Please enter a new filename \n"
istr += "enter None to load from the api \n"
istr += "enter x to exit program \n"
fname = input(istr)
# print('checking ' + fname)
if fname == "x":
sys.exit()
if fname.lower() == "none":
fname = None
done = True
else:
fname = None
done = True
return fname
# def get_timezone_offset(latitude, longitude):
# """
# uses geonames API
# must store username in the $HOME/.epaapirc file
# geousername: username
# """
# username = getkey()
# print(username)
# username = username["geousername"]
# url = "http://api.geonames.org/timezoneJSON?lat="
# request = url + str(latitude)
# request += "&lng="
# request += str(longitude)
# request += "&username="
# request += username
# try:
# data = requests.get(request)
# except BaseException:
# data = -99
#
# jobject = data.json()
# print(jobject)
# print(data)
# # raw offset should give standard time offset.
# if data == -99:
# return 0
# else:
# offset = jobject["rawOffset"]
# return offset
def getkey():
"""
key and url should be stored in $HOME/.epaapirc
"""
dhash = {}
homedir = os.environ["HOME"]
fname = "/.epaapirc"
if os.path.isfile(homedir + fname):
with open(homedir + fname) as fid:
lines = fid.readlines()
for temp in lines:
temp = temp.split(" ")
dhash[temp[0].strip().replace(":", "")] = temp[1].strip()
else:
dhash["key"] = None
dhash["url"] = None
dhash["geousername"] = None
return dhash
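# Example (illustrative): if $HOME/.epaapirc contains the two lines
#   key: myapikey
#   url: https://api.epa.gov/FACT/1.0/
# then getkey() returns {'key': 'myapikey', 'url': 'https://api.epa.gov/FACT/1.0/'}.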
def sendrequest(rqq, key=None, url=None):
"""
Method for sending requests to the EPA API
Inputs :
--------
rqq : string
request string.
Returns:
--------
data : response object
"""
if not key or not url:
keyhash = getkey()
apiurl = keyhash["url"]
key = keyhash["key"]
if key:
# apiurl = "https://api.epa.gov/FACT/1.0/"
rqq = apiurl + rqq + "?api_key=" + key
print("Request: ", rqq)
data = requests.get(rqq)
print("Status Code", data.status_code)
if data.status_code == 429:
print("Too many requests Please Wait before trying again.")
sys.exit()
else:
print("WARNING: your api key for EPA data was not found")
print("Please obtain a key from")
print("https://www.epa.gov/airmarkets/field-audit-checklist_tool-fact-api")
print("The key should be placed in $HOME/.epaapirc")
print("Contents of the file should be as follows")
print("key: apikey")
print("url: https://api.epa.gov/FACT/1.0/")
sys.exit()
return data
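# Illustrative sketch: request the emissions lookup tables (the same endpoint
# get_lookups below uses); requires a valid key/url in $HOME/.epaapirc.
def _example_sendrequest():
    resp = sendrequest("emissions/lookUps")
    return resp.json() if resp.status_code == 200 else None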
def get_lookups():
"""
Request to get lookups - descriptions of various codes.
"""
getstr = "emissions/lookUps"
# rqq = self.apiurl + "emissions/" + getstr
# rqq += "?api_key=" + self.key
data = sendrequest(getstr)
jobject = data.json()
dstr = unpack_response(jobject)
return dstr
# According to lookups MODC values
# 01 primary monitoring system
# 02 backup monitoring system
# 03 alternative monitoring system
# 04 backup monitoring system
# 06 average hour before/hour after
# 07 average hourly
# 21 negative value replaced with 0.
# 08 90th percentile value in Lookback Period
# 09 95th percentile value in Lookback Period
# etc.
# it looks like values between 1-4 ok
# 6-7 probably ok
# higher values should be flagged.
def quarter2date(year, quarter):
if quarter == 1:
dt = datetime.datetime(year, 1, 1)
elif quarter == 2:
dt = datetime.datetime(year, 4, 1)
elif quarter == 3:
dt = datetime.datetime(year, 7, 1)
elif quarter == 4:
dt = datetime.datetime(year, 10, 1)
return dt
def addquarter(rdate):
"""
INPUT
rdate : datetime object
RETURNS
newdate : datetime object
requests for emissions are made per quarter.
Returns first date in the next quarter from the input date.
"""
quarter = findquarter(rdate)
quarter += 1
year = rdate.year
if quarter > 4:
quarter = 1
year += 1
month = 3 * quarter - 2
newdate = datetime.datetime(year, month, 1, 0)
return newdate
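# Worked example: addquarter(datetime.datetime(2019, 5, 15)) falls in quarter 2,
# so the function returns datetime.datetime(2019, 7, 1, 0), the first day of
# quarter 3; a Q4 input rolls over to January 1 of the following year.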
def get_datelist_sub(r1, r2):
rlist = []
qt1 = findquarter(r1)
yr1 = r1.year
qt2 = findquarter(r2)
yr2 = r2.year
done = False
iii = 0
while not done:
rlist.append(quarter2date(yr1, qt1))
if yr1 > yr2:
done = True
elif yr1 == yr2 and qt1 == qt2:
done = True
qt1 += 1
if qt1 > 4:
qt1 = 1
yr1 += 1
iii += 1
if iii > 30:
break
return rlist
def get_datelist(rdate):
"""
INPUT
rdate : tuple of datetime objects
(start date, end date)
RETURNS:
rdatelist : list of datetimes covering range specified by rdate by quarter.
Return list of first date in each quarter from
startdate to end date.
"""
if isinstance(rdate, list):
rdatelist = get_datelist_sub(rdate[0], rdate[1])
else:
rdatelist = [rdate]
return rdatelist
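# Worked example: get_datelist([datetime.datetime(2019, 1, 15),
# datetime.datetime(2019, 8, 1)]) returns the first date of each quarter in the
# range: [2019-01-01, 2019-04-01, 2019-07-01].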
def findquarter(idate):
if idate.month <= 3:
qtr = 1
elif idate.month <= 6:
qtr = 2
elif idate.month <= 9:
qtr = 3
elif idate.month <= 12:
qtr = 4
return qtr
def keepcols(df, keeplist):
tcols = df.columns.values
klist = []
for ttt in keeplist:
if ttt in tcols:
# if ttt not in tcols:
# print("NOT IN ", ttt)
# print('Available', tcols)
# else:
klist.append(ttt)
tempdf = df[klist]
return tempdf
def get_so2(df):
"""
drop columns that are not in keep.
"""
keep = [
# "DateHour",
"time local",
# "time",
"OperatingTime",
# "HourLoad",
# "u so2_lbs",
"so2_lbs",
# "AdjustedFlow",
# "UnadjustedFlow",
# "FlowMODC",
"SO2MODC",
"unit",
"stackht",
"oris",
"latitude",
"longitude",
]
df = keepcols(df, keep)
if not df.empty:
df = df[df["oris"] != "None"]
return df
class EpaApiObject:
def __init__(self, fname=None, save=True, prompt=False, fdir=None):
"""
Base class for all classes that send request to EpaApi.
to avoid sending repeat requests to the api, the default option
is to save the data in a file - specified by fname.
fname : str
fdir : str
save : boolean
prompt : boolean
"""
# fname is name of file that data would be saved to.
self.status_code = None
self.df = pd.DataFrame()
self.fname = fname
self.datefmt = "%Y %m %d %H:%M"
if fdir:
self.fdir = fdir
else:
self.fdir = "./apifiles/"
if self.fdir[-1] != "/":
self.fdir += "/"
# returns None if filename does not exist.
# if prompt True then will ask for new filename if does not exist.
fname2 = get_filename(self.fdir + fname, prompt)
self.getstr = self.create_getstr()
# if the file exists load data from it.
getboolean = True
if fname2:
print("Loading from file ", self.fdir + self.fname)
self.fname = fname2
self.df, getboolean = self.load()
elif fname:
self.fname = self.fdir + fname
# if it doesn't load then get it from the api.
# if save is True then save.
if self.df.empty and getboolean:
# get sends request to api and processes data received.
self.df = self.get()
if save:
self.save()
def set_filename(self, fname):
self.fname = fname
def load(self):
chash = {"mid": str, "oris": str}
df = pd.read_csv(self.fname, index_col=[0], converters=chash, parse_dates=True)
# Starter code for multiple regressors implemented by <NAME>
# Source code based on Forecasting Favorites, 1owl
# https://www.kaggle.com/the1owl/forecasting-favorites , version 10
# Part II
import numpy as np
import pandas as pd
from sklearn import preprocessing, linear_model, metrics
import gc; gc.enable()
import random
from sklearn.neural_network import MLPRegressor
from sklearn.linear_model import TheilSenRegressor, BayesianRidge
from sklearn.ensemble import BaggingRegressor
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.tree import DecisionTreeRegressor
import time
np.random.seed(1122)
# store the total processing time
start_time = time.time()
tcurrent = start_time
print('Multiple regressors - Neural network (MLP), Bayesian Ridge, Bagging (4x) and XGBoost (2x)\n')
print('Datasets reading')
# read datasets
dtypes = {'id':'int64', 'item_nbr':'int32', 'store_nbr':'int8', 'onpromotion':str}
data = {
#R 'tra': pd.read_csv('../input/train.csv', dtype=dtypes, parse_dates=['date']),
'tra': pd.read_csv('../input/processed/train_4r.csv', dtype=dtypes, parse_dates=['date']),
'tes': pd.read_csv('../input/test.csv', dtype=dtypes, parse_dates=['date']),
'ite': pd.read_csv('../input/items.csv'),
'sto': pd.read_csv('../input/stores.csv'),
'trn': pd.read_csv('../input/transactions.csv', parse_dates=['date']),
'hol': pd.read_csv('../input/holidays_events.csv', dtype={'transferred':str}, parse_dates=['date']),
'oil': pd.read_csv('../input/oil.csv', parse_dates=['date']),
}
# dataset processing
print('Datasets processing')
# Filter the training data to contain only august starting from the day 16
# which is reasonable since the test period is 2017-08-16 until 2017-08-31
#R train = data['tra'][(data['tra']['date'].dt.month == 8) & (data['tra']['date'].dt.day > 15)]
train = data['tra']
del data['tra']; gc.collect();
target = train['unit_sales'].values
target[target < 0.] = 0.
train['unit_sales'] = np.log1p(target)
def df_lbl_enc(df):
for c in df.columns:
if df[c].dtype == 'object':
lbl = preprocessing.LabelEncoder()
df[c] = lbl.fit_transform(df[c])
print(c)
return df
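# Illustrative example: df_lbl_enc(pd.DataFrame({'family': ['GROCERY', 'DAIRY', 'GROCERY']}))
# replaces the object column with integer codes (here DAIRY -> 0, GROCERY -> 1,
# giving [1, 0, 1]) and prints the name of each encoded column.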
def df_transform(df):
df['date'] = pd.to_datetime(df['date'])
import os
import numpy as np
import pytest
from pandas.compat import is_platform_little_endian
import pandas as pd
from pandas import DataFrame, HDFStore, Series, _testing as tm, read_hdf
from pandas.tests.io.pytables.common import (
_maybe_remove,
ensure_clean_path,
ensure_clean_store,
tables,
)
from pandas.io import pytables as pytables
from pandas.io.pytables import ClosedFileError, PossibleDataLossError, Term
pytestmark = pytest.mark.single
def test_mode(setup_path):
df = tm.makeTimeDataFrame()
def check(mode):
msg = r"[\S]* does not exist"
with ensure_clean_path(setup_path) as path:
# constructor
if mode in ["r", "r+"]:
with pytest.raises(IOError, match=msg):
HDFStore(path, mode=mode)
else:
store = HDFStore(path, mode=mode)
assert store._handle.mode == mode
store.close()
with ensure_clean_path(setup_path) as path:
# context
if mode in ["r", "r+"]:
with pytest.raises(IOError, match=msg):
with HDFStore(path, mode=mode) as store:
pass
else:
with HDFStore(path, mode=mode) as store:
assert store._handle.mode == mode
with ensure_clean_path(setup_path) as path:
# conv write
if mode in ["r", "r+"]:
with pytest.raises(IOError, match=msg):
df.to_hdf(path, "df", mode=mode)
df.to_hdf(path, "df", mode="w")
else:
df.to_hdf(path, "df", mode=mode)
# conv read
if mode in ["w"]:
msg = (
"mode w is not allowed while performing a read. "
r"Allowed modes are r, r\+ and a."
)
with pytest.raises(ValueError, match=msg):
read_hdf(path, "df", mode=mode)
else:
result = read_hdf(path, "df", mode=mode)
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 16 16:39:11 2020
@author: esol
"""
from neqsim.thermo import fluid, fluid_df, printFrame, TPflash, phaseenvelope
import pandas as pd
import random
# create gas condensate fluid
gascondensate = {'ComponentName': ["nitrogen", "CO2", "methane", "ethane", "propane", "i-butane", "n-butane", "i-pentane", "n-pentane", "n-hexane", "C7", "C8", "C9", "C10", "C11", "C12", "C13", "C14", "C15", "C16", "C17", "C18", "C19", "C20"],
'MolarComposition[-]': [0.53, 3.3, 72.98, 7.68, 4.1, 0.7, 1.42, 0.54, 0.67, 0.85, 1.33, 1.33, 0.78, 0.61, 0.42, 0.33, 0.42, 0.24, 0.3, 0.17, 0.21, 0.15, 0.15, 0.8],
'MolarMass[kg/mol]': [None,None, None,None,None,None,None,None,None,None,0.0913, 0.1041, 0.1188, 0.136, 0.150, 0.164, 0.179, 0.188, 0.204, 0.216, 0.236, 0.253, 0.27, 0.391],
'RelativeDensity[-]': [None,None, None,None,None,None,None,None,None,None, 0.746, 0.768, 0.79, 0.787, 0.793, 0.804, 0.817, 0.83, 0.835, 0.843, 0.837, 0.84, 0.85, 0.877]
}
gascondensatedf = pd.DataFrame(gascondensate)
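# A possible continuation (illustrative sketch, not part of the original script):
# build a fluid from the characterized dataframe with the helpers imported above,
# flash it and print the resulting phase distribution.
# gascondensateFluid = fluid_df(gascondensatedf)
# TPflash(gascondensateFluid)
# printFrame(gascondensateFluid)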
from torchero.utils.data.datasets import Dataset
import pandas as pd
class TabularDataset(Dataset):
""" Dataset for labeled text
"""
@classmethod
def from_json(
cls,
path,
field_names,
target_field_names=None,
transform=None,
target_transform=None,
orient='records'
):
""" Creates an instance from a json file (with list of dict fields scheme).
Arguments:
path (str or Path): Path of the json file
field_name (str or int): If a string is passed the column name for the
texts. If a int is passed the index of the column for the text
target_file_name (str, int, list of str or list of str): If a
string is passed the column name for the target. If a int is
passed the index of the column for the target
transforms (callable, optional): Transform functions for list of fields
target_transform (callable, optional): Transform function for the list of target fields
"""
squeeze = isinstance(field_names, (int, str))
if squeeze:
field_names = [field_names]
if target_field_names is not None:
squeeze_targets = isinstance(target_field_names, (int, str))
if squeeze_targets:
target_field_names = [target_field_names]
else:
squeeze_targets = False
target_field_names = []
records = pd.read_json(path, orient=orient)
data = records[field_names]
if squeeze:
data = data[data.columns[0]]
if target_field_names:
target_data = records[target_field_names]
if squeeze_targets:
target_data = target_data[target_data.columns[0]]
else:
target_data = None
return cls(
data,
target_data,
transform=transform,
target_transform=target_transform,
squeeze_data=squeeze,
squeeze_targets=squeeze_targets)
@classmethod
def from_csv(
cls,
path,
columns,
target_columns=None,
delimiter=",",
quotechar='"',
has_header=True,
column_names=None,
compression='infer',
transform=None,
target_transform=None,
):
""" Creates an instance from a csv file
Arguments:
path (str or Path): Path of the csv file
columns (str, int, or list of str/int): Column names (or column indices)
to use for the input data
target_columns (str, int, or list of str/int): Column names (or column
indices) to use for the target data
delimiter (str): Character used to splits the csv fields
quotechar (str): Character used to delimit the text strings
has_header (bool): True if the csv contains a header. False,
otherwise
column_names (list): List of columns names
compression (str): Compression method for the csv. Set to 'infer'
to infer it from the extension
transform (callable, optional): Transform functions for the row
target_transform (callable, optional): Transform functions for the
row target
"""
squeeze = isinstance(columns, (int, str))
if squeeze:
columns = [columns]
if target_columns is not None:
squeeze_targets = isinstance(target_columns, (int, str))
if squeeze_targets:
target_columns = [target_columns]
else:
squeeze_targets = False
target_columns = []
records = pd.read_csv(path,
usecols=columns + target_columns,
delimiter=delimiter,
quotechar=quotechar,
names=column_names,
compression=compression)
data = records[columns]
if target_columns:
target_data = records[target_columns]
else:
target_data = None
return cls(data,
target_data,
transform=transform,
target_transform=target_transform,
squeeze_data=squeeze,
squeeze_targets=squeeze_targets)
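# Illustrative usage sketch (file and column names are hypothetical):
#   dataset = TabularDataset.from_csv("reviews.csv",
#                                     columns="text",
#                                     target_columns="label")
# Passing single (string) column names squeezes the columns, so per the
# constructor docstring each item yields the raw value rather than a 1-tuple.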
def __init__(
self,
data,
target_data=None,
transform=None,
target_transform=None,
squeeze_data=True,
squeeze_targets=True,
):
""" Constructor
Arguments:
data (iter-like, pd.DataFrame, pd.Series): Input samples.
targets (iter-like, pd.DataFrame, pd.Series): Targets for each sample.
transform (callable, optional): Transform functions for the input samples
target_transform (callable, optional): Transform functions for the targets
squeeze_data (bool): When set to True if the data is a single column every item of the
dataset returns the column value instead of a tuple.
squeeze_targets (bool): When set to True if the target is a single column every item of the
dataset returns the value instead of a tuple.
"""
self.data = pd.DataFrame(data)
# Copyright 2017-2021 QuantRocket LLC - All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import pandas as pd
import numpy as np
import time
import requests
import json
import math
from moonshot.slippage import FixedSlippage
from moonshot.mixins import WeightAllocationMixin
from moonshot.cache import Cache
from moonshot.exceptions import MoonshotError, MoonshotParameterError
from quantrocket.price import get_prices
from quantrocket.master import list_calendar_statuses, download_master_file
from quantrocket.account import download_account_balances, download_exchange_rates
from quantrocket.blotter import list_positions, download_order_statuses
class Moonshot(
WeightAllocationMixin):
"""
Base class for Moonshot strategies.
To create a strategy, subclass this class. Implement your trading logic in the class
methods, and store your strategy parameters as class attributes.
Class attributes include built-in Moonshot parameters which you can override, as well
as your own custom parameters.
To run a backtest, at minimum you must implement `prices_to_signals`, but in general you will
want to implement the following methods (which are called in the order shown):
`prices_to_signals` -> `signals_to_target_weights` -> `target_weights_to_positions` -> `positions_to_gross_returns`
To trade (i.e. generate orders intended to be placed, but actually placed by other services
than Moonshot), you must also implement `order_stubs_to_orders`. Order generation for trading
follows the path shown below:
`prices_to_signals` -> `signals_to_target_weights` -> `order_stubs_to_orders`
Parameters
----------
CODE : str, required
the strategy code
DB : str, required
code of db to pull data from
DB_FIELDS : list of str, optional
fields to retrieve from db (defaults to ["Open", "Close", "Volume"])
DB_TIMES : list of str (HH:MM:SS), optional
for intraday databases, only retrieve these times
DB_DATA_FREQUENCY : str, optional
Only applicable when DB specifies a Zipline bundle. Whether to query minute or
daily data. If omitted, defaults to minute data for minute bundles and to daily
data for daily bundles. This parameter only needs to be set to request daily data
from a minute bundle. Possible choices: daily, minute (or aliases d, m).
SIDS : list of str, optional
limit db query to these sids
UNIVERSES : list of str, optional
limit db query to these universes
EXCLUDE_SIDS : list of str, optional
exclude these sids from db query
EXCLUDE_UNIVERSES : list of str, optional
exclude these universes from db query
CONT_FUT : str, optional
pass this cont_fut option to db query (default None)
LOOKBACK_WINDOW : int, optional
get this many days additional data prior to the backtest start date or
trade date to account for rolling windows. If set to None (the default),
will use the largest value of any attributes ending with `*_WINDOW`, or
252 if no such attributes, and will further pad window based on any
`*_INTERVAL` attributes, which are interpreted as pandas offset aliases
(for example `REBALANCE_INTERVAL = 'Q'`). Set to 0 to disable.
NLV : dict, optional
dict of currency:NLV for each currency represented in the strategy. Can
alternatively be passed directly to backtest method.
COMMISSION_CLASS : Class or dict of (sectype,exchange,currency):Class, optional
the commission class to use. If strategy includes a mix of security types,
exchanges, or currencies, you can pass a dict mapping tuples of
(sectype,exchange,currency) to the different commission classes. By default
no commission is applied.
SLIPPAGE_CLASSES : iterable of slippage classes, optional
one or more slippage classes. By default no slippage is applied.
SLIPPAGE_BPS : float, optional
amount on one-slippage to apply to each trade in BPS (for example, enter 5 to deduct
5 BPS)
BENCHMARK : str, optional
the sid of a security in the historical data to use as the benchmark
BENCHMARK_DB : str, optional
the database containing the benchmark, if different from DB. BENCHMARK_DB
should contain end-of-day data, not intraday (but can be used with intraday
backtests).
BENCHMARK_TIME : str (HH:MM:SS), optional
use prices from this time of day as benchmark prices. Only applicable if
benchmark prices originate in DB (not BENCHMARK_DB), DB contains intraday
data, and backtest results are daily.
TIMEZONE : str, optional
convert timestamps to this timezone (if not provided, will be inferred
from securities universe if possible)
CALENDAR : str, optional
use this exchange's trading calendar to determine which date's signals
should be used for live trading. If the exchange is currently open,
today's signals will be used. If currently closed, the signals corresponding
to the last date the exchange was open will be used. If no calendar is specified,
today's signals will be used.
POSITIONS_CLOSED_DAILY : bool
if True, positions in backtests that fall on adjacent days are assumed to
be closed out and reopened each day rather than held continuously; this
impacts commission and slippage calculations (default is False, meaning
adjacent positions are assumed to be held continuously)
ALLOW_REBALANCE : bool or float
in live trading, whether to allow rebalancing of existing positions that
are already on the correct side. If True (the default), allow rebalancing.
If False, no rebalancing. If set to a positive decimal, allow rebalancing
only when the existing position differs from the target position by at least
this percentage. For example 0.5 means don't rebalance a position unless
the position will change by +/-50%.
CONTRACT_VALUE_REFERENCE_FIELD : str, optional
the price field to use for determining contract values for the purpose of
applying commissions and constraining weights in backtests and calculating
order quantities in trading. Defaults to the first available of Close, Open,
MinuteCloseClose, SecondCloseClose, LastPriceClose, BidPriceClose, AskPriceClose,
TimeSalesLastPriceClose, TimeSalesFilteredLastPriceClose, LastPriceMean,
BidPriceMean, AskPriceMean, TimeSalesLastPriceMean, TimeSalesFilteredLastPriceMean,
MinuteOpenOpen, SecondOpenOpen, LastPriceOpen, BidPriceOpen, AskPriceOpen,
TimeSalesLastPriceOpen, TimeSalesFilteredLastPriceOpen.
ACCOUNT_BALANCE_FIELD : str or list of str, optional
the account field to use for calculating order quantities as a percentage of
account equity. Applies to trading only, not backtesting. Default is
NetLiquidation. If a list of fields is provided, the minimum value is used.
For example, ['NetLiquidation', 'PreviousEquity'] means to use the lesser of
NetLiquidation or PreviousEquity to determine order quantities.
Examples
--------
Example of a minimal strategy that runs on a history db called "mexi-stk-1d" and buys when
the securities are above their 200-day moving average:
>>> class MexicoMovingAverage(Moonshot):
>>>
>>> CODE = "mexi-ma"
>>> DB = "mexi-stk-1d"
>>> MAVG_WINDOW = 200
>>>
>>> def prices_to_signals(self, prices):
>>> closes = prices.loc["Close"]
>>> mavgs = closes.rolling(self.MAVG_WINDOW).mean()
>>> signals = closes > mavgs.shift()
>>> return signals.astype(int)
"""
CODE = None
DB = None
DB_FIELDS = ["Open", "Close", "Volume"]
DB_TIMES = None
DB_DATA_FREQUENCY = None
SIDS = None
UNIVERSES = None
EXCLUDE_SIDS = None
EXCLUDE_UNIVERSES = None
CONT_FUT = None
LOOKBACK_WINDOW = None
NLV = None
COMMISSION_CLASS = None
SLIPPAGE_CLASSES = ()
SLIPPAGE_BPS = 0
BENCHMARK = None
BENCHMARK_DB = None
BENCHMARK_TIME = None
TIMEZONE = None
CALENDAR = None
POSITIONS_CLOSED_DAILY = False
ALLOW_REBALANCE = True
CONTRACT_VALUE_REFERENCE_FIELD = None
ACCOUNT_BALANCE_FIELD = None
def __init__(self):
self.is_trade = False
self.review_date = None # see trade() docstring
self.is_backtest = False
self._securities_master = None
self._backtest_results = {}
self._inferred_timezone = None
self._signal_date = None # set by _weights_to_today_weights
self._signal_time = None # set by _weights_to_today_weights
def prices_to_signals(self, prices):
"""
From a DataFrame of prices, return a DataFrame of signals. By convention,
signals should be 1=long, 0=cash, -1=short.
Must be implemented by strategy subclasses.
Parameters
----------
prices : DataFrame, required
multiindex (Field, Date) or (Field, Date, Time) DataFrame of
price/market data
Returns
-------
DataFrame
signals
Examples
--------
Buy when the close is above yesterday's 50-day moving average:
>>> def prices_to_signals(self, prices):
>>> closes = prices.loc["Close"]
>>> mavgs = closes.rolling(50).mean()
>>> signals = closes > mavgs.shift()
>>> return signals.astype(int)
"""
raise NotImplementedError("strategies must implement prices_to_signals")
def signals_to_target_weights(self, signals, prices):
"""
From a DataFrame of signals, return a DataFrame of target weights.
Whereas signals indicate the direction of the trades, weights
indicate both the direction and size. For example, -0.5 means a short
position equal to 50% of the equity allocated to the strategy.
Weights are used to help create orders in live trading, and to help
simulate executed positions in backtests.
The default implemention of this method evenly divides allocated
capital among the signals each period, but it is intended to be
overridden by strategy subclasses.
A variety of built-in weight allocation algorithms are provided by
and documented under `moonshot.mixins.WeightAllocationMixin`.
Parameters
----------
signals : DataFrame, required
a DataFrame of signals
prices : DataFrame, required
multiindex (Field, Date) or (Field, Date, Time) DataFrame
of price/market data
Returns
-------
DataFrame
weights
Examples
--------
The default implementation is shown below:
>>> def signals_to_target_weights(self, signals, prices):
>>> weights = self.allocate_equal_weights(signals) # provided by moonshot.mixins.WeightAllocationMixin
>>> return weights
"""
weights = self.allocate_equal_weights(signals)
return weights
def target_weights_to_positions(self, weights, prices):
"""
From a DataFrame of target weights, return a DataFrame of simulated
positions.
The positions should shift the weights based on when the weights
would be filled in live trading.
By default, assumes the position are taken in the period after the
weights were allocated. Intended to be overridden by strategy
subclasses.
Parameters
----------
weights : DataFrame, required
a DataFrame of weights
prices : DataFrame, required
multiindex (Field, Date) or (Field, Date, Time) DataFrame of
price/market data
Returns
-------
DataFrame
positions
Examples
--------
The default implemention is shown below (enter position in the period after
signal generation/weight allocation):
>>> def target_weights_to_positions(self, weights, prices):
>>> positions = weights.shift()
>>> return positions
"""
positions = weights.shift()
return positions
def positions_to_gross_returns(self, positions, prices):
"""
From a DataFrame of positions, return a DataFrame of returns before
commissions and slippage.
By default, assumes entry on the close on the period the position is
taken and calculates the return through the following period's close.
Intended to be overridden by strategy subclasses.
Parameters
----------
positions : DataFrame, required
a DataFrame of positions
prices : DataFrame, required
multiindex (Field, Date) or (Field, Date, Time) DataFrame of
price/market data
Returns
-------
DataFrame
gross returns
Examples
--------
The default implementation is shown below:
>>> def positions_to_gross_returns(self, positions, prices):
>>> closes = prices.loc["Close"]
>>> gross_returns = closes.pct_change() * positions.shift()
>>> return gross_returns
"""
closes = prices.loc["Close"]
gross_returns = closes.pct_change() * positions.shift()
return gross_returns
def order_stubs_to_orders(self, orders, prices):
"""
From a DataFrame of order stubs, creates a DataFrame of fully
specified orders.
Parameters
----------
orders : DataFrame
a DataFrame of order stubs, with columns Sid, Account, Action,
OrderRef, and TotalQuantity
prices : DataFrame
multiindex (Field, Date) or (Field, Date, Time) DataFrame of
price/market data
Returns
-------
DataFrame
a DataFrame of fully specified orders, with (at minimum) columns
Exchange, Tif, OrderType added
Examples
--------
The orders DataFrame provided to this method resembles the following:
>>> print(orders)
Sid Account Action OrderRef TotalQuantity
0 12345 U12345 SELL my-strategy 100
1 12345 U55555 SELL my-strategy 50
2 23456 U12345 BUY my-strategy 100
3 23456 U55555 BUY my-strategy 50
4 34567 U12345 BUY my-strategy 200
5 34567 U55555 BUY my-strategy 100
The default implemention creates MKT DAY orders and is
shown below:
>>> def order_stubs_to_orders(self, orders, prices):
>>> orders["OrderType"] = "MKT"
>>> orders["Tif"] = "DAY"
>>> return orders
Set a limit price equal to the prior closing price:
>>> closes = prices.loc["Close"]
>>> prior_closes = closes.shift()
>>> prior_closes = self.reindex_like_orders(prior_closes, orders)
>>> orders["OrderType"] = "LMT"
>>> orders["LmtPrice"] = prior_closes
"""
orders["OrderType"] = "MKT"
orders["Tif"] = "DAY"
return orders
def reindex_like_orders(self, df, orders):
"""
Reindexes a DataFrame (having Sids as columns and dates as index)
to match the shape of the orders DataFrame.
Parameters
----------
df : DataFrame, required
a DataFrame of arbitrary values with Sids as columns and
dates as index
orders : DataFrame, required
an orders DataFrame with a Sid column
Returns
-------
Series
a Series with an index matching orders
Examples
--------
Calculate prior closes (assuming daily bars) and reindex like
orders:
>>> closes = prices.loc["Close"]
>>> prior_closes = closes.shift()
>>> prior_closes = self.reindex_like_orders(prior_closes, orders)
Calculate prior closes (assuming 30-min bars) and reindex like
orders:
>>> session_closes = prices.loc["Close"].xs("15:30:00", level="Time")
>>> prior_closes = session_closes.shift()
>>> prior_closes = self.reindex_like_orders(prior_closes, orders)
"""
df = df.loc[self._signal_date]
if "Time" in df.index.names:
if not self._signal_time:
raise MoonshotError(
"cannot reindex DataFrame like orders because DataFrame contains "
"'Time' in index, please take a cross-section first, for example: "
"`my_dataframe.xs('15:45:00', level='Time')`")
df = df.loc[self._signal_time]
df.name = "_MoonshotOther"
df = orders.join(df, on="Sid")._MoonshotOther
df.name = None
return df
def orders_to_child_orders(self, orders):
"""
From a DataFrame of orders, returns a DataFrame of child orders
(bracket orders) to be submitted if the parent orders fill.
An OrderId column will be added to the orders DataFrame, and child
orders will be linked to it via a ParentId column. The Action
(BUY/SELL) will be reversed on the child orders but otherwise the
child orders will be identical to the parent orders.
Parameters
----------
orders : DataFrame, required
an orders DataFrame
Returns
-------
DataFrame
a DataFrame of child orders
Examples
--------
>>> orders.head()
Sid Action TotalQuantity Exchange OrderType Tif
0 12345 BUY 200 SMART MKT Day
1 23456 BUY 400 SMART MKT Day
>>> child_orders = self.orders_to_child_orders(orders)
>>> child_orders.loc[:, "OrderType"] = "MOC"
>>> orders = pd.concat([orders,child_orders])
>>> orders.head()
Sid Action TotalQuantity Exchange OrderType Tif OrderId ParentId
0 12345 BUY 200 SMART MKT Day 0 NaN
1 23456 BUY 400 SMART MKT Day 1 NaN
0 12345 SELL 200 SMART MOC Day NaN 0
1 23456 SELL 400 SMART MOC Day NaN 1
"""
if "OrderId" not in orders.columns:
orders["OrderId"] = orders.index.astype(str) + ".{0}".format(time.time())
child_orders = orders.copy()
child_orders.rename(columns={"OrderId":"ParentId"}, inplace=True)
child_orders.loc[orders.Action=="BUY", "Action"] = "SELL"
child_orders.loc[orders.Action=="SELL", "Action"] = "BUY"
return child_orders
def _quantities_to_order_stubs(self, quantities):
"""
From a DataFrame of quantities to be ordered (with Sids as index,
Accounts as columns), returns a DataFrame of order stubs.
quantities in:
Account U12345 U55555
Sid
12345 -100 -50
23456 100 50
34567 200 100
order_stubs out:
Sid Account Action OrderRef TotalQuantity
0 12345 U12345 SELL my-strategy 100
1 12345 U55555 SELL my-strategy 50
2 23456 U12345 BUY my-strategy 100
3 23456 U55555 BUY my-strategy 50
4 34567 U12345 BUY my-strategy 200
5 34567 U55555 BUY my-strategy 100
"""
quantities.index.name = "Sid"
quantities.columns.name = "Account"
quantities = quantities.stack()
quantities.name = "Quantity"
order_stubs = quantities.to_frame().reset_index()
order_stubs["Action"] = np.where(order_stubs.Quantity > 0, "BUY", "SELL")
order_stubs = order_stubs.loc[order_stubs.Quantity != 0].copy()
order_stubs["OrderRef"] = self.CODE
order_stubs["TotalQuantity"] = order_stubs.Quantity.abs()
order_stubs = order_stubs.drop("Quantity",axis=1)
return order_stubs
def _get_nlv(self):
"""
Return a dict of currency:NLV for each currency in the strategy. By
default simply returns the NLV class attribute.
"""
return self.NLV
def _positions_to_turnover(self, positions):
"""
Given a dataframe of positions, returns a dataframe of turnover. 0
indicates no turnover; 1 indicates going from 100% short to cash or
cash to 100% long (for example), and vice versa; and 2 indicates
going from 100% short to %100 long (for example).
"""
# Intraday trades are opened and closed each day there's a position,
# so the turnover is twice the positions.
if self.POSITIONS_CLOSED_DAILY:
turnover = positions * 2
else:
turnover = positions.fillna(0).diff()
return turnover.abs()
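# Worked example: for a single sid with positions 0.0, 1.0, 1.0, -1.0 on four
# adjacent days, the default (continuously held) turnover is |diff| = 1.0, 0.0,
# 2.0 day over day (flipping from 100% long to 100% short counts as 2), whereas
# POSITIONS_CLOSED_DAILY = True would instead yield 2 * |position| every day.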
def _weights_to_today_weights(self, weights, prices):
"""
From a DataFrame of target weights, extract the row that contains the
weights that should be used for today's trading. Returns a Series of
weights by sid:
Sid
12345 -0.2
23456 0
34567 0.1
The date whose weights are selected is usually today, but if CALENDAR
is used and the market is closed it will be the date when the market
closed. Can also be overridden by review_date.
For intraday strategies, the time whose weights are selected is the
latest time that is earlier than the time at which the strategy is
running.
"""
# First, get the signal date
# Use review_date if set
if self.review_date:
dt = pd.Timestamp(self.review_date)
# Else use trading calendar if provided
elif self.CALENDAR:
status = list_calendar_statuses([self.CALENDAR])[self.CALENDAR]
# If the exchange is closed, the signals should correspond to the
# date the exchange was last open
if status["status"] == "closed":
dt = pd.Timestamp(status["since"])
# If the exchange is open, the signals should correspond to
# today's date
else:
dt = pd.Timestamp.now(tz=status["timezone"])
# If no trading calendar, use today's date (in strategy timezone)
else:
tz = self.TIMEZONE or self._inferred_timezone
dt = pd.Timestamp.now(tz=tz)
# Keep only the date as the signal_date
self._signal_date = pd.Timestamp(dt.date())
# extract the current time (or review date time)
trade_time = dt.strftime("%H:%M:%S")
weights_is_intraday = "Time" in weights.index.names
try:
today_weights = weights.loc[self._signal_date]
except KeyError:
if weights_is_intraday:
max_date = weights.index.get_level_values("Date").max()
else:
max_date = weights.index.max()
msg = ("expected signal date {0} not found in target weights DataFrame, "
"is the underlying data up-to-date? (max date is {1})")
if not self.CALENDAR and not weights_is_intraday and self._signal_date.date() - max_date.date() == pd.Timedelta(days=1):
msg += (" If your strategy trades before the open and {0} data "
"is not expected, try setting CALENDAR = <exchange>")
raise MoonshotError(msg.format(
self._signal_date.date().isoformat(),
max_date.date().isoformat()))
if not weights_is_intraday:
print("using target weights for {0} to create orders".format(self._signal_date.date().isoformat()))
return today_weights
# For intraday strategies, select the weights from the latest time
# that is earlier than the trade time. Note that we select the
# expected time from the entire weights DataFrame, which will result
# in a failure if that time is missing for the trade date
unique_times = weights.index.get_level_values("Time").unique()
self._signal_time = unique_times[unique_times < trade_time].max()
if pd.isnull(self._signal_time):
msg = (
"cannot determine which target weights to use for orders because "
"target weights DataFrame contains no times earlier than trade time {0} "
"for signal date {1}".format(
trade_time,
self._signal_date.date().isoformat()))
if self.review_date:
msg += ", please adjust the review_date"
raise MoonshotError(msg)
# get_prices inserts all times into each day's index, thus
# the signal_time will be in the weights DataFrame even if the data
# is stale. Instead, to validate the data, we make sure that there is
# at least one nonnull field in the prices DataFrame at the
# signal_time on the signal_date
today_prices = prices.xs(self._signal_date, level="Date")
notnull_today_prices = today_prices[today_prices.notnull().any(axis=1)]
try:
no_signal_time_prices = notnull_today_prices.xs(self._signal_time, level="Time").empty
except KeyError:
no_signal_time_prices = True
if no_signal_time_prices:
msg = ("no {0} data found in prices DataFrame for signal date {1}, "
"is the underlying data up-to-date? (max time for {1} "
"is {2})")
notnull_max_date = notnull_today_prices.iloc[-1].name[-1]
raise MoonshotError(msg.format(
self._signal_time,
self._signal_date.date().isoformat(),
notnull_max_date))
today_weights = today_weights.loc[self._signal_time]
print("using target weights for {0} at {1} to create orders".format(
self._signal_date.date().isoformat(),
self._signal_time))
return today_weights
def _get_commissions(self, positions, prices):
"""
Returns the commissions to be subtracted from the returns.
"""
if not self.COMMISSION_CLASS:
return pd.DataFrame(0, index=positions.index, columns=positions.columns)
turnover = self._positions_to_turnover(positions)
contract_values = self._get_contract_values(prices)
prices_is_intraday = "Time" in prices.index.names
positions_is_intraday = "Time" in positions.index.names
if prices_is_intraday and not positions_is_intraday:
contract_values = contract_values.groupby(
contract_values.index.get_level_values("Date")).first()
fields = prices.index.get_level_values("Field").unique()
if "Nlv" in self._securities_master.columns:
nlvs = contract_values.apply(lambda x: self._securities_master.Nlv, axis=1)
else:
nlvs = None
# handle the case of only one commission class
if not isinstance(self.COMMISSION_CLASS, dict):
commissions = self.COMMISSION_CLASS.get_commissions(contract_values, turnover=turnover, nlvs=nlvs)
return commissions
# handle multiple commission classes per sectype/exchange/currency
# first, tuple-ize the dict keys in case they are lists
commission_classes = {}
for sec_group, commission_cls in self.COMMISSION_CLASS.items():
commission_classes[tuple(sec_group)] = commission_cls
defined_sec_groups = set([tuple(k) for k in commission_classes.keys()])
# Reindex master fields like contract_values
sec_types = contract_values.apply(lambda x: self._securities_master.SecType, axis=1)
exchanges = contract_values.apply(lambda x: self._securities_master.Exchange, axis=1)
currencies = contract_values.apply(lambda x: self._securities_master.Currency, axis=1)
required_sec_groups = set([
tuple(s.split("|")) for s in (sec_types+"|"+exchanges+"|"+currencies).iloc[-1].unique()])
missing_sec_groups = required_sec_groups - defined_sec_groups
if missing_sec_groups:
raise MoonshotParameterError("expected a commission class for each combination of (sectype,exchange,currency) "
"but none is defined for {0}".format(
", ".join(["({0})".format(",".join(t)) for t in missing_sec_groups])))
all_commissions = pd.DataFrame(None, index=positions.index, columns=positions.columns)
for sec_group in required_sec_groups:
commission_cls = commission_classes[sec_group]
sec_type, exchange, currency = sec_group
sec_group_commissions = commission_cls.get_commissions(
contract_values, turnover=turnover, nlvs=nlvs)
in_sec_group = (sec_types == sec_type) & (exchanges == exchange) & (currencies == currency)
all_commissions = sec_group_commissions.where(in_sec_group, all_commissions)
return all_commissions
def _get_slippage(self, positions, prices):
"""
Returns the slippage to be subtracted from the returns.
"""
turnover = self._positions_to_turnover(positions)
slippage = pd.DataFrame(0, index=turnover.index, columns=turnover.columns)
slippage_classes = self.SLIPPAGE_CLASSES or ()
if not isinstance(slippage_classes, (list, tuple)):
slippage_classes = [slippage_classes]
for slippage_class in slippage_classes:
slippage += slippage_class().get_slippage(turnover, positions, prices)
if self.SLIPPAGE_BPS:
slippage += FixedSlippage(self.SLIPPAGE_BPS/10000.0).get_slippage(turnover, positions, prices)
return slippage.fillna(0)
def _constrain_weights(self, weights, prices):
"""
Constrains the weights by the quantity constraints defined in
limit_position_sizes.
"""
max_quantities_for_longs, max_quantities_for_shorts = self.limit_position_sizes(prices)
if max_quantities_for_longs is None and max_quantities_for_shorts is None:
return weights
if "Nlv" not in self._securities_master.columns:
raise MoonshotParameterError("must provide NLVs if using limit_position_sizes")
contract_values = self._get_contract_values(prices)
contract_values = contract_values.fillna(method="ffill")
nlvs_in_trade_currency = contract_values.apply(lambda x: self._securities_master.Nlv, axis=1)
prices_is_intraday = "Time" in prices.index.names
weights_is_intraday = "Time" in weights.index.names
if prices_is_intraday and not weights_is_intraday:
# we somewhat arbitrarily pick the contract value as of the
# earliest time of day; this contract value might be somewhat
# stale but it avoids the possible lookahead bias of using, say,
# the contract value as of the latest time of day. We could ask
# the user to supply a time but that is rather clunky.
earliest_time = prices.index.get_level_values("Time").unique().min()
contract_values = contract_values.xs(earliest_time, level="Time")
nlvs_in_trade_currency = nlvs_in_trade_currency.xs(earliest_time, level="Time")
# Convert weights to quantities
trade_values_in_trade_currency = weights * nlvs_in_trade_currency
# Note: we take abs() of contract_values because combos can have
# negative prices which would invert the sign of the trade
quantities = trade_values_in_trade_currency / contract_values.where(contract_values != 0).abs()
quantities = quantities.round().fillna(0).astype(int)
# Constrain quantities
if max_quantities_for_longs is not None:
max_quantities_for_longs = max_quantities_for_longs.abs()
quantities = max_quantities_for_longs.where(
quantities > max_quantities_for_longs, quantities)
if max_quantities_for_shorts is not None:
max_quantities_for_shorts = -max_quantities_for_shorts.abs()
quantities = max_quantities_for_shorts.where(
quantities < max_quantities_for_shorts, quantities)
# Convert quantities back to weights
target_trade_values_in_trade_currency = quantities * contract_values
weights = target_trade_values_in_trade_currency / nlvs_in_trade_currency
return weights
def limit_position_sizes(self, prices):
"""
This method should return a tuple of DataFrames::
return max_quantities_for_longs, max_quantities_for_shorts
where the DataFrames define the maximum number of shares/contracts
that can be held long and short, respectively. Maximum limits might
be based on available liquidity (recent volume), shortable shares
available, etc.
The shape and alignment of the returned DataFrames should match that of the
target_weights returned by `signals_to_target_weights`. Target weights will be
reduced, if necessary, based on max_quantities_for_longs and max_quantities_for_shorts.
Return None for one or both DataFrames to indicate "no limits."
For example to limit shorts but not longs::
return None, max_quantities_for_shorts
Within a DataFrame, any None or NaNs will be treated as "no limit" for that
particular security and date.
Note that max_quantities_for_shorts can equivalently be represented with
positive or negative numbers. This is OK::
AAPL
2018-05-18 100
2018-05-19 100
This is also OK::
AAPL
2018-05-18 -100
2018-05-19 -100
Both of the above DataFrames would mean: short no more than 100 shares of
AAPL.
Parameters
----------
prices : DataFrame, required
multiindex (Field, Date) or (Field, Date, Time) DataFrame of
price/market data
Returns
-------
tuple of (DataFrame, DataFrame)
max quantities for long, max quantities for shorts
Examples
--------
Limit quantities to 1% of 15-day average daily volume:
>>> def limit_position_sizes(self, prices):
>>> # assumes end-of-day bars, for intraday bars, use `.xs` to
>>> # select a time of day
>>> volumes = prices.loc["Volume"]
>>> mean_volumes = volumes.rolling(15).mean()
>>> max_shares = (mean_volumes * 0.01).round()
>>> max_quantities_for_longs = max_quantities_for_shorts = max_shares
>>> return max_quantities_for_longs, max_quantities_for_shorts
"""
max_quantities_for_longs = None
max_quantities_for_shorts = None
return max_quantities_for_longs, max_quantities_for_shorts
@classmethod
def _get_lookback_window(cls):
"""
Returns cls.LOOKBACK_WINDOW if set, otherwise infers the lookback
window from `_WINDOW`, defaulting to 252. Then increases the lookback
based on `_INTERVAL` attributes, which are interpreted as pandas
frequencies (for example `REBALANCE_INTERVAL` = 'Q'). This ensures the
lookback is sufficient when resampling to quarterly etc. for periodic
rebalancing.
"""
if cls.LOOKBACK_WINDOW is not None:
return cls.LOOKBACK_WINDOW
window_attrs = [getattr(cls, attr) for attr in dir(cls) if attr.endswith("_WINDOW")]
windows = [attr for attr in window_attrs if isinstance(attr, int)]
lookback_window = max(windows) if windows else 252
# Add _INTERVAL if any
offset_aliases = [getattr(cls, attr) for attr in dir(cls) if attr.endswith("_INTERVAL")]
intervals = []
for freq in offset_aliases:
if not freq:
continue
try:
periods = pd.date_range(start=pd.to_datetime('today'),
freq=freq, periods=2)
except ValueError:
continue
# Use the period date range to count bdays in period
bdays = len(pd.bdate_range(start=periods[0], end=periods[1]))
intervals.append(bdays)
if intervals:
lookback_window += max(intervals)
return lookback_window
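# Illustrative example (not from the original source): a hypothetical strategy
# defining MAVG_WINDOW = 200 and REBALANCE_INTERVAL = "Q" would get a lookback
# of roughly 200 + ~65 business days, so resampling to quarterly periods still
# has a full rebalance period of history available.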
def _load_master_file(self, sids, nlv=None, no_cache=False):
"""
Loads master file from cache or master service.
"""
securities = None
fields = [
"Currency", "Multiplier", "PriceMagnifier",
"Exchange", "SecType", "Symbol", "Timezone"]
if self.is_backtest and not no_cache:
# try to load from cache
securities = Cache.get(sids, prefix="_master")
if securities is None:
# query master
f = io.StringIO()
download_master_file(
f,
sids=sids,
fields=fields)
securities = pd.read_csv(f, index_col="Sid")
if self.is_backtest:
Cache.set(sids, securities, prefix="_master")
if not self.TIMEZONE:
timezones = securities.Timezone.unique()
if len(timezones) > 1:
raise MoonshotParameterError(
"cannot infer timezone because multiple timezones are present "
"in data, please specify TIMEZONE explicitly (timezones: {0})".format(
", ".join(timezones)))
self._inferred_timezone = timezones[0]
# Append NLV if applicable
nlvs = nlv or self._get_nlv()
if nlvs:
# For FX, store NLV based on the quote currency (extracted from the Symbol)
# not Currency (100 EUR.USD = 100 EUR, not 100 USD)
currencies = securities.Symbol.astype(str).str.split(".").str[0].where(
securities.SecType=="CASH", securities.Currency)
missing_nlvs = set(currencies) - set(nlvs.keys())
if missing_nlvs:
raise MoonshotParameterError(
"NLV dict is missing values for required currencies: {0}".format(
", ".join(missing_nlvs)))
securities["Nlv"] = currencies.apply(lambda currency: nlvs.get(currency, None))
self._securities_master = securities.sort_index()
@classmethod
def _get_start_date_with_lookback(cls, start_date):
"""
Returns the start_date adjusted to incorporate the LOOKBACK_WINDOW,
plus a buffer. LOOKBACK_WINDOW is measured in trading days, but we
query the db in calendar days. Convert from weekdays (260 per year)
to calendar days, assuming 25 holidays (NYSE has ~9 per year, TSEJ
has ~19), plus a buffer (which varies by window size) to be safe.
"""
lookback_window = cls._get_lookback_window()
days_per_year = 365
weekdays_per_year = 260
max_holidays_per_year = 25
trading_days_per_year = weekdays_per_year - max_holidays_per_year
# Vary the buffer by the window length (for very short windows, the
# user might not want to load too much data so we want to keep the
# buffer reasonably small)
# No window, no buffer
if lookback_window == 0:
buffer = 0
# for window < 1 week, a 2 day buffer (plus the calendar day to
# trading day conversion) will suffice
elif lookback_window <= 5:
buffer = 2
# longer than a week, err on the side of loading ample data
else:
buffer = 10
start_date = pd.Timestamp(start_date) - pd.Timedelta(
days=math.ceil(lookback_window*days_per_year/trading_days_per_year) + buffer)
return start_date.date().isoformat()
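# Worked example (illustrative assumption): with the default 252-bar lookback,
# 252 * 365 / 235 ~= 392 calendar days, plus the 10-day buffer, so the query
# start is pushed back roughly 402 calendar days before the requested start.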
def get_prices(self, start_date, end_date=None, nlv=None, no_cache=False):
"""
Downloads prices from a history db and/or real-time aggregate db.
Downloads security details from the master db.
"""
if start_date:
start_date = self._get_start_date_with_lookback(start_date)
codes = self.DB
if not isinstance(codes, (list, tuple)):
codes = [self.DB]
sids = self.SIDS or []
# Add benchmark sid if needed. It's needed if there is no
# BENCHMARK_DB, and sids or universes are specified (if they're
# not specified, the whole db will be queried, including the
# benchmark)
if (
self.is_backtest
and self.BENCHMARK
and not self.BENCHMARK_DB
and (sids or self.UNIVERSES)
):
sids = list(sids).copy()
sids.append(self.BENCHMARK)
kwargs = dict(
codes=codes,
start_date=start_date,
end_date=end_date,
universes=self.UNIVERSES,
sids=sids,
exclude_universes=self.EXCLUDE_UNIVERSES,
exclude_sids=self.EXCLUDE_SIDS,
times=self.DB_TIMES,
cont_fut=self.CONT_FUT,
fields=self.DB_FIELDS,
timezone=self.TIMEZONE,
data_frequency=self.DB_DATA_FREQUENCY
)
if not self.TIMEZONE:
kwargs["infer_timezone"] = True
prices = None
if self.is_backtest and not no_cache:
# If no end_date is specified (indicating the user wants
# up-to-date history), we don't want to use the cache if the dbs
# were more recently modified (indicating new data collection).
# If there's an end date, we use the cache if possible. (The user
# can use --no-cache to disable cache usage if needed.)
if not end_date:
unless_dbs_modified = {
"services": ["history", "realtime"],
"codes": codes}
else:
unless_dbs_modified = None
# try to load from cache
prices = Cache.get(kwargs, prefix="_history", unless_dbs_modified=unless_dbs_modified)
if prices is None:
prices = get_prices(**kwargs)
if self.is_backtest:
Cache.set(kwargs, prices, prefix="_history")
self._load_master_file(prices.columns.tolist(), nlv=nlv, no_cache=no_cache)
return prices
def _prices_to_signals(self, prices, **kwargs):
"""
Converts a prices DataFrame to a DataFrame of signals. This private
method, which simply calls the user-modified public method
`prices_to_signals`, exists for the benefit of the MoonshotML
subclass, which overrides it.
"""
return self.prices_to_signals(prices)
def backtest(self, start_date=None, end_date=None, nlv=None, allocation=1.0,
label_sids=False, no_cache=False):
"""
Backtest a strategy and return a DataFrame of results.
Parameters
----------
start_date : str (YYYY-MM-DD), optional
the backtest start date (default is to include all history in db)
end_date : str (YYYY-MM-DD), optional
the backtest end date (default is to include all history in db)
nlv : dict
dict of currency:nlv. Should contain a currency:nlv pair for
each currency represented in the strategy
allocation : float
how much to allocate to the strategy
label_sids : bool
replace <Sid> with <Symbol>(<Sid>) in columns in output
for better readability (default False)
no_cache : bool
don't use cached files even if available. Using cached files speeds
up backtests but may be undesirable if underlying data has changed.
See http://qrok.it/h/mcache to learn more about caching in Moonshot.
Returns
-------
DataFrame
multiindex (Field, Date) or (Field, Date, Time) DataFrame of
backtest results
"""
self.is_backtest = True
allocation = allocation or 1.0
prices = self.get_prices(start_date, end_date, nlv=nlv, no_cache=no_cache)
signals = self._prices_to_signals(prices, no_cache=no_cache)
weights = self.signals_to_target_weights(signals, prices)
weights = weights * allocation
weights = self._constrain_weights(weights, prices)
positions = self.target_weights_to_positions(weights, prices)
gross_returns = self.positions_to_gross_returns(positions, prices)
commissions = self._get_commissions(positions, prices)
slippages = self._get_slippage(positions, prices)
returns = gross_returns.fillna(0) - commissions - slippages
turnover = self._positions_to_turnover(positions)
total_holdings = (positions.fillna(0) != 0).astype(int)
results_are_intraday = "Time" in signals.index.names
all_results = dict(
AbsExposure=positions.abs(),
AbsWeight=weights.abs(),
Commission=commissions,
NetExposure=positions,
Return=returns,
Signal=signals,
Slippage=slippages,
TotalHoldings=total_holdings,
Turnover=turnover,
Weight=weights)
# validate that custom backtest results are daily if results are
# daily
for custom_name, custom_df in self._backtest_results.items():
if "Time" in custom_df.index.names and not results_are_intraday:
raise MoonshotParameterError(
"custom DataFrame '{0}' won't concat properly with 'Time' in index, "
"please take a cross-section first, for example: "
"`my_dataframe.xs('15:45:00', level='Time')`".format(custom_name))
all_results.update(self._backtest_results)
if self.BENCHMARK:
all_results["Benchmark"] = self._get_benchmark(prices, daily=not results_are_intraday)
results = pd.concat(all_results, keys=list(sorted(all_results.keys())))
names = ["Field","Date"]
if results.index.nlevels == 3:
names.append("Time")
results.index.set_names(names, inplace=True)
if label_sids:
symbols = self._securities_master.Symbol
symbols_with_sids = symbols.astype(str) + "(" + symbols.index.astype(str) + ")"
results.rename(columns=symbols_with_sids.to_dict(), inplace=True)
# truncate at requested start_date
if start_date:
results = results.iloc[
results.index.get_level_values("Date") >= | pd.Timestamp(start_date) | pandas.Timestamp |
import pandas as pd
import numpy as np
from datetime import timedelta
from src.d00_utils.utils import resample2weekly
from src.d00_utils.constants import *
import os
class DengueDataApi:
def __init__(self, interpolate=True):
features_train = pd.read_csv(PATH_DATA_RAW + "dengue_features_train.csv", index_col=INDEX_COLS).sort_index()
features_train[WEEK_START_DATE_COL] = pd.to_datetime(features_train[WEEK_START_DATE_COL])
features_test = pd.read_csv(PATH_DATA_RAW + "dengue_features_test.csv", index_col=INDEX_COLS).sort_index()
features_test[WEEK_START_DATE_COL] = pd.to_datetime(features_test[WEEK_START_DATE_COL])
labels_train = pd.read_csv(PATH_DATA_RAW + "dengue_labels_train.csv", index_col=INDEX_COLS).sort_index()
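# The loop below appears to repair year-end rows whose week_start_date is out
# of order (the final week of a year dated earlier than the week before it) by
# bumping the last date to one week after the second-to-last date.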
for features_data in [features_test, features_train]:
for city in features_data.index.get_level_values('city').unique():
for year in features_data.loc[city].index.get_level_values('year').unique():
city_year_data = features_data.loc[city].loc[year]
second_to_last_date = city_year_data[WEEK_START_DATE_COL].iloc[-2]
last_date = city_year_data[WEEK_START_DATE_COL].iloc[-1]
if second_to_last_date > last_date:
key = (city, year, city_year_data.index[-1])
features_data.at[key, WEEK_START_DATE_COL] = second_to_last_date + timedelta(weeks=1)
labels_train = labels_train.reindex(features_train.index)
features_train.reset_index(inplace=True)
features_train.set_index(['city', 'year', WEEK_START_DATE_COL], inplace=True)
features_test.reset_index(inplace=True)
features_test.set_index(['city', 'year', WEEK_START_DATE_COL], inplace=True)
labels_train.index = features_train.index
self.__features_train = features_train
self.__features_test = features_test
self.__labels_train = labels_train
x_train = self.__features_train[FEATURE_COLS].copy()
x_test = self.__features_test[FEATURE_COLS].copy()
# handle missing values
if interpolate:
x_train = x_train.interpolate()
x_test = x_test.interpolate()
# transform variables
x_train[LOG_TRANSFORM] = x_train[LOG_TRANSFORM].apply(lambda x: np.log(x+1))
x_test[LOG_TRANSFORM] = x_test[LOG_TRANSFORM].apply(lambda x: np.log(x+1))
# normalize covariates
self.__x_mean = x_train.mean()
self.__x_std = x_train.std()
self.__x_data = self.normalize_x_data(x_train)
self.__x_test = self.normalize_x_data(x_test)
self.__y_data = self.__labels_train['total_cases'].interpolate()
def get_features_train(self):
return self.__features_train.copy()
def get_labels_train(self):
return self.__labels_train.copy()
def get_features_test(self):
return self.__features_test.copy()
def get_x_data(self):
return self.__x_data.copy()
def get_y_data(self):
return self.__y_data.copy()
def normalize_x_data(self, x_data):
return (x_data - self.__x_mean.values[np.newaxis, :]) / self.__x_std.values[np.newaxis, :]
@staticmethod
def interpolate_nan_(x_data):
# x_data.sort_index(inplace=True)
for city in x_data.index.get_level_values('city').unique():
for year in x_data.loc[city].index.get_level_values('year').unique():
for col in x_data.columns:
interpolated_data = x_data[col].loc[city].loc[year].interpolate()
x_data[col].loc[city].loc[year].loc[interpolated_data.index] = interpolated_data.values
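# Caveat: the chained .loc assignment above relies on pandas writing back
# through a chained-indexing view, which is not guaranteed (it can trigger
# SettingWithCopyWarning); a single x_data.loc[(city, year), col] assignment
# would be the safer pattern.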
return x_data
def split_data(self, train_ratio=0.7, seed=1992, random=True):
x_train = []
y_train = []
x_validate = []
y_validate = []
np.random.seed(seed=seed)
idx = pd.IndexSlice
for city in self.__y_data.index.get_level_values('city').unique():
year_values = self.__y_data.loc[city].index.get_level_values('year').unique()
n_train = int(train_ratio * len(year_values))
if random:
train_years = pd.Index(np.random.choice(year_values, n_train, replace=False), name=year_values.name)
else:
train_years = pd.Index(year_values[:n_train], name=year_values.name)
validate_years = year_values.difference(train_years)
x_train += [self.__x_data.loc[idx[city, train_years, :]]]
x_validate += [self.__x_data.loc[idx[city, validate_years, :]]]
y_train += [self.__y_data.loc[idx[city, train_years, :]]]
y_validate += [self.__y_data.loc[idx[city, validate_years, :]]]
x_train = pd.concat(x_train, axis=0).sort_index()
y_train = | pd.concat(y_train, axis=0) | pandas.concat |
# coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import timedelta
from numpy import nan
import numpy as np
import pandas as pd
from pandas import (Series, isnull, date_range,
MultiIndex, Index)
from pandas.tseries.index import Timestamp
from pandas.compat import range
from pandas.util.testing import assert_series_equal
import pandas.util.testing as tm
from .common import TestData
def _skip_if_no_pchip():
try:
from scipy.interpolate import pchip_interpolate # noqa
except ImportError:
import nose
raise nose.SkipTest('scipy.interpolate.pchip missing')
def _skip_if_no_akima():
try:
from scipy.interpolate import Akima1DInterpolator # noqa
except ImportError:
import nose
raise nose.SkipTest('scipy.interpolate.Akima1DInterpolator missing')
class TestSeriesMissingData(TestData, tm.TestCase):
_multiprocess_can_split_ = True
def test_timedelta_fillna(self):
# GH 3371
s = Series([Timestamp('20130101'), Timestamp('20130101'), Timestamp(
'20130102'), Timestamp('20130103 9:01:01')])
td = s.diff()
# reg fillna
result = td.fillna(0)
expected = Series([timedelta(0), timedelta(0), timedelta(1), timedelta(
days=1, seconds=9 * 3600 + 60 + 1)])
assert_series_equal(result, expected)
# interpreted as seconds
result = td.fillna(1)
expected = Series([timedelta(seconds=1), timedelta(0), timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1)])
assert_series_equal(result, expected)
result = td.fillna(timedelta(days=1, seconds=1))
expected = Series([timedelta(days=1, seconds=1), timedelta(
0), timedelta(1), timedelta(days=1, seconds=9 * 3600 + 60 + 1)])
assert_series_equal(result, expected)
result = td.fillna(np.timedelta64(int(1e9)))
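# A unit-less np.timedelta64 integer is interpreted as nanoseconds here, so
# int(1e9) nanoseconds == 1 second, matching the expected Series below.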
expected = Series([timedelta(seconds=1), timedelta(0), timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1)])
assert_series_equal(result, expected)
from pandas import tslib
result = td.fillna(tslib.NaT)
expected = Series([tslib.NaT, timedelta(0), timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1)],
dtype='m8[ns]')
assert_series_equal(result, expected)
# ffill
td[2] = np.nan
result = td.ffill()
expected = td.fillna(0)
expected[0] = np.nan
assert_series_equal(result, expected)
# bfill
td[2] = np.nan
result = td.bfill()
expected = td.fillna(0)
expected[2] = timedelta(days=1, seconds=9 * 3600 + 60 + 1)
assert_series_equal(result, expected)
def test_datetime64_fillna(self):
s = Series([Timestamp('20130101'), Timestamp('20130101'), Timestamp(
'20130102'), Timestamp('20130103 9:01:01')])
s[2] = np.nan
# reg fillna
result = s.fillna(Timestamp('20130104'))
expected = Series([Timestamp('20130101'), Timestamp(
'20130101'), Timestamp('20130104'), Timestamp('20130103 9:01:01')])
assert_series_equal(result, expected)
from pandas import tslib
result = s.fillna(tslib.NaT)
expected = s
assert_series_equal(result, expected)
# ffill
result = s.ffill()
expected = Series([Timestamp('20130101'), Timestamp(
'20130101'), Timestamp('20130101'), Timestamp('20130103 9:01:01')])
assert_series_equal(result, expected)
# bfill
result = s.bfill()
expected = Series([Timestamp('20130101'), Timestamp('20130101'),
Timestamp('20130103 9:01:01'), Timestamp(
'20130103 9:01:01')])
assert_series_equal(result, expected)
# GH 6587
# make sure that we are treating as integer when filling
# this also tests inference of a datetime-like with NaT's
s = Series([pd.NaT, pd.NaT, '2013-08-05 15:30:00.000001'])
expected = Series(
['2013-08-05 15:30:00.000001', '2013-08-05 15:30:00.000001',
'2013-08-05 15:30:00.000001'], dtype='M8[ns]')
result = s.fillna(method='backfill')
assert_series_equal(result, expected)
def test_datetime64_tz_fillna(self):
for tz in ['US/Eastern', 'Asia/Tokyo']:
# DatetimeBlock
s = Series([Timestamp('2011-01-01 10:00'), pd.NaT, Timestamp(
'2011-01-03 10:00'), pd.NaT])
result = s.fillna(pd.Timestamp('2011-01-02 10:00'))
expected = Series([Timestamp('2011-01-01 10:00'), Timestamp(
'2011-01-02 10:00'), Timestamp('2011-01-03 10:00'), Timestamp(
'2011-01-02 10:00')])
self.assert_series_equal(expected, result)
result = s.fillna(pd.Timestamp('2011-01-02 10:00', tz=tz))
expected = Series([Timestamp('2011-01-01 10:00'), Timestamp(
'2011-01-02 10:00', tz=tz), Timestamp('2011-01-03 10:00'),
Timestamp('2011-01-02 10:00', tz=tz)])
self.assert_series_equal(expected, result)
result = s.fillna('AAA')
expected = Series([Timestamp('2011-01-01 10:00'), 'AAA',
Timestamp('2011-01-03 10:00'), 'AAA'],
dtype=object)
self.assert_series_equal(expected, result)
result = s.fillna({1: pd.Timestamp('2011-01-02 10:00', tz=tz),
3: pd.Timestamp('2011-01-04 10:00')})
expected = Series([Timestamp('2011-01-01 10:00'), Timestamp(
'2011-01-02 10:00', tz=tz), Timestamp('2011-01-03 10:00'),
Timestamp('2011-01-04 10:00')])
self.assert_series_equal(expected, result)
result = s.fillna({1: pd.Timestamp('2011-01-02 10:00'),
3: | pd.Timestamp('2011-01-04 10:00') | pandas.Timestamp |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# *****************************************************************************/
# * Authors: <NAME>
# *****************************************************************************/
"""transformCSV.py
This module contains the basic functions for creating the content of a configuration file from CSV.
Args:
--inFile: Path for the configuration file where the time series data values CSV
--outFile: Path for the configuration file where the time series data values INI
--debug: Boolean flag to activate verbose printing for debug use
Example:
Default usage:
$ python transformCSV.py
Specific usage:
$ python transformCSV.py
--inFile C:\raad\src\software\time-series.csv
--outFile C:\raad\src\software\time-series.ini
--debug True
"""
import sys
import datetime
import optparse
import traceback
import pandas
import numpy
import os
import pprint
import csv
if sys.version_info.major > 2:
import configparser as cF
else:
import ConfigParser as cF
class TransformMetaData(object):
debug = False
fileName = None
fileLocation = None
columnsList = None
analysisFrameFormat = None
uniqueLists = None
analysisFrame = None
def __init__(self, inputFileName=None, debug=False, transform=False, sectionName=None, outFolder=None,
outFile='time-series-madness.ini'):
if isinstance(debug, bool):
self.debug = debug
if inputFileName is None:
return
elif os.path.exists(os.path.abspath(inputFileName)):
self.fileName = inputFileName
self.fileLocation = os.path.exists(os.path.abspath(inputFileName))
(analysisFrame, analysisFrameFormat, uniqueLists, columnNamesList) = self.CSVtoFrame(
inputFileName=self.fileName)
self.analysisFrame = analysisFrame
self.columnsList = columnNamesList
self.analysisFrameFormat = analysisFrameFormat
self.uniqueLists = uniqueLists
if transform:
passWrite = self.frameToINI(analysisFrame=analysisFrame, sectionName=sectionName, outFolder=outFolder,
outFile=outFile)
print(f"Pass Status is : {passWrite}")
return
def getColumnList(self):
return self.columnsList
def getAnalysisFrameFormat(self):
return self.analysisFrameFormat
def getuniqueLists(self):
return self.uniqueLists
def getAnalysisFrame(self):
return self.analysisFrame
@staticmethod
def getDateParser(formatString="%Y-%m-%d %H:%M:%S.%f"):
return (lambda x: pandas.datetime.strptime(x, formatString)) # 2020-06-09 19:14:00.000
def getHeaderFromFile(self, headerFilePath=None, method=1):
if headerFilePath is None:
return (None, None)
if method == 1:
fieldnames = pandas.read_csv(headerFilePath, index_col=0, nrows=0).columns.tolist()
elif method == 2:
with open(headerFilePath, 'r') as infile:
reader = csv.DictReader(infile)
fieldnames = list(reader.fieldnames)
elif method == 3:
fieldnames = list(pandas.read_csv(headerFilePath, nrows=1).columns)
else:
fieldnames = None
fieldDict = {}
for indexName, valueName in enumerate(fieldnames):
fieldDict[valueName] = pandas.StringDtype()
return (fieldnames, fieldDict)
def CSVtoFrame(self, inputFileName=None):
if inputFileName is None:
return (None, None)
# Load File
print("Processing File: {0}...\n".format(inputFileName))
self.fileLocation = inputFileName
# Create data frame
analysisFrame = pandas.DataFrame()
analysisFrameFormat = self._getDataFormat()
inputDataFrame = pandas.read_csv(filepath_or_buffer=inputFileName,
sep='\t',
names=self._getDataFormat(),
# dtype=self._getDataFormat()
# header=None
# float_precision='round_trip'
# engine='c',
# parse_dates=['date_column'],
# date_parser=True,
# na_values=['NULL']
)
if self.debug: # Preview data.
print(inputDataFrame.head(5))
# analysisFrame.astype(dtype=analysisFrameFormat)
# Cleanup data
analysisFrame = inputDataFrame.copy(deep=True)
analysisFrame.apply(pandas.to_numeric, errors='coerce') # Fill in bad data with Not-a-Number (NaN)
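# Note: DataFrame.apply returns a new frame and the result above is discarded,
# so as written the to_numeric coercion has no effect on analysisFrame; it is
# left unassigned here as in the original, since assigning it back would also
# blank out the string columns declared in _getDataFormat().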
# Create lists of unique strings
uniqueLists = []
columnNamesList = []
for columnName in analysisFrame.columns:
if self.debug:
print('Column Name : ', columnName)
print('Column Contents : ', analysisFrame[columnName].values)
if pandas.api.types.is_string_dtype(analysisFrame[columnName]):  # Series.dtypes is a dtype object, never a str
columnUniqueList = analysisFrame[columnName].unique().tolist()
else:
columnUniqueList = None
columnNamesList.append(columnName)
uniqueLists.append([columnName, columnUniqueList])
if self.debug: # Preview data.
print(analysisFrame.head(5))
return (analysisFrame, analysisFrameFormat, uniqueLists, columnNamesList)
def frameToINI(self, analysisFrame=None, sectionName='Unknown', outFolder=None, outFile='nil.ini'):
if analysisFrame is None:
return False
try:
if outFolder is None:
outFolder = os.getcwd()
configFilePath = os.path.join(outFolder, outFile)
configINI = cF.ConfigParser()
configINI.add_section(sectionName)
for (columnName, columnData) in analysisFrame.items():  # iterate (column name, column Series) pairs
if self.debug:
print('Column Name : ', columnName)
print('Column Contents : ', columnData.values)
print("Column Contents Length:", len(columnData.values))
print("Column Contents Type", type(columnData.values))
writeList = "["
for colIndex, colValue in enumerate(columnData):
writeList = f"{writeList}'{colValue}'"
if colIndex < len(columnData) - 1:
writeList = f"{writeList}, "
writeList = f"{writeList}]"
configINI.set(sectionName, columnName, writeList)
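# Each DataFrame column is serialized as a single string that looks like a
# Python list (e.g. "['a', 'b']") so the whole column fits under one INI key.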
if not os.path.exists(configFilePath) or os.stat(configFilePath).st_size == 0:
with open(configFilePath, 'w') as configWritingFile:
configINI.write(configWritingFile)
noErrors = True
except ValueError as e:
errorString = ("ERROR in {__file__} @{framePrintNo} with {ErrorFound}".format(__file__=str(__file__),
framePrintNo=str(
sys._getframe().f_lineno),
ErrorFound=e))
print(errorString)
noErrors = False
return noErrors
@staticmethod
def _validNumericalFloat(inValue):
"""
Determines if the value is a valid numerical object.
Args:
inValue: floating-point value
Returns: Value in floating-point or Not-A-Number.
"""
try:
return numpy.float128(inValue)
except ValueError:
return numpy.nan
@staticmethod
def _calculateMean(x):
"""
Calculates the mean using a multiplication-based method, since direct division can produce an infinity or NaN
Args:
x: Input data set. We use a data frame.
Returns: Calculated mean for a vector data frame.
"""
try:
mean = numpy.float128(numpy.average(x, weights=numpy.ones_like(numpy.float128(x)) / numpy.float128(x.size)))
except ValueError:
mean = 0
pass
return mean
def _calculateStd(self, data):
"""
Calculates the standard deviation using a multiplication-based method, since direct division can produce an infinity or NaN
Args:
data: Input data set. We use a data frame.
Returns: Calculated standard deviation for a vector data frame.
"""
sd = 0
try:
n = numpy.float128(data.size)
if n <= 1:
return numpy.float128(0.0)
# Use multiplication version of mean since numpy bug causes infinity.
mean = self._calculateMean(data)
sd = numpy.float128(0)  # accumulate squared deviations from zero; seeding with the mean would bias the result
# Calculate standard deviation
for el in data:
diff = numpy.float128(el) - numpy.float128(mean)
sd += (diff) ** 2
points = numpy.float128(n - 1)
sd = numpy.float128(numpy.sqrt(numpy.float128(sd) / numpy.float128(points)))
except ValueError:
pass
return sd
def _determineQuickStats(self, dataAnalysisFrame, columnName=None, multiplierSigma=3.0):
"""
Determines stats based on a vector to get the data shape.
Args:
dataAnalysisFrame: Dataframe to do analysis on.
columnName: Column name of the data frame.
multiplierSigma: Sigma range for the stats.
Returns: Set of stats.
"""
meanValue = 0
sigmaValue = 0
sigmaRangeValue = 0
topValue = 0
try:
# Clean out anomalies due to random invalid inputs.
if (columnName is not None):
meanValue = self._calculateMean(dataAnalysisFrame[columnName])
if numpy.isnan(meanValue):  # NaN never compares equal to itself, so use isnan()
meanValue = numpy.float128(1)
sigmaValue = self._calculateStd(dataAnalysisFrame[columnName])
if numpy.isnan(float(sigmaValue)):  # identity comparison with NaN is always False
sigmaValue = numpy.float128(1)
multiplier = numpy.float128(multiplierSigma) # Stats: 1 sigma = 68%, 2 sigma = 95%, 3 sigma = 99.7
sigmaRangeValue = (sigmaValue * multiplier)
if numpy.isnan(float(sigmaRangeValue)):
sigmaRangeValue = numpy.float128(1)
topValue = numpy.float128(meanValue + sigmaRangeValue)
print("Name:{} Mean= {}, Sigma= {}, {}*Sigma= {}".format(columnName,
meanValue,
sigmaValue,
multiplier,
sigmaRangeValue))
except ValueError:
pass
return (meanValue, sigmaValue, sigmaRangeValue, topValue)
def _cleanZerosForColumnInFrame(self, dataAnalysisFrame, columnName='cycles'):
"""
Cleans the data frame with data values that are invalid. I.E. inf, NaN
Args:
dataAnalysisFrame: Dataframe to do analysis on.
columnName: Column name of the data frame.
Returns: Cleaned dataframe.
"""
dataAnalysisCleaned = None
try:
# Clean out anomalies due to random invalid inputs.
(meanValue, sigmaValue, sigmaRangeValue, topValue) = self._determineQuickStats(
dataAnalysisFrame=dataAnalysisFrame, columnName=columnName)
# dataAnalysisCleaned = dataAnalysisFrame[dataAnalysisFrame[columnName] != 0]
# When the cycles are negative or zero we missed cleaning up a row.
# logicVector = (dataAnalysisFrame[columnName] != 0)
# dataAnalysisCleaned = dataAnalysisFrame[logicVector]
logicVector = (dataAnalysisFrame[columnName] >= 1)  # filter the source frame; dataAnalysisCleaned is still None here
dataAnalysisCleaned = dataAnalysisFrame[logicVector]
# These timed out mean + 2 * sd
logicVector = (dataAnalysisCleaned[columnName] < topValue) # Data range
dataAnalysisCleaned = dataAnalysisCleaned[logicVector]
except ValueError:
pass
return dataAnalysisCleaned
def _cleanFrame(self, dataAnalysisTemp, cleanColumn=False, columnName='cycles'):
"""
Args:
dataAnalysisTemp: Dataframe to do analysis on.
cleanColumn: Flag to clean the data frame.
columnName: Column name of the data frame.
Returns: cleaned dataframe
"""
try:
replacementList = [pandas.NaT, numpy.Infinity, numpy.NINF, 'NaN', 'inf', '-inf', 'NULL']
if cleanColumn is True:
dataAnalysisTemp = self._cleanZerosForColumnInFrame(dataAnalysisTemp, columnName=columnName)
dataAnalysisTemp = dataAnalysisTemp.replace(to_replace=replacementList,
value=numpy.nan)
dataAnalysisTemp = dataAnalysisTemp.dropna()
except ValueError:
pass
return dataAnalysisTemp
@staticmethod
def _getDataFormat():
"""
Return the dataframe setup for the CSV file generated from server.
Returns: dictionary data format for pandas.
"""
dataFormat = {
"Serial_Number": pandas.StringDtype(),
"LogTime0": pandas.StringDtype(), # @todo force rename
"Id0": pandas.StringDtype(), # @todo force rename
"DriveId": pandas.StringDtype(),
"JobRunId": pandas.StringDtype(),
"LogTime1": pandas.StringDtype(), # @todo force rename
"Comment0": pandas.StringDtype(), # @todo force rename
"CriticalWarning": pandas.StringDtype(),
"Temperature": pandas.StringDtype(),
"AvailableSpare": pandas.StringDtype(),
"AvailableSpareThreshold": pandas.StringDtype(),
"PercentageUsed": pandas.StringDtype(),
"DataUnitsReadL": pandas.StringDtype(),
"DataUnitsReadU": pandas.StringDtype(),
"DataUnitsWrittenL": pandas.StringDtype(),
"DataUnitsWrittenU": pandas.StringDtype(),
"HostReadCommandsL": pandas.StringDtype(),
"HostReadCommandsU": pandas.StringDtype(),
"HostWriteCommandsL": pandas.StringDtype(),
"HostWriteCommandsU": pandas.StringDtype(),
"ControllerBusyTimeL": pandas.StringDtype(),
"ControllerBusyTimeU": pandas.StringDtype(),
"PowerCyclesL": pandas.StringDtype(),
"PowerCyclesU": pandas.StringDtype(),
"PowerOnHoursL": pandas.StringDtype(),
"PowerOnHoursU": pandas.StringDtype(),
"UnsafeShutdownsL": pandas.StringDtype(),
"UnsafeShutdownsU": pandas.StringDtype(),
"MediaErrorsL": pandas.StringDtype(),
"MediaErrorsU": pandas.StringDtype(),
"NumErrorInfoLogsL": pandas.StringDtype(),
"NumErrorInfoLogsU": pandas.StringDtype(),
"ProgramFailCountN": pandas.StringDtype(),
"ProgramFailCountR": pandas.StringDtype(),
"EraseFailCountN": pandas.StringDtype(),
"EraseFailCountR": pandas.StringDtype(),
"WearLevelingCountN": pandas.StringDtype(),
"WearLevelingCountR": pandas.StringDtype(),
"E2EErrorDetectCountN": pandas.StringDtype(),
"E2EErrorDetectCountR": pandas.StringDtype(),
"CRCErrorCountN": pandas.StringDtype(),
"CRCErrorCountR": pandas.StringDtype(),
"MediaWearPercentageN": pandas.StringDtype(),
"MediaWearPercentageR": pandas.StringDtype(),
"HostReadsN": pandas.StringDtype(),
"HostReadsR": pandas.StringDtype(),
"TimedWorkloadN": pandas.StringDtype(),
"TimedWorkloadR": pandas.StringDtype(),
"ThermalThrottleStatusN": pandas.StringDtype(),
"ThermalThrottleStatusR": pandas.StringDtype(),
"RetryBuffOverflowCountN": pandas.StringDtype(),
"RetryBuffOverflowCountR": pandas.StringDtype(),
"PLLLockLossCounterN": pandas.StringDtype(),
"PLLLockLossCounterR": pandas.StringDtype(),
"NandBytesWrittenN": pandas.StringDtype(),
"NandBytesWrittenR": pandas.StringDtype(),
"HostBytesWrittenN": pandas.StringDtype(),
"HostBytesWrittenR": pandas.StringDtype(),
"SystemAreaLifeRemainingN": pandas.StringDtype(),
"SystemAreaLifeRemainingR": pandas.StringDtype(),
"RelocatableSectorCountN": pandas.StringDtype(),
"RelocatableSectorCountR": pandas.StringDtype(),
"SoftECCErrorRateN": pandas.StringDtype(),
"SoftECCErrorRateR": pandas.StringDtype(),
"UnexpectedPowerLossN": pandas.StringDtype(),
"UnexpectedPowerLossR": pandas.StringDtype(),
"MediaErrorCountN": pandas.StringDtype(),
"MediaErrorCountR": pandas.StringDtype(),
"NandBytesReadN": pandas.StringDtype(),
"NandBytesReadR": pandas.StringDtype(),
"WarningCompTempTime": pandas.StringDtype(),
"CriticalCompTempTime": pandas.StringDtype(),
"TempSensor1": pandas.StringDtype(),
"TempSensor2": pandas.StringDtype(),
"TempSensor3": pandas.StringDtype(),
"TempSensor4": pandas.StringDtype(),
"TempSensor5": pandas.StringDtype(),
"TempSensor6": pandas.StringDtype(),
"TempSensor7": pandas.StringDtype(),
"TempSensor8": pandas.StringDtype(),
"ThermalManagementTemp1TransitionCount": pandas.StringDtype(),
"ThermalManagementTemp2TransitionCount": pandas.StringDtype(),
"TotalTimeForThermalManagementTemp1": pandas.StringDtype(),
"TotalTimeForThermalManagementTemp2": pandas.StringDtype(),
"Core_Num": pandas.StringDtype(),
"Id1": pandas.StringDtype(), # @todo force rename
"Job_Run_Id": pandas.StringDtype(),
"Stats_Time": pandas.StringDtype(),
"HostReads": pandas.StringDtype(),
"HostWrites": pandas.StringDtype(),
"NandReads": pandas.StringDtype(),
"NandWrites": pandas.StringDtype(),
"ProgramErrors": pandas.StringDtype(),
"EraseErrors": pandas.StringDtype(),
"ErrorCount": pandas.StringDtype(),
"BitErrorsHost1": pandas.StringDtype(),
"BitErrorsHost2": pandas.StringDtype(),
"BitErrorsHost3": pandas.StringDtype(),
"BitErrorsHost4": pandas.StringDtype(),
"BitErrorsHost5": pandas.StringDtype(),
"BitErrorsHost6": pandas.StringDtype(),
"BitErrorsHost7": pandas.StringDtype(),
"BitErrorsHost8": pandas.StringDtype(),
"BitErrorsHost9": pandas.StringDtype(),
"BitErrorsHost10": pandas.StringDtype(),
"BitErrorsHost11": pandas.StringDtype(),
"BitErrorsHost12": pandas.StringDtype(),
"BitErrorsHost13": pandas.StringDtype(),
"BitErrorsHost14": pandas.StringDtype(),
"BitErrorsHost15": pandas.StringDtype(),
"ECCFail": pandas.StringDtype(),
"GrownDefects": pandas.StringDtype(),
"FreeMemory": pandas.StringDtype(),
"WriteAllowance": pandas.StringDtype(),
"ModelString": pandas.StringDtype(),
"ValidBlocks": pandas.StringDtype(),
"TokenBlocks": pandas.StringDtype(),
"SpuriousPFCount": pandas.StringDtype(),
"SpuriousPFLocations1": pandas.StringDtype(),
"SpuriousPFLocations2": pandas.StringDtype(),
"SpuriousPFLocations3": pandas.StringDtype(),
"SpuriousPFLocations4": pandas.StringDtype(),
"SpuriousPFLocations5": pandas.StringDtype(),
"SpuriousPFLocations6": pandas.StringDtype(),
"SpuriousPFLocations7": pandas.StringDtype(),
"SpuriousPFLocations8": pandas.StringDtype(),
"BitErrorsNonHost1": pandas.StringDtype(),
"BitErrorsNonHost2": pandas.StringDtype(),
"BitErrorsNonHost3": pandas.StringDtype(),
"BitErrorsNonHost4": pandas.StringDtype(),
"BitErrorsNonHost5": pandas.StringDtype(),
"BitErrorsNonHost6": pandas.StringDtype(),
"BitErrorsNonHost7": pandas.StringDtype(),
"BitErrorsNonHost8": pandas.StringDtype(),
"BitErrorsNonHost9": pandas.StringDtype(),
"BitErrorsNonHost10": pandas.StringDtype(),
"BitErrorsNonHost11": pandas.StringDtype(),
"BitErrorsNonHost12": pandas.StringDtype(),
"BitErrorsNonHost13": pandas.StringDtype(),
"BitErrorsNonHost14": pandas.StringDtype(),
"BitErrorsNonHost15": pandas.StringDtype(),
"ECCFailNonHost": pandas.StringDtype(),
"NSversion": pandas.StringDtype(),
"numBands": pandas.StringDtype(),
"minErase": pandas.StringDtype(),
"maxErase": pandas.StringDtype(),
"avgErase": pandas.StringDtype(),
"minMVolt": pandas.StringDtype(),
"maxMVolt": pandas.StringDtype(),
"avgMVolt": pandas.StringDtype(),
"minMAmp": pandas.StringDtype(),
"maxMAmp": pandas.StringDtype(),
"avgMAmp": pandas.StringDtype(),
"comment1": pandas.StringDtype(), # @todo force rename
"minMVolt12v": pandas.StringDtype(),
"maxMVolt12v": pandas.StringDtype(),
"avgMVolt12v": pandas.StringDtype(),
"minMAmp12v": pandas.StringDtype(),
"maxMAmp12v": pandas.StringDtype(),
"avgMAmp12v": pandas.StringDtype(),
"nearMissSector": pandas.StringDtype(),
"nearMissDefect": pandas.StringDtype(),
"nearMissOverflow": pandas.StringDtype(),
"replayUNC": pandas.StringDtype(),
"Drive_Id": pandas.StringDtype(),
"indirectionMisses": pandas.StringDtype(),
"BitErrorsHost16": pandas.StringDtype(),
"BitErrorsHost17": pandas.StringDtype(),
"BitErrorsHost18": pandas.StringDtype(),
"BitErrorsHost19": pandas.StringDtype(),
"BitErrorsHost20": pandas.StringDtype(),
"BitErrorsHost21": pandas.StringDtype(),
"BitErrorsHost22": pandas.StringDtype(),
"BitErrorsHost23": pandas.StringDtype(),
"BitErrorsHost24": pandas.StringDtype(),
"BitErrorsHost25": pandas.StringDtype(),
"BitErrorsHost26": pandas.StringDtype(),
"BitErrorsHost27": pandas.StringDtype(),
"BitErrorsHost28": pandas.StringDtype(),
"BitErrorsHost29": pandas.StringDtype(),
"BitErrorsHost30": pandas.StringDtype(),
"BitErrorsHost31": pandas.StringDtype(),
"BitErrorsHost32": pandas.StringDtype(),
"BitErrorsHost33": pandas.StringDtype(),
"BitErrorsHost34": pandas.StringDtype(),
"BitErrorsHost35": pandas.StringDtype(),
"BitErrorsHost36": pandas.StringDtype(),
"BitErrorsHost37": pandas.StringDtype(),
"BitErrorsHost38": pandas.StringDtype(),
"BitErrorsHost39": pandas.StringDtype(),
"BitErrorsHost40": pandas.StringDtype(),
"XORRebuildSuccess": pandas.StringDtype(),
"XORRebuildFail": pandas.StringDtype(),
"BandReloForError": pandas.StringDtype(),
"mrrSuccess": pandas.StringDtype(),
"mrrFail": pandas.StringDtype(),
"mrrNudgeSuccess": pandas.StringDtype(),
"mrrNudgeHarmless": pandas.StringDtype(),
"mrrNudgeFail": pandas.StringDtype(),
"totalErases": pandas.StringDtype(),
"dieOfflineCount": pandas.StringDtype(),
"curtemp": pandas.StringDtype(),
"mintemp": pandas.StringDtype(),
"maxtemp": pandas.StringDtype(),
"oventemp": pandas.StringDtype(),
"allZeroSectors": pandas.StringDtype(),
"ctxRecoveryEvents": pandas.StringDtype(),
"ctxRecoveryErases": pandas.StringDtype(),
"NSversionMinor": pandas.StringDtype(),
"lifeMinTemp": pandas.StringDtype(),
"lifeMaxTemp": pandas.StringDtype(),
"powerCycles": pandas.StringDtype(),
"systemReads": pandas.StringDtype(),
"systemWrites": pandas.StringDtype(),
"readRetryOverflow": pandas.StringDtype(),
"unplannedPowerCycles": pandas.StringDtype(),
"unsafeShutdowns": pandas.StringDtype(),
"defragForcedReloCount": pandas.StringDtype(),
"bandReloForBDR": pandas.StringDtype(),
"bandReloForDieOffline": pandas.StringDtype(),
"bandReloForPFail": pandas.StringDtype(),
"bandReloForWL": pandas.StringDtype(),
"provisionalDefects": pandas.StringDtype(),
"uncorrectableProgErrors": pandas.StringDtype(),
"powerOnSeconds": pandas.StringDtype(),
"bandReloForChannelTimeout": pandas.StringDtype(),
"fwDowngradeCount": pandas.StringDtype(),
"dramCorrectablesTotal": pandas.StringDtype(),
"hb_id": pandas.StringDtype(),
"dramCorrectables1to1": pandas.StringDtype(),
"dramCorrectables4to1": pandas.StringDtype(),
"dramCorrectablesSram": pandas.StringDtype(),
"dramCorrectablesUnknown": pandas.StringDtype(),
"pliCapTestInterval": pandas.StringDtype(),
"pliCapTestCount": pandas.StringDtype(),
"pliCapTestResult": pandas.StringDtype(),
"pliCapTestTimeStamp": pandas.StringDtype(),
"channelHangSuccess": pandas.StringDtype(),
"channelHangFail": pandas.StringDtype(),
"BitErrorsHost41": pandas.StringDtype(),
"BitErrorsHost42": pandas.StringDtype(),
"BitErrorsHost43": | pandas.StringDtype() | pandas.StringDtype |
import requests
from datetime import datetime, timedelta
import pandas as pd
import numpy as np
import json
import pickle
from bs4 import BeautifulSoup as bs
def get_data():
"""
Downloads and cleans data that is used for the covid19 dashboard
Data is collected from:
- Confirmed and Deaths (up until the last day)
https://pomber.github.io/covid19/timeseries.json
- Population
https://en.wikipedia.org/wiki/List_of_countries_by_population_(United_Nations)
- Mobility index:
https://www.data.gouv.fr/en/datasets/r/0e56f0f4-6a82-48d4-b677-96e950100176
- Confirmed and Deaths (Today)
https://www.worldometers.info/coronavirus/
Other datasets such as ICU patients are gathered daily from
https://portal.icuregswe.org/siri/report/inrapp-corona
"""
import requests
url = 'https://pomber.github.io/covid19/timeseries.json'
countries = ['Sweden', 'Denmark', 'Norway', 'Finland', 'Iceland']
df = pd.read_json(url)[countries]
n_rows = df.shape[0]
df['Sweden'][0]
dates = []
for i in range(n_rows):
dates.append(df['Sweden'][i]['date'])
df['Date'] = dates
df['Date'] = pd.to_datetime(df['Date'])
df = df.set_index('Date')
# df = df.rename(columns={'US':'United States','Korea, South':'South Korea', 'Czechia':'Czech Republic'})
df_deaths = pd.DataFrame(index=df.index)
for col in df.columns:
df_deaths[col] = [c.get('deaths') for c in df[col]]
latest_data = death_update(countries)
df_deaths = pd.concat([df_deaths, latest_data])
# Start from January 31, before the first deaths
df_deaths = df_deaths[datetime(2020, 1, 31):]
# Fix faulty Iceland data
df_deaths.loc[datetime(2020, 3, 15, 0, 0, 0), 'Iceland'] = 0
df_deaths.loc[datetime(2020, 3, 20, 0, 0, 0,), 'Iceland'] = 1
df_confirmed = pd.DataFrame(index=df.index)
for col in df.columns:
df_confirmed[col] = [c.get('confirmed') for c in df[col]]
latest_data = confirm_update(countries)
df_confirmed = pd.concat([df_confirmed, latest_data])
df_pop = pd.read_html(
'https://en.wikipedia.org/wiki/List_of_countries_by_population_(United_Nations)')[3]
df_pop = df_pop[['Country or area', 'Population(1 July 2019)']]
df_pop['Country or area'] = df_pop['Country or area'].str.replace(
'\[.*\]', '')
df_pop = df_pop.pivot_table(
columns='Country or area', values='Population(1 July 2019)')[df.columns]
df_pop = df_pop / 1000000
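# Population is expressed in millions here so that the per-country divisions
# below yield deaths and cases per million inhabitants.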
df_deaths_per_mn = pd.DataFrame(index=df_deaths.index)
df_confirmed_per_mn = pd.DataFrame(index=df_confirmed.index)
for col in df_deaths.columns:
df_deaths_per_mn[col] = df_deaths[col] / df_pop[col].values
for col in df_confirmed.columns:
df_confirmed_per_mn[col] = df_confirmed[col] / df_pop[col].values
# Fix later on so that each item is stored inside country_df directly
obj = {'df_deaths': df_deaths, 'df_deaths_per_mn': df_deaths_per_mn}
country_dict = {}
for country in countries:
country_df = pd.DataFrame()
for k, df in obj.items():
if '1' in k:
continue
else:
country_df[k] = df[country]
country_df['Cases'] = df_confirmed[country]
country_df['Cases_per_mn'] = df_confirmed_per_mn[country]
country_dict[country] = country_df
mobility_url = "https://www.data.gouv.fr/en/datasets/r/0e56f0f4-6a82-48d4-b677-96e950100176"
mobility_cities = {'Sweden': 'Stockholm', 'Denmark': 'Copenhagen'}
for coun, ci in mobility_cities.items():
mobility = get_mobility(ci)
country_df = country_dict[coun]
country_df['mobility'] = mobility
country_dict[coun] = country_df
# icu data, sweden only right now
icu = pd.read_csv('data/swe_icu.csv')
icu.index = pd.to_datetime(icu['Date'])
swe = country_dict['Sweden']
swe['ICU'] = icu['total_icu']
country_dict['Sweden'] = swe
with open('dates.pkl', 'wb') as f:
dates = {'death_dates': df_deaths.index,
'confirmed_dates': df_confirmed.index}
pickle.dump(dates, f)
with open('countries.pkl', 'wb') as f:
pickle.dump(country_dict, f)
def death_update(country=None):
"""Gets the latest status updatedeath of the corona Virus
Keyword Arguments:
country {List or string]} -- Countries/Country to extract data from (default: {None})
Returns:
Pandas DataFrame -- Latest update of covid19
"""
url = 'https://www.worldometers.info/coronavirus/'
header = {
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.75 Safari/537.36",
"X-Requested-With": "XMLHttpRequest"
}
r = requests.get(url, headers=header)
dfs = pd.read_html(r.text)
df = dfs[0]
df = df[['Country,Other', 'TotalDeaths']]
df.columns = ['Date', 'Deaths']
df = df.transpose()
df.columns = df.loc['Date']
df = df.drop('Date')
df.index = [datetime.now().date()]
df.set_index
if country:
return(df[country])
else:
return df
def confirm_update(country=None):
"""Gets the latest status update of confimred cases
Keyword Arguments:
country {List or string]} -- Countries/Country to extract data from (default: {None})
Returns:
Pandas DataFrame -- Latest update of covid19
"""
url = 'https://www.worldometers.info/coronavirus/'
header = {
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.75 Safari/537.36",
"X-Requested-With": "XMLHttpRequest"
}
r = requests.get(url, headers=header)
dfs = pd.read_html(r.text)
df = dfs[0]
df = df[['Country,Other', 'TotalCases']]
df.columns = ['Date', 'Confirmed']
df = df.transpose()
df.columns = df.loc['Date']
df = df.drop('Date')
df.index = [datetime.now().date()]
df.set_index
if country:
return(df[country])
else:
return df
def get_mobility(city):
"""Gets the latest mobility data from city mapper
Arguments:
city {String} -- Which city to get data from
Returns:
Pandas Series -- Series of mobility index
"""
url = "https://www.data.gouv.fr/en/datasets/citymapper-mobility-index/"
url_dataset = None
source = requests.get(url)
soup = bs(source.content, features='html.parser')
links = soup.find_all(
"a", attrs={'class': 'btn btn-sm btn-primary', 'download': ""})
for link in links:
href = link.get('href')
if href:
url_dataset = href
if not url_dataset:
return None
mobility = pd.read_csv(url_dataset)
mobility['date'] = | pd.to_datetime(mobility['date']) | pandas.to_datetime |
# system packages
import unittest
import pytest
import pandas as pd
# app packages
from .context import FareCalc
class TotalWeeklyCalcTestSuite(unittest.TestCase):
"""Test the total weekly fare calculations for multiple journeys."""
def test_calc_total_weekly_fare_case1(self):
data = [['Monday',1015,'1 1',2021,2,1],
['Monday',1615,'1 2',2021,2,1],
['Monday',1645,'2 1',2021,2,1],
['Monday',1715,'1 2',2021,2,1],
['Monday',1815,'2 1',2021,2,1],
['Monday',1915,'1 2',2021,2,1],
['Tuesday',1015,'1 1',2021,2,1],
['Tuesday',1315,'1 1',2021,2,1],
['Tuesday',1415,'1 1',2021,2,1],
['Tuesday',1515,'1 2',2021,2,1],
['Tuesday',1730,'2 1',2021,2,1],
['Tuesday',2200,'1 1',2021,2,1],
]
df_weekly_commute = | pd.DataFrame(data, columns = ['day','time','journey','year','month','date']) | pandas.DataFrame |
# Author: <NAME>
# Github: Data-is-Life
# Date: 10/01/2018
import re
import pandas as pd
def rename_columns(strs_to_replace):
'''Keep DataFrame heading formatting consistent by converting all values
to a standardized format that is easy to trace back. If left unformatted,
there could be duplicate columns with the same values and it would make it
far more challenging to search for homes.'''
modified_list = []
for num in strs_to_replace:
modified_list.append(num.replace('Redfin Estimate', 'redfin_est'
).replace(
'Beds', 'num_bdrs').replace('beds', 'num_bts').replace(
'Baths', 'num_bts').replace('$', 'price').replace(
'Built: ', 'yr_blt').lower().replace('__', '_').replace(
' ', '_').replace(':_', '').replace(':', '').replace(
'.', '').replace('sqft', 'sq_ft').replace('_(', '_').replace(
'(', '_').replace(')', '').replace(',', '').replace(
'minimum', 'min').replace('maximum', 'max').replace(
'bedrooms', 'beds').replace('bathrooms', 'baths').replace(
'#_of_', 'num_').replace('sq. ft.', 'sqft'))
return modified_list
def top_info_parser(soup):
'''Starting with getting the information at the very top of the page.
This takes information from the top of the page that highlights the main
attributes of the home, including latitude and longitude.'''
all_top = soup.findAll('div', {'class': 'HomeInfo inline-block'})
top_info_dict = {}
values_ = []
cats_ = []
sqft = []
lat_lon = []
for num in all_top:
# Getting the address
address_ = num.findAll('span', {'class': 'street-address'})
top_info_dict['address'] = [num.text for num in address_][0]
# Getting the city
city_ = num.findAll('span', {'class': 'locality'})
top_info_dict['city'] = [num.text for num in city_][0]
# Getting the state (maybe not needed?)
state_ = num.findAll('span', {'class': 'region'})
top_info_dict['state'] = [num.text for num in state_][0]
# Getting the zip-code
zip_code_ = num.findAll('span', {'class': 'postal-code'})
top_info_dict['zip_code'] = [num.text for num in zip_code_][0]
'''Getting the Redfin Estimate. This is important, since if the home
was sold a few months ago, the search should focus on the home's current
value and not on what it sold for. This makes the results far more
efficient.'''
red_est = num.findAll('div', {'class': 'info-block avm'})
for i in red_est:
values_.append(i.div.text)
cats_.append(i.span.text)
# If the Redfin estimate is not available, this is the fall back option.
price_ = num.findAll('div', {'class': 'info-block price'})
for i in price_:
values_.append(i.div.text)
cats_.append(i.span.text)
# Getting number of bedrooms
bdrs_ = num.findAll('div', {'data-rf-test-id': 'abp-beds'})
for i in bdrs_:
values_.append(i.div.text)
cats_.append(i.span.text)
# Getting number of bathrooms
bths_ = num.findAll('div', {'data-rf-test-id': 'abp-baths'})
for i in bths_:
values_.append(i.div.text)
cats_.append(i.span.text)
# Getting size of the home
sqft_ = num.findAll('div', {'data-rf-test-id': 'abp-sqFt'})
for i in sqft_:
top_info_dict['sqft'] = i.span.text[:6]
# Getting the year the home was built in
yrblt_ = num.findAll('div', {'class': 'HomeBottomStats'})
for i in yrblt_:
lbls_ = i.findAll('span', {'class': 'label'})
vals_ = i.findAll('span', {'class': 'value'})
for j in lbls_:
cats_.append(j.text)
for k in vals_:
values_.append(k.text)
# Getting latitude and longitude of the home
lat_lon_ = num.findAll('span', {'itemprop': 'geo'})
for i in lat_lon_:
ll_ = i.findAll('meta')
for num in ll_:
lat_lon.append(num['content'])
if len(lat_lon) >= 2:
top_info_dict['latitude'] = lat_lon[0]
top_info_dict['longitude'] = lat_lon[1]
# Checking to make sure the values are present for the fields
# If they are not available, get rid of them.
values_ = [num for num in values_ if num != '—']
cats_ = [num for num in cats_ if num != '—']
# Putting everything in a dictionary, since it removes redundant columns
info_dict = dict(zip(cats_, values_))
# Merging the two dictionaries
all_info_dict = {**top_info_dict, **info_dict}
# Getting the home description
home_description = soup.find('p', {'class': 'font-b1'})
if home_description is not None:
all_info_dict['description'] = home_description.span.text
else:
all_info_dict['description'] = 'N/A'
return all_info_dict
def public_info_parser(soup):
'''Getting information from tax sources to ensure all the home information
matches from Zillow, Agent, and Tax records.'''
all_info = soup.findAll('div', {'data-rf-test-id': 'publicRecords'})
label_list = []
values_list = []
for num in all_info:
cats = num.findAll('span', {'class': 'table-label'})
for i in cats:
label_list.append(i.text)
for num in all_info:
vals = num.findAll('div', {'class': 'table-value'})
for i in vals:
values_list.append(i.text)
public_info_dict = dict(zip(label_list, values_list))
return public_info_dict
def school_parser(soup):
''' Getting schools, the grades they serve, and their GreatSchools score.
This will be added as a feature for homes bigger than
three bedrooms and all single-family homes.'''
school_dict = {}
school_info = soup.findAll('div', {'class': "name-and-info"})
school_names = []
school_grades = []
school_ratings = []
for num in school_info:
s_name = num.findAll('div', {'data-rf-test-name': 'school-name'})
s_grade = num.findAll('div', {'class': re.compile('^sub-info')})
s_rating = num.findAll('div', {'class': 'gs-rating-row'})
for i in s_name:
school_names.append(i.text)
for j in s_grade:
school_grades.append(j.text.replace(
' • Serves this home', '').replace(' • ', ' - '))
for k in s_rating:
school_ratings.append(
k.text[-5:].replace(' ', '').replace('/10', ''))
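# The three while-loops below walk the school list and pick, via keyword
# heuristics on names/grades, the first public elementary, middle, and high
# school respectively, recording each school's name, grades, and rating.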
w = 0
while w < len(school_names):
if ('Public' in school_grades[w] and ((
('k' in school_grades[w] or 'Pre' in school_grades)
or '5' in school_grades[w]) or 'Elementary' in school_names[w])):
school_dict['elem_school_name'] = school_names[w]
school_dict['elem_school_grades'] = school_grades[
w].split(' - ', 1)[1]
school_dict['elem_school_rating'] = school_ratings[w]
w += 1
else:
w += 1
w = 0
while w < len(school_names):
if ('Public' in school_grades[w] and ((
('7' in school_grades[w] or '8' in school_grades)
or 'Middle' in school_names[w]) or 'Junior' in school_names[w])):
school_dict['middle_school_name'] = school_names[w].title()
school_dict['middle_school_grades'] = school_grades[
w].split(' - ', 1)[1].title()
school_dict['middle_school_rating'] = school_ratings[w].title()
w += 1
else:
w += 1
w = 0
while w < len(school_names):
if ('Public' in school_grades[w] and (
('12' in school_grades or 'High' in school_names[w]))):
school_dict['high_school_name'] = school_names[w].title()
school_dict['high_school_grades'] = school_grades[
w].split(' - ', 1)[1].title()
school_dict['high_school_rating'] = school_ratings[w].title()
w += 1
else:
w += 1
if 'elem_school_name' not in school_dict.keys():
school_dict['elem_school_name'] = 'N/A'
school_dict['elem_school_grades'] = 'N/A'
school_dict['elem_school_rating'] = 'N/A'
if 'middle_school_name' not in school_dict.keys():
school_dict['middle_school_name'] = 'N/A'
school_dict['middle_school_grades'] = 'N/A'
school_dict['middle_school_rating'] = 'N/A'
if 'high_school_name' not in school_dict.keys():
school_dict['high_school_name'] = 'N/A'
school_dict['high_school_grades'] = 'N/A'
school_dict['high_school_rating'] = 'N/A'
return school_dict
def feats_parser(soup):
'''All the listed features by the agent/broker inputting the listing
on the MLS.'''
all_home_feats = soup.findAll('span', {'class': "entryItemContent"})
feat_cats = []
feat_vals = []
for num in all_home_feats:
feat_cats.append(num.contents[0])
for num in all_home_feats:
feat_vals.append(num.span.text)
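# Some entries repeat the same string in both the category and the value
# position; intersect the two sets and drop those shared strings from both
# lists so they do not end up as bogus key/value pairs.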
cats_set = set(feat_cats)
vals_set = set(feat_vals)
redundant = cats_set & vals_set
for num in redundant:
feat_cats.remove(num)
feat_vals.remove(num)
feat_cats = [str(num) for num in feat_cats]
feat_vals = [str(num) for num in feat_vals]
feats_dict = dict(zip(feat_cats, feat_vals))
extra_feats = []
for k, v in feats_dict.items():
if 'span>' in k:
extra_feats.append(k)
for num in extra_feats:
if num in feats_dict.keys():
feats_dict.pop(num)
# This is to replace all the HTML tags
extra_feats = [num.replace('<span>', '').replace('</span>', '').replace(
'<a href=', '').replace('"', '').replace(' rel=nofollow', '').replace(
' target=_blank>', '').replace('Virtual Tour (External Link)', '').replace(
'</a', '').replace('>', '').replace('&', '&').replace('(s)', '') for num
in extra_feats]
x_feat_string = ', '.join([num for num in extra_feats])
x_feat_string = x_feat_string.split(sep=', ')
x_feat_list = list(set(x_feat_string))
feats_dict['extra_feats'] = ', '.join([num for num in x_feat_list])
return feats_dict
def additional_info(soup):
'''Need to get additional information, so we don't miss anything that
could prove to be critical later.'''
cats_ = soup.findAll('span', {'class': re.compile('^header ')})
cats_ = [num.text for num in cats_]
vals_ = soup.findAll('span', {'class': re.compile('^content ')})
vals_ = [num.text for num in vals_]
cats_ = [str(num).replace('Property Type', 'prop_type').replace(
'HOA Dues', 'hoa_fees').replace('Type', 'prop_type') for num in cats_]
vals_ = [str(num).replace('$', '').replace('/month', '').replace(
'Hi-Rise', 'Condo').replace('Residential', 'Single Family Residence')
for num in vals_]
return dict(zip(cats_, vals_))
def info_from_property(soup):
''' Putting all the information together in a Dataframe and removing any
duplicate columns.'''
top_info_dict = top_info_parser(soup)
public_info_dict = public_info_parser(soup)
school_dict = school_parser(soup)
all_home_feats = feats_parser(soup)
mid_info_feats = additional_info(soup)
df1 = | pd.DataFrame(top_info_dict, index=[1]) | pandas.DataFrame |
"""
Functions and classes for performing WoE transformations
"""
import math
import warnings
import numpy as np
import pandas as pd
import sklearn as sk
from IPython.display import display
from matplotlib import pyplot as plt
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.model_selection import train_test_split
from tqdm.auto import tqdm
class _GroupedPredictor(pd.DataFrame):
"""
Helper class for convenient access to certain grouped data
"""
def get_predictor(self, x):
"""
Select a sub-sample by predictor name(s)
Parameters
---------------
x : str/int/list-like
Predictor or list of predictors
Returns:
-----------
self : pd.DataFrame
Part of the dataframe (itself)
"""
if isinstance(x, (list, set, tuple)):
return self[self["predictor"].isin(x)]
else:
return self[self["predictor"] == x]
def append(self, other):
return _GroupedPredictor(super().append(other))
class WoeTransformer(TransformerMixin, BaseEstimator):
"""Класс для построения и применения WOE группировки к датасету
Parameters
----------
min_sample_rate : float, default 0.05
Минимальный размер группы (доля от размера выборки)
min_count : int, default 3
Минимальное количество наблюдений каждого класса в группе
save_data : bool, default False
Параметр, определяющий, нужно ли сохранить данные для обучения
трансформера внутри экземпляра класса
join_bad_categories : bool, default False
Определяет, должени ли трансформер предпринять попытку для объединения
катогориальных групп в более крупные
Warning
-------
join_bad_categories - Экспериментальная функция.
Способ группировки категорий нестабилен
Attributes
----------
stats : pandas.DataFrame
Результаты WOE-группировки по всем предикторам
predictors : list
Список предикторов, на которых была построена группировка
cat_values : dict[str, list]
Словарь со списками категорий по предикторам, переданный при обучении
alpha_values : dict[str, float]
Словарь со значениями alpha для регуляризации групп
possible groups : pandas.DataFrame
Данные о значениях предиктора, которые могли бы стать
отдельными категориями
bad_groups : pandas.DataFrame
Данные о группах, которые не удовлетворяют условиям
"""
def __repr__(self):
return "WoeTransformer(min_sample_rate={!r}, min_count={!r}, n_fitted_predictors={!r})".format(
self.min_sample_rate,
self.min_count,
len(self.predictors),
)
def __init__(
self,
min_sample_rate: float = 0.05,
min_count: int = 3,
save_data: bool = False,
join_bad_categories: bool = False,
):
"""
Initialize the class instance
"""
self.min_sample_rate = min_sample_rate
self.min_count = min_count
self.predictors = []
self.alpha_values = {}
self.save_data = save_data
self.join_bad_categories = join_bad_categories
# -------------------------
# Class interface methods
# -------------------------
def fit(self, X, y, cat_values={}, alpha_values={}):
"""
Fit the transformer and compute all intermediate data
Parameters
---------------
X : pd.DataFrame
Dataframe with the predictors to be grouped
y : pd.Series
Target variable
cat_values : dict[str, list[str]], optional
Dictionary of lists with special values that should be
put into separate categories
By default, all string and missing values are put
into separate categories
alpha_values : dict[str, float], optional
Dictionary of alpha values for WOE-group regularization
Returns
-------
self : WoeTransformer
"""
# Reset the current state of the transformer
self._reset_state()
# Store the categorical values
self.cat_values = cat_values
# Validate and reshape the data
if hasattr(self, "_validate_data"):
X, y = self._validate_and_convert_data(X, y)
if self.save_data:
self.data = X
self.target = y
# Initialize the coefficients used for group regularization
self.alpha_values = {i: 0 for i in X.columns}
self.alpha_values.update(alpha_values)
# Aggregate predictor values
self._grouping(X, y)
# Compute WOE and IV
self._fit_numeric(X, y)
# Search for potential groups
# Search for "bad" groups
self._get_bad_groups()
return self
def transform(self, X, y=None):
"""
Apply the fitted transformer to new data
Parameters
---------------
X : pandas.DataFrame
Dataframe to transform
Predictors that were not grouped during fitting will be
ignored and a message will be printed
y : pandas.Series
Ignored
Returns
-----------
transformed : pandas.DataFrame
Transformed dataset
"""
transformed = pd.DataFrame()
if hasattr(self, "_validate_data"):
try:
X, y = self._validate_and_convert_data(X, y)
except AttributeError:
pass
for i in X:
if i in self.predictors:
try:
transformed[i] = self._transform_single(X[i])
except Exception as e:
print(f"Transform failed on predictor: {i}", e)
else:
print(f"Column is not in fitted predictors list: {i}")
return transformed
def fit_transform(self, X, y, cat_values={}, alpha_values={}):
"""
Fit the transformer, compute all intermediate data and then
apply the grouping to the same data
Parameters
---------------
X : pandas.DataFrame
Dataframe with the predictors to be grouped
y : pandas.Series
Target variable
cat_values : dict[str, list[str]], optional
Dictionary of lists with special values that should be
put into separate categories
By default, all string and missing values are put
into separate categories
alpha_values : dict[str, float], optional
Dictionary of alpha values for WOE-group regularization
Returns
-----------
transformed : pd.DataFrame
Transformed dataset
"""
self.fit(X, y, cat_values=cat_values, alpha_values=alpha_values)
return self.transform(X)
def plot_woe(self, predictors=None):
"""
Plot one or several grouping charts
Parameters
---------------
predictors : str or array, default None
Predictor(s) to plot
-- if str - a single chart is drawn
-- if array - charts are drawn for the listed predictors
-- if None - charts are drawn for all grouped predictors
Warning
-------
Calling this method without arguments may take a long time
when there are many predictors
"""
if predictors is None:
predictors = self.predictors
elif isinstance(predictors, str):
predictors = [predictors]
elif isinstance(predictors, (list, tuple, set)):
predictors = predictors
_, axes = plt.subplots(figsize=(10, len(predictors) * 5), nrows=len(predictors))
try:
for i, col in enumerate(predictors):
self._plot_single_woe_grouping(self.stats.get_predictor(col), axes[i])
except TypeError:
self._plot_single_woe_grouping(self.stats.get_predictor(col), axes)
# return fig
def get_iv(self, sort=False):
"""Получение списка значений IV по предикторам
Parameters
----------
sort : bool, default False
Включает сортировку результата по убыванию IV
Returns
-------
pandas.Series
"""
try:
res = self.stats.groupby("predictor")["IV"].sum()
if sort:
res = res.sort_values(ascending=False)
res = dict(res)
except AttributeError as e:
print(f"Transformer was not fitted yet. {e}")
res = {}
return res
# -------------------------
# Internal functions operating on the whole dataset
# -------------------------
def _validate_and_convert_data(self, X, y):
"""Проверяеn входные данные, трансформирует в объекты pandas
Использует метод _validate_data из sklearn/base.py
"""
if hasattr(X, "columns"):
predictors = X.columns
else:
predictors = ["X" + str(i + 1) for i in range(X.shape[1])]
if y is None:
X_valid = self._validate_data(X, y, dtype=None, force_all_finite=False)
X_valid = pd.DataFrame(X, columns=predictors)
y_valid = None
else:
X_valid, y_valid = self._validate_data(
X, y, dtype=None, force_all_finite=False
)
y_valid = pd.Series(y, name="target")
X_valid = pd.DataFrame(X, columns=predictors)
return X_valid, y_valid
def _grouping(self, X, y):
"""
Apply the grouping to all predictors
"""
df = X.copy()
df = df.fillna("пусто")
df["target"] = y.copy()
# Group values and compute statistics
for col in df.columns[:-1]:
grouped_temp = self._group_single(df[col], y)
num_mask = self._get_nums_mask(grouped_temp["value"])
cat_val_mask = grouped_temp["value"].isin(self.cat_values.get(col, []))
is_all_categorical = all(~num_mask | cat_val_mask)
if self.join_bad_categories and is_all_categorical:
repl = self._get_cat_values_for_join(grouped_temp)
grouped_temp = self._group_single(df[col].replace(repl), y)
self.grouped = self.grouped.append(grouped_temp)
# Replace placeholder empties back with np.nan OR convert to a numeric type
try:
self.grouped["value"] = self.grouped["value"].replace({"пусто": np.nan})
except TypeError:
self.grouped["value"] = pd.to_numeric(
self.grouped["value"], downcast="signed"
)
def _fit_numeric(self, X, y):
"""
Compute WOE and IV
Parameters:
---------------
X : pd.DataFrame
Dataframe with the predictors to be grouped
y : pd.Series
Target variable
Returns
-------
None
"""
res = pd.DataFrame()
for i in X:
res_i = self._fit_single(X[i], y)
res = res.append(res_i)
self.predictors.append(i)
self.stats = self.stats.append(res)
# -------------------------
# Internal functions operating on individual columns
# -------------------------
def _group_single(self, x, y):
"""
Aggregate the data by predictor value.
Computes the number of observations, the number of target
events, the group's share of the total number of observations
and the target rate within the group
Parameters:
---------------
X : pandas.DataFrame
Data table to aggregate
y : pandas.Series
Target variable
"""
col = x.name
df = pd.DataFrame({col: x.values, "target": y.values})
grouped_temp = df.groupby(col)["target"].agg(["count", "sum"]).reset_index()
grouped_temp.columns = ["value", "sample_count", "target_count"]
grouped_temp["sample_rate"] = (
grouped_temp["sample_count"] / grouped_temp["sample_count"].sum()
)
grouped_temp["target_rate"] = (
grouped_temp["target_count"] / grouped_temp["sample_count"]
)
grouped_temp.insert(0, "predictor", col)
return _GroupedPredictor(grouped_temp)
def _fit_single(self, x, y, gr_subset=None, cat_vals=None):
"""
Compute WOE and IV for a single predictor
Parameters:
---------------
X : pd.DataFrame
Dataframe with the predictors to be grouped
y : pd.Series
Target variable
gr_subset : _GroupedPredictor
Grouped data for the predictor
"""
gr_subset_num = pd.DataFrame()
gr_subset_cat = pd.DataFrame()
col = x.name
if gr_subset is None:
gr_subset = self.grouped.get_predictor(col)
if cat_vals is None:
cat_vals = self.cat_values.get(col, [])
nan_mask = x.isna()
num_mask = self._get_nums_mask(x) & (~x.isin(cat_vals)) & (~nan_mask)
num_vals = x.loc[num_mask].unique()
try:
# Compute trend coefficients for the numeric values of the predictor
if num_mask.sum() > 0:
try:
poly_coefs = np.polyfit(
x.loc[num_mask].astype(float), y.loc[num_mask], deg=1
)
except np.linalg.LinAlgError as e:
print(f"Error in np.polyfit on predictor: '{col}'.\nError MSG: {e}")
print("Linear Least Squares coefficients were set to [1, 0]")
poly_coefs = np.array([1, 0])
self.trend_coefs.update({col: poly_coefs})
# Compute monotonic borders
gr_subset_num = gr_subset[gr_subset["value"].isin(num_vals)].copy()
gr_subset_num["value"] = pd.to_numeric(gr_subset_num["value"])
gr_subset_num = gr_subset_num.sort_values("value")
borders = self._monotonic_borders(gr_subset_num, self.trend_coefs[col])
self.borders.update({col: borders})
# Apply the borders to the grouped data
gr_subset_num["groups"] = pd.cut(gr_subset_num["value"], borders)
gr_subset_num["type"] = "num"
except ValueError as e:
print(f"ValueError on predictor {col}.\nError MSG: {e}")
# Handle the categorical values of the predictor
if (~num_mask).sum() > 0:
gr_subset_cat = gr_subset[~gr_subset["value"].isin(num_vals)].copy()
gr_subset_cat["groups"] = gr_subset_cat["value"].fillna("пусто")
gr_subset_cat["type"] = "cat"
# Combine numeric and categorical values
gr_subset = pd.concat([gr_subset_num, gr_subset_cat], axis=0, ignore_index=True)
# Compute WOE and IV
alpha = self.alpha_values.get(col, 0)
res_i = self._statistic(gr_subset, alpha=alpha)
is_empty_exists = any(res_i["groups"].astype(str).str.contains("пусто"))
if is_empty_exists:
res_i["groups"].replace({"пусто": np.nan}, inplace=True)
return res_i
def _transform_single(self, x, stats=None):
"""
Apply the grouping and the WoE transformation
Parameters
---------------
x : pandas.Series
Predictor values
Returns
---------------
X_woe : pandas.DataFrame
WoE-transformed predictor values
WoE = 0 if a group was not seen in the training sample
"""
orig_index = x.index
X_woe = x.copy()
if stats is None:
stats = self.stats.get_predictor(X_woe.name)
# Mappings used to replace groups with their WOE values
num_map = {
stats.loc[i, "groups"]: stats.loc[i, "WOE"]
for i in stats.index
if stats.loc[i, "type"] == "num"
}
cat_map = {
stats.loc[i, "groups"]: stats.loc[i, "WOE"]
for i in stats.index
if stats.loc[i, "type"] == "cat"
}
# Categorical groups
cat_bounds = stats.loc[stats["type"] == "cat", "groups"]
# predict for numeric values
DF_num = stats.loc[stats["type"] == "num"]
if DF_num.shape[0] > 0:
# Right borders of the intervals used to bin numeric variables
num_bounds = [-np.inf] + list(
pd.IntervalIndex(stats.loc[stats["type"] == "num", "groups"]).right
)
# Select only the numeric values of the predictor
# (number-like values that are not explicitly marked as categorical)
X_woe_num = pd.to_numeric(
X_woe[(self._get_nums_mask(X_woe)) & (~X_woe.isin(cat_bounds))]
)
# Bin the values into intervals according to the grouping
X_woe_num = pd.cut(X_woe_num, num_bounds)
# Replace groups with WOE values
X_woe_num = X_woe_num.replace(num_map)
X_woe_num.name = "woe"
else:
X_woe_num = pd.Series()
# predict for categorical values (may override numeric results)
DF_cat = stats.loc[stats["type"] == "cat"]
if DF_cat.shape[0] > 0:
# Select string values and those explicitly marked as categorical
X_woe_cat = X_woe[X_woe.isin(cat_map.keys())]
# Replace groups with WOE values
X_woe_cat = X_woe_cat.replace(cat_map)
else:
X_woe_cat = pd.Series()
# predict for new categories (non-numeric values not seen during grouping)
# Collect the indices of categorical and numeric values
used_index = np.hstack([X_woe_cat.index, X_woe_num.index])
if len(used_index) < len(x):
X_woe_oth = X_woe.index.drop(used_index)
X_woe_oth = pd.Series(0, index=X_woe_oth)
else:
X_woe_oth = pd.Series()
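# --- Editor's sketch: minimal usage example for WoeTransformer --------------
# Added for illustration only; it is not part of the original module. The
# synthetic data, the parameter values and the assumption that the remaining
# private helpers of the class (truncated above) are available are all
# editor's assumptions.
if __name__ == "__main__":
    _rng = np.random.default_rng(0)
    _X = pd.DataFrame({
        "age": _rng.integers(18, 70, size=500),
        "city": _rng.choice(["msk", "spb", "other"], size=500),
    })
    _y = pd.Series(_rng.integers(0, 2, size=500), name="target")
    _woe = WoeTransformer(min_sample_rate=0.05, min_count=3)
    _X_woe = _woe.fit_transform(_X, _y, cat_values={"city": ["msk", "spb", "other"]})
    print(_woe.get_iv(sort=True))
# -----------------------------------------------------------------------------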
def Dereplicator(params : dict, db_params : dict):
def Filter_choices_outer(db_params):
def Three_filters(table):
return ['|'.join(table[db_params['db_rt_field']]), '|'.join(table[db_params['db_adduct_field']]), '|'.join(table[db_params['db_formula_field_database']])]
def RT_adducts(table):
return ['|'.join(table[db_params['db_rt_field']]), '|'.join(table[db_params['db_adduct_field']])]
def RT_formula(table):
return ['|'.join(table[db_params['db_rt_field']]), '|'.join(table[db_params['db_formula_field_database']])]
def Adducts_formula(table):
return ['|'.join(table[db_params['db_adduct_field']]), '|'.join(table[db_params['db_formula_field_database']])]
def RT_only(table):
return ['|'.join(table[db_params['db_rt_field']])]
def Adduct_only(table):
return ['|'.join(table[db_params['db_adduct_field']])]
def Formula_only(table):
return ['|'.join(table[db_params['db_formula_field_database']])]
def No_fields(table):
return []
if (db_params['db_rt_field'] != None) and (db_params['db_adduct_field'] != None) and (db_params['db_formula_field_database'] != None):
return Three_filters, ['rt', 'adduct', 'formula']
elif db_params['db_rt_field'] != None and (db_params['db_adduct_field'] != None):
return RT_adducts, ['rt', 'adduct']
elif db_params['db_rt_field'] != None and (db_params['db_formula_field_database'] != None):
return RT_formula, ['rt', 'formula']
elif db_params['db_adduct_field'] != None and (db_params['db_formula_field_database'] != None):
return Adducts_formula, ['adduct', 'formula']
elif db_params['db_rt_field'] != None:
return RT_only, ['rt']
elif db_params['db_adduct_field'] != None:
return Adduct_only, ['adduct']
elif db_params['db_formula_field_database'] != None:
return Formula_only, ['formula']
else:
return No_fields, []
def Spectrum_processing(s):
s = default_filters(s)
return s
def Float_prec_mz(s):
s = s.set(db_params['db_mass_field'], float(s.get(db_params['db_mass_field'])))
return s
def Column_correction(table):
drop_col = [i for i in table.columns if "Unnamed" in i]
table.drop(drop_col, axis = 1, inplace = True)
return table
def Database_table_mgf(database_mgf, db_params : dict) :
"""Extract the metadata from the MGF file
"""
def Mz_field_choices():
def Get_mz_list(s):
return database_mgf[s].get(db_params['db_mass_field'])[0]
def Get_mz(s):
return database_mgf[s].get(db_params['db_mass_field'])
if isinstance(database_mgf[0].get(db_params['db_mass_field']), list) :
return Get_mz_list
else:
return Get_mz
def Filter_choices(db_params):
def Three_filters(s):
return [database_mgf[s].get(db_params['db_rt_field']), database_mgf[s].get(db_params['db_adduct_field']), database_mgf[s].get(db_params['db_formula_field_database'])]
def RT_adduct(s):
return [database_mgf[s].get(db_params['db_rt_field']), database_mgf[s].get(db_params['db_adduct_field'])]
def RT_formula(s):
return [database_mgf[s].get(db_params['db_rt_field']), database_mgf[s].get(db_params['db_formula_field_database'])]
def Adduct_formula(s):
return [database_mgf[s].get(db_params['db_adduct_field']), database_mgf[s].get(db_params['db_formula_field_database'])]
def RT_only(s):
return [database_mgf[s].get(db_params['db_rt_field'])]
def Adduct_only(s):
return [database_mgf[s].get(db_params['db_adduct_field'])]
def Formula_only(s):
return [database_mgf[s].get(db_params['db_formula_field_database'])]
def No_fields(s):
return []
if (db_params['db_rt_field'] != None) and (db_params['db_adduct_field'] != None) and (db_params['db_formula_field_database'] != None):
return Three_filters, ['rt', 'adduct', 'formula']
elif (db_params['db_rt_field'] != None) and (db_params['db_adduct_field'] != None):
return RT_adduct, ['rt', 'adduct']
elif (db_params['db_rt_field'] != None) and (db_params['db_formula_field_database'] != None):
return RT_formula, ['rt', 'formula']
elif (db_params['db_adduct_field'] != None) and (db_params['db_formula_field_database'] != None):
return Adduct_formula, ['adduct', 'formula']
elif db_params['db_rt_field'] != None:
return RT_only, ['rt']
elif db_params['db_adduct_field'] != None:
return Adduct_only, ['adduct']
elif db_params['db_formula_field_database'] != None:
return Formula_only, ['formula']
else:
return No_fields, []
database_table = list()
Filter_fields, cols = Filter_choices(db_params)
Mz_extractor = Mz_field_choices()
col_names = ['name', 'mz', 'unique_field'] + cols + db_params['db_export_fields'] + ['ion_mode']
print('Extracting database metadata...')
for i in tqdm(range(len(database_mgf))):
name = database_mgf[i].get(db_params['db_name_field'])
mz = Mz_extractor(i)
unique_field = database_mgf[i].get(db_unique_field)
if unique_field == "" : unique_field = None
filter_fields = Filter_fields(i)
other_fields = [database_mgf[i].get(field) for field in db_params['db_export_fields']]
other_fields = [None if f == "" else f for f in other_fields]
ion_mode = database_mgf[i].get(db_mode_field)
new_row = [name, mz, unique_field] + filter_fields + other_fields + [ion_mode]
database_table.append((new_row))
database_table = pd.DataFrame(database_table, columns = col_names)
return database_table
def Database_table_csv(database_csv, db_params : dict) :
"""Extract the metadata from the MGF file
"""
def Filter_choices(db_params):
def Three_filters(s):
return [database_csv.loc[s, db_params['db_rt_field']],
database_csv.loc[s, db_params['db_adduct_field']],
database_csv.loc[s, db_params['db_formula_field_database']]]
def RT_adduct(s):
return [database_csv.loc[s, db_params['db_rt_field']],
database_csv.loc[s, db_params['db_adduct_field']]]
def RT_formula(s):
return [database_csv.loc[s, db_params['db_rt_field']],
database_csv.loc[s, db_params['db_formula_field_database']]]
def Adduct_formula(s):
return [database_csv.loc[s, db_params['db_adduct_field']],
database_csv.loc[s, db_params['db_formula_field_database']]]
def RT_only(s):
return [database_csv.loc[s, db_params['db_rt_field']]]
def Adduct_only(s):
return [database_csv.loc[s, db_params['db_adduct_field']]]
def Formula_only(s):
return [database_csv.loc[s, db_params['db_formula_field_database']]]
def No_fields(s):
return []
if (db_params['db_rt_field'] != None) and (db_params['db_adduct_field'] != None) and (db_params['db_formula_field_database'] != None):
return Three_filters, ['rt', 'adduct', 'formula']
elif (db_params['db_rt_field'] != None) and (db_params['db_adduct_field'] != None):
return RT_adduct, ['rt', 'adduct']
elif (db_params['db_rt_field'] != None) and (db_params['db_formula_field_database'] != None):
return RT_formula, ['rt', 'formula']
elif (db_params['db_adduct_field'] != None) and (db_params['db_formula_field_database'] != None):
return Adduct_formula, ['adduct', 'formula']
elif db_params['db_rt_field'] != None:
return RT_only, ['rt']
elif db_params['db_adduct_field'] != None:
return Adduct_only, ['adduct']
elif db_params['db_formula_field_database'] != None:
return Formula_only, ['formula']
else:
return No_fields, []
database_table = list()
Filter_fields, cols = Filter_choices(db_params)
col_names = ['name', 'mass'] + cols + db_params['db_export_fields']
print('Extracting database metadata...')
for i in tqdm(database_csv.index):
name = database_csv.loc[i, db_params['db_name_field']]
mass = database_csv.loc[i, db_params['db_mass_field']]
filter_fields = Filter_fields(i)
other_fields = [database_csv.loc[i, field] for field in db_params['db_export_fields']]
new_row = [name, mass] + filter_fields + other_fields
database_table.append((new_row))
database_table = pd.DataFrame(database_table, columns = col_names)
return database_table
def Samplewise_export(neg_csv_file, pos_csv_file, out_path, merged_edge_table, merged_node_table) :
print("Exporting sample-wise tables...")
neg_csv = pd.read_csv(neg_csv_file, index_col ="row ID")
pos_csv = pd.read_csv(pos_csv_file, index_col ="row ID")
neg_csv = Column_correction(neg_csv)
pos_csv = Column_correction(pos_csv)
neg_csv.columns = neg_csv.columns.str.replace(".mzXML Peak area", "", regex = False).str.replace('NEG_', '', regex = False)
pos_csv.columns = pos_csv.columns.str.replace(".mzXML Peak area", "", regex = False).str.replace('POS_', '', regex = False)
neg_csv.drop(["row m/z", "row retention time"], axis = 1, inplace = True)
pos_csv.drop(["row m/z", "row retention time"], axis = 1, inplace = True)
samples = list(set(list(neg_csv.columns) + list(pos_csv.columns)))
samples.sort()
for sample in tqdm(samples):
#sample = samples[0]
ion_ids_neg = neg_csv.index[neg_csv[sample] > 0.0]
ion_ids_pos = pos_csv.index[pos_csv[sample] > 0.0]
#convert feature_ids to the new indexes
tmp_table = merged_node_table[merged_node_table['status'] != "neg_neutral"]
tmp_table = tmp_table[tmp_table['status'] != "pos_neutral"]
tmp_table = tmp_table[tmp_table['status'] != "mix_neutral"]
tmp_table_pos = tmp_table[tmp_table['ion_mode'] == "POS"]
tmp_table_neg = tmp_table[tmp_table['ion_mode'] == "NEG"]
ion_idx_neg = pd.Series(tmp_table_neg.index, index = tmp_table_neg['feature_id'])
ion_idx_neg = list(ion_idx_neg[ion_ids_neg])
ion_idx_pos = pd.Series(tmp_table_pos.index, index = tmp_table_pos['feature_id'])
ion_idx_pos = list(ion_idx_pos[ion_ids_pos])
ion_idx_mix = ion_idx_neg + ion_idx_pos
# Get sample neutrals
neutral_edges = merged_edge_table.loc[merged_edge_table["Adnotation"].dropna().index]
kept_edges = [i for i in neutral_edges.index if neutral_edges.loc[i, "node_2"] in ion_idx_mix]
# Get ion edges
ion_edges = merged_edge_table[merged_edge_table['status'] != "neg_add_edge"]
ion_edges = ion_edges[ion_edges['status'] != "pos_add_edge"]
for i in ion_edges.index:
if ion_edges.loc[i, "node_1"] in ion_idx_mix:
if ion_edges.loc[i, "node_2"] in ion_idx_mix:
kept_edges.append(i)
kept_edges.sort()
sample_edges = merged_edge_table.loc[kept_edges]
sample_edges.sort_values('node_1', inplace = True)
sample_edges.reset_index(inplace = True, drop = True)
kept_nodes = list(set(list(sample_edges['node_1']) + list(sample_edges['node_2'])))
kept_nodes.sort()
sample_nodes = merged_node_table.loc[kept_nodes].copy()
sample_nodes.drop(pd.Series(samples) + ".mzXML Peak area", axis = 1, inplace = True)
sample_nodes[sample] = merged_node_table[sample + ".mzXML Peak area"]
sample_nodes.to_csv(out_path + "MIX_" + sample + "_nodes.csv", index_label = "Index")
sample_edges.to_csv(out_path + "MIX_" + sample + "_edges.csv", index_label = "Index")
return
import os
import pandas as pd
import numpy as np
from tqdm import tqdm
from matchms.importing import load_from_mgf
from matchms.filtering import default_filters
from matchms.similarity import ModifiedCosine
# Load parameters
data_modes= params['single_mode']
mgf_file_neg= params['neg_mgf']
mgf_file_pos= params['pos_mgf']
neg_csv_file= params['neg_csv']
pos_csv_file= params['pos_csv']
input_mgf_neg_path= params['neg_out_0']
input_mgf_pos_path= params['pos_out_0']
node_table_path= params['mix_out_4_1']
out_path_full= params['mix_out_5_1']
out_path_samples= params['mix_out_5_2']
db_prefix= db_params['db_prefix']
db_type= db_params['db_type']
db_folder= db_params['db_folder']
db_file= db_params['db_file']
db_cosine= db_params['db_cosine']
db_matched_peaks= db_params['db_matched_peaks']
db_prec_error= db_params['db_prec_error']
db_mass_error= db_params['db_mass_error']
db_rt_error= db_params['db_rt_error']
db_hits= db_params['db_hits']
db_adduct_filter= db_params['db_adduct_filter']
db_adduct_field= db_params['db_adduct_field']
db_rt_filter= db_params['db_rt_filter']
db_mode_field= db_params['db_mode_field']
db_unique_field= db_params['db_unique_field']
db_export_fields= db_params['db_export_fields']
db_prefix= db_params['db_prefix']
#Create folders and load tables
if not os.path.isdir(out_path_full) :
os.mkdir(out_path_full)
if ((os.path.isdir(out_path_full)) and (len(os.listdir(out_path_full)) == 0)):
if data_modes == "BOTH":
edge_table = pd.read_csv(node_table_path + 'MIX_edges.csv', index_col = "Index")
node_table = pd.read_csv(node_table_path + 'MIX_nodes.csv', index_col = "Index",
dtype={'mz': 'float',
'rt': 'float',
'TIC' : 'float',
'charge' : 'int',
'mgf_index' : 'float',
'status' : 'str',
'Adnotation' : 'str',
'ion_mode' : 'str',
'feature_id' : 'float',
'cluster_id' : 'int'})
elif data_modes == "POS":
node_table_path = params['pos_out_3_1']
edge_table = pd.read_csv(node_table_path + 'POS_edges.csv', index_col = "Index")
node_table = pd.read_csv(node_table_path + 'POS_nodes.csv',
dtype={'feature_id' : 'float',
'mz': 'float',
'rt': 'float',
'TIC' : 'float',
'charge' : 'int',
'mgf_index' : 'float',
'rule_points' : 'int',
'status' : 'str',
'adduct_count' : 'str',
'Adnotation' : 'str'})
node_table['new_index'] = node_table["feature_id"].astype(int)
node_table.set_index("new_index", inplace = True)
node_table['ion_mode'] = ['POS']*len(node_table)
node_table['status_universal'] = node_table['status'].copy()
elif data_modes == "NEG":
node_table_path = params['neg_out_3_1']
edge_table = pd.read_csv(node_table_path + 'NEG_edges.csv', index_col = "Index")
node_table = pd.read_csv(node_table_path + 'NEG_nodes.csv',
dtype={'feature_id' : 'float',
'mz': 'float',
'rt': 'float',
'TIC' : 'float',
'charge' : 'int',
'mgf_index' : 'float',
'rule_points' : 'int',
'status' : 'str',
'adduct_count' : 'str',
'Adnotation' : 'str'})
node_table['new_index'] = node_table["feature_id"].astype(int)
node_table.set_index("new_index", inplace = True)
node_table['ion_mode'] = ['NEG']*len(node_table)
node_table['status_universal'] = node_table['status'].copy()
else:
raise Exception('single_mode parameter in the params file badly set, please use either "POS", "NEG" or "BOTH".')
else:
edge_table = pd.read_csv(out_path_full + 'MIX_edges.csv', index_col = "Index")
node_table = pd.read_csv(out_path_full + 'MIX_nodes.csv', index_col = "Index",
dtype={'mz': 'float',
'rt': 'float',
'TIC' : 'float',
'charge' : 'int',
'mgf_index' : 'float',
'status' : 'str',
'Adnotation' : 'str',
'ion_mode' : 'str',
'feature_id' : 'float',
'cluster_id' : 'int'})
if not os.path.isdir(out_path_samples) :
os.mkdir(out_path_samples)
# Process tables and remove samples to be stored in a samples DF
node_table['Adnotation'] = node_table['Adnotation'].replace({np.nan: None})
samples = list(node_table.columns[node_table.columns.str.contains('Peak area')]) # safe samples in a separate DF
samples_df = node_table[samples]
node_table.drop(samples, axis = 1, inplace = True) # delete samples from the node table
###########################################################################
if db_type == 'ion': # If database is MGF to dereplicate ion (MS/MS) data
###########################################################################
modified_cosine = ModifiedCosine(tolerance=db_mass_error)
Filter_fields_outer, filter_cols = Filter_choices_outer(db_params)
# Load MGF files
if data_modes == "BOTH":
print('Loading NEG MGF file...')
mgf_neg = list(load_from_mgf(input_mgf_neg_path + mgf_file_neg))
mgf_neg = [Spectrum_processing(s) for s in mgf_neg]
print('Loading POS MGF file...')
mgf_pos = list(load_from_mgf(input_mgf_pos_path + mgf_file_pos))
mgf_pos = [Spectrum_processing(s) for s in mgf_pos]
elif data_modes == "POS":
print('Loading POS MGF file...')
mgf_pos = list(load_from_mgf(input_mgf_pos_path + mgf_file_pos))
mgf_pos = [Spectrum_processing(s) for s in mgf_pos]
elif data_modes == "NEG":
print('Loading NEG MGF file...')
mgf_neg = list(load_from_mgf(input_mgf_neg_path + mgf_file_neg))
mgf_neg = [Spectrum_processing(s) for s in mgf_neg]
# Load the database file
print('Loading database file and extracting data...')
database_mgf = list(load_from_mgf(db_folder + db_file))
database_mgf = [Float_prec_mz(s) for s in database_mgf]
database_table = Database_table_mgf(database_mgf, db_params)
# Start dereplication (cosine similarity)
derep_table = list()
print('Starting ion dereplication (cosine similarity)...')
for i in tqdm(node_table.index):
status = node_table.loc[i, "status_universal"]
if status == "neutral" :
new_row = [i, None, None] + [None]*len(filter_cols) + [None]*len(db_export_fields) + [None, None, None]
derep_table.append(new_row)
continue
ion_mz = node_table.loc[i, "mz"]
ion_mgf_idx = int(node_table.loc[i, "mgf_index"])
ion_mode = node_table.loc[i, "ion_mode"]
hits = database_table[database_table['mz'].between(ion_mz - db_prec_error, ion_mz + db_prec_error, inclusive = "both")].copy()
# Ion mode filter
if ion_mode == "NEG":
hits = hits[hits['ion_mode'] == "negative"]
exp_mgf = mgf_neg
else:
hits = hits[hits['ion_mode'] == "positive"]
exp_mgf = mgf_pos
# Optional filters : Adduct and RT
if db_adduct_filter and (status == 'adduct'):
adduct = node_table.loc[i, 'Adnotation']
if adduct != None:
hits = hits[hits['adduct'] == adduct]
if db_rt_filter:
rt = node_table.loc[i, 'rt']
hits = hits[hits['rt'].between(rt - db_rt_error, rt + db_rt_error, inclusive = "both")]
# Calculate cosine similarity if hit table is not empty
similarity_list = list()
for j in hits.index:
score, n_matches = modified_cosine.pair(exp_mgf[ion_mgf_idx], database_mgf[j])
mass_error = abs(ion_mz - hits.loc[j, "mz"])*1000
prod = score * n_matches
similarity_list.append((j, score, n_matches, prod, mass_error))
similarity_list = pd.DataFrame(similarity_list, columns = ["index", "cos", "matches", "prod", "error_mDa"])
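# (editor's assumption) The original function is truncated at this point. Given
# the db_cosine / db_matched_peaks / db_hits thresholds loaded above, a
# plausible next step is to keep only hits that pass those thresholds and
# retain the best candidates; the exact filtering logic of the original is
# unknown and this sketch only illustrates the pattern.
similarity_list = similarity_list[(similarity_list["cos"] >= db_cosine) & (similarity_list["matches"] >= db_matched_peaks)]
similarity_list = similarity_list.sort_values("prod", ascending=False).head(db_hits)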
# Preppin' Data 2021 Week 42
import pandas as pd
import numpy as np
# Input the data
df = pd.read_csv('unprepped_data\\PD 2021 Wk 42 Input.csv')
# Create new rows for any date missing between the first and last date in the data set provided
# build a data frame of all dates from min to max
min_date = min(df['Date'])
max_date = max(df['Date'])
idx = pd.date_range(min_date, max_date)
all_dates = pd.DataFrame()
all_dates['Date'] = idx
# dt.normalize() to remove time component and keep as date type
all_dates['Date'] = all_dates['Date'].dt.normalize()
# merge all_dates with original dataframe,
df['Date'] = pd.to_datetime(df['Date'], format='%d/%m/%Y')
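# (editor's sketch, assumption) One way to complete the merge described in the
# comment above: left-join the original data onto the full date range so that
# any missing dates appear as new rows with NaN values.
df = all_dates.merge(df, on='Date', how='left')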
import pandas as pd
from hdmf.container import Table, Row, RowGetter
from hdmf.testing import TestCase
class TestTable(TestCase):
@classmethod
def get_table_class(cls):
class MyTable(Table):
__defaultname__ = 'my_table'
__columns__ = [
{'name': 'col1', 'type': str, 'help': 'a string column'},
{'name': 'col2', 'type': int, 'help': 'an integer column'},
]
return MyTable
def test_init(self):
MyTable = TestTable.get_table_class()
table = MyTable('test_table')
self.assertTrue(hasattr(table, '__colidx__'))
self.assertEqual(table.__colidx__, {'col1': 0, 'col2': 1})
def test_add_row_getitem(self):
MyTable = TestTable.get_table_class()
table = MyTable('test_table')
table.add_row(col1='foo', col2=100)
table.add_row(col1='bar', col2=200)
row1 = table[0]
row2 = table[1]
self.assertEqual(row1, ('foo', 100))
self.assertEqual(row2, ('bar', 200))
def test_to_dataframe(self):
MyTable = TestTable.get_table_class()
table = MyTable('test_table')
table.add_row(col1='foo', col2=100)
table.add_row(col1='bar', col2=200)
df = table.to_dataframe()
exp = pd.DataFrame(data=[{'col1': 'foo', 'col2': 100}, {'col1': 'bar', 'col2': 200}])
pd.testing.assert_frame_equal(df, exp)
def test_from_dataframe(self):
MyTable = TestTable.get_table_class()
exp = pd.DataFrame(data=[{'col1': 'foo', 'col2': 100}, {'col1': 'bar', 'col2': 200}])
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score, mean_squared_error
import seaborn as sns
from scipy import stats
import math
def clean_data(df):
"""
INPUT
df - pandas dataframe of Airbnb listings data
OUTPUT
X - A matrix holding all of the variables you want to consider when predicting the response
y - the corresponding response vector
This function cleans df_listings using the following steps to produce X and y:
1. Drop rows with 0 price and outlier prices (prices above 2950)
2. Create y as the price column, transformed by log
3. Create X from selected columns
4. Deal with missing values
5. Create dummy variables for selected categorical variables, drop the original columns
"""
# Drop rows with 0 price
df = df[df.price > 0]
df = df[df.price < 2950]
# Create y
y = df['price'].apply(math.log)
# Select columns for X
potential_vars = ['host_listings_count',
'calculated_host_listings_count_private_rooms',
'neighbourhood_cleansed',
'room_type',
'property_type',
'beds',
'availability_365',
'number_of_reviews',
'neighborhood_overview',
'space',
'notes',
'transit',
'access',
'interaction',
'house_rules',
'host_about',
'host_is_superhost',
'host_has_profile_pic',
'host_identity_verified',
'instant_bookable',
'require_guest_profile_picture',
'require_guest_phone_verification',]
bool_vars = ['host_is_superhost',
'host_has_profile_pic',
'host_identity_verified',
'instant_bookable',
'require_guest_profile_picture',
'require_guest_phone_verification']
free_text_vars = ['neighborhood_overview',
'space',
'notes',
'transit',
'access',
'interaction',
'house_rules',
'host_about']
df = df[potential_vars]
# Deal with missing values
df['number_of_reviews'] = df['number_of_reviews'].fillna(0)
df[bool_vars] = df[bool_vars].fillna('f')
df[free_text_vars] = df[free_text_vars].fillna('')
def translate_bool(col):
for index, value in col.iteritems():
col[index] = 1 if value == 't' else 0
return col
def create_bool(col):
for index, value in col.iteritems():
col[index] = 0 if value == '' else 1
return col
fill_mean = lambda col: col.fillna(col.mean())
num_vars = df.select_dtypes(include=['int', 'float']).columns
df[num_vars] = df[num_vars].apply(fill_mean, axis=0)
df[bool_vars] = df[bool_vars].apply(translate_bool, axis=0)
df[bool_vars] = df[bool_vars].astype(int)
df[free_text_vars] = df[free_text_vars].apply(create_bool, axis=0)
df[free_text_vars] = df[free_text_vars].astype(int)
# Dummy the categorical variables
cat_vars = ['neighbourhood_cleansed', 'room_type', 'property_type']
for var in cat_vars:
# for each cat add dummy var, drop original column
df = pd.concat([df.drop(var, axis=1), pd.get_dummies(df[var], prefix=var, prefix_sep='_', drop_first=True)], axis=1)
X = df
return X, y
def find_optimal_lm_mod(X, y, cutoffs, test_size = .30, random_state=42, plot=True):
'''
INPUT
X - pandas dataframe, X matrix
y - pandas dataframe, response variable
cutoffs - list of ints, cutoff for number of non-zero values in dummy categorical vars
test_size - float between 0 and 1, default 0.3, determines the proportion of data as test data
random_state - int, default 42, controls random state for train_test_split
plot - boolean, default 0.3, True to plot result
OUTPUT
r2_scores_test - list of floats of r2 scores on the test data
r2_scores_train - list of floats of r2 scores on the train data
lm_model - model object from sklearn
X_train, X_test, y_train, y_test - output from sklearn train test split used for optimal model
'''
r2_scores_test, r2_scores_train, num_feats, results = [], [], [], dict()
for cutoff in cutoffs:
#reduce X matrix
reduce_X = X.iloc[:, np.where((X.sum() > cutoff) == True)[0]]
num_feats.append(reduce_X.shape[1])
#split the data into train and test
X_train, X_test, y_train, y_test = train_test_split(reduce_X, y, test_size = test_size, random_state=random_state)
#fit the model and obtain pred response
lm_model = LinearRegression(normalize=True)
lm_model.fit(X_train, y_train)
y_test_preds = lm_model.predict(X_test)
y_train_preds = lm_model.predict(X_train)
#append the r2 value from the test set
r2_scores_test.append(r2_score(y_test, y_test_preds))
r2_scores_train.append(r2_score(y_train, y_train_preds))
results[str(cutoff)] = r2_score(y_test, y_test_preds)
if plot:
plt.plot(num_feats, r2_scores_test, label="Test", alpha=.5)
plt.plot(num_feats, r2_scores_train, label="Train", alpha=.5)
plt.xlabel('Number of Features')
plt.ylabel('Rsquared')
plt.title('Rsquared by Number of Features')
plt.legend(loc=1)
plt.show()
best_cutoff = max(results, key=results.get)
#reduce X matrix
reduce_X = X.iloc[:, np.where((X.sum() > int(best_cutoff)) == True)[0]]
num_feats.append(reduce_X.shape[1])
#split the data into train and test
X_train, X_test, y_train, y_test = train_test_split(reduce_X, y, test_size = test_size, random_state=random_state)
#fit the model
lm_model = LinearRegression(normalize=True)
lm_model.fit(X_train, y_train)
return r2_scores_test, r2_scores_train, lm_model, X_train, X_test, y_train, y_test
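# --- Editor's sketch: how clean_data and find_optimal_lm_mod fit together ---
# Added for illustration only; the file path, the price conversion and the
# cutoff grid are assumptions, and main() below performs the full exploratory
# workflow on the real data.
def _quick_model_demo(listings_csv='data/listings_boston.csv'):
    df = pd.read_csv(listings_csv, dtype={"price": str})
    df.loc[:, "price"] = df["price"].str.replace(',', '').str.replace('$', '').astype('float')
    X, y = clean_data(df)
    cutoffs = [5000, 3500, 2500, 1000, 100, 50, 30, 25]
    return find_optimal_lm_mod(X, y, cutoffs, plot=False)
# -----------------------------------------------------------------------------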
def main():
plot = False # set to true if you would like to see plots
print_log = True # set to true if you would like to see stats outputted to console
print_result = True
# Data Exploration
desired_width=320
pd.set_option('display.width', desired_width)
pd.set_option('display.max_columns', 50)
# Get a sense of the numerical data in the available datasets.
df_listings = pd.read_csv('data/listings_boston.csv', dtype={"price": str,
"weekly_price": str,
"monthly_price": str,
"security_deposit": str,
"cleaning_fee": str,
"extra_people": str,
"host_response_rate": str})
# clean up price data to make it numeric
df_listings.loc[:, "price"] = df_listings["price"].str.replace(',', '').str.replace('$', '').astype('float')
df_listings.loc[:, "weekly_price"] = df_listings["weekly_price"].str.replace(',', '').str.replace('$', '').astype('float')
df_listings.loc[:, "monthly_price"] = df_listings["monthly_price"].str.replace(',', '').str.replace('$', '').astype('float')
df_listings.loc[:, "security_deposit"] = df_listings["security_deposit"].str.replace(',', '').str.replace('$', '').astype('float')
df_listings.loc[:, "cleaning_fee"] = df_listings["cleaning_fee"].str.replace(',', '').str.replace('$', '').astype('float')
df_listings.loc[:, "extra_people"] = df_listings["extra_people"].str.replace(',', '').str.replace('$', '').astype('float')
df_listings["host_response_rate"].fillna("0", inplace=True)
df_listings.loc[:, "host_response_rate"] = df_listings["host_response_rate"].str.replace('%', '').astype('int')
if print_log:
print(df_listings.describe())
df_neighborhoods = pd.read_csv('data/neighbourhoods_boston.csv')
if print_log:
print(df_neighborhoods.describe())
df_reviews = pd.read_csv('data/reviews_boston.csv')
if print_log:
print(df_reviews.describe())
df_calendar = pd.read_csv('data/calendar_boston.csv', dtype={"price": str, "adjusted_price": str})
# clean up price data to make it numeric
df_calendar.loc[:, "price"] = df_calendar["price"].str.replace(',', '').str.replace('$', '').astype('float')
df_calendar.loc[:, "adjusted_price"] = df_calendar["adjusted_price"].str.replace(',', '').str.replace('$', '').astype('float')
if print_log:
print(df_calendar.describe())
# df_neighborhoods is basically empty and can be ignored
# df_reviews is full of unstructured review data and would have to be mined to produce modelable data
# df_listings has descriptive information about the location
# df_calendar has price information and how it varies over time. Price and adjusted price have to be formatted.
# Going to primarily focus on df_listings
# How many N/A values are there for each column?
if print_log:
for col in df_listings.columns:
print(col, ':', df_listings[col].dropna().shape[0] / df_listings[col].shape[0])
# Possible binary variable conversions: neighborhood_overview, space, notes, transit, access, interaction,
# house_rules
# Are there any correlations we should worry about?
num_vars = ["price",
"weekly_price",
"monthly_price",
"security_deposit",
"cleaning_fee",
"extra_people",
'host_listings_count',
'host_total_listings_count',
'calculated_host_listings_count',
'calculated_host_listings_count_entire_homes',
'calculated_host_listings_count_private_rooms',
'calculated_host_listings_count_shared_rooms',
'host_response_rate',
'accommodates',
'bathrooms',
'bedrooms',
'beds',
'square_feet',
'guests_included',
'minimum_nights',
'minimum_minimum_nights',
'maximum_minimum_nights',
'minimum_nights_avg_ntm',
'maximum_nights',
'minimum_maximum_nights',
'maximum_maximum_nights',
'maximum_nights_avg_ntm',
'availability_30',
'availability_60',
'availability_90',
'availability_365',
'number_of_reviews',
'number_of_reviews_ltm',
'reviews_per_month',
'review_scores_rating',
'review_scores_accuracy',
'review_scores_cleanliness',
'review_scores_checkin',
'review_scores_communication',
'review_scores_location',
'review_scores_value'
]
if plot:
sns.heatmap(df_listings[num_vars].corr(), annot=False, fmt=".2f", cmap="YlGnBu", linewidths=.5, square=True)
plt.show()
# Correlation matrix supports some clearly distinct categories of data
# Pricing: price, weekly_price, monthly_price, security_deposit, cleaning_fee, extra_people
# Host: host_listings_count, host_total_listings_count, calculated_host_listings_count,
# calculated_host_listings_count_entire_homes, calculated_host_listings_count_private_rooms,
# calculated_host_listings_count_shared_rooms
# Property: accommodates, bathrooms, bedrooms, beds, square_feet, guests_included, minimum_nights,
# minimum_minimum_nights, maximum_minimum_nights, minimum_nights_avg_ntm, maximum_nights, minimum_maximum_nights,
# maximum_maximum_nights, maximum_nights_avg_ntm
# Availability: availability_30, availability_60, availability_90, availability_365
# Reviews: number_of_reviews, number_of_reviews_ltm, reviews_per_month, review_scores_rating,
# review_scores_cleanliness, review_scores_checkin, review_scores_communication, review_scores_location,
# review_scores_value
# Get a sense of the categorical data in the available data.
cat_vars = ["space",
"description",
"experiences_offered",
"neighborhood_overview",
"notes",
"transit",
"access",
"interaction",
"house_rules",
"host_name",
"host_since",
"host_location",
"host_about",
"host_response_time",
"host_acceptance_rate",
"host_is_superhost",
"host_neighbourhood",
"host_verifications",
"host_has_profile_pic",
"host_identity_verified",
"street",
"neighbourhood",
"neighbourhood_cleansed",
"market",
"smart_location",
"is_location_exact",
"property_type",
"room_type",
"bed_type",
"amenities",
"extra_people",
"calendar_updated",
"has_availability",
"calendar_last_scraped",
"requires_license",
"instant_bookable",
"is_business_travel_ready",
"cancellation_policy",
"require_guest_profile_picture",
"require_guest_phone_verification"]
if print_log:
for col in df_listings[cat_vars].columns:
print(df_listings[[col, 'price']].groupby([col]).mean())
print(df_listings[col].value_counts())
# free text columns: space, description, neighborhood_overview, notes, transit, access, interaction, house_rules,
# host_name, host_about,
# empty: experiences_offered, market, calendar_last_scraped, requires_license, is_business_travel_ready
# boolean: host_is_superhost, host_has_profile_pic, host_identity_verified, is_location_exact, has_availability,
# instant_bookable, require_guest_profile_picture, require_guest_phone_verification, host_about
# categorical: property_type, room_type, bed_type, amenities, calendar_updated, cancellation_policy,
if print_log:
print(pd.crosstab(df_listings['neighbourhood'], df_listings['room_type']))
# -*- coding: utf-8 -*-
# pylint: disable=E1101,E1103,W0232
import os
import sys
from datetime import datetime
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
import pandas.compat as compat
import pandas.core.common as com
import pandas.util.testing as tm
from pandas import (Categorical, Index, Series, DataFrame, PeriodIndex,
Timestamp, CategoricalIndex)
from pandas.compat import range, lrange, u, PY3
from pandas.core.config import option_context
# GH 12066
# flake8: noqa
class TestCategorical(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.factor = Categorical.from_array(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'],
ordered=True)
def test_getitem(self):
self.assertEqual(self.factor[0], 'a')
self.assertEqual(self.factor[-1], 'c')
subf = self.factor[[0, 1, 2]]
tm.assert_almost_equal(subf._codes, [0, 1, 1])
subf = self.factor[np.asarray(self.factor) == 'c']
tm.assert_almost_equal(subf._codes, [2, 2, 2])
def test_getitem_listlike(self):
# GH 9469
# properly coerce the input indexers
np.random.seed(1)
c = Categorical(np.random.randint(0, 5, size=150000).astype(np.int8))
result = c.codes[np.array([100000]).astype(np.int64)]
expected = c[np.array([100000]).astype(np.int64)].codes
self.assert_numpy_array_equal(result, expected)
def test_setitem(self):
# int/positional
c = self.factor.copy()
c[0] = 'b'
self.assertEqual(c[0], 'b')
c[-1] = 'a'
self.assertEqual(c[-1], 'a')
# boolean
c = self.factor.copy()
indexer = np.zeros(len(c), dtype='bool')
indexer[0] = True
indexer[-1] = True
c[indexer] = 'c'
expected = Categorical.from_array(['c', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], ordered=True)
self.assert_categorical_equal(c, expected)
def test_setitem_listlike(self):
# GH 9469
# properly coerce the input indexers
np.random.seed(1)
c = Categorical(np.random.randint(0, 5, size=150000).astype(
np.int8)).add_categories([-1000])
indexer = np.array([100000]).astype(np.int64)
c[indexer] = -1000
# we are asserting the code result here
# which maps to the -1000 category
result = c.codes[np.array([100000]).astype(np.int64)]
self.assertEqual(result, np.array([5], dtype='int8'))
def test_constructor_unsortable(self):
# it works!
arr = np.array([1, 2, 3, datetime.now()], dtype='O')
factor = Categorical.from_array(arr, ordered=False)
self.assertFalse(factor.ordered)
if compat.PY3:
self.assertRaises(
TypeError, lambda: Categorical.from_array(arr, ordered=True))
else:
# this however will raise as cannot be sorted (on PY3 or older
# numpies)
if LooseVersion(np.__version__) < "1.10":
self.assertRaises(
TypeError,
lambda: Categorical.from_array(arr, ordered=True))
else:
Categorical.from_array(arr, ordered=True)
def test_is_equal_dtype(self):
# test dtype comparisons between cats
c1 = Categorical(list('aabca'), categories=list('abc'), ordered=False)
c2 = Categorical(list('aabca'), categories=list('cab'), ordered=False)
c3 = Categorical(list('aabca'), categories=list('cab'), ordered=True)
self.assertTrue(c1.is_dtype_equal(c1))
self.assertTrue(c2.is_dtype_equal(c2))
self.assertTrue(c3.is_dtype_equal(c3))
self.assertFalse(c1.is_dtype_equal(c2))
self.assertFalse(c1.is_dtype_equal(c3))
self.assertFalse(c1.is_dtype_equal(Index(list('aabca'))))
self.assertFalse(c1.is_dtype_equal(c1.astype(object)))
self.assertTrue(c1.is_dtype_equal(CategoricalIndex(c1)))
self.assertFalse(c1.is_dtype_equal(
CategoricalIndex(c1, categories=list('cab'))))
self.assertFalse(c1.is_dtype_equal(CategoricalIndex(c1, ordered=True)))
def test_constructor(self):
exp_arr = np.array(["a", "b", "c", "a", "b", "c"])
c1 = Categorical(exp_arr)
self.assert_numpy_array_equal(c1.__array__(), exp_arr)
c2 = Categorical(exp_arr, categories=["a", "b", "c"])
self.assert_numpy_array_equal(c2.__array__(), exp_arr)
c2 = Categorical(exp_arr, categories=["c", "b", "a"])
self.assert_numpy_array_equal(c2.__array__(), exp_arr)
# categories must be unique
def f():
Categorical([1, 2], [1, 2, 2])
self.assertRaises(ValueError, f)
def f():
Categorical(["a", "b"], ["a", "b", "b"])
self.assertRaises(ValueError, f)
def f():
with tm.assert_produces_warning(FutureWarning):
Categorical([1, 2], [1, 2, np.nan, np.nan])
self.assertRaises(ValueError, f)
# The default should be unordered
c1 = Categorical(["a", "b", "c", "a"])
self.assertFalse(c1.ordered)
# Categorical as input
c1 = Categorical(["a", "b", "c", "a"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(c1, categories=["a", "b", "c"])
self.assert_numpy_array_equal(c1.__array__(), c2.__array__())
self.assert_numpy_array_equal(c2.categories, np.array(["a", "b", "c"]))
# Series of dtype category
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(Series(c1))
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(Series(c1))
self.assertTrue(c1.equals(c2))
# Series
c1 = Categorical(["a", "b", "c", "a"])
c2 = Categorical(Series(["a", "b", "c", "a"]))
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(
Series(["a", "b", "c", "a"]), categories=["a", "b", "c", "d"])
self.assertTrue(c1.equals(c2))
# This should result in integer categories, not float!
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
self.assertTrue(com.is_integer_dtype(cat.categories))
# https://github.com/pydata/pandas/issues/3678
cat = pd.Categorical([np.nan, 1, 2, 3])
self.assertTrue(com.is_integer_dtype(cat.categories))
# this should result in floats
cat = pd.Categorical([np.nan, 1, 2., 3])
self.assertTrue(com.is_float_dtype(cat.categories))
cat = pd.Categorical([np.nan, 1., 2., 3.])
self.assertTrue(com.is_float_dtype(cat.categories))
# Deprecating NaNs in categoires (GH #10748)
# preserve int as far as possible by converting to object if NaN is in
# categories
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, 1, 2, 3],
categories=[np.nan, 1, 2, 3])
self.assertTrue(com.is_object_dtype(cat.categories))
# This doesn't work -> this would probably need some kind of "remember
# the original type" feature to try to cast the array interface result
# to...
# vals = np.asarray(cat[cat.notnull()])
# self.assertTrue(com.is_integer_dtype(vals))
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, "a", "b", "c"],
categories=[np.nan, "a", "b", "c"])
self.assertTrue(com.is_object_dtype(cat.categories))
# but don't do it for floats
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, 1., 2., 3.],
categories=[np.nan, 1., 2., 3.])
self.assertTrue(com.is_float_dtype(cat.categories))
# corner cases
cat = pd.Categorical([1])
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
cat = pd.Categorical(["a"])
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == "a")
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
# Scalars should be converted to lists
cat = pd.Categorical(1)
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
cat = pd.Categorical([1], categories=1)
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
# Catch old style constructor usage: two arrays, codes + categories
# We can only catch two cases:
# - when the first is an integer dtype and the second is not
# - when the resulting codes are all -1/NaN
with tm.assert_produces_warning(RuntimeWarning):
c_old = Categorical([0, 1, 2, 0, 1, 2],
categories=["a", "b", "c"]) # noqa
with tm.assert_produces_warning(RuntimeWarning):
c_old = Categorical([0, 1, 2, 0, 1, 2], # noqa
categories=[3, 4, 5])
# the next one are from the old docs, but unfortunately these don't
# trigger :-(
with tm.assert_produces_warning(None):
c_old2 = Categorical([0, 1, 2, 0, 1, 2], [1, 2, 3]) # noqa
cat = Categorical([1, 2], categories=[1, 2, 3])
# this is a legitimate constructor
with tm.assert_produces_warning(None):
c = Categorical(np.array([], dtype='int64'), # noqa
categories=[3, 2, 1], ordered=True)
def test_constructor_with_index(self):
ci = CategoricalIndex(list('aabbca'), categories=list('cab'))
self.assertTrue(ci.values.equals(Categorical(ci)))
ci = CategoricalIndex(list('aabbca'), categories=list('cab'))
self.assertTrue(ci.values.equals(Categorical(
ci.astype(object), categories=ci.categories)))
def test_constructor_with_generator(self):
# This was raising an Error in isnull(single_val).any() because isnull
# returned a scalar for a generator
xrange = range
exp = Categorical([0, 1, 2])
cat = Categorical((x for x in [0, 1, 2]))
self.assertTrue(cat.equals(exp))
cat = Categorical(xrange(3))
self.assertTrue(cat.equals(exp))
# This uses xrange internally
from pandas.core.index import MultiIndex
MultiIndex.from_product([range(5), ['a', 'b', 'c']])
# check that categories accept generators and sequences
cat = pd.Categorical([0, 1, 2], categories=(x for x in [0, 1, 2]))
self.assertTrue(cat.equals(exp))
cat = pd.Categorical([0, 1, 2], categories=xrange(3))
self.assertTrue(cat.equals(exp))
def test_from_codes(self):
# too few categories
def f():
Categorical.from_codes([1, 2], [1, 2])
self.assertRaises(ValueError, f)
# no int codes
def f():
Categorical.from_codes(["a"], [1, 2])
self.assertRaises(ValueError, f)
# no unique categories
def f():
Categorical.from_codes([0, 1, 2], ["a", "a", "b"])
self.assertRaises(ValueError, f)
# too negative
def f():
Categorical.from_codes([-2, 1, 2], ["a", "b", "c"])
self.assertRaises(ValueError, f)
exp = Categorical(["a", "b", "c"], ordered=False)
res = Categorical.from_codes([0, 1, 2], ["a", "b", "c"])
self.assertTrue(exp.equals(res))
# Not available in earlier numpy versions
if hasattr(np.random, "choice"):
codes = np.random.choice([0, 1], 5, p=[0.9, 0.1])
pd.Categorical.from_codes(codes, categories=["train", "test"])
def test_comparisons(self):
result = self.factor[self.factor == 'a']
expected = self.factor[np.asarray(self.factor) == 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor != 'a']
expected = self.factor[np.asarray(self.factor) != 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor < 'c']
expected = self.factor[np.asarray(self.factor) < 'c']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor > 'a']
expected = self.factor[np.asarray(self.factor) > 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor >= 'b']
expected = self.factor[np.asarray(self.factor) >= 'b']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor <= 'b']
expected = self.factor[np.asarray(self.factor) <= 'b']
self.assertTrue(result.equals(expected))
n = len(self.factor)
other = self.factor[np.random.permutation(n)]
result = self.factor == other
expected = np.asarray(self.factor) == np.asarray(other)
self.assert_numpy_array_equal(result, expected)
result = self.factor == 'd'
expected = np.repeat(False, len(self.factor))
self.assert_numpy_array_equal(result, expected)
# comparisons with categoricals
cat_rev = pd.Categorical(["a", "b", "c"], categories=["c", "b", "a"],
ordered=True)
cat_rev_base = pd.Categorical(
["b", "b", "b"], categories=["c", "b", "a"], ordered=True)
cat = pd.Categorical(["a", "b", "c"], ordered=True)
cat_base = pd.Categorical(["b", "b", "b"], categories=cat.categories,
ordered=True)
# comparisons need to take categories ordering into account
res_rev = cat_rev > cat_rev_base
exp_rev = np.array([True, False, False])
self.assert_numpy_array_equal(res_rev, exp_rev)
res_rev = cat_rev < cat_rev_base
exp_rev = np.array([False, False, True])
self.assert_numpy_array_equal(res_rev, exp_rev)
res = cat > cat_base
exp = np.array([False, False, True])
self.assert_numpy_array_equal(res, exp)
# Only categories with same categories can be compared
def f():
cat > cat_rev
self.assertRaises(TypeError, f)
cat_rev_base2 = pd.Categorical(
["b", "b", "b"], categories=["c", "b", "a", "d"])
def f():
cat_rev > cat_rev_base2
self.assertRaises(TypeError, f)
# Only categories with same ordering information can be compared
cat_unorderd = cat.set_ordered(False)
self.assertFalse((cat > cat).any())
def f():
cat > cat_unorderd
self.assertRaises(TypeError, f)
# comparison (in both directions) with Series will raise
s = Series(["b", "b", "b"])
self.assertRaises(TypeError, lambda: cat > s)
self.assertRaises(TypeError, lambda: cat_rev > s)
self.assertRaises(TypeError, lambda: s < cat)
self.assertRaises(TypeError, lambda: s < cat_rev)
# comparison with numpy.array will raise in both direction, but only on
# newer numpy versions
a = np.array(["b", "b", "b"])
self.assertRaises(TypeError, lambda: cat > a)
self.assertRaises(TypeError, lambda: cat_rev > a)
# The following work via '__array_priority__ = 1000'
# works only on numpy >= 1.7.1
if LooseVersion(np.__version__) > "1.7.1":
self.assertRaises(TypeError, lambda: a < cat)
self.assertRaises(TypeError, lambda: a < cat_rev)
# Make sure that unequal comparison take the categories order in
# account
cat_rev = pd.Categorical(
list("abc"), categories=list("cba"), ordered=True)
exp = np.array([True, False, False])
res = cat_rev > "b"
self.assert_numpy_array_equal(res, exp)
def test_na_flags_int_categories(self):
# #1457
categories = lrange(10)
labels = np.random.randint(0, 10, 20)
labels[::5] = -1
cat = Categorical(labels, categories, fastpath=True)
repr(cat)
self.assert_numpy_array_equal(com.isnull(cat), labels == -1)
def test_categories_none(self):
factor = Categorical(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], ordered=True)
self.assertTrue(factor.equals(self.factor))
def test_describe(self):
# string type
desc = self.factor.describe()
expected = DataFrame({'counts': [3, 2, 3],
'freqs': [3 / 8., 2 / 8., 3 / 8.]},
index=pd.CategoricalIndex(['a', 'b', 'c'],
name='categories'))
tm.assert_frame_equal(desc, expected)
# check unused categories
cat = self.factor.copy()
cat.set_categories(["a", "b", "c", "d"], inplace=True)
desc = cat.describe()
expected = DataFrame({'counts': [3, 2, 3, 0],
'freqs': [3 / 8., 2 / 8., 3 / 8., 0]},
index=pd.CategoricalIndex(['a', 'b', 'c', 'd'],
name='categories'))
tm.assert_frame_equal(desc, expected)
# check an integer one
desc = Categorical([1, 2, 3, 1, 2, 3, 3, 2, 1, 1, 1]).describe()
expected = DataFrame({'counts': [5, 3, 3],
'freqs': [5 / 11., 3 / 11., 3 / 11.]},
index=pd.CategoricalIndex([1, 2, 3],
name='categories'))
tm.assert_frame_equal(desc, expected)
# https://github.com/pydata/pandas/issues/3678
# describe should work with NaN
cat = pd.Categorical([np.nan, 1, 2, 2])
desc = cat.describe()
expected = DataFrame({'counts': [1, 2, 1],
'freqs': [1 / 4., 2 / 4., 1 / 4.]},
index=pd.CategoricalIndex([1, 2, np.nan],
categories=[1, 2],
name='categories'))
tm.assert_frame_equal(desc, expected)
# NA as a category
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical(["a", "c", "c", np.nan],
categories=["b", "a", "c", np.nan])
result = cat.describe()
expected = DataFrame([[0, 0], [1, 0.25], [2, 0.5], [1, 0.25]],
columns=['counts', 'freqs'],
index=pd.CategoricalIndex(['b', 'a', 'c', np.nan],
name='categories'))
tm.assert_frame_equal(result, expected)
# NA as an unused category
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical(["a", "c", "c"],
categories=["b", "a", "c", np.nan])
result = cat.describe()
exp_idx = pd.CategoricalIndex(
['b', 'a', 'c', np.nan], name='categories')
expected = DataFrame([[0, 0], [1, 1 / 3.], [2, 2 / 3.], [0, 0]],
columns=['counts', 'freqs'], index=exp_idx)
tm.assert_frame_equal(result, expected)
def test_print(self):
expected = ["[a, b, b, a, a, c, c, c]",
"Categories (3, object): [a < b < c]"]
expected = "\n".join(expected)
actual = repr(self.factor)
self.assertEqual(actual, expected)
def test_big_print(self):
factor = Categorical([0, 1, 2, 0, 1, 2] * 100, ['a', 'b', 'c'],
name='cat', fastpath=True)
expected = ["[a, b, c, a, b, ..., b, c, a, b, c]", "Length: 600",
"Categories (3, object): [a, b, c]"]
expected = "\n".join(expected)
actual = repr(factor)
self.assertEqual(actual, expected)
def test_empty_print(self):
factor = Categorical([], ["a", "b", "c"])
expected = ("[], Categories (3, object): [a, b, c]")
# hack because array_repr changed in numpy > 1.6.x
actual = repr(factor)
self.assertEqual(actual, expected)
self.assertEqual(expected, actual)
factor = Categorical([], ["a", "b", "c"], ordered=True)
expected = ("[], Categories (3, object): [a < b < c]")
actual = repr(factor)
self.assertEqual(expected, actual)
factor = Categorical([], [])
expected = ("[], Categories (0, object): []")
self.assertEqual(expected, repr(factor))
def test_print_none_width(self):
# GH10087
a = pd.Series(pd.Categorical([1, 2, 3, 4]))
exp = u("0 1\n1 2\n2 3\n3 4\n" +
"dtype: category\nCategories (4, int64): [1, 2, 3, 4]")
with option_context("display.width", None):
self.assertEqual(exp, repr(a))
def test_unicode_print(self):
if PY3:
_rep = repr
else:
_rep = unicode # noqa
c = pd.Categorical(['aaaaa', 'bb', 'cccc'] * 20)
expected = u"""\
[aaaaa, bb, cccc, aaaaa, bb, ..., bb, cccc, aaaaa, bb, cccc]
Length: 60
Categories (3, object): [aaaaa, bb, cccc]"""
self.assertEqual(_rep(c), expected)
c = pd.Categorical([u'ああああ', u'いいいいい', u'ううううううう']
* 20)
expected = u"""\
[ああああ, いいいいい, ううううううう, ああああ, いいいいい, ..., いいいいい, ううううううう, ああああ, いいいいい, ううううううう]
Length: 60
Categories (3, object): [ああああ, いいいいい, ううううううう]""" # noqa
self.assertEqual(_rep(c), expected)
        # the unicode option should not affect Categorical, as it doesn't care
        # about the repr width
with option_context('display.unicode.east_asian_width', True):
c = pd.Categorical([u'ああああ', u'いいいいい', u'ううううううう']
* 20)
expected = u"""[ああああ, いいいいい, ううううううう, ああああ, いいいいい, ..., いいいいい, ううううううう, ああああ, いいいいい, ううううううう]
Length: 60
Categories (3, object): [ああああ, いいいいい, ううううううう]""" # noqa
self.assertEqual(_rep(c), expected)
def test_periodindex(self):
idx1 = PeriodIndex(['2014-01', '2014-01', '2014-02', '2014-02',
'2014-03', '2014-03'], freq='M')
cat1 = Categorical.from_array(idx1)
str(cat1)
exp_arr = np.array([0, 0, 1, 1, 2, 2], dtype='int64')
exp_idx = PeriodIndex(['2014-01', '2014-02', '2014-03'], freq='M')
self.assert_numpy_array_equal(cat1._codes, exp_arr)
self.assertTrue(cat1.categories.equals(exp_idx))
idx2 = PeriodIndex(['2014-03', '2014-03', '2014-02', '2014-01',
'2014-03', '2014-01'], freq='M')
cat2 = Categorical.from_array(idx2, ordered=True)
str(cat2)
exp_arr = np.array([2, 2, 1, 0, 2, 0], dtype='int64')
exp_idx2 = PeriodIndex(['2014-01', '2014-02', '2014-03'], freq='M')
self.assert_numpy_array_equal(cat2._codes, exp_arr)
self.assertTrue(cat2.categories.equals(exp_idx2))
idx3 = PeriodIndex(['2013-12', '2013-11', '2013-10', '2013-09',
'2013-08', '2013-07', '2013-05'], freq='M')
cat3 = Categorical.from_array(idx3, ordered=True)
exp_arr = np.array([6, 5, 4, 3, 2, 1, 0], dtype='int64')
exp_idx = PeriodIndex(['2013-05', '2013-07', '2013-08', '2013-09',
'2013-10', '2013-11', '2013-12'], freq='M')
self.assert_numpy_array_equal(cat3._codes, exp_arr)
self.assertTrue(cat3.categories.equals(exp_idx))
    def test_categories_assignments(self):
s = pd.Categorical(["a", "b", "c", "a"])
exp = np.array([1, 2, 3, 1])
s.categories = [1, 2, 3]
self.assert_numpy_array_equal(s.__array__(), exp)
self.assert_numpy_array_equal(s.categories, np.array([1, 2, 3]))
# lengthen
def f():
s.categories = [1, 2, 3, 4]
self.assertRaises(ValueError, f)
# shorten
def f():
s.categories = [1, 2]
self.assertRaises(ValueError, f)
def test_construction_with_ordered(self):
# GH 9347, 9190
cat = Categorical([0, 1, 2])
self.assertFalse(cat.ordered)
cat = Categorical([0, 1, 2], ordered=False)
self.assertFalse(cat.ordered)
cat = Categorical([0, 1, 2], ordered=True)
self.assertTrue(cat.ordered)
def test_ordered_api(self):
# GH 9347
cat1 = pd.Categorical(["a", "c", "b"], ordered=False)
self.assertTrue(cat1.categories.equals(Index(['a', 'b', 'c'])))
self.assertFalse(cat1.ordered)
cat2 = pd.Categorical(["a", "c", "b"], categories=['b', 'c', 'a'],
ordered=False)
self.assertTrue(cat2.categories.equals(Index(['b', 'c', 'a'])))
self.assertFalse(cat2.ordered)
cat3 = pd.Categorical(["a", "c", "b"], ordered=True)
self.assertTrue(cat3.categories.equals(Index(['a', 'b', 'c'])))
self.assertTrue(cat3.ordered)
cat4 = pd.Categorical(["a", "c", "b"], categories=['b', 'c', 'a'],
ordered=True)
self.assertTrue(cat4.categories.equals(Index(['b', 'c', 'a'])))
self.assertTrue(cat4.ordered)
def test_set_ordered(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
cat2 = cat.as_unordered()
self.assertFalse(cat2.ordered)
cat2 = cat.as_ordered()
self.assertTrue(cat2.ordered)
cat2.as_unordered(inplace=True)
self.assertFalse(cat2.ordered)
cat2.as_ordered(inplace=True)
self.assertTrue(cat2.ordered)
self.assertTrue(cat2.set_ordered(True).ordered)
self.assertFalse(cat2.set_ordered(False).ordered)
cat2.set_ordered(True, inplace=True)
self.assertTrue(cat2.ordered)
cat2.set_ordered(False, inplace=True)
self.assertFalse(cat2.ordered)
        # deprecated in v0.16.0
with tm.assert_produces_warning(FutureWarning):
cat.ordered = False
self.assertFalse(cat.ordered)
with tm.assert_produces_warning(FutureWarning):
cat.ordered = True
self.assertTrue(cat.ordered)
def test_set_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
exp_categories = np.array(["c", "b", "a"])
exp_values = np.array(["a", "b", "c", "a"])
res = cat.set_categories(["c", "b", "a"], inplace=True)
self.assert_numpy_array_equal(cat.categories, exp_categories)
self.assert_numpy_array_equal(cat.__array__(), exp_values)
self.assertIsNone(res)
res = cat.set_categories(["a", "b", "c"])
# cat must be the same as before
self.assert_numpy_array_equal(cat.categories, exp_categories)
self.assert_numpy_array_equal(cat.__array__(), exp_values)
# only res is changed
exp_categories_back = np.array(["a", "b", "c"])
self.assert_numpy_array_equal(res.categories, exp_categories_back)
self.assert_numpy_array_equal(res.__array__(), exp_values)
# not all "old" included in "new" -> all not included ones are now
# np.nan
cat = Categorical(["a", "b", "c", "a"], ordered=True)
res = cat.set_categories(["a"])
self.assert_numpy_array_equal(res.codes, np.array([0, -1, -1, 0]))
# still not all "old" in "new"
res = cat.set_categories(["a", "b", "d"])
self.assert_numpy_array_equal(res.codes, np.array([0, 1, -1, 0]))
self.assert_numpy_array_equal(res.categories,
np.array(["a", "b", "d"]))
# all "old" included in "new"
cat = cat.set_categories(["a", "b", "c", "d"])
exp_categories = np.array(["a", "b", "c", "d"])
self.assert_numpy_array_equal(cat.categories, exp_categories)
# internals...
c = Categorical([1, 2, 3, 4, 1], categories=[1, 2, 3, 4], ordered=True)
self.assert_numpy_array_equal(c._codes, np.array([0, 1, 2, 3, 0]))
self.assert_numpy_array_equal(c.categories, np.array([1, 2, 3, 4]))
self.assert_numpy_array_equal(c.get_values(),
np.array([1, 2, 3, 4, 1]))
        # all "pointers" to '4' must be changed from 3 to 0, ...
        c = c.set_categories([4, 3, 2, 1])
        # positions are changed
        self.assert_numpy_array_equal(c._codes, np.array([3, 2, 1, 0, 3]))
        # categories are now in new order
        self.assert_numpy_array_equal(c.categories, np.array([4, 3, 2, 1]))
        # output is the same
        self.assert_numpy_array_equal(c.get_values(), np.array([1, 2, 3, 4, 1]))
        self.assertEqual(c.min(), 4)
        self.assertEqual(c.max(), 1)
# set_categories should set the ordering if specified
c2 = c.set_categories([4, 3, 2, 1], ordered=False)
self.assertFalse(c2.ordered)
self.assert_numpy_array_equal(c.get_values(), c2.get_values())
# set_categories should pass thru the ordering
c2 = c.set_ordered(False).set_categories([4, 3, 2, 1])
self.assertFalse(c2.ordered)
self.assert_numpy_array_equal(c.get_values(), c2.get_values())
def test_rename_categories(self):
cat = pd.Categorical(["a", "b", "c", "a"])
# inplace=False: the old one must not be changed
res = cat.rename_categories([1, 2, 3])
self.assert_numpy_array_equal(res.__array__(), np.array([1, 2, 3, 1]))
self.assert_numpy_array_equal(res.categories, np.array([1, 2, 3]))
self.assert_numpy_array_equal(cat.__array__(),
np.array(["a", "b", "c", "a"]))
self.assert_numpy_array_equal(cat.categories,
np.array(["a", "b", "c"]))
res = cat.rename_categories([1, 2, 3], inplace=True)
# and now inplace
self.assertIsNone(res)
self.assert_numpy_array_equal(cat.__array__(), np.array([1, 2, 3, 1]))
self.assert_numpy_array_equal(cat.categories, np.array([1, 2, 3]))
# lengthen
def f():
cat.rename_categories([1, 2, 3, 4])
self.assertRaises(ValueError, f)
# shorten
def f():
cat.rename_categories([1, 2])
self.assertRaises(ValueError, f)
def test_reorder_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(["a", "b", "c", "a"], categories=["c", "b", "a"],
ordered=True)
# first inplace == False
res = cat.reorder_categories(["c", "b", "a"])
# cat must be the same as before
self.assert_categorical_equal(cat, old)
# only res is changed
self.assert_categorical_equal(res, new)
# inplace == True
res = cat.reorder_categories(["c", "b", "a"], inplace=True)
self.assertIsNone(res)
self.assert_categorical_equal(cat, new)
# not all "old" included in "new"
cat = Categorical(["a", "b", "c", "a"], ordered=True)
def f():
cat.reorder_categories(["a"])
self.assertRaises(ValueError, f)
# still not all "old" in "new"
def f():
cat.reorder_categories(["a", "b", "d"])
self.assertRaises(ValueError, f)
# all "old" included in "new", but too long
def f():
cat.reorder_categories(["a", "b", "c", "d"])
self.assertRaises(ValueError, f)
def test_add_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(["a", "b", "c", "a"],
categories=["a", "b", "c", "d"], ordered=True)
# first inplace == False
res = cat.add_categories("d")
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
res = cat.add_categories(["d"])
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
# inplace == True
res = cat.add_categories("d", inplace=True)
self.assert_categorical_equal(cat, new)
self.assertIsNone(res)
# new is in old categories
def f():
cat.add_categories(["d"])
self.assertRaises(ValueError, f)
# GH 9927
cat = Categorical(list("abc"), ordered=True)
expected = Categorical(
list("abc"), categories=list("abcde"), ordered=True)
# test with Series, np.array, index, list
res = cat.add_categories(Series(["d", "e"]))
self.assert_categorical_equal(res, expected)
res = cat.add_categories(np.array(["d", "e"]))
self.assert_categorical_equal(res, expected)
res = cat.add_categories(Index(["d", "e"]))
self.assert_categorical_equal(res, expected)
res = cat.add_categories(["d", "e"])
self.assert_categorical_equal(res, expected)
def test_remove_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(["a", "b", np.nan, "a"], categories=["a", "b"],
ordered=True)
# first inplace == False
res = cat.remove_categories("c")
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
res = cat.remove_categories(["c"])
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
# inplace == True
res = cat.remove_categories("c", inplace=True)
self.assert_categorical_equal(cat, new)
self.assertIsNone(res)
# removal is not in categories
def f():
cat.remove_categories(["c"])
self.assertRaises(ValueError, f)
def test_remove_unused_categories(self):
c = Categorical(["a", "b", "c", "d", "a"],
categories=["a", "b", "c", "d", "e"])
exp_categories_all = np.array(["a", "b", "c", "d", "e"])
exp_categories_dropped = np.array(["a", "b", "c", "d"])
self.assert_numpy_array_equal(c.categories, exp_categories_all)
res = c.remove_unused_categories()
self.assert_numpy_array_equal(res.categories, exp_categories_dropped)
self.assert_numpy_array_equal(c.categories, exp_categories_all)
res = c.remove_unused_categories(inplace=True)
self.assert_numpy_array_equal(c.categories, exp_categories_dropped)
self.assertIsNone(res)
# with NaN values (GH11599)
c = Categorical(["a", "b", "c", np.nan],
categories=["a", "b", "c", "d", "e"])
res = c.remove_unused_categories()
self.assert_numpy_array_equal(res.categories,
np.array(["a", "b", "c"]))
self.assert_numpy_array_equal(c.categories, exp_categories_all)
val = ['F', np.nan, 'D', 'B', 'D', 'F', np.nan]
cat = pd.Categorical(values=val, categories=list('ABCDEFG'))
out = cat.remove_unused_categories()
self.assert_numpy_array_equal(out.categories, ['B', 'D', 'F'])
self.assert_numpy_array_equal(out.codes, [2, -1, 1, 0, 1, 2, -1])
self.assertEqual(out.get_values().tolist(), val)
alpha = list('abcdefghijklmnopqrstuvwxyz')
val = np.random.choice(alpha[::2], 10000).astype('object')
val[np.random.choice(len(val), 100)] = np.nan
cat = pd.Categorical(values=val, categories=alpha)
out = cat.remove_unused_categories()
self.assertEqual(out.get_values().tolist(), val.tolist())
def test_nan_handling(self):
# Nans are represented as -1 in codes
c = Categorical(["a", "b", np.nan, "a"])
self.assert_numpy_array_equal(c.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0]))
c[1] = np.nan
self.assert_numpy_array_equal(c.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(c._codes, np.array([0, -1, -1, 0]))
# If categories have nan included, the code should point to that
# instead
with tm.assert_produces_warning(FutureWarning):
c = Categorical(["a", "b", np.nan, "a"],
categories=["a", "b", np.nan])
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, 2, 0]))
c[1] = np.nan
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 2, 2, 0]))
# Changing categories should also make the replaced category np.nan
c = Categorical(["a", "b", "c", "a"])
with tm.assert_produces_warning(FutureWarning):
c.categories = ["a", "b", np.nan] # noqa
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, 2, 0]))
# Adding nan to categories should make assigned nan point to the
# category!
c = Categorical(["a", "b", np.nan, "a"])
self.assert_numpy_array_equal(c.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0]))
with tm.assert_produces_warning(FutureWarning):
c.set_categories(["a", "b", np.nan], rename=True, inplace=True)
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0]))
c[1] = np.nan
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 2, -1, 0]))
# Remove null categories (GH 10156)
cases = [
([1.0, 2.0, np.nan], [1.0, 2.0]),
(['a', 'b', None], ['a', 'b']),
([pd.Timestamp('2012-05-01'), pd.NaT],
[pd.Timestamp('2012-05-01')])
]
null_values = [np.nan, None, pd.NaT]
for with_null, without in cases:
with tm.assert_produces_warning(FutureWarning):
base = Categorical([], with_null)
expected = Categorical([], without)
for nullval in null_values:
result = base.remove_categories(nullval)
self.assert_categorical_equal(result, expected)
# Different null values are indistinguishable
for i, j in [(0, 1), (0, 2), (1, 2)]:
nulls = [null_values[i], null_values[j]]
def f():
with tm.assert_produces_warning(FutureWarning):
Categorical([], categories=nulls)
self.assertRaises(ValueError, f)
def test_isnull(self):
exp = np.array([False, False, True])
c = Categorical(["a", "b", np.nan])
res = c.isnull()
self.assert_numpy_array_equal(res, exp)
with tm.assert_produces_warning(FutureWarning):
c = Categorical(["a", "b", np.nan], categories=["a", "b", np.nan])
res = c.isnull()
self.assert_numpy_array_equal(res, exp)
# test both nan in categories and as -1
exp = np.array([True, False, True])
c = Categorical(["a", "b", np.nan])
with tm.assert_produces_warning(FutureWarning):
c.set_categories(["a", "b", np.nan], rename=True, inplace=True)
c[0] = np.nan
res = c.isnull()
self.assert_numpy_array_equal(res, exp)
def test_codes_immutable(self):
# Codes should be read only
c = Categorical(["a", "b", "c", "a", np.nan])
exp = np.array([0, 1, 2, 0, -1], dtype='int8')
self.assert_numpy_array_equal(c.codes, exp)
# Assignments to codes should raise
def f():
c.codes = np.array([0, 1, 2, 0, 1], dtype='int8')
self.assertRaises(ValueError, f)
# changes in the codes array should raise
# np 1.6.1 raises RuntimeError rather than ValueError
codes = c.codes
def f():
codes[4] = 1
self.assertRaises(ValueError, f)
# But even after getting the codes, the original array should still be
# writeable!
c[4] = "a"
exp = np.array([0, 1, 2, 0, 0], dtype='int8')
self.assert_numpy_array_equal(c.codes, exp)
c._codes[4] = 2
exp = np.array([0, 1, 2, 0, 2], dtype='int8')
self.assert_numpy_array_equal(c.codes, exp)
def test_min_max(self):
# unordered cats have no min/max
cat = Categorical(["a", "b", "c", "d"], ordered=False)
self.assertRaises(TypeError, lambda: cat.min())
self.assertRaises(TypeError, lambda: cat.max())
cat = Categorical(["a", "b", "c", "d"], ordered=True)
_min = cat.min()
_max = cat.max()
self.assertEqual(_min, "a")
self.assertEqual(_max, "d")
cat = Categorical(["a", "b", "c", "d"],
categories=['d', 'c', 'b', 'a'], ordered=True)
_min = cat.min()
_max = cat.max()
self.assertEqual(_min, "d")
self.assertEqual(_max, "a")
cat = Categorical([np.nan, "b", "c", np.nan],
categories=['d', 'c', 'b', 'a'], ordered=True)
_min = cat.min()
_max = cat.max()
self.assertTrue(np.isnan(_min))
self.assertEqual(_max, "b")
_min = cat.min(numeric_only=True)
self.assertEqual(_min, "c")
_max = cat.max(numeric_only=True)
self.assertEqual(_max, "b")
cat = Categorical([np.nan, 1, 2, np.nan], categories=[5, 4, 3, 2, 1],
ordered=True)
_min = cat.min()
_max = cat.max()
self.assertTrue(np.isnan(_min))
self.assertEqual(_max, 1)
_min = cat.min(numeric_only=True)
self.assertEqual(_min, 2)
_max = cat.max(numeric_only=True)
self.assertEqual(_max, 1)
def test_unique(self):
# categories are reordered based on value when ordered=False
        cat = Categorical(["a", "b"])  # api: pandas.Categorical
import json
from datetime import timedelta, datetime
import calendar
import pandas as pd
import urllib.request
from tinydb import TinyDB, Query
class IEXTrading(object):
def __init__(self):
pass
def get_earnings_today(self):
url = "https://api.iextrading.com/1.0/stock/market/today-earnings"
print(url)
with urllib.request.urlopen(url) as response:
content = response.read()
# resp, content = self.client.request(url, "GET")
# print(content)
data = json.loads(content.decode('utf-8'))
return data
def get_quote_daily(self, symbol, bars=22, before_date=None):
if before_date:
delta = (datetime.now().date() - before_date)
day_diff = delta.days
if day_diff > 0:
bars = bars+day_diff
if bars <= 20:
query_length = '1m'
elif bars <= 60:
query_length = '3m'
elif bars <= 120:
query_length = '6m'
elif bars <= 240:
query_length = '1y'
elif bars <= 480:
query_length = '2y'
else:
query_length = '5y'
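        # e.g. a request for bars=22 falls in the 21-60 range above, so query_length
        # becomes '3m' (the IEX chart endpoint expects one of these fixed range strings)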
# url = "https://api.iextrading.com/1.0/stock/%s/chart/%s?chartLast=%s" % (symbol.lower(), query_length, bars)
url = "https://api.iextrading.com/1.0/stock/%s/chart/%s" % (symbol.lower(), query_length)
print(url)
bars_df = None
with urllib.request.urlopen(url) as response:
content = response.read()
# resp, content = self.client.request(url, "GET")
# print(content)
data = json.loads(content.decode('utf-8'))
quotes = data
# print(quotes)
print(len(quotes))
for quote_data in quotes:
quote_date = datetime.strptime(quote_data['date'], "%Y-%m-%d")
if before_date and quote_date.date() >= before_date:
break
                bar = pd.DataFrame(index=pd.DatetimeIndex([quote_date]))  # api: pandas.DatetimeIndex
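# Hedged usage sketch: assuming the truncated get_quote_daily() above goes on to
# assemble the per-day bars into bars_df and return it, the class could be
# exercised roughly like this (the symbol and bar count below are illustrative only):
# iex = IEXTrading()
# todays_earnings = iex.get_earnings_today()
# daily_bars = iex.get_quote_daily('AAPL', bars=22)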
#%%
import os
import sys
try:
os.chdir('/Volumes/GoogleDrive/My Drive/python_code/connectome_tools/')
print(os.getcwd())
except:
pass
from pymaid_creds import url, name, password, token
import pymaid
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
# allows text to be editable in Illustrator
plt.rcParams['pdf.fonttype'] = 42
plt.rcParams['ps.fonttype'] = 42
rm = pymaid.CatmaidInstance(url, token, name, password)
adj = pd.read_csv('VNC_interaction/data/axon-dendrite.csv', header = 0, index_col = 0)
inputs = pd.read_csv('VNC_interaction/data/input_counts.csv', index_col = 0)
inputs = pd.DataFrame(inputs.values, index = inputs.index, columns = ['axon_input', 'dendrite_input'])
pairs = pd.read_csv('VNC_interaction/data/pairs-2020-10-26.csv', header = 0) # import pairs
# %%
from connectome_tools.process_matrix import Adjacency_matrix, Promat
from datetime import date
VNC_adj = Adjacency_matrix(adj.values, adj.index, pairs, inputs,'axo-dendritic')
#test.adj_inter.loc[(slice(None), slice(None), KC), (slice(None), slice(None), MBON)]
dVNC = pymaid.get_skids_by_annotation('mw dVNC')
A1 = pymaid.get_skids_by_annotation('mw A1 neurons paired')
A1_acess = pymaid.get_skids_by_annotation('mw A1 accessory neurons')
A1 = A1 + A1_acess
A1_MN = pymaid.get_skids_by_annotation('mw A1 MN')
A1_proprio = pymaid.get_skids_by_annotation('mw A1 proprio')
A1_chordotonal = pymaid.get_skids_by_annotation('mw A1 chordotonals')
A1_noci = pymaid.get_skids_by_annotation('mw A1 noci')
A1_classII_III = pymaid.get_skids_by_annotation('mw A1 somato')
A1_external = pymaid.get_skids_by_annotation('mw A1 external sensories')
# A1 vtd doesn't make
# %%
from connectome_tools.cascade_analysis import Celltype_Analyzer, Celltype
# VNC layering with respect to sensories or motorneurons
threshold = 0.01
######
# Modify this section if new layering groups need to be added
######
# manual add neuron groups of interest here and names
names = ['us-MN', 'ds-Proprio', 'ds-Noci', 'ds-Chord', 'ds-ClassII_III', 'ds-ES']
general_names = ['pre-MN', 'Proprio', 'Noci', 'Chord', 'ClassII_III', 'ES']
all_source = A1_MN + A1_proprio + A1_chordotonal + A1_noci + A1_classII_III + A1_external
min_members = 4
# manually determine upstream or downstream relation
us_A1_MN = VNC_adj.upstream_multihop(A1_MN, threshold, min_members=min_members, exclude = all_source)
ds_proprio = VNC_adj.downstream_multihop(A1_proprio, threshold, min_members=min_members, exclude = all_source)
ds_chord = VNC_adj.downstream_multihop(A1_chordotonal, threshold, min_members=min_members, exclude = all_source)
ds_noci = VNC_adj.downstream_multihop(A1_noci, threshold, min_members=min_members, exclude = all_source)
ds_classII_III = VNC_adj.downstream_multihop(A1_classII_III, threshold, min_members=min_members, exclude = all_source)
ds_external = VNC_adj.downstream_multihop(A1_external, threshold, min_members=min_members, exclude = all_source)
VNC_layers = [us_A1_MN, ds_proprio, ds_noci, ds_chord, ds_classII_III, ds_external]
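# upstream_multihop()/downstream_multihop() appear to return one list of skids per hop
# (hop 1 = direct partners over threshold, hop 2 = their partners, and so on); this is
# inferred from how VNC_layers is indexed below, so treat it as an assumption.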
cat_order = ['pre-MN', 'Proprio', 'Noci', 'Chord', 'ClassII_III', 'ES']
########
########
# how many neurons are included in layering?
all_included = [x for sublist in VNC_layers for subsublist in sublist for x in subsublist]
frac_included = len(np.intersect1d(A1, all_included))/len(A1)
print(f'Fraction VNC cells covered = {frac_included}')
# how similar are layers
celltypes = []
for ct_i, celltype in enumerate(VNC_layers):
ct = [Celltype(f'{names[ct_i]}-{i}', layer) for i, layer in enumerate(celltype)]
celltypes = celltypes + ct
VNC_analyzer = Celltype_Analyzer(celltypes)
fig, axs = plt.subplots(1, 1, figsize = (10, 10))
sns.heatmap(VNC_analyzer.compare_membership(), square = True, ax = axs)
plt.savefig(f'VNC_interaction/plots/Threshold-{threshold}_similarity_between_VNC_layers.pdf', bbox_inches='tight')
# %%
# number of VNC neurons per layer
VNC_layers = [[A1_MN] + us_A1_MN, [A1_proprio] + ds_proprio, [A1_noci] + ds_noci, [A1_chordotonal] + ds_chord, [A1_classII_III] + ds_classII_III, [A1_external] + ds_external]
all_layers, all_layers_skids = VNC_adj.layer_id(VNC_layers, general_names, A1)
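# layer_id() appears to return (counts, skids): a table counting how many of the queried
# skids fall into each multihop layer, plus the matching skid lists; inferred from usage only.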
fig, axs = plt.subplots(
1, 1, figsize = (2.5, 3)
)
ax = axs
sns.heatmap(all_layers.T, annot=True, fmt='.0f', cmap = 'Greens', cbar = False, ax = axs)
ax.set_title(f'A1 Neurons; {frac_included*100:.0f}% included')
plt.savefig(f'VNC_interaction/plots/Threshold-{threshold}_VNC_layers.pdf', bbox_inches='tight')
# %%
# where are ascendings in layering?
A1_ascending = pymaid.get_skids_by_annotation('mw A1 neurons paired ascending')
A00c = pymaid.get_skids_by_annotation('mw A00c')
A1_ascending = A1_ascending + A00c #include A00c's as ascending (they are not in A1, but in A4/5/6 and so have different annotations)
ascendings_layers, ascendings_layers_skids = VNC_adj.layer_id(VNC_layers, general_names, A1_ascending)
fig, axs = plt.subplots(
1, 1, figsize = (2.5, 3)
)
ax = axs
sns.heatmap(ascendings_layers.T, annot=True, fmt='.0f', cmap = 'Blues', cbar = False, ax = axs)
ax.set_title('Ascending Neurons')
plt.savefig(f'VNC_interaction/plots/Threshold-{threshold}_ascending_neuron_layers.pdf', bbox_inches='tight')
# %%
# number of neurons downstream of dVNC at each VNC layer
source_dVNC, ds_dVNC = VNC_adj.downstream(source=dVNC, threshold=threshold, exclude=dVNC)
edges, ds_dVNC = VNC_adj.edge_threshold(source_dVNC, ds_dVNC, threshold, direction='downstream')
ds_dVNC_layers, ds_dVNC_layers_skids = VNC_adj.layer_id(VNC_layers, general_names, ds_dVNC)
fig, axs = plt.subplots(
1, 1, figsize = (2.5, 3)
)
ax = axs
sns.heatmap(ds_dVNC_layers.T, cbar_kws={'label': 'Number of Neurons'}, annot = True, cmap = 'Reds', cbar = False, ax = ax)
ax.set_title('Downstream of dVNCs')
plt.savefig(f'VNC_interaction/plots/Threshold-{threshold}_dVNC_downstream_targets.pdf', bbox_inches='tight')
# %%
# location of special-case neurons in VNC layering
# gorogoro, basins, A00cs
gorogoro = pymaid.get_skids_by_annotation('gorogoro')
basins = pymaid.get_skids_by_annotation('a1basins')
A00c_layers, A00c_skids = VNC_adj.layer_id(VNC_layers, general_names, A00c)
gorogoro_layers, gorogoro_skids = VNC_adj.layer_id(VNC_layers, general_names, gorogoro)
basins_layers, basins_skids = VNC_adj.layer_id(VNC_layers, general_names, basins)
fig, axs = plt.subplots(
1, 1, figsize = (2.5, 3)
)
ax = axs
sns.heatmap(A00c_layers.T, cbar_kws={'label': 'Number of Neurons'}, annot = True, cmap = 'Purples', cbar = False, ax = ax)
ax.set_title('A00c location')
plt.savefig(f'VNC_interaction/plots/Threshold-{threshold}_VNC_layers_A00c_location.pdf', bbox_inches='tight')
fig, axs = plt.subplots(
1, 1, figsize = (2.5, 3)
)
ax = axs
sns.heatmap(gorogoro_layers.T, cbar_kws={'label': 'Number of Neurons'}, annot = True, cmap = 'Purples', cbar = False, ax = ax)
ax.set_title('gorogoro location')
plt.savefig(f'VNC_interaction/plots/Threshold-{threshold}_VNC_layers_gorogoro_location.pdf', bbox_inches='tight')
fig, axs = plt.subplots(
1, 1, figsize = (2.5, 3)
)
ax = axs
sns.heatmap(basins_layers.T, cbar_kws={'label': 'Number of Neurons'}, annot = True, cmap = 'Purples', cbar = False, ax = ax)
ax.set_title('basins location')
plt.savefig(f'VNC_interaction/plots/Threshold-{threshold}_VNC_layers_basins_location.pdf', bbox_inches='tight')
# same neurons but checking if they are directly downstream of dVNCs
dsdVNC_A00c_layers, dsdVNC_A00c_skids = VNC_adj.layer_id(ds_dVNC_layers_skids.T.values, general_names, A00c)
dsdVNC_gorogoro_layers, dsdVNC_gorogoro_skids = VNC_adj.layer_id(ds_dVNC_layers_skids.T.values, general_names, gorogoro)
dsdVNC_basins_layers, dsdVNC_basins_skids = VNC_adj.layer_id(ds_dVNC_layers_skids.T.values, general_names, basins)
fig, axs = plt.subplots(
1, 1, figsize = (2.5, 3)
)
ax = axs
sns.heatmap(dsdVNC_A00c_layers.T, cbar_kws={'label': 'Number of Neurons'}, annot = True, cmap = 'Reds', cbar = False, ax = ax)
ax.set_title('A00c location - ds-dVNCs')
fig, axs = plt.subplots(
1, 1, figsize = (2.5, 3)
)
ax = axs
sns.heatmap(dsdVNC_gorogoro_layers.T, cbar_kws={'label': 'Number of Neurons'}, annot = True, cmap = 'Reds', cbar = False, ax = ax)
ax.set_title('gorogoro location - ds-dVNCs')
fig, axs = plt.subplots(
1, 1, figsize = (2.5, 3)
)
ax = axs
sns.heatmap(dsdVNC_basins_layers.T, cbar_kws={'label': 'Number of Neurons'}, annot = True, cmap = 'Reds', cbar = False, ax = ax)
ax.set_title('basins location - ds-dVNCs')
# conclusion - basins/gorogoro/A00c's don't receive direct dVNC input
# %%
# plot A1 structure together
plt.rcParams['font.size'] = 5
fig, axs = plt.subplots(
1, 3, figsize = (3.25, 1.5)
)
ax = axs[0]
sns.heatmap(all_layers.T.loc[:, cat_order], cbar_kws={'label': 'Number of Neurons'}, annot = True, fmt='.0f', cmap = 'Greens', cbar = False, ax = ax)
ax.set_title('A1 Neurons')
ax = axs[1]
sns.heatmap(ascendings_layers.T.loc[:, cat_order], cbar_kws={'label': 'Number of Neurons'}, annot = True, cmap = 'Blues', cbar = False, ax = ax)
ax.set_title('Ascendings')
ax.set_yticks([])
ax = axs[2]
sns.heatmap(ds_dVNC_layers.T.loc[:, cat_order], cbar_kws={'label': 'Number of Neurons'}, annot = True, cmap = 'Reds', cbar = False, ax = ax)
ax.set_title('ds-dVNCs')
ax.set_yticks([])
plt.savefig(f'VNC_interaction/plots/Threshold-{threshold}_A1_structure.pdf', bbox_inches='tight')
plt.rcParams['font.size'] = 6
# %%
# upset plot of VNC types (MN, Proprio, etc.) with certain number of hops (hops_included)
from upsetplot import plot
from upsetplot import from_contents
from upsetplot import from_memberships
hops_included = 2
VNC_types=[]
for celltype in VNC_layers:
VNC_type = [x for layer in celltype[0:hops_included+1] for x in layer]
VNC_types.append(VNC_type)
data = [x for cell_type in VNC_types for x in cell_type]
data = np.unique(data)
cats_simple = []
for skid in data:
cat = []
for i in range(0, len(general_names)):
if(skid in VNC_types[i]):
cat = cat + [f'{general_names[i]}']
cats_simple.append(cat)
VNC_types_df = from_memberships(cats_simple, data = data)
counts = []
for celltype in np.unique(cats_simple):
count = 0
for cat in cats_simple:
if(celltype == cat):
count += 1
counts.append(count)
# how many neurons belong to a category with X hops_included
coverage = []
for celltype in VNC_layers:
celltype_list = [x for sublist in celltype[0:hops_included+1] for x in sublist]
coverage = coverage + celltype_list
coverage = np.unique(coverage)
# threshold small categories (<=4 neurons) to simplify the plot
upset_thres = [x>4 for x in counts]
cats_simple_cut = [x for i, x in enumerate(np.unique(cats_simple)) if upset_thres[i]]
counts_cut = [x for i, x in enumerate(counts) if upset_thres[i]]
# set up the data variable
upset = from_memberships(np.unique(cats_simple_cut), data = counts_cut)
upset.index = upset.index.reorder_levels(cat_order) # order categories
plot(upset, sort_categories_by = None)
plt.title(f'{len(np.intersect1d(A1, coverage))/len(A1)*100:.2f}% of A1 neurons covered')
plt.savefig(f'VNC_interaction/plots/Threshold-{threshold}_VNC-signal-type_hops-{hops_included}.pdf', bbox_inches='tight')
# %%
# upset plot of VNC types including layers (MN-0, -1, -2, Proprio-0, -1, 2, Somat-0, -1, -2, etc.)
VNC_layers_nostart = [us_A1_MN, ds_proprio, ds_chord, ds_noci, ds_classII_III, ds_external]
VNC_type_layers = [x for sublist in VNC_layers_nostart for x in sublist]
VNC_type_layer_names = [x.name for x in celltypes]
data = [x for cell_type in VNC_type_layers for x in cell_type]
data = np.unique(data)
cats_complex = []
for skid in data:
cat = []
for i, layer in enumerate(VNC_type_layers):
if(skid in layer):
cat = cat + [VNC_type_layer_names[i]]
cats_complex.append(cat)
VNC_type_layers_df = from_memberships(cats_complex, data = data)
counts = []
for celltype in np.unique(cats_complex):
count = 0
for cat in cats_complex:
if(celltype == cat):
count += 1
counts.append(count)
upset_complex = from_memberships(np.unique(cats_complex), data = counts)
plot(upset_complex, sort_categories_by = None)
plt.savefig(f'VNC_interaction/plots/Threshold-{threshold}_VNC_layer_signal_type.pdf', bbox_inches='tight')
# %%
# supplementary plot with exclusive MN, proprio, and Somato types
from upsetplot import UpSet
def upset_subset(upset_types_layers, upset_names, column_layer, layer_structure_name, column_name, height, width_col, color, cat_order):
# column_layer example = all_layers.T.{column_name}
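    # parameter sketch, inferred from the call sites below (treat as an assumption):
    #   upset_types_layers - list of layer-count DataFrames, one per UpSet category permutation
    #   upset_names        - category-name lists matching upset_types_layers
    #   column_layer       - one column of the baseline layering table (e.g. all_layers.T[column_name])
    #   layer_structure_name, column_name - strings used only for titles and output filenames
    #   height, width_col  - per-cell figure scaling; color - seaborn colormap name
    #   cat_order          - desired ordering of the UpSet categories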
# added for some edge cases
if(sum(column_layer)==0):
return()
df = pd.DataFrame()
df[f'{column_name}'] = column_layer # add first column of layering information for baseline, has nothing to do with UpSet data
for i, types in enumerate(upset_types_layers):
df[f'{upset_names[i]}'] = types.loc[:, f'{column_name}']
# plot types
data = df.iloc[1:len(df), :]
nonzero_cols = data.sum(axis=0)!=0
data_cleaned = data.loc[:, nonzero_cols]
data_cleaned_summed = data_cleaned.sum(axis=1)
signal = []
for i in reversed(range(0, len(data_cleaned_summed))):
if(data_cleaned_summed.iloc[i]>0):
signal=i+1
break
data_cleaned = data_cleaned.iloc[0:signal, :]
mask = np.full((len(data_cleaned.index),len(data_cleaned.columns)), True, dtype=bool)
mask[:, 0] = [False]*len(data_cleaned.index)
fig, axs = plt.subplots(
1, 1, figsize=(width_col*len(data_cleaned.columns), height*len(data_cleaned.index))
)
annotations = data_cleaned.astype(int).astype(str)
annotations[annotations=='0']=''
sns.heatmap(data_cleaned, annot = annotations, fmt = 's', mask = mask, ax=axs, cmap = color, cbar = False)
sns.heatmap(data_cleaned, annot = annotations, fmt = 's', mask = np.invert(mask), ax=axs, cbar = False)
plt.savefig(f'VNC_interaction/plots/supplemental/Supplemental_{layer_structure_name}_{column_name}.pdf', bbox_inches='tight')
#Upset plot
nonzero_permut = [upset_names[i] for i, boolean in enumerate(nonzero_cols[1:]) if boolean==True]
nonzero_counts = data_cleaned.sum(axis=0)[1:]
permut_types_df = from_memberships(nonzero_permut, nonzero_counts)
cat_order = [cat_order[i] for i, x in enumerate([x in permut_types_df.index.names for x in cat_order]) if x==True] # remove any cat_order names if missing; added for edge cases
permut_types_df.index = permut_types_df.index.reorder_levels(cat_order)
plot(permut_types_df, sort_categories_by = None)
plt.savefig(f'VNC_interaction/plots/supplemental/Supplemental_{layer_structure_name}_{column_name}_Upset.pdf', bbox_inches='tight')
# all permutations when considering hops_included UpSet plot
permut = UpSet(upset).intersections.index
# names of these permutations
upset_names = []
for sublist in permut:
upset_names.append([permut.names[i] for i, boolean in enumerate(sublist) if boolean==True ])
# reorder multiindex of VNC_types_df according to permut
VNC_types_df = VNC_types_df.reorder_levels(permut.names)
# skids for each permutation
upset_skids = [VNC_types_df.loc[x] for x in permut]
# all VNC layers
upset_types_layers = []
upset_types_skids = []
for skids in upset_skids:
count_layers, layer_skids = VNC_adj.layer_id(all_layers_skids.T.values, general_names, skids.values)
upset_types_layers.append(count_layers.T)
upset_types_skids.append(layer_skids)
for layer_type in all_layers.T.columns:
upset_subset(upset_types_layers, upset_names, all_layers.T.loc[:, layer_type], 'VNC_layers', layer_type, 0.2, 0.2, 'Greens', cat_order)
# ascending locations
upset_types_layers = []
upset_types_skids = []
for skids in upset_skids:
count_layers, layer_skids = VNC_adj.layer_id(ascendings_layers_skids.T.values, general_names, skids.values)
upset_types_layers.append(count_layers.T)
upset_types_skids.append(layer_skids)
for layer_type in ascendings_layers.T.columns:
upset_subset(upset_types_layers, upset_names, ascendings_layers.T.loc[:, layer_type], 'Ascendings_layers', layer_type, 0.2, 0.2, 'Blues', cat_order)
# ds-dVNC_layers
upset_types_layers = []
upset_types_skids = []
for skids in upset_skids:
count_layers, layer_skids = VNC_adj.layer_id(ds_dVNC_layers_skids.T.values, general_names, skids.values)
upset_types_layers.append(count_layers.T)
upset_types_skids.append(layer_skids)
for layer_type in ds_dVNC_layers.T.columns:
upset_subset(upset_types_layers, upset_names, ds_dVNC_layers.T.loc[:, layer_type], 'ds-dVNCs_layers', layer_type, 0.2, 0.2, 'Reds', cat_order)
# %%
# locations of basins/goro/A00c
# not working in this version of the script for some reason
# basin locations
upset_types_layers = []
upset_types_skids = []
for skids in upset_skids:
count_layers, layer_skids = VNC_adj.layer_id(basins_skids.T.values, general_names, skids.values)
upset_types_layers.append(count_layers.T)
upset_types_skids.append(layer_skids)
for layer_type in basins_layers.T.columns:
    upset_subset(upset_types_layers, upset_names, basins_layers.T.loc[:, layer_type], 'basin_layers', layer_type, 0.2, 0.2, 'Greens', cat_order)
# gorogoro locations
upset_types_layers = []
upset_types_skids = []
for skids in upset_skids:
count_layers, layer_skids = VNC_adj.layer_id(gorogoro_skids.T.values, general_names, skids.values)
upset_types_layers.append(count_layers.T)
upset_types_skids.append(layer_skids)
for layer_type in gorogoro_layers.T.columns:
    upset_subset(upset_types_layers, upset_names, gorogoro_layers.T.loc[:, layer_type], 'goro_layers', layer_type, 0.2, 0.2, 'Greens', cat_order)
'''
# A00c locations
upset_types_layers = []
upset_types_skids = []
for skids in upset_skids:
count_layers, layer_skids = VNC_adj.layer_id(A00c_skids.T.values, general_names, skids.values)
upset_types_layers.append(count_layers.T)
upset_types_skids.append(layer_skids)
for layer_type in A00c_layers.T.columns:
upset_subset(upset_types_layers, upset_names, ascendings_layers.T.loc[:, layer_type], 'A00c_layers', layer_type, 0.2, 0.2, 'Blues', cat_order)
'''
# %%
# identities of ascending neurons
# further develop this to identify next hit on "unknown" ascendings
from itertools import compress
from tqdm import tqdm
# ascending identities using 2 hops from sensory/motorneurons
# no difference between 1st-order and 2nd-order
ascending_pairs = Promat.extract_pairs_from_list(A1_ascending, pairs)[0]
VNC_types_df = VNC_types_df.reorder_levels(general_names)
ascending_types = [VNC_types_df.index[VNC_types_df==x] for x in ascending_pairs.leftid]
col = []
for types in ascending_types:
if(len(types)==0):
col.append('Unknown')
if(len(types)>0):
bool_types = [x for sublist in types for x in sublist]
col.append(list(compress(general_names, bool_types)))
ascending_pairs['type'] = col
# multiple-hop matrix of A1 sensories to A1 ascendings
MN_pairs = Promat.extract_pairs_from_list(A1_MN, pairs)[0]
proprio_pairs = Promat.extract_pairs_from_list(A1_proprio, pairs)[0]
chord_pairs = Promat.extract_pairs_from_list(A1_chordotonal, pairs)[0]
noci_pairs = Promat.extract_pairs_from_list(A1_noci, pairs)[0]
classII_III_pairs = Promat.extract_pairs_from_list(A1_classII_III, pairs)[0]
external_pairs = Promat.extract_pairs_from_list(A1_external, pairs)[0]
sens_pairs = pd.concat([proprio_pairs, noci_pairs, chord_pairs, classII_III_pairs, external_pairs])
sens_pairs.index = range(0, len(sens_pairs))
# determining hops from each sensory modality for each ascending neuron (using all hops)
# sensory modalities generally
sens_paths = VNC_layers
ascending_layers,ascending_skids = VNC_adj.layer_id(sens_paths, general_names, A1_ascending)
sens_asc_mat, sens_asc_mat_plotting = VNC_adj.hop_matrix(ascending_skids.T, general_names, ascending_pairs.leftid, include_start=True)
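# hop_matrix() appears to return a (modality x ascending-pair) table of hop counts plus a
# companion version used for plotting; described from how both are used below, so treat
# the exact semantics as an assumption.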
# hops from each modality
sens_asc_mat.T
# hops from each modality, threshold = 2
hops = 2
sens_asc_mat_thresh = sens_asc_mat.T.copy()
sens_asc_mat_thresh[sens_asc_mat_thresh>hops]=0
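# entries more than 2 hops away are zeroed, so only direct and second-order sensory input
# counts toward an ascending neuron's assigned identity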
sens_asc_mat_thresh
# sorting ascendings by type
proprio_order1 = list(sens_asc_mat_thresh.index[sens_asc_mat_thresh.Proprio==1])
chord_order1_2 = list(sens_asc_mat_thresh.index[sens_asc_mat_thresh.Chord==1]) + list(sens_asc_mat_thresh.index[(sens_asc_mat_thresh.Chord==2) & (sens_asc_mat_thresh.Noci==0)])
classII_III_order1 = list(sens_asc_mat_thresh.index[sens_asc_mat_thresh.ClassII_III==1])
noci_order2 = list(sens_asc_mat_thresh.index[sens_asc_mat_thresh.Noci==2])
unknown = list(sens_asc_mat_thresh.index[(sens_asc_mat_thresh!=0).sum(axis=1)==0])
# manual reordering based on secondary sensory partners
proprio_order1 = [proprio_order1[i] for i in [1,2,0]]
#chord_order1_2 = [chord_order1_2[i] for i in [1,2,0,5,3,4]] #lost 11455472
noci_order2 = [noci_order2[i] for i in [3,1,2,0]]
unknown = [unknown[i] for i in [2, 3, 0, 1, 4, 5, 6]]
sens_asc_order = proprio_order1 + chord_order1_2 + classII_III_order1 + noci_order2 + unknown
annotations = sens_asc_mat.loc[:, sens_asc_order].astype(int).astype(str)
annotations[annotations=='0']=''
fig, ax = plt.subplots(1,1,figsize=(1.75,1))
sens_asc_mat_plotting_2 = sens_asc_mat_plotting.copy()
sens_asc_mat_plotting_2 = sens_asc_mat_plotting_2.loc[:, sens_asc_order]
sens_asc_mat_plotting_2[sens_asc_mat_plotting_2<7] = 0
sens_asc_mat_plotting_2[sens_asc_mat_plotting_2.loc[:, proprio_order1]<8] = 0
sns.heatmap(sens_asc_mat_plotting_2, annot=annotations, fmt = 's', cmap = 'Blues', ax=ax, cbar = False)
plt.xticks(range(len(sens_asc_mat_plotting_2.columns)), sens_asc_mat_plotting_2.columns, ha='left')
plt.setp(ax.get_xticklabels(), fontsize=4)
ax.tick_params(left=False, bottom=False, length=0)
plt.savefig(f'VNC_interaction/plots/individual_asc_paths/Supplemental_ascending_identity_matrix.pdf', bbox_inches='tight')
# export raw data using ascending type sorting
sens_asc_mat_thresh.T.loc[:, sens_asc_order].to_csv(f'VNC_interaction/plots/individual_asc_paths/ascending_identity_{hops}-hops.csv')
sens_asc_mat.loc[:, sens_asc_order].to_csv(f'VNC_interaction/plots/individual_asc_paths/ascending_identity_all-hops.csv')
# %%
#UpSet based on first two hops from each sensory modality
#doesn't do it based on a pairwise measure?
#**** probably needs more work****
hops_included = 2
celltypes_2o = []
for ct_i, celltype in enumerate(VNC_layers_nostart):
ct = [Celltype(f'{names[ct_i]}-{i+1}', layer) for i, layer in enumerate(celltype) if i<2]
celltypes_2o = celltypes_2o + ct
celltypes_2o_layers = [x.get_skids() for x in celltypes_2o]
celltypes_2o_names = [x.name for x in celltypes_2o]
data = [x for cell_type in celltypes_2o_layers for x in cell_type]
data = np.unique(data)
cats_2o = []
for skid in data:
cat = []
for i, layer in enumerate(celltypes_2o_layers):
if(skid in layer):
cat = cat + [celltypes_2o_names[i]]
cats_2o.append(cat)
celltypes_2o_df = from_memberships(cats_2o, data = data)
counts = []
for celltype in np.unique(cats_2o):
count = 0
for cat in cats_2o:
if(celltype == cat):
count += 1
counts.append(count)
upset_2o = from_memberships(np.unique(cats_2o), data = counts)
plot(upset_2o, sort_categories_by = None)
#plt.savefig(f'VNC_interaction/plots/Threshold-{threshold}_ascendings_signal_type.pdf', bbox_inches='tight')
# UpSet based on first two hops, ascendings only
data = A1_ascending
cats_2o_asc = []
for skid in data:
cat = []
for i, layer in enumerate(celltypes_2o_layers):
if(skid in layer):
cat = cat + [celltypes_2o_names[i]]
cats_2o_asc.append(cat)
celltypes_2o_asc_df = from_memberships(cats_2o_asc, data = data)
counts = []
for celltype in np.unique(cats_2o_asc):
count = 0
for cat in cats_2o:
if(celltype == cat):
count += 1
counts.append(count)
upset_2o_asc = from_memberships(np.unique(cats_2o_asc), data = counts)
plot(upset_2o_asc, sort_categories_by = None)
# %%
# pathways downstream of each dVNC pair
# with detailed VNC layering types
from tqdm import tqdm
source_dVNC, ds_dVNC = VNC_adj.downstream(dVNC, threshold, exclude=dVNC)
edges, ds_dVNC_cleaned = VNC_adj.edge_threshold(source_dVNC, ds_dVNC, threshold, direction='downstream')
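# edge_threshold() appears to return the candidate edge table (with an 'overthres' flag for
# edges passing the pairwise threshold) together with the surviving downstream skids;
# inferred from the filtering on edges.overthres below.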
edges[edges.overthres==True]
source_dVNC_cleaned = np.unique(edges[edges.overthres==True].upstream_pair_id)
source_dVNC_pairs = VNC_adj.adj_inter.loc[(slice(None), source_dVNC_cleaned), :].index
source_dVNC_pairs = [x[2] for x in source_dVNC_pairs]
source_dVNC_pairs = Promat.extract_pairs_from_list(source_dVNC_pairs, pairs)[0]
pair_paths = []
for index in tqdm(range(0, len(source_dVNC_pairs))):
ds_dVNC = VNC_adj.downstream_multihop(list(source_dVNC_pairs.loc[index]), threshold, min_members = 0, hops=5)
pair_paths.append(ds_dVNC)
# determine which neurons are only in one pathway
VNC_types_index = pd.DataFrame([x for x in VNC_types_df.index], index = VNC_types_df.values, columns = VNC_types_df.index.names)
sensory_type = list(VNC_types_index[(VNC_types_index['pre-MN'] == False)].index)
motor_sens_MN = list(np.intersect1d(sensory_type, A1_MN))
motor_MN = list(np.setdiff1d(A1_MN, motor_sens_MN))
sensory_type = np.setdiff1d(sensory_type, A1_MN)
mixed_type = motor_sens_MN + list(VNC_types_index[(VNC_types_index['pre-MN'] == True) & (VNC_types_index.loc[:, VNC_types_index.columns != 'pre-MN'].sum(axis=1) > 0)].index)
motor_type = motor_MN + list(VNC_types_index[(VNC_types_index['pre-MN'] == True) & (VNC_types_index.loc[:, VNC_types_index.columns != 'pre-MN'].sum(axis=1) == 0)].index)
# types of neurons
motor_type_layers,_ = VNC_adj.layer_id(pair_paths, source_dVNC_pairs.leftid, motor_type)
motor_type_layers_ascend,_ = VNC_adj.layer_id(pair_paths, source_dVNC_pairs.leftid, np.intersect1d(motor_type, A1_ascending))
motor_type_layers_MN,_ = VNC_adj.layer_id(pair_paths, source_dVNC_pairs.leftid, np.intersect1d(motor_type, A1_MN))
sensory_type_layers,_ = VNC_adj.layer_id(pair_paths, source_dVNC_pairs.leftid, sensory_type)
sensory_type_layers_ascend,_ = VNC_adj.layer_id(pair_paths, source_dVNC_pairs.leftid, np.intersect1d(sensory_type, A1_ascending))
sensory_type_layers_MN,_ = VNC_adj.layer_id(pair_paths, source_dVNC_pairs.leftid, np.intersect1d(sensory_type, A1_MN))
mixed_type_layers,_ = VNC_adj.layer_id(pair_paths, source_dVNC_pairs.leftid, mixed_type)
mixed_type_layers_ascend,_ = VNC_adj.layer_id(pair_paths, source_dVNC_pairs.leftid, np.intersect1d(mixed_type, A1_ascending))
mixed_type_layers_MN,_ = VNC_adj.layer_id(pair_paths, source_dVNC_pairs.leftid, np.intersect1d(mixed_type, A1_MN))
fig, axs = plt.subplots(
3, 3, figsize=(5, 7)
)
vmax = 4
ax = axs[0,0]
annotations = motor_type_layers.astype(int).astype(str)
annotations[annotations=='0']=''
sns.heatmap(motor_type_layers, annot = annotations, fmt = 's', cmap = 'Reds', ax = ax, vmax = vmax, cbar = False)
ax.set_yticks([])
ax.set_xticks([])
ax.set_ylabel('Individual dVNCs Paths')
ax.set(title='Motor Pathway Exclusive')
ax = axs[0,1]
annotations = motor_type_layers_MN.astype(int).astype(str)
annotations[annotations=='0']=''
sns.heatmap(motor_type_layers_MN, annot = annotations, fmt = 's', cmap = 'Reds', ax = ax, vmax = vmax, cbar = False)
ax.set_yticks([])
ax.set_xticks([])
ax.set_ylabel('')
ax.set(title='Motor Exclusive Motorneurons')
ax = axs[0,2]
annotations = motor_type_layers_ascend.astype(int).astype(str)
annotations[annotations=='0']=''
sns.heatmap(motor_type_layers_ascend, annot = annotations, fmt = 's', cmap = 'Reds', ax = ax, vmax = vmax, cbar = False)
ax.set_yticks([])
ax.set_xticks([])
ax.set_ylabel('')
ax.set(title='Motor Exclusive Ascending')
vmax = 20
ax = axs[1,0]
annotations = sensory_type_layers.astype(int).astype(str)
annotations[annotations=='0']=''
sns.heatmap(sensory_type_layers, annot = annotations, fmt = 's', cmap = 'Blues', ax = ax, vmax = vmax, cbar = False)
ax.set_yticks([])
ax.set_xticks([])
ax.set_ylabel('Individual dVNCs Paths')
ax.set(title='VNC Sensory Pathway Exclusive')
ax = axs[1,1]
annotations = sensory_type_layers_MN.astype(int).astype(str)
annotations[annotations=='0']=''
sns.heatmap(sensory_type_layers_MN, annot = annotations, fmt = 's', cmap = 'Blues', ax = ax, vmax = vmax, cbar = False)
ax.set_yticks([])
ax.set_xticks([])
ax.set_ylabel('')
ax.set(title='VNC Sensory Motorneurons')
ax = axs[1,2]
annotations = sensory_type_layers_ascend.astype(int).astype(str)
annotations[annotations=='0']=''
sns.heatmap(sensory_type_layers_ascend, annot = annotations, fmt = 's', cmap = 'Blues', ax = ax, vmax = vmax, cbar = False)
ax.set_yticks([])
ax.set_xticks([])
ax.set_ylabel('')
ax.set(title='VNC Sensory Ascending')
vmax = 80
ax = axs[2,0]
annotations = mixed_type_layers.astype(int).astype(str)
annotations[annotations=='0']=''
sns.heatmap(mixed_type_layers, annot = annotations, fmt = 's', cmap = 'Purples', ax = ax, vmax = vmax, cbar = False)
ax.set_yticks([])
ax.set_ylabel('Individual dVNCs Paths')
ax.set(title='Mixed Pathway Neurons')
ax = axs[2,1]
annotations = mixed_type_layers_MN.astype(int).astype(str)
annotations[annotations=='0']=''
sns.heatmap(mixed_type_layers_MN, annot = annotations, fmt = 's', cmap = 'Purples', ax = ax, vmax = 60, cbar = False)
ax.set_yticks([])
ax.set_ylabel('')
ax.set(title='Mixed Pathway Motorneurons')
ax = axs[2,2]
annotations = mixed_type_layers_ascend.astype(int).astype(str)
annotations[annotations=='0']=''
sns.heatmap(mixed_type_layers_ascend, annot = annotations, fmt = 's', cmap = 'Purples', ax = ax, vmax = 20, cbar = False)
ax.set_yticks([])
ax.set_ylabel('')
ax.set(title='Mixed Pathway Ascending')
plt.savefig(f'VNC_interaction/plots/Threshold-{threshold}_individual_dVNC_paths.pdf', bbox_inches='tight')
# %%
# pathways downstream of each dVNC pair
# set up sensory categories to test against dVNC pathways
# excluded motorneurons from these categories
proprio_1o = list(np.setdiff1d(ds_proprio[0], A1_MN))
proprio_2o = list(np.setdiff1d(ds_proprio[1], A1_MN))
somato_1o = list(np.setdiff1d(ds_classII_III[0] + ds_chord[0] + ds_noci[0] + ds_external[0], A1_MN))
somato_2o = list(np.setdiff1d(ds_classII_III[1] + ds_chord[1] + ds_noci[1] + ds_external[1], A1_MN))
sens_12o = np.unique(proprio_1o + proprio_2o + somato_1o + somato_2o)
sens_1o = np.unique(proprio_1o + somato_1o)
sens_2o = np.unique(proprio_2o + somato_2o)
# check overlap between these sensory categories (just for curiosity)
A1_ct = Celltype('A1_all', A1)
proprio_1o_ct = Celltype('Proprio 1o', proprio_1o)
proprio_2o_ct = Celltype('Proprio 2o', proprio_2o)
somato_1o_ct = Celltype('Somato 1o', somato_1o)
somato_2o_ct = Celltype('Somato 2o', somato_2o)
sens_12o_ct = Celltype('All Sens 1o/2o', sens_12o)
sens_1o_ct = Celltype('All Sens 1o', sens_1o)
sens_2o_ct = Celltype('All Sens 2o', sens_2o)
cta = Celltype_Analyzer([A1_ct, sens_12o_ct, sens_1o_ct, sens_2o_ct, proprio_1o_ct, proprio_2o_ct, somato_1o_ct, somato_2o_ct])
sns.heatmap(cta.compare_membership(), annot=True)
#VNC_sens_type = list(np.setdiff1d(VNC_types_index[(VNC_types_index['pre-MN'] == False)].index, A1_MN))
#VNC_sens_layers,_ = VNC_adj.layer_id(pair_paths, source_dVNC_pairs.leftid, VNC_sens_type)
# identifying different cell types in dVNC pathways
all_simple_layers,_ = VNC_adj.layer_id(pair_paths, source_dVNC_pairs.leftid, VNC_adj.adj.index) # include all neurons to get total number of neurons per layer
motor_simple_layers,_ = VNC_adj.layer_id(pair_paths, source_dVNC_pairs.leftid, A1_MN)
ascending_simple_layers,_ = VNC_adj.layer_id(pair_paths, source_dVNC_pairs.leftid, A1_ascending)
goro_simple_layers,_ = VNC_adj.layer_id(pair_paths, source_dVNC_pairs.leftid, gorogoro)
basins_simple_layers,_ = VNC_adj.layer_id(pair_paths, source_dVNC_pairs.leftid, basins)
A00c_simple_layers,_ = VNC_adj.layer_id(pair_paths, source_dVNC_pairs.leftid, A00c) # no contact
proprio_1o_layers,_ = VNC_adj.layer_id(pair_paths, source_dVNC_pairs.leftid, proprio_1o)
proprio_2o_layers,_ = VNC_adj.layer_id(pair_paths, source_dVNC_pairs.leftid, proprio_2o)
somato_1o_layers,_ = VNC_adj.layer_id(pair_paths, source_dVNC_pairs.leftid, somato_1o)
somato_2o_layers,_ = VNC_adj.layer_id(pair_paths, source_dVNC_pairs.leftid, somato_2o)
# order manually identified for figure
dVNC_order = [15639294, 18305433, 10553248, 10728328, # type-1: only to MN
19361427, 11013583, 19298644, 5690425, 6446394, # type-2: immediately to ascending
17777031, 10609443, # type-3: ascending on the way to MN
20556072, 10728333, 19298625, 10018150, # type-4: ascending after MN
3979181, # other: gorogoro first order
7227010, 16851496, 17053270] # supplemental: terminates in A1
all_simple_layers = all_simple_layers.loc[dVNC_order, :]
motor_simple_layers = motor_simple_layers.loc[dVNC_order, :]
ascending_simple_layers = ascending_simple_layers.loc[dVNC_order, :]
proprio_1o_layers = proprio_1o_layers.loc[dVNC_order, :]
proprio_2o_layers = proprio_2o_layers.loc[dVNC_order, :]
somato_1o_layers = somato_1o_layers.loc[dVNC_order, :]
somato_2o_layers = somato_2o_layers.loc[dVNC_order, :]
fig, axs = plt.subplots(
1, 7, figsize=(7, 2.25)
)
ax = axs[0]
annotations = all_simple_layers.astype(int).astype(str)
annotations[annotations=='0']=''
sns.heatmap(all_simple_layers, annot = annotations, fmt = 's', cmap = 'Greens', ax = ax, vmax = 80, cbar = False)
ax.set_yticks([])
ax.set_ylabel('Individual dVNCs Paths')
ax.set(title='Pathway Overview')
ax = axs[1]
annotations = motor_simple_layers.astype(int).astype(str)
annotations[annotations=='0']=''
sns.heatmap(motor_simple_layers, annot = annotations, fmt = 's', cmap = 'Reds', ax = ax, vmax = 80, cbar = False)
ax.set_yticks([])
ax.set_ylabel('')
ax.set(title='Motorneurons')
ax = axs[2]
annotations = ascending_simple_layers.astype(int).astype(str)
annotations[annotations=='0']=''
sns.heatmap(ascending_simple_layers, annot = annotations, fmt = 's', cmap = 'Blues', ax = ax, vmax = 20, cbar = False)
ax.set_yticks([])
ax.set_ylabel('')
ax.set(title='Ascendings')
ax = axs[3]
annotations = proprio_1o_layers.astype(int).astype(str)
annotations[annotations=='0']=''
sns.heatmap(proprio_1o_layers, annot = annotations, fmt = 's', cmap = 'Purples', ax = ax, vmax = 50, cbar = False)
ax.set_yticks([])
ax.set_ylabel('')
ax.set(title='Proprio 1o')
ax = axs[4]
annotations = proprio_2o_layers.astype(int).astype(str)
annotations[annotations=='0']=''
sns.heatmap(proprio_2o_layers, annot = annotations, fmt = 's', cmap = 'Purples', ax = ax, vmax = 50, cbar = False)
ax.set_yticks([])
ax.set_ylabel('')
ax.set(title='Proprio 2o')
ax = axs[5]
annotations = somato_1o_layers.astype(int).astype(str)
annotations[annotations=='0']=''
sns.heatmap(somato_1o_layers, annot = annotations, fmt = 's', cmap = 'GnBu', ax = ax, vmax = 50, cbar = False)
ax.set_yticks([])
ax.set_ylabel('')
ax.set(title='Somato 1o')
ax = axs[6]
annotations = somato_2o_layers.astype(int).astype(str)
annotations[annotations=='0']=''
sns.heatmap(somato_2o_layers, annot = annotations, fmt = 's', cmap = 'GnBu', ax = ax, vmax = 50, cbar = False)
ax.set_yticks([])
ax.set_ylabel('')
ax.set(title='Somato 2o')
plt.savefig(f'VNC_interaction/plots/Threshold-{threshold}_individual_dVNC_paths_simple.pdf', bbox_inches='tight')
# %%
# plot by individual dVNC
# split plot types by dVNC pair
dVNC_pairs = all_simple_layers.index
layer_types = [all_simple_layers, motor_simple_layers, ascending_simple_layers, proprio_1o_layers, proprio_2o_layers,
somato_1o_layers, somato_2o_layers, goro_simple_layers, basins_simple_layers, A00c_simple_layers]
col = ['Greens', 'Reds', 'Blues', 'Purples', 'Purples', 'GnBu', 'GnBu', 'Reds', 'Purples', 'Blues']
dVNC_list = []
for pair in dVNC_pairs:
mat = np.zeros(shape=(len(layer_types), len(all_simple_layers.columns)))
for i, layer_type in enumerate(layer_types):
mat[i, :] = layer_type.loc[pair]
dVNC_list.append(mat)
# loop through pairs to plot
for i, dVNC in enumerate(dVNC_list):
    data = pd.DataFrame(dVNC, index = ['All', 'Motor', 'Ascend', 'Proprio-1', 'Proprio-2', 'Somato-1', 'Somato-2', 'Gorogoro', 'Basins', 'A00c'])
    # one boolean mask per row, so each cell-type row is drawn with its own colormap and color scale
    mask_list = []
    for i_iter in range(0, len(data.index)):
        mask = np.full((len(data.index), len(data.columns)), True, dtype=bool)
        mask[i_iter, :] = [False]*len(data.columns)
        mask_list.append(mask)
    fig, axs = plt.subplots(
        1, 1, figsize=(.8, 1.25)
    )
    annotations = data.astype(int).astype(str)
    annotations[annotations=='0'] = ''
    vmaxs = [60, 60, 20, 40, 40, 40, 40, 10, 10, 10] # per-row color scale: All/Motor, Ascend, Proprio/Somato, Gorogoro/Basins/A00c
    for j, mask in enumerate(mask_list):
        sns.heatmap(data, annot = annotations, fmt = 's', mask = mask, cmap=col[j], vmax = vmaxs[j], cbar=False, ax = axs)
    plt.savefig(f'VNC_interaction/plots/individual_dVNC_paths/{i}_dVNC-{dVNC_pairs[i]}_Threshold-{threshold}_individual-path.pdf', bbox_inches='tight')
# %%
# main figure summary of individual dVNC paths
MN_exclusive= [15639294, 18305433, 10553248, 10728328]
ascending1 = [19361427, 11013583, 19298644, 5690425, 6446394]
ascending2 = [17777031, 10609443]
ascending_postMN = [20556072, 10728333, 19298625, 10018150]
dVNC_types_name = ['MN-exclusive', 'Ascending-1o', 'Ascending-2o', 'Ascending-postMN']
dVNC_types = [MN_exclusive, ascending1, ascending2, ascending_postMN]
dVNC_types = [[Promat.get_paired_skids(x, pairs) for x in sublist] for sublist in dVNC_types] # convert left skids to both skids of each pair
dVNC_types = [sum(x, []) for x in dVNC_types] # flatten the nested lists
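# e.g. sum([[1, 2], [3, 4]], []) == [1, 2, 3, 4], so each dVNC type becomes one flat list of skids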
# multihop downstream
type_paths = []
for index in tqdm(range(0, len(dVNC_types))):
    # use a distinct name so the ds_dVNC variable exported at the end of the script isn't overwritten
    ds_dVNC_type = VNC_adj.downstream_multihop(list(dVNC_types[index]), threshold, min_members = 0, hops=5)
    type_paths.append(ds_dVNC_type)
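# each entry of type_paths is the multihop downstream of one dVNC type (one group of skids per hop, up to 5 hops), summarized by layer_id() below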
# identifying different cell types in dVNC pathways
all_simple_layers_type,_ = VNC_adj.layer_id(type_paths, dVNC_types_name, VNC_adj.adj.index) # include all neurons to get total number of neurons per layer
motor_simple_layers_type,_ = VNC_adj.layer_id(type_paths, dVNC_types_name, A1_MN)
ascending_simple_layers_type,_ = VNC_adj.layer_id(type_paths, dVNC_types_name, A1_ascending)
goro_simple_layers_type,_ = VNC_adj.layer_id(type_paths, dVNC_types_name, gorogoro)
basins_simple_layers_type,_ = VNC_adj.layer_id(type_paths, dVNC_types_name, basins)
A00c_simple_layers_type,_ = VNC_adj.layer_id(type_paths, dVNC_types_name, A00c) # no contact
proprio_1o_layers_type,_ = VNC_adj.layer_id(type_paths, dVNC_types_name, proprio_1o)
proprio_2o_layers_type,_ = VNC_adj.layer_id(type_paths, dVNC_types_name, proprio_2o)
somato_1o_layers_type,_ = VNC_adj.layer_id(type_paths, dVNC_types_name, somato_1o)
somato_2o_layers_type,_ = VNC_adj.layer_id(type_paths, dVNC_types_name, somato_2o)
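# each *_layers_type frame counts, per dVNC type (rows), how many neurons of that cell class appear at each downstream hop (columns)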
# split plot types by dVNC pair
layer_types = [all_simple_layers_type, motor_simple_layers_type, ascending_simple_layers_type, proprio_1o_layers_type, proprio_2o_layers_type,
somato_1o_layers_type, somato_2o_layers_type, goro_simple_layers_type, basins_simple_layers_type, A00c_simple_layers_type]
col = ['Greens', 'Reds', 'Blues', 'Purples', 'Purples', 'GnBu', 'GnBu', 'Reds', 'Purples', 'Blues']
dVNC_type_list = []
for name in dVNC_types_name:
    mat = np.zeros(shape=(len(layer_types), len(all_simple_layers_type.columns)))
    for i, layer_type in enumerate(layer_types):
        mat[i, :] = layer_type.loc[name]
    dVNC_type_list.append(mat)
# loop through pairs to plot
for i, dVNC in enumerate(dVNC_type_list):
    data = pd.DataFrame(dVNC, index = ['All', 'Motor', 'Ascend', 'Proprio-1', 'Proprio-2', 'Somato-1', 'Somato-2', 'Gorogoro', 'Basins', 'A00c'])
    mask_list = []
    for i_iter in range(0, len(data.index)):
        mask = np.full((len(data.index), len(data.columns)), True, dtype=bool)
        mask[i_iter, :] = [False]*len(data.columns)
        mask_list.append(mask)
    fig, axs = plt.subplots(
        1, 1, figsize=(.8, 1.25)
    )
    annotations = data.astype(int).astype(str)
    annotations[annotations=='0'] = ''
    vmaxs = [60, 60, 20, 40, 40, 40, 40, 10, 10, 10] # same per-row color scales as the per-pair plots above
    for j, mask in enumerate(mask_list):
        sns.heatmap(data, annot = annotations, fmt = 's', mask = mask, cmap=col[j], vmax = vmaxs[j], cbar=False, ax = axs)
    plt.savefig(f'VNC_interaction/plots/individual_dVNC_paths/Type_{i}_dVNC-{dVNC_types_name[i]}_Threshold-{threshold}_individual-path.pdf', bbox_inches='tight')
# %%
# export different neuron types at each VNC layer
def readable_df(skids_list):
    max_length = max([len(x) for x in skids_list])
    df = pd.DataFrame()
    for i, layer in enumerate(skids_list):
        skids = list(layer)
        if(len(layer)==0):
            skids = ['']
        if(len(skids) != max_length):
            skids = skids + ['']*(max_length-len(skids))
        df[f'Layer {i}'] = skids
    return(df)
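# Example with made-up skids: readable_df([[101, 102, 103], [104], []]) gives columns 'Layer 0'..'Layer 2',
# with shorter layers right-padded by empty strings so every CSV column has the same length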
readable_df(ds_dVNC_layers_skids.MN).to_csv(f'VNC_interaction/data/csvs/Threshold-{threshold}_ds_dVNC_MN_layers_{str(date.today())}.csv', index = False)
readable_df(ds_dVNC_layers_skids.Proprio).to_csv(f'VNC_interaction/data/csvs/Threshold-{threshold}_ds_dVNC_Proprio_layers_{str(date.today())}.csv', index = False)
readable_df(ds_dVNC_layers_skids.Somato).to_csv(f'VNC_interaction/data/csvs/Threshold-{threshold}_ds_dVNC_Somato_layers_{str(date.today())}.csv', index = False)
readable_df(ascendings_layers_skids.MN).to_csv(f'VNC_interaction/data/csvs/Threshold-{threshold}_ascendings_MN_layers_{str(date.today())}.csv', index = False)
readable_df(ascendings_layers_skids.Proprio).to_csv(f'VNC_interaction/data/csvs/Threshold-{threshold}_ascendings_Proprio_layers_{str(date.today())}.csv', index = False)
readable_df(ascendings_layers_skids.Somato).to_csv(f'VNC_interaction/data/csvs/Threshold-{threshold}_ascendings_Somato_layers_{str(date.today())}.csv', index = False)
# %%
# export ds-dVNCs and dVNCs
pd.DataFrame(ds_dVNC).to_csv(f'VNC_interaction/data/csvs/Threshold-{threshold}_ds_dVNC_{str(date.today())}.csv', index = False)
pd.DataFrame(source_dVNC).to_csv(f'VNC_interaction/data/csvs/Threshold-{threshold}_source_dVNC_{str(date.today())}.csv', index = False)