prompt (stringlengths 19–1.03M) | completion (stringlengths 4–2.12k) | api (stringlengths 8–90)
---|---|---|
# -*- coding: utf-8 -*-
# pylint: disable=W0612,E1101
from collections import OrderedDict
from datetime import datetime
import numpy as np
import pytest
from pandas.compat import lrange
from pandas import DataFrame, MultiIndex, Series, date_range, notna
import pandas.core.panel as panelm
from pandas.core.panel import Panel
import pandas.util.testing as tm
from pandas.util.testing import (
assert_almost_equal, assert_frame_equal, assert_series_equal,
makeCustomDataframe as mkdf, makeMixedDataFrame)
from pandas.tseries.offsets import MonthEnd
@pytest.mark.filterwarnings("ignore:\\nPanel:FutureWarning")
class PanelTests(object):
panel = None
    def test_not_hashable(self):
c_empty = Panel()
c = Panel(Panel([[[1]]]))
pytest.raises(TypeError, hash, c_empty)
pytest.raises(TypeError, hash, c)
@pytest.mark.filterwarnings("ignore:\\nPanel:FutureWarning")
class SafeForSparse(object):
# issue 7692
def test_raise_when_not_implemented(self):
p = Panel(np.arange(3 * 4 * 5).reshape(3, 4, 5),
items=['ItemA', 'ItemB', 'ItemC'],
major_axis=date_range('20130101', periods=4),
minor_axis=list('ABCDE'))
d = p.sum(axis=1).iloc[0]
ops = ['add', 'sub', 'mul', 'truediv',
'floordiv', 'div', 'mod', 'pow']
for op in ops:
with pytest.raises(NotImplementedError):
getattr(p, op)(d, axis=0)
@pytest.mark.filterwarnings("ignore:\\nPanel:FutureWarning")
class CheckIndexing(object):
def test_delitem_and_pop(self):
values = np.empty((3, 3, 3))
values[0] = 0
values[1] = 1
values[2] = 2
panel = Panel(values, lrange(3), lrange(3), lrange(3))
# did we delete the right row?
panelc = panel.copy()
del panelc[0]
tm.assert_frame_equal(panelc[1], panel[1])
tm.assert_frame_equal(panelc[2], panel[2])
panelc = panel.copy()
del panelc[1]
tm.assert_frame_equal(panelc[0], panel[0])
tm.assert_frame_equal(panelc[2], panel[2])
panelc = panel.copy()
del panelc[2]
tm.assert_frame_equal(panelc[1], panel[1])
tm.assert_frame_equal(panelc[0], panel[0])
def test_setitem(self):
# bad shape
p = Panel(np.random.randn(4, 3, 2))
msg = (r"shape of value must be \(3, 2\), "
r"shape of given object was \(4, 2\)")
with pytest.raises(ValueError, match=msg):
p[0] = np.random.randn(4, 2)
def test_setitem_ndarray(self):
timeidx = date_range(start=datetime(2009, 1, 1),
end=datetime(2009, 12, 31),
freq=MonthEnd())
lons_coarse = np.linspace(-177.5, 177.5, 72)
lats_coarse = np.linspace(-87.5, 87.5, 36)
P = Panel(items=timeidx, major_axis=lons_coarse,
minor_axis=lats_coarse)
data = np.random.randn(72 * 36).reshape((72, 36))
key = datetime(2009, 2, 28)
P[key] = data
assert_almost_equal(P[key].values, data)
def test_set_minor_major(self):
# GH 11014
df1 = DataFrame(['a', 'a', 'a', np.nan, 'a', np.nan])
df2 = DataFrame([1.0, np.nan, 1.0, np.nan, 1.0, 1.0])
panel = Panel({'Item1': df1, 'Item2': df2})
newminor = notna(panel.iloc[:, :, 0])
panel.loc[:, :, 'NewMinor'] = newminor
assert_frame_equal(panel.loc[:, :, 'NewMinor'],
newminor.astype(object))
newmajor = notna(panel.iloc[:, 0, :])
panel.loc[:, 'NewMajor', :] = newmajor
assert_frame_equal(panel.loc[:, 'NewMajor', :],
newmajor.astype(object))
def test_getitem_fancy_slice(self):
pass
def test_ix_setitem_slice_dataframe(self):
a = Panel(items=[1, 2, 3], major_axis=[11, 22, 33],
minor_axis=[111, 222, 333])
b = DataFrame(np.random.randn(2, 3), index=[111, 333],
columns=[1, 2, 3])
a.loc[:, 22, [111, 333]] = b
assert_frame_equal(a.loc[:, 22, [111, 333]], b)
def test_ix_align(self):
from pandas import Series
b = Series(np.random.randn(10), name=0)
b.sort_values()
df_orig = Panel(np.random.randn(3, 10, 2))
df = df_orig.copy()
df.loc[0, :, 0] = b
assert_series_equal(df.loc[0, :, 0].reindex(b.index), b)
df = df_orig.swapaxes(0, 1)
df.loc[:, 0, 0] = b
assert_series_equal(df.loc[:, 0, 0].reindex(b.index), b)
df = df_orig.swapaxes(1, 2)
df.loc[0, 0, :] = b
assert_series_equal(df.loc[0, 0, :].reindex(b.index), b)
def test_ix_frame_align(self):
        # GH3830, panel assignment by values/frame
for dtype in ['float64', 'int64']:
panel = Panel(np.arange(40).reshape((2, 4, 5)),
items=['a1', 'a2'], dtype=dtype)
df1 = panel.iloc[0]
df2 = panel.iloc[1]
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df2)
# Assignment by Value Passes for 'a2'
panel.loc['a2'] = df1.values
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df1)
# Assignment by DataFrame Ok w/o loc 'a2'
panel['a2'] = df2
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df2)
# Assignment by DataFrame Fails for 'a2'
panel.loc['a2'] = df2
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df2)
def test_logical_with_nas(self):
d = Panel({'ItemA': {'a': [np.nan, False]},
'ItemB': {'a': [True, True]}})
result = d['ItemA'] | d['ItemB']
expected = DataFrame({'a': [np.nan, True]})
assert_frame_equal(result, expected)
# this is autodowncasted here
result = d['ItemA'].fillna(False) | d['ItemB']
expected = DataFrame({'a': [True, True]})
assert_frame_equal(result, expected)
@pytest.mark.filterwarnings("ignore:\\nPanel:FutureWarning")
class TestPanel(PanelTests, CheckIndexing, SafeForSparse):
def test_constructor_cast(self):
# can't cast
data = [[['foo', 'bar', 'baz']]]
pytest.raises(ValueError, Panel, data, dtype=float)
def test_constructor_empty_panel(self):
empty = Panel()
assert len(empty.items) == 0
assert len(empty.major_axis) == 0
assert len(empty.minor_axis) == 0
def test_constructor_observe_dtype(self):
# GH #411
panel = Panel(items=lrange(3), major_axis=lrange(3),
minor_axis=lrange(3), dtype='O')
assert panel.values.dtype == np.object_
def test_constructor_dtypes(self):
# GH #797
def _check_dtype(panel, dtype):
for i in panel.items:
assert panel[i].values.dtype.name == dtype
# only nan holding types allowed here
for dtype in ['float64', 'float32', 'object']:
panel = Panel(items=lrange(2), major_axis=lrange(10),
minor_axis=lrange(5), dtype=dtype)
_check_dtype(panel, dtype)
for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
panel = Panel(np.array(np.random.randn(2, 10, 5), dtype=dtype),
items=lrange(2),
major_axis=lrange(10),
minor_axis=lrange(5), dtype=dtype)
_check_dtype(panel, dtype)
for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
panel = Panel(np.array(np.random.randn(2, 10, 5), dtype='O'),
items=lrange(2),
major_axis=lrange(10),
minor_axis=lrange(5), dtype=dtype)
_check_dtype(panel, dtype)
for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
panel = Panel(
np.random.randn(2, 10, 5),
items=lrange(2), major_axis=lrange(10),
minor_axis=lrange(5),
dtype=dtype)
_check_dtype(panel, dtype)
for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
df1 = DataFrame(np.random.randn(2, 5),
index=lrange(2), columns=lrange(5))
df2 = DataFrame(np.random.randn(2, 5),
index=lrange(2), columns=lrange(5))
panel = Panel.from_dict({'a': df1, 'b': df2}, dtype=dtype)
_check_dtype(panel, dtype)
def test_constructor_fails_with_not_3d_input(self):
msg = "The number of dimensions required is 3"
with pytest.raises(ValueError, match=msg):
Panel(np.random.randn(10, 2))
def test_ctor_orderedDict(self):
keys = list(set(np.random.randint(0, 5000, 100)))[
:50] # unique random int keys
d = OrderedDict([(k, mkdf(10, 5)) for k in keys])
p = Panel(d)
assert list(p.items) == keys
p = Panel.from_dict(d)
assert list(p.items) == keys
def test_from_dict_mixed_orient(self):
df = tm.makeDataFrame()
df['foo'] = 'bar'
data = {'k1': df, 'k2': df}
panel = Panel.from_dict(data, orient='minor')
assert panel['foo'].values.dtype == np.object_
assert panel['A'].values.dtype == np.float64
def test_constructor_error_msgs(self):
msg = (r"Shape of passed values is \(3, 4, 5\), "
r"indices imply \(4, 5, 5\)")
with pytest.raises(ValueError, match=msg):
Panel(np.random.randn(3, 4, 5),
lrange(4), lrange(5), lrange(5))
msg = (r"Shape of passed values is \(3, 4, 5\), "
r"indices imply \(5, 4, 5\)")
with pytest.raises(ValueError, match=msg):
Panel(np.random.randn(3, 4, 5),
lrange(5), lrange(4), lrange(5))
msg = (r"Shape of passed values is \(3, 4, 5\), "
r"indices imply \(5, 5, 4\)")
with pytest.raises(ValueError, match=msg):
Panel(np.random.randn(3, 4, 5),
lrange(5), lrange(5), lrange(4))
def test_apply_slabs(self):
# with multi-indexes
# GH7469
index = MultiIndex.from_tuples([('one', 'a'), ('one', 'b'), (
'two', 'a'), ('two', 'b')])
dfa = DataFrame(np.array(np.arange(12, dtype='int64')).reshape(
4, 3), columns=list("ABC"), index=index)
dfb = DataFrame(np.array(np.arange(10, 22, dtype='int64')).reshape(
4, 3), columns=list("ABC"), index=index)
p = Panel({'f': dfa, 'g': dfb})
result = p.apply(lambda x: x.sum(), axis=0)
        # on windows this will be int32
result = result.astype('int64')
expected = p.sum(0)
assert_frame_equal(result, expected)
def test_apply_no_or_zero_ndim(self):
# GH10332
self.panel = Panel(np.random.rand(5, 5, 5))
result_int = self.panel.apply(lambda df: 0, axis=[1, 2])
result_float = self.panel.apply(lambda df: 0.0, axis=[1, 2])
result_int64 = self.panel.apply(
lambda df: np.int64(0), axis=[1, 2])
result_float64 = self.panel.apply(lambda df: np.float64(0.0),
axis=[1, 2])
expected_int = expected_int64 = Series([0] * 5)
expected_float = expected_float64 = Series([0.0] * 5)
assert_series_equal(result_int, expected_int)
assert_series_equal(result_int64, expected_int64)
assert_series_equal(result_float, expected_float)
assert_series_equal(result_float64, expected_float64)
def test_fillna(self):
# limit not implemented when only value is specified
p = Panel(np.random.randn(3, 4, 5))
p.iloc[0:2, 0:2, 0:2] = np.nan
pytest.raises(NotImplementedError,
lambda: p.fillna(999, limit=1))
def test_to_frame_multi_major(self):
idx = MultiIndex.from_tuples(
[(1, 'one'), (1, 'two'), (2, 'one'), (2, 'two')])
df = DataFrame([[1, 'a', 1], [2, 'b', 1],
[3, 'c', 1], [4, 'd', 1]],
columns=['A', 'B', 'C'], index=idx)
wp = Panel({'i1': df, 'i2': df})
expected_idx = MultiIndex.from_tuples(
[
(1, 'one', 'A'), (1, 'one', 'B'),
(1, 'one', 'C'), (1, 'two', 'A'),
(1, 'two', 'B'), (1, 'two', 'C'),
(2, 'one', 'A'), (2, 'one', 'B'),
(2, 'one', 'C'), (2, 'two', 'A'),
(2, 'two', 'B'), (2, 'two', 'C')
],
names=[None, None, 'minor'])
expected = DataFrame({'i1': [1, 'a', 1, 2, 'b', 1, 3,
'c', 1, 4, 'd', 1],
'i2': [1, 'a', 1, 2, 'b',
1, 3, 'c', 1, 4, 'd', 1]},
index=expected_idx)
result = wp.to_frame()
assert_frame_equal(result, expected)
wp.iloc[0, 0].iloc[0] = np.nan # BUG on setting. GH #5773
result = wp.to_frame()
assert_frame_equal(result, expected[1:])
idx = MultiIndex.from_tuples(
[(1, 'two'), (1, 'one'), (2, 'one'), (np.nan, 'two')])
df = DataFrame([[1, 'a', 1], [2, 'b', 1],
[3, 'c', 1], [4, 'd', 1]],
columns=['A', 'B', 'C'], index=idx)
wp = Panel({'i1': df, 'i2': df})
ex_idx = MultiIndex.from_tuples([(1, 'two', 'A'), (1, 'two', 'B'),
(1, 'two', 'C'),
(1, 'one', 'A'),
(1, 'one', 'B'),
(1, 'one', 'C'),
(2, 'one', 'A'),
(2, 'one', 'B'),
(2, 'one', 'C'),
(np.nan, 'two', 'A'),
(np.nan, 'two', 'B'),
(np.nan, 'two', 'C')],
names=[None, None, 'minor'])
expected.index = ex_idx
result = wp.to_frame()
assert_frame_equal(result, expected)
def test_to_frame_multi_major_minor(self):
cols = MultiIndex(levels=[['C_A', 'C_B'], ['C_1', 'C_2']],
codes=[[0, 0, 1, 1], [0, 1, 0, 1]])
idx = MultiIndex.from_tuples([(1, 'one'), (1, 'two'), (2, 'one'), (
2, 'two'), (3, 'three'), (4, 'four')])
df = DataFrame([[1, 2, 11, 12], [3, 4, 13, 14],
['a', 'b', 'w', 'x'],
['c', 'd', 'y', 'z'], [-1, -2, -3, -4],
[-5, -6, -7, -8]], columns=cols, index=idx)
wp = Panel({'i1': df, 'i2': df})
exp_idx = MultiIndex.from_tuples(
[(1, 'one', 'C_A', 'C_1'), (1, 'one', 'C_A', 'C_2'),
(1, 'one', 'C_B', 'C_1'), (1, 'one', 'C_B', 'C_2'),
(1, 'two', 'C_A', 'C_1'), (1, 'two', 'C_A', 'C_2'),
(1, 'two', 'C_B', 'C_1'), (1, 'two', 'C_B', 'C_2'),
(2, 'one', 'C_A', 'C_1'), (2, 'one', 'C_A', 'C_2'),
(2, 'one', 'C_B', 'C_1'), (2, 'one', 'C_B', 'C_2'),
(2, 'two', 'C_A', 'C_1'), (2, 'two', 'C_A', 'C_2'),
(2, 'two', 'C_B', 'C_1'), (2, 'two', 'C_B', 'C_2'),
(3, 'three', 'C_A', 'C_1'), (3, 'three', 'C_A', 'C_2'),
(3, 'three', 'C_B', 'C_1'), (3, 'three', 'C_B', 'C_2'),
(4, 'four', 'C_A', 'C_1'), (4, 'four', 'C_A', 'C_2'),
(4, 'four', 'C_B', 'C_1'), (4, 'four', 'C_B', 'C_2')],
names=[None, None, None, None])
exp_val = [[1, 1], [2, 2], [11, 11], [12, 12],
[3, 3], [4, 4],
[13, 13], [14, 14], ['a', 'a'],
['b', 'b'], ['w', 'w'],
['x', 'x'], ['c', 'c'], ['d', 'd'], [
'y', 'y'], ['z', 'z'],
[-1, -1], [-2, -2], [-3, -3], [-4, -4],
[-5, -5], [-6, -6],
[-7, -7], [-8, -8]]
result = wp.to_frame()
expected = DataFrame(exp_val, columns=['i1', 'i2'], index=exp_idx)
assert_frame_equal(result, expected)
def test_to_frame_multi_drop_level(self):
idx = MultiIndex.from_tuples([(1, 'one'), (2, 'one'), (2, 'two')])
df = DataFrame({'A': [np.nan, 1, 2]}, index=idx)
wp = Panel({'i1': df, 'i2': df})
result = wp.to_frame()
exp_idx = MultiIndex.from_tuples(
[(2, 'one', 'A'), (2, 'two', 'A')],
names=[None, None, 'minor'])
expected = DataFrame({'i1': [1., 2], 'i2': [1., 2]}, index=exp_idx)
assert_frame_equal(result, expected)
def test_panel_dups(self):
# GH 4960
# duplicates in an index
# items
data = np.random.randn(5, 100, 5)
no_dup_panel = Panel(data, items=list("ABCDE"))
panel = Panel(data, items=list("AACDE"))
expected = no_dup_panel['A']
result = panel.iloc[0]
assert_frame_equal(result, expected)
expected = no_dup_panel['E']
result = panel.loc['E']
assert_frame_equal(result, expected)
# major
data = np.random.randn(5, 5, 5)
no_dup_panel = Panel(data, major_axis=list("ABCDE"))
panel = Panel(data, major_axis=list("AACDE"))
expected = no_dup_panel.loc[:, 'A']
result = panel.iloc[:, 0]
assert_frame_equal(result, expected)
expected = no_dup_panel.loc[:, 'E']
result = panel.loc[:, 'E']
assert_frame_equal(result, expected)
# minor
data = np.random.randn(5, 100, 5)
no_dup_panel = Panel(data, minor_axis=list("ABCDE"))
panel = Panel(data, minor_axis=list("AACDE"))
expected = no_dup_panel.loc[:, :, 'A']
result = panel.iloc[:, :, 0]
assert_frame_equal(result, expected)
expected = no_dup_panel.loc[:, :, 'E']
result = panel.loc[:, :, 'E']
assert_frame_equal(result, expected)
def test_filter(self):
pass
def test_shift(self):
# mixed dtypes #6959
data = [('item ' + ch, makeMixedDataFrame())
for ch in list('abcde')]
data = dict(data)
mixed_panel = Panel.from_dict(data, orient='minor')
shifted = mixed_panel.shift(1)
assert_series_equal(mixed_panel.dtypes, shifted.dtypes)
def test_numpy_round(self):
values = [[[-3.2, 2.2], [0, -4.8213], [3.123, 123.12],
[-1566.213, 88.88], [-12, 94.5]],
[[-5.82, 3.5], [6.21, -73.272], [-9.087, 23.12],
[272.212, -99.99], [23, -76.5]]]
p = Panel(values, items=['Item1', 'Item2'],
major_axis=date_range('1/1/2000', periods=5),
minor_axis=['A', 'B'])
msg = "the 'out' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.round(p, out=p)
# removing Panel before NumPy enforces, so just ignore
@pytest.mark.filterwarnings("ignore:Using a non-tuple:FutureWarning")
def test_multiindex_get(self):
ind = MultiIndex.from_tuples(
[('a', 1), ('a', 2), ('b', 1), ('b', 2)],
names=['first', 'second'])
wp = Panel(np.random.random((4, 5, 5)),
items=ind,
major_axis=np.arange(5),
minor_axis=np.arange(5))
f1 = wp['a']
f2 = wp.loc['a']
assert (f1.items == [1, 2]).all()
assert (f2.items == [1, 2]).all()
MultiIndex.from_tuples([('a', 1), ('a', 2), ('b', 1)],
names=['first', 'second'])
def test_repr_empty(self):
empty = | Panel() | pandas.core.panel.Panel |
# The ExchangeAgent expects a numeric agent id, printable name, agent type, timestamp to open and close trading,
# a list of equity symbols for which it should create order books, a frequency at which to archive snapshots
# of its order books, a pipeline delay (in ns) for order activity, the exchange computation delay (in ns),
# the levels of order stream history to maintain per symbol (maintains all orders that led to the last N trades),
# whether to log all order activity to the agent log, and a random state object (already seeded) to use
# for stochasticity.
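# A hedged illustration of how this constructor might be called (the agent id, symbol,
# and timestamps below are made up for the example, not taken from this file):
#
#   import numpy as np
#   import pandas as pd
#   exchange = ExchangeAgent(id=0, name="EXCHANGE", type="ExchangeAgent",
#                            mkt_open=pd.Timestamp("2020-06-01 09:30:00"),
#                            mkt_close=pd.Timestamp("2020-06-01 16:00:00"),
#                            symbols=["ABM"], book_freq="S",
#                            pipeline_delay=40000, computation_delay=1,
#                            stream_history=500, log_orders=False,
#                            random_state=np.random.RandomState(seed=42))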
from agent.FinancialAgent import FinancialAgent
from message.Message import Message
from util.OrderBook import OrderBook
from util.util import log_print, delist
import jsons as js
import pandas as pd
pd.set_option('display.max_rows', 500)
from copy import deepcopy
class ExchangeAgent(FinancialAgent):
def __init__(self, id, name, type, mkt_open, mkt_close, symbols, book_freq='S', pipeline_delay = 40000,
computation_delay = 1, stream_history = 0, log_orders = False, random_state = None):
super().__init__(id, name, type, random_state)
# Do not request repeated wakeup calls.
self.reschedule = False
# Store this exchange's open and close times.
self.mkt_open = mkt_open
self.mkt_close = mkt_close
# Right now, only the exchange agent has a parallel processing pipeline delay. This is an additional
# delay added only to order activity (placing orders, etc) and not simple inquiries (market operating
# hours, etc).
self.pipeline_delay = pipeline_delay
# Computation delay is applied on every wakeup call or message received.
self.computation_delay = computation_delay
# The exchange maintains an order stream of all orders leading to the last L trades
# to support certain agents from the auction literature (GD, HBL, etc).
self.stream_history = stream_history
# Log all order activity?
self.log_orders = log_orders
# Create an order book for each symbol.
self.order_books = {}
for symbol in symbols:
self.order_books[symbol] = OrderBook(self, symbol)
# At what frequency will we archive the order books for visualization and analysis?
self.book_freq = book_freq
# The exchange agent overrides this to obtain a reference to an oracle.
# This is needed to establish a "last trade price" at open (i.e. an opening
# price) in case agents query last trade before any simulated trades are made.
# This can probably go away once we code the opening cross auction.
def kernelInitializing (self, kernel):
super().kernelInitializing(kernel)
self.oracle = self.kernel.oracle
# Obtain opening prices (in integer cents). These are not noisy right now.
for symbol in self.order_books:
try:
self.order_books[symbol].last_trade = self.oracle.getDailyOpenPrice(symbol, self.mkt_open)
log_print ("Opening price for {} is {}", symbol, self.order_books[symbol].last_trade)
except AttributeError as e:
log_print(str(e))
# The exchange agent overrides this to additionally log the full depth of its
# order books for the entire day.
def kernelTerminating (self):
super().kernelTerminating()
# If the oracle supports writing the fundamental value series for its
# symbols, write them to disk.
if hasattr(self.oracle, 'f_log'):
for symbol in self.oracle.f_log:
dfFund = pd.DataFrame(self.oracle.f_log[symbol])
dfFund.set_index('FundamentalTime', inplace=True)
self.writeLog(dfFund, filename='fundamental_{}'.format(symbol))
print("Fundamental archival complete.")
if self.book_freq is None:
return
elif self.book_freq == 'all': # log all orderbook updates
self.logOrderBook()
else:
# Iterate over the order books controlled by this exchange.
for symbol in self.order_books:
book = self.order_books[symbol]
# Log full depth quotes (price, volume) from this order book at some pre-determined frequency.
# Here we are looking at the actual log for this order book (i.e. are there snapshots to export,
# independent of the requested frequency).
if book.book_log:
# This must already be sorted by time because it was a list of order book snapshots and time
# only increases in our simulation. BUT it can have duplicates if multiple orders happen
# in the same nanosecond. (This particularly happens if using nanoseconds as the discrete
# but fine-grained unit for more theoretic studies.)
dfLog = pd.DataFrame(book.book_log)
dfLog.set_index('QuoteTime', inplace=True)
# With multiple quotes in a nanosecond, use the last one, then resample to the requested freq.
dfLog = dfLog[~dfLog.index.duplicated(keep='last')]
dfLog.sort_index(inplace=True)
dfLog = dfLog.resample(self.book_freq).ffill()
dfLog.sort_index(inplace=True)
# Create a fully populated index at the desired frequency from market open to close.
# Then project the logged data into this complete index.
time_idx = pd.date_range(self.mkt_open, self.mkt_close, freq=self.book_freq, closed='right')
dfLog = dfLog.reindex(time_idx, method='ffill')
dfLog.sort_index(inplace=True)
dfLog = dfLog.stack()
dfLog.sort_index(inplace=True)
# Get the full range of quotes at the finest possible resolution.
quotes = sorted(dfLog.index.get_level_values(1).unique())
min_quote = quotes[0]
max_quote = quotes[-1]
quotes = range(min_quote, max_quote+1)
# Restructure the log to have multi-level rows of all possible pairs of time and quote
# with volume as the only column.
filledIndex = | pd.MultiIndex.from_product([time_idx, quotes], names=['time','quote']) | pandas.MultiIndex.from_product |
import os
import pandas as pd
import matplotlib.pyplot as plt
import time
# <NAME>, 2017
# Models thermal equilibrium of a subducting body into the mantle as a function of time.
# Assumption is a vertical body of 100 km thickness (can adjust) going into an isothermal mantle (can also adjust to add
# a temperature gradient).
# Spits out a pretty plot and a rather large output file.
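# For reference, a standalone textbook sketch of an explicit (FTCS) step for radial heat
# conduction, dT/dt = Kappa*(d2T/dr2 + (2/r)*dT/dr), given here alongside the update
# computed in the loop below; it is not used by the script. The scheme is stable when
# Kappa*deltaTime/deltaX**2 <= 0.5 (this script uses 0.2).
#
#   def ftcs_step(T, r, Kappa, deltaTime, deltaX):
#       T_new = T.copy()
#       for i in range(1, len(T) - 1):
#           lap = (T[i + 1] + T[i - 1] - 2 * T[i]) / deltaX ** 2
#           curv = 0.0 if r[i] == 0 else (2 / r[i]) * (T[i + 1] - T[i - 1]) / (2 * deltaX)
#           T_new[i] = T[i] + Kappa * deltaTime * (lap + curv)
#       return T_new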
if "therm_eq.csv" in os.listdir(os.getcwd()):
os.remove("therm_eq.csv")
Kappa = 0.000005 * 3.154*10**7 * 1*10**-6 # convert seconds to years, m^2 to km^2, the thermal diffusivity
deltaX = 1 # km, change in position per iteration
deltaTime = (0.2 * deltaX**2) / Kappa # years, the time in between iterations
boundary_T = 0 # degK, isotherm, can add geotherm below in the for loop
body_T = 1800 # degK, downgoing body initial temperature
body_thickness = 100 #km, the thickness of the downgoing body into the mantle
max_time_interations = 26000 # number of model iterations
curr_time_iteration = 1 # tracks current model iteration
print("\nModel Parameters:\nKappa: {} km^2/yr\ndeltaTime: {} years\ndeltaX: {} km\nSlab Thickness: {} km\nMax model iterations: {} ({} billion years)\n".format(
Kappa, deltaTime, deltaX, body_thickness, max_time_interations, (max_time_interations*deltaTime) / (1*10**9)))
depth = [0]
half_depth = list(range(round((body_thickness + 2)/2)))
for i in half_depth:
depth.append(i)
adjusted_depth = []
for i in list(reversed(depth))[:-2]:
adjusted_depth.append(i)
for i in half_depth:
adjusted_depth.append(i)
df = pd.DataFrame({'Depth': adjusted_depth, "Initial Condition": [body_T for i in list(range(len(adjusted_depth)))]})
for i in list(range(max_time_interations)):
time = str(curr_time_iteration)
prev_time_iteration = str(curr_time_iteration - 1)
df[time] = boundary_T
vals = []
if i != max_time_interations + 1:
if curr_time_iteration != 1:
vals.append(boundary_T)
# for row in df['Depth'].ix[1:len(adjusted_depth) - 1]:
for row in df.index[1:len(adjusted_depth) - 1]:
curr_radius = int(df['Depth'][row])
# print("Index: {}\nCurr_Radius: {}\nt-deltat: {}\nprev_row: {}\nrow+1: {}\nrow-1: {}".format(
# row, curr_radius, prev_time_iteration, df[str(prev_time_iteration)][row],
# df[str(prev_time_iteration)][row + 1], df[str(prev_time_iteration)][row - 1]))
if int(df['Depth'][row]) == 0:
T = df[prev_time_iteration][row] + ((Kappa * deltaTime) * (((df[str(prev_time_iteration)][row + 1] +
df[str(prev_time_iteration)][row - 1] - (
2 * df[str(prev_time_iteration)][
row])) / deltaX ** 2)))
vals.append(T)
else:
T = df[prev_time_iteration][row] + ((Kappa*deltaTime)*(((df[str(prev_time_iteration)][row + 1] +
df[str(prev_time_iteration)][row - 1] - (2*df[str(prev_time_iteration)][row]))/deltaX**2)) +
((2/curr_radius) * ((df[str(prev_time_iteration)][row + 1] -
df[str(prev_time_iteration)][row - 1])/(2*deltaTime))))
# term1 = df[prev_time_iteration][row]
# term2 = (Kappa*deltaTime)*(((df[str(prev_time_iteration)][row + 1] +
# df[str(prev_time_iteration)][row - 1] - (2*df[str(prev_time_iteration)][row]))/deltaX**2))
# term3 = (Kappa*deltaTime)*((2/curr_radius) * ((df[str(prev_time_iteration)][row + 1] -
# df[str(prev_time_iteration)][row - 1])/(2*deltaTime)))
# print('\nTerm 1: {} Term 2: {} Term 3: {}\n'.format(term1, term2, term3))
vals.append(T)
vals.append(boundary_T)
else:
time = str(curr_time_iteration)
prev_time_iteration = str(curr_time_iteration - 1)
df[time] = ''
df2 = | pd.DataFrame({time: []}) | pandas.DataFrame |
"""
Tools to clean Balancing area data.
A data cleaning step is performed by an object that subclasses
the `BaDataCleaner` class.
"""
import os
import logging
import time
import re
from gridemissions.load import BaData
from gridemissions.eia_api import SRC, KEYS
import pandas as pd
import numpy as np
from collections import defaultdict
import cvxpy as cp
import dask
A = 1e4 # MWh
GAMMA = 10 # MWh
EPSILON = 1 # MWh
def na_stats(data, title, cols):
"""
Print NA statistics for a subset of a dataframe.
"""
print(
"%s:\t%.2f%%"
% (
title,
(
data.df.loc[:, cols].isna().sum().sum()
/ len(data.df)
/ len(data.df.loc[:, cols].columns)
* 100
),
)
)
class BaDataCleaner(object):
"""
Template class for data cleaning.
This is mostly just a shell to show how cleaning classes should operate.
"""
def __init__(self, ba_data):
"""
Parameters
----------
ba_data : BaData object
"""
self.d = ba_data
self.logger = logging.getLogger("clean")
def process(self):
pass
class BaDataBasicCleaner(BaDataCleaner):
"""
Basic data cleaning class.
We run this as the first step of the cleaning process.
"""
def process(self):
self.logger.info("Running BaDataBasicCleaner")
start = time.time()
data = self.d
missing_D_cols = [col for col in data.NG_cols if col not in data.D_cols]
self.logger.info("Adding demand columns for %d bas" % len(missing_D_cols))
for ba in missing_D_cols:
data.df.loc[:, data.KEY["D"] % ba] = 1.0
data.df.loc[:, data.KEY["NG"] % ba] -= 1.0
data.df.loc[:, data.KEY["TI"] % ba] -= 1.0
# AVRN only exports to BPAT - this is missing for now
if "AVRN" not in data.ID_cols:
self.logger.info("Adding trade columns for AVRN")
ba = "AVRN"
ba2 = "BPAT"
data.df.loc[:, data.KEY["ID"] % (ba, ba2)] = (
data.df.loc[:, data.KEY["NG"] % ba] - 1.0
)
data.df.loc[:, data.KEY["ID"] % (ba2, ba)] = (
-data.df.loc[:, data.KEY["NG"] % ba] + 1.0
)
# Add columns for biomass and geothermal for CISO
# We are assuming constant generation for each of these sources
# based on historical data. Before updating this, need to
# contact the EIA API maintainers to understand why this isn't
# reported and where to find it
self.logger.info("Adding GEO and BIO columns for CISO")
data.df.loc[:, "EBA.CISO-ALL.NG.GEO.H"] = 900.0
data.df.loc[:, "EBA.CISO-ALL.NG.BIO.H"] = 600.0
# data.df.loc[:, "EBA.CISO-ALL.NG.H"] += 600.0 + 900.0
# Add columns for the BAs that are outside of the US
foreign_bas = list(
set([col for col in data.ID_cols2 if col not in data.NG_cols])
)
self.logger.info(
"Adding demand, generation and TI columns for %d foreign bas"
% len(foreign_bas)
)
for ba in foreign_bas:
trade_cols = [col for col in data.df.columns if "%s.ID.H" % ba in col]
TI = -data.df.loc[:, trade_cols].sum(axis=1)
data.df.loc[:, data.KEY["TI"] % ba] = TI
exports = TI.apply(lambda x: max(x, 0))
imports = TI.apply(lambda x: min(x, 0))
data.df.loc[:, data.KEY["D"] % ba] = -imports
data.df.loc[:, data.KEY["NG"] % ba] = exports
if ba in ["BCHA", "HQT", "MHEB"]:
# Assume for these Canadian BAs generation is hydro
data.df.loc[:, data.KEY["SRC_WAT"] % ba] = exports
else:
# And all others are OTH (other)
data.df.loc[:, data.KEY["SRC_OTH"] % ba] = exports
for col in trade_cols:
ba2 = re.split(r"\.|-|_", col)[1]
data.df.loc[:, data.KEY["ID"] % (ba, ba2)] = -data.df.loc[:, col]
# Make sure that trade columns exist both ways
for col in data.get_cols(field="ID"):
ba = re.split(r"\.|-|_", col)[1]
ba2 = re.split(r"\.|-|_", col)[2]
othercol = data.KEY["ID"] % (ba2, ba)
if othercol not in data.df.columns:
self.logger.info("Adding %s" % othercol)
data.df.loc[:, othercol] = -data.df.loc[:, col]
# Filter unrealistic values using self.reject_dict
self._create_reject_dict()
cols = (
data.get_cols(field="D")
+ data.get_cols(field="NG")
+ data.get_cols(field="TI")
+ data.get_cols(field="ID")
)
for col in cols:
s = data.df.loc[:, col]
data.df.loc[:, col] = s.where(
(s >= self.reject_dict[col][0]) & (s <= self.reject_dict[col][1])
)
# Do the same for the generation by source columns
# If there is no generation by source, add one that is OTH
# Edge case for solar:
# There are a lot of values at -50 MWh or so during the night. We want
# to set those to 0, but consider that very negative values (below
# -1GW) are rejected
for ba in data.regions:
missing = True
for src in SRC:
col = data.KEY["SRC_%s" % src] % ba
if col in data.df.columns:
missing = False
s = data.df.loc[:, col]
if src == "SUN":
self.reject_dict[col] = (-1e3, 200e3)
data.df.loc[:, col] = s.where(
(s >= self.reject_dict[col][0])
& (s <= self.reject_dict[col][1])
)
if src == "SUN":
data.df.loc[:, col] = data.df.loc[:, col].apply(
lambda x: max(x, 0)
)
if missing:
data.df.loc[:, data.KEY["SRC_OTH"] % ba] = data.df.loc[
:, data.KEY["NG"] % ba
]
# Reinitialize fields
self.logger.info("Reinitializing fields")
data = BaData(df=data.df)
self.r = data
self.logger.info("Basic cleaning took %.2f seconds" % (time.time() - start))
def _create_reject_dict(self):
"""
Create a defaultdict to store ranges outside of which values are
considered unrealistic.
The default range is (-1., 200e3) MW. Manual ranges can be set for
specific columns here if that range is not strict enough.
"""
reject_dict = defaultdict(lambda: (-1.0, 200e3))
for col in self.d.get_cols(field="TI"):
reject_dict[col] = (-100e3, 100e3)
for col in self.d.get_cols(field="ID"):
reject_dict[col] = (-100e3, 100e3)
reject_dict["EBA.AZPS-ALL.D.H"] = (1.0, 30e3)
reject_dict["EBA.BANC-ALL.D.H"] = (1.0, 6.5e3)
reject_dict["EBA.BANC-ALL.TI.H"] = (-5e3, 5e3)
reject_dict["EBA.CISO-ALL.NG.H"] = (5e3, 60e3)
self.reject_dict = reject_dict
def rolling_window_filter(
df,
offset=10 * 24,
min_periods=100,
center=True,
replace_nan_with_mean=True,
return_mean=False,
):
"""
Apply a rolling window filter to a dataframe.
Filter using dynamic bounds: reject points that are farther than 4 standard
deviations from the mean, using a rolling window to compute the mean and
standard deviation.
Parameters
----------
df : pd.DataFrame
Dataframe to filter
offset : int
Passed on to pandas' rolling function
min_periods : int
Passed on to pandas' rolling function
center : bool
Passed on to pandas' rolling function
replace_nan_with_mean : bool
Whether to replace NaNs with the mean of the timeseries at the end of
the procedure
Notes
-----
Keeps at least 200 MWh around the mean as an acceptance range.
"""
for col in df.columns:
rolling_ = df[col].rolling(offset, min_periods=min_periods, center=center)
mean_ = rolling_.mean()
std_ = rolling_.std().apply(lambda x: max(100, x))
ub = mean_ + 4 * std_
lb = mean_ - 4 * std_
idx_reject = (df[col] >= ub) | (df[col] <= lb)
df.loc[idx_reject, col] = np.nan
if replace_nan_with_mean:
# First try interpolating linearly, but only for up to 3 hours
df.loc[:, col] = df.loc[:, col].interpolate(limit=3)
# If there is more than 3 hours of missing data, use rolling mean
df.loc[df[col].isnull(), col] = mean_.loc[df[col].isnull()]
if return_mean:
mean_ = df.rolling(offset, min_periods=min_periods, center=center).mean()
return (df, mean_)
return df
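# A minimal, hypothetical usage sketch for rolling_window_filter (the column name and
# numbers are illustrative, not taken from real EIA data):
#
#   idx = pd.date_range("2020-01-01", periods=24 * 60, freq="H")
#   demand = pd.DataFrame(
#       {"EBA.CISO-ALL.D.H": np.random.normal(25e3, 2e3, len(idx))}, index=idx)
#   demand.iloc[100, 0] = 1e6  # an obvious spike, outside the rolling 4-sigma band
#   cleaned, rolling_mean = rolling_window_filter(demand.copy(), return_mean=True)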
class BaDataRollingCleaner(BaDataCleaner):
"""
Rolling window cleaning.
This applies the `rolling_window_filter` function to the dataset. In order
to apply this properly to the beginning of the dataset, we load past data
that will be used for the cleaning - that is then dropped.
"""
def process(self, file_name="", folder_hist="", nruns=2):
"""
Processor function for the cleaner object.
Parameters
----------
file_name : str
Base name of the file from which to read historical data.
Data is read from "%s_basic.csv" % file_name
folder_hist : str
Folder from which to read historical data
nruns : int
Number of times to apply the rolling window procedure
Notes
-----
If we are not processing a large amount of data at a time, we may not
have enough data to appropriately estimate the rolling mean and
standard deviation for the rolling window procedure. If values are
given for `file_name` and `folder_hist`, data will be read from a
historical dataset to estimate the rolling mean and standard deviation.
If there are very large outliers, they can 'mask' smaller outliers.
Running the rolling window procedure a couple of times helps with this
issue.
"""
self.logger.info("Running BaDataRollingCleaner (%d runs)" % nruns)
start = time.time()
data = self.d
# Remember what part we are cleaning
idx_cleaning = data.df.index
try:
# Load the data we already have in memory
df_hist = pd.read_csv(
os.path.join(folder_hist, "%s_basic.csv" % file_name),
index_col=0,
parse_dates=True,
)
# Only take the last 1,000 rows
            # Note that if df_hist has fewer than 1,000 rows,
            # pandas simply returns all of df_hist without raising an error.
df_hist = df_hist.iloc[-1000:]
# Overwrite with the new data
old_rows = df_hist.index.difference(data.df.index)
df_hist = data.df.append(df_hist.loc[old_rows, :], sort=True)
df_hist.sort_index(inplace=True)
except FileNotFoundError:
self.logger.info("No history file")
df_hist = data.df
# Apply rolling horizon threshold procedure
# 20200206 update: don't try replacing NaNs anymore, leave that to the
# next step
for _ in range(nruns):
df_hist = rolling_window_filter(df_hist, replace_nan_with_mean=False)
# Deal with NaNs
# First deal with NaNs by taking the average of the previous day and
# next day. In general we observe strong daily patterns so this seems
# to work well. Limit the filling procedure to one day at a time. If
# there are multiple missing days, this makes for a smoother transition
# between the two valid days. If we had to do this more than 4 times,
# give up and use forward and backward fills without limits
for col in df_hist.columns:
npasses = 0
while (df_hist.loc[:, col].isna().sum() > 0) and (npasses < 4):
npasses += 1
df_hist.loc[:, col] = pd.concat(
[
df_hist.loc[:, col].groupby(df_hist.index.hour).ffill(limit=1),
df_hist.loc[:, col].groupby(df_hist.index.hour).bfill(limit=1),
],
axis=1,
).mean(axis=1)
if npasses == 4:
self.logger.debug("A lot of bad data for %s" % col)
df_hist.loc[:, col] = pd.concat(
[
df_hist.loc[:, col].groupby(df_hist.index.hour).ffill(),
df_hist.loc[:, col].groupby(df_hist.index.hour).bfill(),
],
axis=1,
).mean(axis=1)
# All bad data columns
if df_hist.loc[:, col].isna().sum() == len(df_hist):
df_hist.loc[:, col] = 0.0
# Some NaNs will still remain - try using the rolling mean average
df_hist, mean_ = rolling_window_filter(
df_hist, replace_nan_with_mean=True, return_mean=True
)
if df_hist.isna().sum().sum() > 0:
self.logger.warning("There are still some NaNs. Unexpected")
# Just keep the indices we are working on currently
data = BaData(df=df_hist.loc[idx_cleaning, :])
self.r = data
self.weights = mean_.loc[idx_cleaning, :].applymap(
lambda x: A / max(GAMMA, abs(x))
)
self.logger.info(
"Rolling window cleaning took %.2f seconds" % (time.time() - start)
)
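# A hedged end-to-end sketch of how these cleaners appear intended to be chained
# (the raw-data loading call and file/folder names are assumptions, not taken from
# this module):
#
#   raw = BaData(fileNm="EBA_raw.csv")          # hypothetical raw EIA dataset
#   basic = BaDataBasicCleaner(raw)
#   basic.process()
#   roller = BaDataRollingCleaner(basic.r)
#   roller.process(file_name="EBA", folder_hist="hist", nruns=2)
#   cleaned, weights = roller.r, roller.weights  # weights feed the optimization-based cleaners below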
class BaDataPyoCleaningModel(object):
"""
Create an AbstractModel() for the cleaning problem.
No data is passed into this model at this point, it is
simply written in algebraic form.
"""
def __init__(self):
m = pyo.AbstractModel()
# Sets
m.regions = pyo.Set()
m.srcs = pyo.Set()
m.regions2 = pyo.Set(within=m.regions * m.regions)
m.regions_srcs = pyo.Set(within=m.regions * m.srcs)
# Parameters
m.D = pyo.Param(m.regions, within=pyo.Reals)
m.NG = pyo.Param(m.regions, within=pyo.Reals)
m.TI = pyo.Param(m.regions, within=pyo.Reals)
m.ID = pyo.Param(m.regions2, within=pyo.Reals)
m.NG_SRC = pyo.Param(m.regions_srcs, within=pyo.Reals)
m.D_W = pyo.Param(m.regions, default=1.0, within=pyo.Reals)
m.NG_W = pyo.Param(m.regions, default=1.0, within=pyo.Reals)
m.TI_W = pyo.Param(m.regions, default=1.0, within=pyo.Reals)
m.ID_W = pyo.Param(m.regions2, default=1.0, within=pyo.Reals)
m.NG_SRC_W = pyo.Param(m.regions_srcs, default=1.0, within=pyo.Reals)
# Variables
# delta_NG_aux are aux variable for the case where there
# are no SRC data. In that case, the NG_sum constraint would
# only have: m.NG + m.delta_NG = 0.
m.delta_D = pyo.Var(m.regions, within=pyo.Reals)
m.delta_NG = pyo.Var(m.regions, within=pyo.Reals)
m.delta_TI = pyo.Var(m.regions, within=pyo.Reals)
m.delta_ID = pyo.Var(m.regions2, within=pyo.Reals)
m.delta_NG_SRC = pyo.Var(m.regions_srcs, within=pyo.Reals)
# m.delta_NG_aux = pyo.Var(m.regions, within=pyo.Reals)
# Constraints
m.D_positive = pyo.Constraint(m.regions, rule=self.D_positive)
m.NG_positive = pyo.Constraint(m.regions, rule=self.NG_positive)
m.NG_SRC_positive = pyo.Constraint(m.regions_srcs, rule=self.NG_SRC_positive)
m.energy_balance = pyo.Constraint(m.regions, rule=self.energy_balance)
m.antisymmetry = pyo.Constraint(m.regions2, rule=self.antisymmetry)
m.trade_sum = pyo.Constraint(m.regions, rule=self.trade_sum)
m.NG_sum = pyo.Constraint(m.regions, rule=self.NG_sum)
# Objective
m.total_penalty = pyo.Objective(rule=self.total_penalty, sense=pyo.minimize)
self.m = m
def D_positive(self, model, i):
return (model.D[i] + model.delta_D[i]) >= EPSILON
def NG_positive(self, model, i):
return (model.NG[i] + model.delta_NG[i]) >= EPSILON
def NG_SRC_positive(self, model, k, s):
return model.NG_SRC[k, s] + model.delta_NG_SRC[k, s] >= EPSILON
def energy_balance(self, model, i):
return (
model.D[i]
+ model.delta_D[i]
+ model.TI[i]
+ model.delta_TI[i]
- model.NG[i]
- model.delta_NG[i]
) == 0.0
def antisymmetry(self, model, i, j):
return (
model.ID[i, j]
+ model.delta_ID[i, j]
+ model.ID[j, i]
+ model.delta_ID[j, i]
== 0.0
)
def trade_sum(self, model, i):
return (
model.TI[i]
+ model.delta_TI[i]
- sum(
model.ID[k, l] + model.delta_ID[k, l]
for (k, l) in model.regions2
if k == i
)
) == 0.0
def NG_sum(self, model, i):
return (
model.NG[i]
+ model.delta_NG[i] # + model.delta_NG_aux[i]
- sum(
model.NG_SRC[k, s] + model.delta_NG_SRC[k, s]
for (k, s) in model.regions_srcs
if k == i
)
) == 0.0
def total_penalty(self, model):
return (
sum(
(
model.D_W[i] * model.delta_D[i] ** 2
+ model.NG_W[i] * model.delta_NG[i] ** 2
# + model.delta_NG_aux[i]**2
+ model.TI_W[i] * model.delta_TI[i] ** 2
)
for i in model.regions
)
+ sum(
model.ID_W[i, j] * model.delta_ID[i, j] ** 2
for (i, j) in model.regions2
)
+ sum(
model.NG_SRC_W[i, s] * model.delta_NG_SRC[i, s] ** 2
for (i, s) in model.regions_srcs
)
)
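# In plain math (a paraphrase of the abstract model above), for each timestamp the
# cleaning step solves:
#   minimize    sum_i [ w_D_i*dD_i^2 + w_NG_i*dNG_i^2 + w_TI_i*dTI_i^2 ]
#               + sum_(i,j) w_ID_ij*dID_ij^2 + sum_(i,s) w_SRC_is*dSRC_is^2
#   subject to  D_i + dD_i >= EPSILON,  NG_i + dNG_i >= EPSILON,  NG_SRC_is + dSRC_is >= EPSILON
#               (D_i + dD_i) + (TI_i + dTI_i) - (NG_i + dNG_i) = 0       (energy balance)
#               (ID_ij + dID_ij) + (ID_ji + dID_ji) = 0                  (antisymmetry)
#               TI_i + dTI_i = sum_j (ID_ij + dID_ij)                    (trade sum)
#               NG_i + dNG_i = sum_s (NG_SRC_is + dSRC_is)               (generation by source)
# i.e. find the smallest weighted adjustments that restore internal consistency.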
class BaDataPyoCleaner(BaDataCleaner):
"""
Optimization-based cleaning class.
Uses pyomo to build the model and Gurobi as the default solver.
"""
def __init__(self, ba_data, weights=None, solver="gurobi"):
super().__init__(ba_data)
import pyomo.environ as pyo
from pyomo.opt import SolverFactory
self.m = BaDataPyoCleaningModel().m
self.opt = SolverFactory(solver)
self.weights = weights
if weights is not None:
self.d.df = pd.concat(
[self.d.df, weights.rename(lambda x: x + "_W", axis=1)], axis=1
)
def process(self, debug=False):
start = time.time()
self.logger.info("Running BaDataPyoCleaner for %d rows" % len(self.d.df))
self.d.df = self.d.df.fillna(0)
if not debug:
self.r = self.d.df.apply(self._process, axis=1)
else:
r_list = []
delta_list = []
for idx, row in self.d.df.iterrows():
_, r, deltas = self._process(row, debug=True)
r_list.append(r)
delta_list.append(deltas)
self.r = pd.concat(r_list, axis=1).transpose()
self.deltas = pd.concat(delta_list, axis=1).transpose()
self.deltas.index = self.d.df.index
self.r.index = self.d.df.index
# Make sure the cleaning step performed as expected
self.r = BaData(df=self.r)
self.logger.info("Checking BAs...")
for ba in self.r.regions:
self.r.checkBA(ba)
self.logger.info("Execution took %.2f seconds" % (time.time() - start))
def _process(self, row, debug=False):
if row.isna().sum() > 0:
raise ValueError("Cannot call this method on data with NaNs")
i = self._create_instance(row)
self.opt.solve(i)
r = pd.concat(
[
pd.Series(
{
self.d.KEY["NG"] % k: (i.NG[k] + pyo.value(i.delta_NG[k]))
for k in i.regions
}
),
pd.Series(
{
self.d.KEY["D"] % k: (i.D[k] + pyo.value(i.delta_D[k]))
for k in i.regions
}
),
pd.Series(
{
self.d.KEY["TI"] % k: (i.TI[k] + pyo.value(i.delta_TI[k]))
for k in i.regions
}
),
pd.Series(
{
self.d.KEY["ID"]
% (k1, k2): (i.ID[k1, k2] + pyo.value(i.delta_ID[k1, k2]))
for (k1, k2) in i.regions2
}
),
pd.Series(
{
self.d.KEY["SRC_%s" % s]
% k: (i.NG_SRC[k, s] + pyo.value(i.delta_NG_SRC[k, s]))
for (k, s) in i.regions_srcs
}
),
]
)
deltas = pd.concat(
[
pd.Series(
{
self.d.KEY["NG"] % k: (pyo.value(i.delta_NG[k]))
for k in i.regions
}
),
pd.Series(
{self.d.KEY["D"] % k: (pyo.value(i.delta_D[k])) for k in i.regions}
),
pd.Series(
{
self.d.KEY["TI"] % k: (pyo.value(i.delta_TI[k]))
for k in i.regions
}
),
pd.Series(
{
self.d.KEY["ID"] % (k1, k2): (pyo.value(i.delta_ID[k1, k2]))
for (k1, k2) in i.regions2
}
),
pd.Series(
{
self.d.KEY["SRC_%s" % s] % k: (pyo.value(i.delta_NG_SRC[k, s]))
for (k, s) in i.regions_srcs
}
),
]
)
if not debug:
return r
return i, r, deltas
def _create_instance(self, row):
def append_W(x):
return [c + "_W" for c in x]
NG_SRC_data = self._get_ng_src(row)
NG_SRC_data_W = self._get_ng_src(row, weights=True)
opt_data = {
None: {
"regions": {None: self.d.regions},
"srcs": {None: SRC},
"regions2": {
None: list(
set(
[
(re.split(r"\.|-|_", el)[1], re.split(r"\.|-|_", el)[2])
for el in self.d.df.columns
if "ID" in re.split(r"\.|-|_", el)
]
)
)
},
"regions_srcs": {None: list(NG_SRC_data.keys())},
"D": self._reduce_cols(row.loc[self.d.get_cols(field="D")].to_dict()),
"NG": self._reduce_cols(row.loc[self.d.get_cols(field="NG")].to_dict()),
"TI": self._reduce_cols(row.loc[self.d.get_cols(field="TI")].to_dict()),
"ID": self._reduce_cols(
row.loc[self.d.get_cols(field="ID")].to_dict(), nfields=2
),
"NG_SRC": NG_SRC_data,
}
}
if self.weights is not None:
opt_data[None]["D_W"] = self._reduce_cols(
row.loc[append_W(self.d.get_cols(field="D"))].to_dict()
)
opt_data[None]["NG_W"] = self._reduce_cols(
row.loc[append_W(self.d.get_cols(field="NG"))].to_dict()
)
opt_data[None]["TI_W"] = self._reduce_cols(
row.loc[append_W(self.d.get_cols(field="TI"))].to_dict()
)
opt_data[None]["ID_W"] = self._reduce_cols(
row.loc[append_W(self.d.get_cols(field="ID"))].to_dict(), nfields=2
)
opt_data[None]["NG_SRC_W"] = NG_SRC_data_W
instance = self.m.create_instance(opt_data)
return instance
def _reduce_cols(self, mydict, nfields=1):
"""
Helper function to simplify the names in a dictionary
"""
newdict = {}
for k in mydict:
if nfields == 1:
newk = re.split(r"\.|-|_", k)[1]
elif nfields == 2:
newk = (re.split(r"\.|-|_", k)[1], re.split(r"\.|-|_", k)[2])
else:
raise ValueError("Unexpected argument")
newdict[newk] = mydict[k]
return newdict
def _get_ng_src(self, r, weights=False):
"""
Helper function to get the NG_SRC data.
"""
mydict = {}
for ba in self.d.regions:
for src in SRC:
col = self.d.KEY["SRC_%s" % src] % ba
if weights:
col += "_W"
if col in self.d.df.columns:
mydict[(ba, src)] = r[col]
return mydict
class BaDataCvxCleaner(BaDataCleaner):
"""
Optimization-based cleaning class.
Uses cvxpy.
"""
def __init__(self, ba_data, weights=None):
super().__init__(ba_data)
self.weights = weights
if weights is not None:
self.d.df = pd.concat(
[self.d.df, weights.rename(lambda x: x + "_W", axis=1)], axis=1
)
def process(self, debug=False, with_ng_src=True):
start = time.time()
self.logger.info("Running BaDataCvxCleaner for %d rows" % len(self.d.df))
self.d.df = self.d.df.fillna(0)
results = []
def cvx_solve(row, regions, debug=False):
if row.isna().sum() > 0:
raise ValueError("Cannot call this method on data with NaNs")
n_regions = len(regions)
D = row[[KEYS["E"]["D"] % r for r in regions]].values
D_W = [
el ** 0.5
for el in row[[KEYS["E"]["D"] % r + "_W" for r in regions]].values
]
NG = row[[KEYS["E"]["NG"] % r for r in regions]].values
NG_W = [
el ** 0.5
for el in row[[KEYS["E"]["NG"] % r + "_W" for r in regions]].values
]
TI = row[[KEYS["E"]["TI"] % r for r in regions]].values
TI_W = [
el ** 0.5
for el in row[[KEYS["E"]["TI"] % r + "_W" for r in regions]].values
]
delta_D = cp.Variable(n_regions, name="delta_D")
delta_NG = cp.Variable(n_regions, name="delta_NG")
delta_TI = cp.Variable(n_regions, name="delta_TI")
obj = (
cp.sum_squares(cp.multiply(D_W, delta_D))
+ cp.sum_squares(cp.multiply(NG_W, delta_NG))
+ cp.sum_squares(cp.multiply(TI_W, delta_TI))
)
ID = {}
ID_W = {}
for i, ri in enumerate(regions):
for j, rj in enumerate(regions):
if KEYS["E"]["ID"] % (ri, rj) in row.index:
ID[(ri, rj)] = row[KEYS["E"]["ID"] % (ri, rj)]
ID_W[(ri, rj)] = row[KEYS["E"]["ID"] % (ri, rj) + "_W"]
delta_ID = {k: cp.Variable(name=f"{k}") for k in ID}
constraints = [
D + delta_D >= 1.0,
NG + delta_NG >= 1.0,
D + delta_D + TI + delta_TI - NG - delta_NG == 0.0,
]
if with_ng_src:
NG_SRC = {}
NG_SRC_W = {}
for i, src in enumerate(SRC):
for j, r in enumerate(regions):
if KEYS["E"][f"SRC_{src}"] % r in row.index:
NG_SRC[(src, r)] = row[KEYS["E"][f"SRC_{src}"] % r]
NG_SRC_W[(src, r)] = row[KEYS["E"][f"SRC_{src}"] % r + "_W"]
delta_NG_SRC = {k: cp.Variable(name=f"{k}") for k in NG_SRC}
for k in NG_SRC:
constraints += [NG_SRC[k] + delta_NG_SRC[k] >= 1.0]
obj += NG_SRC_W[k] * delta_NG_SRC[k] ** 2
# Add the antisymmetry constraints twice is less efficient but not a huge deal.
for ri, rj in ID: # then (rj, ri) must also be in ID
constraints += [
ID[(ri, rj)]
+ delta_ID[(ri, rj)]
+ ID[(rj, ri)]
+ delta_ID[(rj, ri)]
== 0.0
]
obj += ID_W[(ri, rj)] * delta_ID[(ri, rj)] ** 2
for i, ri in enumerate(regions):
if with_ng_src:
constraints += [
NG[i]
+ delta_NG[i]
- cp.sum(
[
NG_SRC[(src, ri)] + delta_NG_SRC[(src, ri)]
for src in SRC
if (src, ri) in NG_SRC
]
)
== 0.0
]
constraints += [
TI[i]
+ delta_TI[i]
- cp.sum(
[
ID[(ri, rj)] + delta_ID[(ri, rj)]
for rj in regions
if (ri, rj) in ID
]
)
== 0.0
]
objective = cp.Minimize(obj)
prob = cp.Problem(objective, constraints)
prob.solve()
if with_ng_src:
r = pd.concat(
[
pd.Series(
NG + delta_NG.value,
index=[KEYS["E"]["NG"] % r for r in regions],
),
pd.Series(
D + delta_D.value,
index=[KEYS["E"]["D"] % r for r in regions],
),
pd.Series(
TI + delta_TI.value,
index=[KEYS["E"]["TI"] % r for r in regions],
),
pd.Series(
{KEYS["E"]["ID"] % k: ID[k] + delta_ID[k].value for k in ID}
),
pd.Series(
{
KEYS["E"][f"SRC_{s}"] % r: NG_SRC[(s, r)]
+ delta_NG_SRC[(s, r)].value
for (s, r) in NG_SRC
}
),
pd.Series({"CleaningObjective": prob.value})
]
)
else:
r = pd.concat(
[
pd.Series(
NG + delta_NG.value,
index=[KEYS["E"]["NG"] % r for r in regions],
),
pd.Series(
D + delta_D.value,
index=[KEYS["E"]["D"] % r for r in regions],
),
pd.Series(
TI + delta_TI.value,
index=[KEYS["E"]["TI"] % r for r in regions],
),
pd.Series(
{KEYS["E"]["ID"] % k: ID[k] + delta_ID[k].value for k in ID}
),
pd.Series({"CleaningObjective": prob.value})
]
)
if not debug:
return r
if with_ng_src:
deltas = pd.concat(
[
pd.Series(
delta_NG.value, index=[KEYS["E"]["NG"] % r for r in regions]
),
pd.Series(
delta_D.value, index=[KEYS["E"]["D"] % r for r in regions]
),
pd.Series(
delta_TI.value, index=[KEYS["E"]["TI"] % r for r in regions]
),
| pd.Series({KEYS["E"]["ID"] % k: delta_ID[k].value for k in ID}) | pandas.Series |
# -*- coding: utf-8 -*-
"""Tests for recall@k curve."""
# pylint: disable=protected-access,too-many-public-methods,no-self-use,too-few-public-methods,C0103
from __future__ import division
import numpy as np
import pandas as pd
from rankmetrics.metrics import recall_k_curve
class TestRecallKCurve(object):
"""Test class for the recall_k_curve method."""
def test_recall_k_curve_list(self):
"""Test recall_k_curve on a list."""
y_true = [[0, 0, 0, 1, 1]]
y_pred = [[0.1, 0.0, 0.0, 0.2, 0.3]]
actual = recall_k_curve(y_true, y_pred, max_k=3)
expected = [0.5, 1., 1.]
np.testing.assert_allclose(expected, actual)
def test_recall_k_curve_np(self):
"""Test recall_k_curve on a numpy array."""
y_true = np.array([[0, 0, 0, 1, 1]])
y_pred = np.array([[0.1, 0.0, 0.0, 0.2, 0.3]])
actual = recall_k_curve(y_true, y_pred, max_k=3)
expected = [0.5, 1., 1.]
np.testing.assert_allclose(expected, actual)
def test_recall_k_curve_pd(self):
"""Test recall_k_curve on a pandas data fame."""
y_true = pd.DataFrame([[0, 0, 0, 1, 1]])
y_pred = | pd.DataFrame([[0.1, 0.0, 0.0, 0.2, 0.3]]) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Methods to perform coverage analysis.
@author: <NAME> <<EMAIL>>
"""
import pandas as pd
import numpy as np
import geopandas as gpd
from typing import List, Optional
from shapely import geometry as geo
from datetime import datetime, timedelta
from skyfield.api import load, wgs84, EarthSatellite
from ..schemas.point import Point
from ..schemas.satellite import Satellite
from ..schemas.instrument import Instrument, DutyCycleScheme
from ..utils import (
compute_min_altitude,
swath_width_to_field_of_regard,
compute_max_access_time,
compute_orbit_period,
)
def collect_observations(
point: Point,
satellite: Satellite,
instrument: Instrument,
start: datetime,
end: datetime,
omit_solar: bool = True,
sample_distance: Optional[float] = None,
) -> gpd.GeoDataFrame:
"""
Collect single satellite observations of a geodetic point of interest.
:param point: The ground point of interest
:type point: :class:`tatc.schemas.point.Point`
:param satellite: The observing satellite
:type satellite: :class:`tatc.schemas.satellite.Satellite`
:param instrument: The instrument used to make observations
    :type instrument: :class:`tatc.schemas.instrument.Instrument`
:param start: The start of the mission window
    :type start: :class:`datetime.datetime`
:param end: The end of the mission window
    :type end: :class:`datetime.datetime`
:param omit_solar: True, if solar angles should be omitted
to improve computational efficiency, defaults to True
:type omit_solar: bool, optional
:param sample_distance: Ground sample distance (m) to override
instrument field of regard, defaults to None
:type sample_distance: int, optional
    :return: An instance of :class:`geopandas.GeoDataFrame` containing all
        recorded observations
    :rtype: :class:`geopandas.GeoDataFrame`
"""
# build a topocentric point at the designated geodetic point
topos = wgs84.latlon(point.latitude, point.longitude)
# load the timescale and define starting and ending points
ts = load.timescale()
t0 = ts.from_datetime(start)
t1 = ts.from_datetime(end)
# load the ephemerides
eph = load("de421.bsp")
# convert orbit to tle
orbit = satellite.orbit.to_tle()
# construct a satellite for propagation
sat = EarthSatellite(orbit.tle[0], orbit.tle[1], satellite.name)
# compute the initial satellite height (altitude)
satellite_height = wgs84.subpoint(sat.at(t0)).elevation.m
# compute the minimum altitude angle required for observation
min_altitude = compute_min_altitude(
satellite_height,
instrument.field_of_regard
if sample_distance is None
else swath_width_to_field_of_regard(satellite_height, sample_distance),
)
# compute the maximum access time to filter bad data
max_access_time = timedelta(
seconds=compute_max_access_time(satellite_height, min_altitude)
)
# TODO: consider instrument operational intervals
ops_intervals = pd.Series(
[pd.Interval(pd.Timestamp(start), pd.Timestamp(end), "both")]
)
# find the set of observation events
t, events = sat.find_events(topos, t0, t1, altitude_degrees=min_altitude)
if omit_solar:
# basic dataframe without solar angles
df = pd.DataFrame(
{
"point_id": pd.Series([], dtype="int"),
"geometry": pd.Series([], dtype="object"),
"satellite": pd.Series([], dtype="str"),
"instrument": pd.Series([], dtype="str"),
"start": pd.Series([], dtype="datetime64[ns, utc]"),
"end": pd.Series([], dtype="datetime64[ns, utc]"),
"epoch": pd.Series([], dtype="datetime64[ns, utc]"),
"sat_alt": pd.Series([], dtype="float64"),
"sat_az": pd.Series([], dtype="float64"),
}
)
else:
# extended dataframe including solar angles
df = pd.DataFrame(
{
"point_id": pd.Series([], dtype="int"),
"geometry": pd.Series([], dtype="object"),
"satellite": pd.Series([], dtype="str"),
"instrument": pd.Series([], dtype="str"),
"start": pd.Series([], dtype="datetime64[ns, utc]"),
"end": pd.Series([], dtype="datetime64[ns, utc]"),
"epoch": pd.Series([], dtype="datetime64[ns, utc]"),
"sat_alt": pd.Series([], dtype="float64"),
"sat_az": pd.Series([], dtype="float64"),
"sat_sunlit": pd.Series([], dtype="bool"),
"solar_alt": pd.Series([], dtype="float64"),
"solar_az": pd.Series([], dtype="float64"),
"solar_time": pd.Series([], dtype="float64"),
}
)
# define variables for stepping through the events list
t_rise = None
t_culminate = None
sat_sunlit = None
solar_time = None
sat_alt = None
sat_az = None
solar_alt = None
solar_az = None
# check for geocentricity
    if len(events) > 0 and np.all(events == 1):
# find the satellite altitude, azimuth, and distance at t0
sat_alt, sat_az, sat_dist = (sat - topos).at(t[0]).altaz()
        # if omitting solar angles
if omit_solar:
df = pd.concat([
df,
pd.DataFrame.from_records(
{
"point_id": point.id,
"geometry": geo.Point(point.longitude, point.latitude),
"satellite": satellite.name,
"instrument": instrument.name,
"start": start,
"epoch": start + (end - start) / 2,
"end": end,
"sat_alt": sat_alt.degrees,
"sat_az": sat_az.degrees,
}, index=[0]
)
], ignore_index=True)
# otherwise if solar angles are included
else:
df = pd.concat([
df,
pd.DataFrame.from_records(
{
"point_id": point.id,
"geometry": geo.Point(point.longitude, point.latitude),
"satellite": satellite.name,
"instrument": instrument.name,
"start": start,
"epoch": start + (end - start) / 2,
"end": end,
"sat_alt": sat_alt.degrees,
"sat_az": sat_az.degrees,
"sat_sunlit": None,
"solar_alt": None,
"solar_az": None,
"solar_time": None
}, index=[0]
)
], ignore_index=True)
# compute the access time for the observation (end - start)
df["access"] = df["end"] - df["start"]
# compute the revisit time for each observation (previous end - start)
df["revisit"] = df["end"] - df["start"].shift()
return gpd.GeoDataFrame(df, geometry=df.geometry, crs="EPSG:4326")
for j in range(len(events)):
if events[j] == 0:
# record the rise time
t_rise = t[j].utc_datetime()
elif events[j] == 1:
# record the culmination time
t_culminate = t[j].utc_datetime()
# find the satellite altitude, azimuth, and distance
sat_alt, sat_az, sat_dist = (sat - topos).at(t[j]).altaz()
if not omit_solar or instrument.req_target_sunlit is not None:
# find the solar altitude, azimuth, and distance
solar_obs = (
(eph["earth"] + topos).at(t[j]).observe(eph["sun"]).apparent()
)
solar_alt, solar_az, solar_dist = solar_obs.altaz()
# find the local solar time
solar_time = solar_obs.hadec()[0].hours + 12
if not omit_solar or instrument.req_self_sunlit is not None:
# find whether the satellite is sunlit
sat_sunlit = sat.at(t[j]).is_sunlit(eph)
elif events[j] == 2:
# record the set time
t_set = t[j].utc_datetime()
# only record an observation if a previous rise and culminate
# events were recorded (sometimes they are out-of-order)
if t_rise is not None and t_culminate is not None:
# check if the observation meets minimum access duration,
# ground sunlit conditions, and satellite sunlit conditions
if (
instrument.min_access_time <= t_set - t_rise <= max_access_time * 2
and instrument.is_valid_observation(
eph,
ts.from_datetime(t_culminate),
sat.at(ts.from_datetime(t_culminate)),
)
and (
instrument.duty_cycle >= 1
or any(ops_intervals.apply(lambda i: t_culminate in i))
)
):
# if omitting solar angles
if omit_solar:
df = pd.concat([
df,
pd.DataFrame.from_records(
{
"point_id": point.id,
"geometry": geo.Point(point.longitude, point.latitude),
"satellite": satellite.name,
"instrument": instrument.name,
"start": pd.Timestamp(t_rise),
"epoch": pd.Timestamp(t_culminate),
"end": pd.Timestamp(t_set),
"sat_alt": sat_alt.degrees,
"sat_az": sat_az.degrees,
}, index=[0]
)
], ignore_index=True)
# otherwise if solar angles are included
else:
df = pd.concat([
df,
pd.DataFrame.from_records(
{
"point_id": point.id,
"geometry": geo.Point(point.longitude, point.latitude),
"satellite": satellite.name,
"instrument": instrument.name,
"start": | pd.Timestamp(t_rise) | pandas.Timestamp |
#!/usr/bin/python3
import argparse
from datetime import datetime, date, time, timedelta
import numpy as np
import pandas as pd
import plotly.graph_objects as go
import plotly.offline as pyo
import platform
import sys
import tsutils as ts
from plotly.offline import init_notebook_mode
def samp():
open_data = [33.0, 33.3, 33.5, 33.0, 34.1]
high_data = [33.1, 33.3, 33.6, 33.2, 34.8]
low_data = [32.7, 32.7, 32.8, 32.6, 32.8]
close_data = [33.0, 32.9, 33.3, 33.1, 33.1]
dates = [datetime(year=2013, month=10, day=10),
datetime(year=2013, month=11, day=10),
datetime(year=2013, month=12, day=10),
datetime(year=2014, month=1, day=10),
datetime(year=2014, month=2, day=10)]
fig = go.Figure(data=[go.Candlestick(x=dates,
open=open_data, high=high_data,
low=low_data, close=close_data)])
fig.show()
def samp3LB():
df = pd.read_csv('d:/3lb.csv', delimiter='\s+', converters={'date': lambda e: datetime.strptime(e, '%Y-%m-%d')})
colours = df['dirn'].map({-1: "red", 1: "green"})
xs = df['date'].dt.strftime('%m-%d')
fig = go.Figure(data=[go.Bar(x = xs, y = df['close']-df['open'], base = df['open'], marker=dict(color = colours))])
fig.update_xaxes(type='category')
fig.show()
# color="LightSeaGreen",
def draw_daily_lines(df, fig, tms, idxs):
for op, cl in idxs:
fig.add_vline(x=tms.iloc[op], line_width=1, line_dash="dash", line_color="blue")
fig.add_vline(x=tms.iloc[cl], line_width=1, line_dash="dash", line_color='grey')
y = df.Open.iloc[op]
fig.add_shape(type='line', x0=tms.iloc[op], y0=y, x1=tms.iloc[cl], y1=y, line=dict(color='LightSeaGreen', dash='dot'))
def highs(df, window):
hs = df['High'].rolling(window, center=True).max()
hsv = df['High'][np.equal(df['High'], hs)]
t = pd.Series.diff(hsv)
# remove 0 elements
return df.High[t[t.ne(0)].index]
def lows(df, window):
hs = df['Low'].rolling(window, center=True).min()
hsv = df['Low'][np.equal(df['Low'], hs)]
# remove adjacent values which are the same by differencing and removing 0
t = pd.Series.diff(hsv)
return df.Low[t[t.ne(0)].index]
def peaks(df, tms, fig):
hs = highs(df, 21)
ls = lows(df, 21)
# fig = go.Figure()
# fig.add_trace(go.Scatter(x=tm, y=df['High'][:600], line=dict(color='orange')))
# fig.add_trace(go.Scatter(x=tm, y=df['Low'][:600], line=dict(color='cyan')))
# fig.add_trace(go.Scatter(
# x=[tm[i] for i in xs[0]],
# y=[df.High[j] for j in xs[0]],
# mode='markers',
# marker=dict(size=8, color='green', symbol='cross' ),
# name='Detected Peaks' ))
fig.add_trace(go.Scatter(
x=[tms[i] for i in hs.index],
y=hs.add(1),
text=['%.2f' % y for y in hs],
mode='text',
textposition="top center",
name='local high' ))
fig.add_trace(go.Scatter(
x=[tms[i] for i in ls.index],
y=ls.sub(1),
text=['%.2f' % y for y in ls],
mode='text',
textposition="bottom center",
name='local low' ))
def bar_containing(df, dt):
return (df['Date'] <= dt) & (df['DateCl'] > dt)
# return a high and low range to nearest multiple of n
def make_yrange(df, op, cl, n):
h = df['High'][op:cl].max() + n // 2
l = df['Low'][op:cl].min() - n // 2
return (l // n)*n, ((h // n) + 1)*n
# pair of start_index:end_index suitable for use with iloc[s:e]
def make_day_index(df):
# filter by hour > 21 since holidays can have low volume
is_first_bar = (df['Date'].diff().fillna( | pd.Timedelta(hours=1) | pandas.Timedelta |
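# Illustrative, standalone sketch of the day-boundary detection idea used by
# make_day_index above: a bar opens a new session when the gap to the previous
# bar exceeds the normal intra-bar spacing. The timestamps and the fill value are
# invented, and the hour filter used in the original is omitted here.
import pandas as pd

bars = pd.DataFrame({
    "Date": pd.to_datetime([
        "2021-03-01 22:00", "2021-03-01 23:00", "2021-03-02 00:00",
        "2021-03-02 22:00", "2021-03-02 23:00", "2021-03-03 00:00",
    ])
})
gap = bars["Date"].diff().fillna(pd.Timedelta(hours=999))
is_first = gap > pd.Timedelta(hours=1)
day_starts = bars.index[is_first].tolist()
# pair each session start with the start of the next session (or the end of the data),
# giving (open, close) index pairs suitable for iloc[s:e]
day_index = list(zip(day_starts, day_starts[1:] + [len(bars)]))
print(day_index)  # [(0, 3), (3, 6)]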
from datetime import datetime
import numpy as np
import pytest
from pandas.core.dtypes.cast import find_common_type, is_dtype_equal
import pandas as pd
from pandas import DataFrame, Index, MultiIndex, Series
import pandas._testing as tm
class TestDataFrameCombineFirst:
def test_combine_first_mixed(self):
a = Series(["a", "b"], index=range(2))
b = Series(range(2), index=range(2))
f = DataFrame({"A": a, "B": b})
a = Series(["a", "b"], index=range(5, 7))
b = Series(range(2), index=range(5, 7))
g = DataFrame({"A": a, "B": b})
exp = DataFrame({"A": list("abab"), "B": [0, 1, 0, 1]}, index=[0, 1, 5, 6])
combined = f.combine_first(g)
tm.assert_frame_equal(combined, exp)
def test_combine_first(self, float_frame):
# disjoint
head, tail = float_frame[:5], float_frame[5:]
combined = head.combine_first(tail)
reordered_frame = float_frame.reindex(combined.index)
tm.assert_frame_equal(combined, reordered_frame)
assert tm.equalContents(combined.columns, float_frame.columns)
tm.assert_series_equal(combined["A"], reordered_frame["A"])
# same index
fcopy = float_frame.copy()
fcopy["A"] = 1
del fcopy["C"]
fcopy2 = float_frame.copy()
fcopy2["B"] = 0
del fcopy2["D"]
combined = fcopy.combine_first(fcopy2)
assert (combined["A"] == 1).all()
tm.assert_series_equal(combined["B"], fcopy["B"])
tm.assert_series_equal(combined["C"], fcopy2["C"])
tm.assert_series_equal(combined["D"], fcopy["D"])
# overlap
head, tail = reordered_frame[:10].copy(), reordered_frame
head["A"] = 1
combined = head.combine_first(tail)
assert (combined["A"][:10] == 1).all()
# reverse overlap
tail["A"][:10] = 0
combined = tail.combine_first(head)
assert (combined["A"][:10] == 0).all()
# no overlap
f = float_frame[:10]
g = float_frame[10:]
combined = f.combine_first(g)
tm.assert_series_equal(combined["A"].reindex(f.index), f["A"])
tm.assert_series_equal(combined["A"].reindex(g.index), g["A"])
# corner cases
comb = float_frame.combine_first(DataFrame())
tm.assert_frame_equal(comb, float_frame)
comb = DataFrame().combine_first(float_frame)
tm.assert_frame_equal(comb, float_frame)
comb = float_frame.combine_first(DataFrame(index=["faz", "boo"]))
assert "faz" in comb.index
# #2525
df = DataFrame({"a": [1]}, index=[datetime(2012, 1, 1)])
df2 = DataFrame(columns=["b"])
result = df.combine_first(df2)
assert "b" in result
def test_combine_first_mixed_bug(self):
idx = Index(["a", "b", "c", "e"])
ser1 = Series([5.0, -9.0, 4.0, 100.0], index=idx)
ser2 = | Series(["a", "b", "c", "e"], index=idx) | pandas.Series |
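# Minimal standalone illustration of the DataFrame.combine_first behaviour that the
# tests above assert: missing entries in the caller are patched from the other frame,
# over the union of both indexes and columns. The values here are made up.
import numpy as np
import pandas as pd

a = pd.DataFrame({"A": [1.0, np.nan], "B": [np.nan, 4.0]}, index=[0, 1])
b = pd.DataFrame({"A": [10.0, 20.0], "B": [30.0, 40.0]}, index=[1, 2])
print(a.combine_first(b))
#       A     B
# 0   1.0   NaN
# 1  10.0   4.0
# 2  20.0  40.0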
from .Data import Data
from .Zemberek import Zemberek
from .Esanlam import Esanlam
import pandas as pd
import re
import json
from collections import OrderedDict
from operator import itemgetter
import os
from .Config import dirs as dirs
from .Stops import Stops
from .ITUNLPTools import ITUNLPTools
class Main():
stats = []
num = 1
def __init__(self):
print(dirs._path)
if not os.path.exists(dirs._path + dirs._datapath):
os.makedirs(dirs._path + dirs._datapath)
if not os.path.exists(dirs._path + dirs._datapath + "\\" + dirs._processdir):
os.makedirs(dirs._path + dirs._datapath + "\\" + dirs._processdir)
self.Zemberek = Zemberek()
self.Data = Data()
self.Esanlam = Esanlam()
self.Stops = Stops()
self.ITUNLPTools = ITUNLPTools()
def is_str(self, v):
return type(v) is str
    def while_replace(self, string, needle, haystack):
        while needle in string: string = string.replace(needle, haystack)
return string
def Tokenize(self, area, newarea=False):
if (newarea == False): newarea = area
self.Data.datafrm[newarea] = self.Data.datafrm[area].apply(lambda x: self.Zemberek.TurkishTextToken(x))
def jsonunicode(self, data):
if isinstance(data, str):
return json.dumps(json.loads(data), ensure_ascii=False)
else:
return ""
def fixUnicode(self, area, newarea=False):
if (newarea == False): newarea = area
if (area != newarea): self.Data.datafrm[newarea] = pd.Series()
self.Data.datafrm[newarea] = self.Data.datafrm[area].apply(lambda x: self.jsonunicode(x))
def fixChars(self, area, newarea=False):
if (newarea == False): newarea = area
if (area != newarea): self.Data.datafrm[newarea] = pd.Series()
self.Data.datafrm[newarea] = self.Data.datafrm[area].apply(lambda x: self.__fixcharsworker(x))
def __fixcharsworker(self, x):
if isinstance(x, list):
x = " ".join(x)
newtext = ""
length = 0
charbefore = ""
i = 0
for char in x:
if char == charbefore:
length += 1
if length < 2:
newtext += char
else:
newtext += char
length = 0
i += 1
charbefore = char
if (x != newtext): print(x, newtext)
return self.Zemberek.TurkishTextToken(newtext)
def Clean(self, area, newarea=False):
if (newarea == False): newarea = area
if (area != newarea): self.Data.datafrm[newarea] = pd.Series()
self.Data.datafrm[newarea] = self.Data.datafrm[area].apply(lambda x: self.__cleanerworker(x))
def __cleanerworker(self, x):
if isinstance(x, list):
x = " ".join(x)
x = x.replace('-', '')
x = x.replace("'", '')
x = x.replace("â", 'a')
x = x.replace("İ", "i")
x = x.replace("î", 'i')
x = x.replace("î", 'i')
x = re.sub(re.compile(r"[-'\"]"), '', x)
x = re.sub(re.compile(r"[\\][ntrv]"), ' ', x)
x = re.sub(re.compile(r'[^a-zA-ZçığöüşÇİĞÖÜŞ ]'), ' ', x)
x = self.while_replace(x, " ", " ")
x = x.lower()
x = self.Zemberek.TurkishTextToken(x)
return x
def __cleanword(self, x):
if isinstance(x, list):
x = " ".join(x)
x = x.replace('-', '')
x = x.replace("'", '')
x = x.replace("â", 'a')
x = x.replace("İ", "i")
x = x.replace("î", 'i')
x = x.replace("î", 'i')
x = re.sub(re.compile(r"[-'\"]"), '', x)
x = re.sub(re.compile(r"[\\][ntrv]"), ' ', x)
x = re.sub(re.compile(r'[^a-zA-ZçığöüşÇİĞÖÜŞ ]'), ' ', x)
x = self.while_replace(x, " ", "")
x = x.lower()
return x
def lower(self, area, newarea=False):
if (newarea == False): newarea = area
if (area != newarea): self.Data.datafrm[newarea] = pd.Series()
self.Data.datafrm[newarea] = self.Data.datafrm[area].apply(lambda x: self._lowerworker(x))
def _lowerworker(self, text):
if isinstance(text, str):
tokens = self.Zemberek.TurkishTextToken(text)
else:
tokens = text
newtext = []
for token in tokens:
token = token.replace('İ', 'i')
token = token.replace("ardunio", 'arduino')
token = token.replace("nardunio", 'arduino')
token = token.lower()
newtext.append(token)
return newtext
def Normalize(self, area, newarea):
if (newarea == False): newarea = area
if (area != newarea): self.Data.datafrm[newarea] = pd.Series()
self.Data.datafrm[newarea] = self.Data.datafrm[area].apply(lambda x: self.Zemberek.TurkishNormalizer(x))
def NormalizeWords(self, area, newarea):
if (newarea == False): newarea = area
if (area != newarea): self.Data.datafrm[newarea] = pd.Series()
self.Data.datafrm[newarea] = self.Data.datafrm[area].apply(lambda x: self.Zemberek.TurkishWordNormalizer(x))
def sorguBirlestir(self, x, sifirla=False):
if sifirla == True:
self.sorgumetni = ""
self.sorgumetni += "\n\n" + " ".join(x)
def ITUNormalize(self, area, newarea):
if (newarea == False): newarea = area
if (area != newarea): self.Data.datafrm[newarea] = pd.Series()
self.sorguBirlestir("", True)
self.Data.datafrm[newarea] = self.Data.datafrm[area].apply(lambda x: self.sorguBirlestir(x))
print(self.sorgumetni)
print(self.ITUNLPTools.ask("normalize", self.sorgumetni))
def NormWithCorr(self, area, newarea):
if (newarea == False): newarea = area
if (area != newarea): self.Data.datafrm[newarea] = pd.Series()
counts = self.TFBuilder(self.Data.datafrm[area])
corrects = self.Zemberek.TurkishSpellingWithNormal(counts)
suggestion = self.__suggestion(counts, corrects)
self.__changeColumn(area, newarea, suggestion)
def TFBuilder(self, text, topic=False):
counts = {}
for row in text:
if not isinstance(row, list):
if len(row) == 0:
row = [" "]
else:
row = row.split(" ")
for word in row:
word = word.replace('"', '')
if word in counts:
counts[word] += 1
else:
counts[word] = 1
self.stats.append([self.num, topic, len(counts)])
self.TFMean = sum(counts.values()) / len(counts.values())
return counts
def NormalTFBuilder(self, text, topic=False):
counts = self.TFBuilder(text, topic=False)
mx = max(counts.values())
mn = min(counts.values())
newcounts = {}
for key in counts.keys():
newcounts[key] = (counts[key] - mn) / (mx - mn)
return newcounts
def __SpellingSuggestion(self, count, suggest):
suggestion = dict()
for key, val in count.items():
suje = dict()
if suggest.get(key):
for sugval in suggest.get(key):
if count.get(key):
suje[sugval] = count.get(sugval)
else:
suje[sugval] = count.get(key) + 1
insert = dict()
for elem in suje.items():
if elem[1] is not None: insert[elem[0]] = elem[1]
insert = OrderedDict(sorted(insert.items(), key=itemgetter(1), reverse=True))
suggestion[key] = insert
replace = dict()
for key, val in suggestion.items():
if key != list(val)[0]:
pattern = key
change = list(val)[0]
replace[pattern] = str(change)
return replace
def __suggestion(self, count, suggest):
suggestion = dict()
for key, val in count.items():
suje = dict()
if suggest.get(key):
for sugval in suggest.get(key):
if count.get(key):
suje[sugval] = count.get(sugval)
else:
suje[sugval] = count.get(key) + 1
insert = dict()
for elem in suje.items():
if elem[1] is not None: insert[elem[0]] = elem[1]
insert = OrderedDict(sorted(insert.items(), key=itemgetter(1), reverse=True))
suggestion[key] = insert
replace = dict()
for key, val in suggestion.items():
if key != list(val)[0]:
pattern = key
change = list(val)[0]
replace[pattern] = str(change)
return replace
def correctSpelling(self, area, newarea=False):
if (newarea == False): newarea = area
if (area != newarea): self.Data.datafrm[newarea] = pd.Series()
counts = self.NormalTFBuilder(self.Data.datafrm[area])
corrects = self.Zemberek.TurkishSpelling(counts)
suggestion = self.__SpellingSuggestion(counts, corrects)
self.__changeColumn(area, newarea, suggestion)
def jaccard(self, text1, text2):
set1 = set(text1)
set2 = set(text2)
similarity = len(set1.intersection(set2)) / len(set1.union(set2))
return similarity
def __SpellingSuggestion(self, count, suggest):
suggestion = dict()
for kelime, oneriler in suggest.items():
skor = {}
if len(suggest.get(kelime)) > 1:
gf = 0.1
for oneri in oneriler:
if count.get(oneri):
gf = count.get(oneri)
if gf < 0.1: gf = 0.1
oneri = oneri.lower()
oneri = self.__cleanword(oneri)
jaccard = self.jaccard(kelime, oneri)
sk = (gf * jaccard) ** 0.5
skor[oneri] = sk
skor = OrderedDict(sorted(skor.items(), key=itemgetter(1), reverse=True))
suggestion[kelime] = skor
replace = dict()
for key, val in suggestion.items():
if key != list(val)[0]:
pattern = key
change = list(val)[0]
replace[pattern] = str(change)
        print("Suggestions", suggestion)
        print("Corrections", replace)
return replace
def __changeColumn(self, area, newarea, suggestion):
if (newarea == False): newarea = area
if area != newarea: self.Data.datafrm[newarea] = pd.Series()
self.Data.datafrm[newarea] = self.Data.datafrm[area].apply(lambda x: self.__changeWords(x, suggestion))
def __changeWords(self, row, suggestion):
new = list()
newstring = ""
c = 0
if len(row) > 0:
for kelime in row:
if kelime in suggestion.keys():
new.append(suggestion.get(kelime))
c += 1
elif len(kelime) > 1:
new.append(kelime.replace("[$&+,:;=?@#|'<>.-^*()%!]", ''))
if len(new) > 0:
return new
else:
return list(' ')
else:
return list(' ')
def Counter(self, area):
print("Starting to count: '" + str(area))
counts = self.TFBuilder(self.Data.datafrm[area])
print(str(len(counts)) + " words")
return counts
def MorphologyReplace(self, area, newarea=False, type="stem", length="max"):
if (newarea == False): newarea = area
if (area != newarea): self.Data.datafrm[newarea] = pd.Series()
counts = self.TFBuilder(self.Data.datafrm[area])
corrects = self.Zemberek.Morphology(counts, type, length)
self.__changeColumn(area, newarea, corrects)
def tagNER(self, area, newarea=False, tagList=[]):
if (newarea == False): newarea = area
if (area != newarea): self.Data.datafrm[newarea] = pd.Series()
self.Data.datafrm[newarea] = self.Data.datafrm[area].apply(lambda x: self.__tagnerworker(x, tagList))
def __tagnerworker(self, x, tagList=[]):
tags = self.Zemberek.TurkishNER(x)
newtext = []
for tag in tags:
if (tag[0] in tagList) or len(tagList) == 0:
newtext.append("_".join(tag[1]))
else:
newtext += tag[1]
return newtext
def tagTerms(self, area, newarea=False, tagList={}):
if (newarea == False): newarea = area
if (area != newarea): self.Data.datafrm[newarea] = pd.Series()
tags = {}
maxsize = 0
self.Data.datafrm[newarea] = self.Data.datafrm[area].apply(lambda x: self.__tagtermsworker(x, tags))
def __tagtermsworker(self, x, tags={}):
if isinstance(x, str):
tokens = self.Zemberek.TurkishTextToken(x)
else:
tokens = x
match = False
for i in range(0, len(tokens)):
for k in range(max(tags.keys()) + 1, 1, -1):
if (i - k) >= 0:
sorgu = tokens[i - k:i]
if sorgu in tags[k]:
match = True
change = tags[sorgu]
tokens[i - k] = change
for j in range(i - k + 1, i):
tokens[j] = False
if False in tokens:
tokens.remove(False)
return tokens
def tagTermsto(self, area, newarea=False, tagList=pd.DataFrame(), maxlength=0):
if (newarea == False): newarea = area
if (area != newarea): self.Data.datafrm[newarea] = pd.Series()
self.Data.datafrm[newarea] = self.Data.datafrm[area].apply(
lambda x: self.__tagtermstoworker(x, tagList, maxlength))
def __tagtermstoworker(self, x, tagList, maxLength):
if isinstance(x, str):
tokens = self.Zemberek.TurkishTextToken(x)
else:
tokens = x
match = False
for i in range(0, len(tokens)):
for k in range(maxLength + 1, 1, -1):
if (i - k) >= 0:
sorgu = tokens[i - k:i]
df = ""
try:
df = tagList[tagList["find"].map(tuple) == tuple(sorgu)].iloc()[0]["replace"]
except:
pass
if df != "":
match = True
tokens[i - k] = df
for j in range(i - k + 1, i):
tokens[j] = False
"""if sorgu in tags[k]:
index=tags.values ().index (sorgu)
match=True
tokens[i-k]=replace[index]
for j in range(i-k+1,i):
tokens[j]=False"""
while False in tokens:
tokens.remove(False)
if match == True:
print(x, tokens)
return tokens
def cleanNER(self, area, newarea=False, tagList=[]):
if (newarea == False): newarea = area
if (area != newarea): self.Data.datafrm[newarea] = pd.Series()
self.Data.datafrm[newarea] = self.Data.datafrm[area].apply(lambda x: self.__cleannerworker(x, tagList))
def __cleannerworker(self, x, tagList=[]):
tags = self.Zemberek.TurkishNER(x)
newtext = []
for tag in tags:
if (tag[0] not in tagList) or len(tagList) == 0:
for t in tag[1]:
newtext.append(t)
return newtext
def EsanlamDictReplace(self, area, newarea=False, dict=False):
if (newarea == False): newarea = area
if (area != newarea): self.Data.datafrm[newarea] = pd.Series()
counts = self.TFBuilder(self.Data.datafrm[area])
corrects = self.Esanlam.fromDict(counts, dict)
suggestion = self.__suggestion(counts, corrects)
self.__changeColumn(area, newarea, suggestion)
def EsanlamDEUReplace(self, area, newarea=False, dict=False):
if (newarea == False): newarea = area
if (area != newarea): self.Data.datafrm[newarea] = pd.Series()
counts = self.TFBuilder(self.Data.datafrm[area])
corrects = self.Esanlam.fromDEU(counts)
suggestion = self.__suggestion(counts, corrects)
self.__changeColumn(area, newarea, suggestion)
def cleanStops(self, area, newarea=False, method="file", file=False):
if (newarea == False): newarea = area
if (area != newarea): self.Data.datafrm[newarea] = pd.Series()
self.Data.datafrm[newarea] = self.Data.datafrm[area].apply(lambda x: self.Stops.cleanStops(x, method, file))
def cleanWithQuery(self, area, newarea=False, query=False, fields=False):
if (newarea == False): newarea = area
if (area != newarea): self.Data.datafrm[newarea] = pd.Series()
if query != False:
stops = self.Stops.loadQuery(query, fields)
else:
stops = self.Stops.loadQuery()
self.Data.datafrm[newarea] = self.Data.datafrm[area].apply(lambda x: self.Stops.CleanWithList(x, stops))
def cleanWithList(self, area, newarea=False, stops=[]):
if (newarea == False): newarea = area
if (area != newarea): self.Data.datafrm[newarea] = pd.Series()
self.Data.datafrm[newarea] = self.Data.datafrm[area].apply(lambda x: self.Stops.CleanWithList(x, stops))
def cleanShorter(self, area, newarea=False, num=3):
if (newarea == False): newarea = area
if (area != newarea): self.Data.datafrm[newarea] = | pd.Series() | pandas.Series |
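# Standalone sketch of the suggestion scoring used by __SpellingSuggestion above:
# candidates are ranked by sqrt(term_frequency * jaccard), where the Jaccard
# similarity is taken over the character sets of the two words. The words and
# frequencies below are invented.
def jaccard(text1, text2):
    set1, set2 = set(text1), set(text2)
    return len(set1 & set2) / len(set1 | set2)

def score(misspelling, candidate, term_freq):
    gf = max(term_freq.get(candidate, 0.1), 0.1)  # frequency floor, as in the class
    return (gf * jaccard(misspelling, candidate)) ** 0.5

freqs = {"arduino": 0.9, "android": 0.3}
candidates = ["arduino", "android"]
best = max(candidates, key=lambda c: score("ardunio", c, freqs))
print(best)  # arduino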
import os
import geopandas as gpd
import pandas as pd
import pytest
import trackintel as ti
from geopandas.testing import assert_geodataframe_equal
from pandas.testing import assert_frame_equal, assert_index_equal
from shapely.geometry import Point, Polygon, MultiPoint
from trackintel.io.from_geopandas import (
_trackintel_model,
read_locations_gpd,
read_positionfixes_gpd,
read_staypoints_gpd,
read_tours_gpd,
read_triplegs_gpd,
read_trips_gpd,
)
@pytest.fixture()
def example_positionfixes():
"""Model conform positionfixes to test with."""
p1 = Point(8.5067847, 47.4)
p2 = Point(8.5067847, 47.5)
p3 = Point(8.5067847, 47.6)
t1 = | pd.Timestamp("1971-01-01 04:00:00", tz="utc") | pandas.Timestamp |
import csv
import luigi
import numpy as np
import pandas as pd
from ..models import FitModel, PredictModel
class FitHistoricalMedian(FitModel):
model_name = 'historical_median'
def run(self):
data = self.requires()['data'].read()
data = data[data['Date'] <= self.deploy_date]
pages, medians = [], []
grouped_data = {page: page_data for page, page_data in data.groupby('Page')}
for page, page_data in grouped_data.items():
pages.append(page)
page_data = page_data.sort_values('Date')
try:
                first_nonzero = page_data['Views'].to_numpy().nonzero()[0][0]
median = page_data.iloc[first_nonzero:, :]['Views'].median()
except IndexError:
median = 0.0
medians.append(median)
df = pd.DataFrame({'Page': pages, 'Median': medians}, columns=['Page', 'Median'])
df.to_csv(self.output().path, index=False, quoting=csv.QUOTE_NONNUMERIC)
class PredictHistoricalMedian(PredictModel):
model_name = 'historical_median'
def requires(self):
req = super().requires()
req['model'] = FitHistoricalMedian(
stage=self.stage,
imputation=self.imputation,
sample_ratio=self.sample_ratio,
deploy_date=self.deploy_date,
from_date=self.from_date,
to_date=self.to_date,
random_seed=self.random_seed)
return req
def run(self):
data = self.requires()['data'].read()
pages = data['Page'].unique()
dates = | pd.date_range(start=self.from_date, end=self.to_date) | pandas.date_range |
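# Standalone pandas sketch of the "historical median" baseline implemented by
# FitHistoricalMedian above: per page, skip the leading run of zero views and take
# the median of what remains (0.0 if a page never has views). The data is invented.
import pandas as pd

views = pd.DataFrame({
    "Page":  ["a", "a", "a", "a", "b", "b"],
    "Views": [0, 0, 3, 5, 2, 4],
})

def historical_median(s: pd.Series) -> float:
    nonzero = s.to_numpy().nonzero()[0]
    return float(s.iloc[nonzero[0]:].median()) if len(nonzero) else 0.0

print(views.groupby("Page")["Views"].apply(historical_median))
# Page
# a    4.0
# b    3.0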
#!/usr/bin/env python3
"""
annotated_shortread_ranges.py
Python 3 code for mapping aligned pacbio reads to annotated transcripts
SAMPLE RUN:
time python ../intronomer-paper/benchmarking_data/annotate_shortread_ranges.py
-l HX1_final/processed_tx_df_HX1_02-21-2022_21.03.36.csv
-s HX1_final/granges-lrmap_sr-5-methods_SRR2911306-hx1.csv
-f ../immunotherapy/files/GRCh38.primary_assembly.genome.fa
-L HX1_final/reads_per_gene_and_transcript_HX1.tsv
-C HX1_final/mpile-sstat_gencode-v35_SRR2911306-hx1.csv
"""
import argparse
from datetime import datetime
import os
import pandas as pd
import subprocess as sp
_ACCEPTABLE_CHROMS = {
'chr1', 'chr2', 'chr3', 'chr4', 'chr5', 'chr6', 'chr7', 'chr8', 'chr9',
'chr10', 'chr11', 'chr12', 'chr13', 'chr14', 'chr15', 'chr16', 'chr17',
'chr18', 'chr19', 'chr20', 'chr21', 'chr22', 'chrM', 'chrX', 'chrY'
}
def jx_to_motif(jx, reference_fasta, samtools_path, hyphen=False):
"""Given an input junction and reference genome, transcribes RNA sequence.
Input:
    jx: a junction in 'chrom:left-right' format (as parsed below), with 0-based
        fully closed coordinates (string)
reference_fasta: reference genome fasta file, previously sorted and
indexed by samtools (path to fasta file, string)
samtools_path: to be called from subprocess to collect the sequence (path
to samtools executable, string)
Returns a left-to-right nucleotide sequence on either side of the aberrant
junction sufficiently long to generate the desired protein sequence.
"""
chrom = jx.split(':')[0]
left = jx.split(':')[1].split('-')[0]
right = jx.split('-')[1]
if chrom not in _ACCEPTABLE_CHROMS:
return ''
left_start = left
left_stop = str(int(left) + 1)
left_range = chrom + ':' + left_start + '-' + left_stop
left_output = sp.check_output(
['{}'.format(samtools_path), 'faidx', '{}'.format(reference_fasta),
'{}'.format(left_range)]
)
left_seq = ''.join(left_output.decode("utf-8").splitlines()[1:])
right_start = str(int(right) - 1)
right_stop = right
right_range = chrom + ':' + right_start + '-' + right_stop
right_output = sp.check_output(
['{}'.format(samtools_path), 'faidx', '{}'.format(reference_fasta),
'{}'.format(right_range)]
)
right_seq = ''.join(right_output.decode("utf-8").splitlines()[1:])
sequence = left_seq + right_seq
if hyphen:
sequence = sequence[:2] + '-' + sequence[-2:]
return sequence
def annotate_shortread_lrmap(tx_df, ranges_file, now, out_dir, ref_fasta,
samtools, l_counts, s_counts):
annotated_outfile = os.path.join(
        out_dir, 'LR_annotated_{}'.format(os.path.basename(ranges_file))
)
sr_ct = 'short_read_gene_median_coverage'
lr_gene_ct = 'long_reads_per_gene'
lr_tx_ct = 'long_reads_per_transcript'
count_df = pd.read_table(l_counts, sep='\t')
count_df.rename(
{'reads_per_transcript': lr_tx_ct, 'reads_per_gene': lr_gene_ct},
axis=1, inplace=True
)
sr_count_df = pd.read_table(
s_counts, usecols=['gene.id', 'q50'], sep=',',
)
    sr_count_df['q50'] = sr_count_df['q50'].fillna(0)
sr_count_df.rename(
{'gene.id': 'gene_id', 'q50': sr_ct}, axis=1, inplace=True
)
count_df = pd.merge(count_df, sr_count_df, how='outer', on='gene_id')
target_genes = set(count_df.loc[
(count_df[sr_ct] >= 2) & (count_df[lr_gene_ct] >= 5)
& (count_df[lr_tx_ct] >= 5)
]['gene_id'].tolist())
count_df = count_df.loc[(count_df[sr_ct] > 0) | (count_df[lr_gene_ct] > 0)]
canonical_motifs = {
'GTAG', 'GCAG', 'ATAC', 'CTAC', 'CTGC', 'GTAT'
}
result_df = pd.read_table(ranges_file, sep=',')
result_df['intron'] = result_df.apply(
lambda x: '{}:{}-{}'.format(x['seqnames'], x['start'], x['end']),
axis=1
)
result_df = pd.merge(result_df, tx_df, how='outer', on='intron')
result_df.rename(
{'position': 'intron_position_in_tx'}, axis=1, inplace=True
)
perst = 'persistence'
result_df['max_intron_persistence'] = result_df['intron'].apply(
lambda x: result_df.loc[result_df['intron'] == x][perst].max()
)
result_df['motif'] = result_df['intron'].apply(
lambda x: jx_to_motif(x, ref_fasta, samtools)
)
result_df['canonical_motif'] = result_df['motif'].apply(
lambda x: int(x in canonical_motifs)
)
result_df.dropna(subset=['transcript'], axis=0, inplace=True)
result_df = | pd.merge(result_df, count_df, how='left', on='transcript') | pandas.merge |
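# Standalone sketch of the splice-junction handling above: jx_to_motif parses a
# 'chrom:left-right' junction string, pulls a 2 nt window from each end of the
# junction out of the reference (via samtools faidx), and the concatenated 4-mer is
# compared against the canonical splice-site set. The samtools call is skipped here
# and the junction string is invented.
CANONICAL_MOTIFS = {"GTAG", "GCAG", "ATAC", "CTAC", "CTGC", "GTAT"}

def parse_junction(jx):
    chrom, rest = jx.split(":")
    left, right = rest.split("-")
    return chrom, int(left), int(right)

def is_canonical(motif):
    return int(motif in CANONICAL_MOTIFS)

print(parse_junction("chr1:15038-15795"))          # ('chr1', 15038, 15795)
print(is_canonical("GTAG"), is_canonical("GTGG"))   # 1 0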
# %% imports
from datetime import datetime
import numpy as np
import pandas as pd
import config as cfg
from src.utils.data_processing import download_file, medea_path, download_energy_balance, process_energy_balance
idx = pd.IndexSlice
eta_hydro_storage = 0.9
# ======================================================================================================================
# %% download and process opsd time series
url_opsd = 'https://data.open-power-system-data.org/time_series/latest/time_series_60min_singleindex.csv'
opsd_file = medea_path('data', 'raw', 'opsd_time_series_60min.csv')
download_file(url_opsd, opsd_file)
ts_opsd = | pd.read_csv(opsd_file) | pandas.read_csv |
### EPIC annotation with Reg feature
import pandas as pd
from numpy import genfromtxt
from itertools import chain
import sys
from collections import Counter
import functools
#The regulatory build (https://europepmc.org/articles/PMC4407537 http://grch37.ensembl.org/info/genome/funcgen/regulatory_build.html) was downloaded using biomart
Feature_bed = 'data/human_regulatory_features_GRCh37p13.txt'
background = 'data/passage_background.csv'
Feature_bed = | pd.read_csv(Feature_bed, header=None, names=['chr','start','end','Feature'],skiprows=1) | pandas.read_csv |
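# Minimal sketch of one way to annotate probe positions against the regulatory-build
# intervals loaded above (a per-chromosome merge plus an interval containment check).
# The coordinates are invented and the real pipeline may use a different overlap strategy.
import pandas as pd

features = pd.DataFrame({
    "chr": ["1", "1"],
    "start": [100, 500],
    "end": [200, 800],
    "Feature": ["Promoter", "Enhancer"],
})
probes = pd.DataFrame({"probe": ["cg01", "cg02"], "chr": ["1", "1"], "pos": [150, 900]})

merged = probes.merge(features, on="chr")
hits = merged[(merged["pos"] >= merged["start"]) & (merged["pos"] <= merged["end"])]
print(hits[["probe", "Feature"]])  # cg01 -> Promoter; cg02 falls in no feature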
from typing import Dict
import SimpleITK
import tqdm
import json
from pathlib import Path
import tifffile
import numpy as np
import pytorch_lightning as pl
from pl_bolts.models.autoencoders.components import (resnet18_decoder,
resnet18_encoder,
)
from pl_bolts.models.autoencoders import VAE
import torch
import pandas as pd
import numpy as np
from PIL import Image
import cv2
from pretrainedmodels import se_resnext50_32x4d
from efficientnet_pytorch import EfficientNet
import torchvision.models as models
import os
import shutil
import torch.nn as nn
import torch.nn.functional as F  # needed for the F.mse_loss calls below
import wandb  # needed for the logging in validation_epoch_end
from torchvision import transforms
from evalutils import ClassificationAlgorithm
from evalutils.validators import (
UniquePathIndicesValidator,
UniqueImagesValidator,
)
from evalutils.io import ImageLoader
GPU = torch.cuda.is_available()
if GPU:
device = "cuda"
else:
device = "cpu"
class VAE2(pl.LightningModule):
"""
Standard VAE with Gaussian Prior and approx posterior.
Model is available pretrained on different datasets:
"""
def __init__(
self,
input_height: int,
enc_type: str = 'resnet18',
first_conv: bool = False,
maxpool1: bool = False,
enc_out_dim: int = 512,
kl_coeff: float = 0.1,
latent_dim: int = 256,
lr: float = 1e-5,
**kwargs
):
"""
Args:
input_height: height of the images
enc_type: option between resnet18 or resnet50
first_conv: use standard kernel_size 7, stride 2 at start or
replace it with kernel_size 3, stride 1 conv
maxpool1: use standard maxpool to reduce spatial dim of feat by a factor of 2
enc_out_dim: set according to the out_channel count of
encoder used (512 for resnet18, 2048 for resnet50)
kl_coeff: coefficient for kl term of the loss
latent_dim: dim of latent space
lr: learning rate for Adam
"""
super(VAE2, self).__init__()
self.save_hyperparameters()
self.lr = lr
self.kl_coeff = kl_coeff
self.enc_out_dim = enc_out_dim
self.latent_dim = latent_dim
self.input_height = input_height
valid_encoders = {
'resnet18': {
'enc': resnet18_encoder,
'dec': resnet18_decoder,
},
}
self.encoder = valid_encoders[enc_type]['enc'](first_conv, maxpool1)
self.decoder = valid_encoders[enc_type]['dec'](self.latent_dim, self.input_height, first_conv, maxpool1)
self.fc_mu = nn.Linear(self.enc_out_dim, self.latent_dim)
self.fc_var = nn.Linear(self.enc_out_dim, self.latent_dim)
self.train_loss = 0
self.val_loss = 0
self.epoch = 0
self.images = []
self.input_images = []
self.val_step = 0
self.train_losses = []
def forward(self, x):
x = self.encoder(x)
mu = self.fc_mu(x)
log_var = self.fc_var(x)
p, q, z = self.sample(mu, log_var)
return self.decoder(z)
def _run_step(self, x):
x = self.encoder(x)
mu = self.fc_mu(x)
log_var = self.fc_var(x)
p, q, z = self.sample(mu, log_var)
return z, self.decoder(z), p, q
def sample(self, mu, log_var):
std = torch.exp(log_var / 2)
p = torch.distributions.Normal(torch.zeros_like(mu), torch.ones_like(std))
q = torch.distributions.Normal(mu, std)
z = q.rsample()
return p, q, z
    def step(self, batch, batch_idx):
x = batch
z, x_hat, p, q = self._run_step(x)
recon_loss = F.mse_loss(x_hat, x, reduction='mean')
log_qz = q.log_prob(z)
log_pz = p.log_prob(z)
kl = log_qz - log_pz
kl = kl.mean()
kl *= self.kl_coeff
loss = kl + recon_loss
logs = {
"recon_loss": recon_loss,
"kl": kl,
"loss": loss,
}
return loss, logs,
def training_step(self, batch, batch_idx):
x = batch
z, x_hat, p, q = self._run_step(x)
recon_loss = F.mse_loss(x_hat, x, reduction='mean')
log_qz = q.log_prob(z)
log_pz = p.log_prob(z)
kl = log_qz - log_pz
kl = kl.mean()
kl *= self.kl_coeff
loss = kl + recon_loss
#self.log_dict({f"train_{k}": v for k, v in logs.items()}, on_step=True, on_epoch=False)
self.train_losses.append(recon_loss.cpu().detach().numpy())
self.train_loss = recon_loss.cpu().detach().numpy()
return loss
def validation_step(self, batch, batch_idx):
x = batch
z, x_hat, p, q = self._run_step(x)
recon_loss = F.mse_loss(x_hat, x, reduction='mean')
log_qz = q.log_prob(z)
log_pz = p.log_prob(z)
kl = log_qz - log_pz
kl = kl.mean()
kl *= self.kl_coeff
loss = kl + recon_loss
#self.log_dict({f"val_{k}": v for k, v in logs.items()})
self.val_loss += recon_loss.cpu().detach().numpy()
imgs = x_hat.cpu().detach().numpy()
self.images.append([Image.fromarray((image*255).astype(np.uint8).transpose(1,2,0)) for image in imgs])
if self.epoch == 0 or self.epoch==1:
self.input_images.append([Image.fromarray((image*255).astype(np.uint8).transpose(1,2,0)) for image in x.cpu().detach().numpy()])
self.val_step += 1
return loss
def configure_optimizers(self):
return torch.optim.Adam(self.parameters(), lr=self.lr)
def validation_epoch_end(self, outputs):
#print(self.epoch)
self.val_loss = self.val_loss / self.val_step
if len(self.train_losses) > 626:
self.train_loss = np.mean(self.train_losses[:-625])
if self.epoch == 0 or self.epoch == 1:
self.images = [item for sublist in self.images for item in sublist]
self.input_images = [item for sublist in self.input_images for item in sublist]
logs = {
"train_loss": self.train_loss,
"val_loss": self.val_loss,
"epoch": self.epoch,
"recons": [wandb.Image(image) for image in self.images[:100]],
"originals": [wandb.Image(image) for image in self.input_images[:100]],
}
self.images = []
self.input_images = []
else:
self.images = [item for sublist in self.images for item in sublist]
logs = {
"train_loss": self.train_loss,
"val_loss": self.val_loss,
"epoch": self.epoch,
"recons": [wandb.Image(image) for image in self.images[:100]],
}
self.images = []
save_str = "VAE_resnet18" + "_epoch+15_" + str(self.epoch) + ".pth"
torch.save(self.state_dict(), save_str)
shutil.copy(save_str, os.path.join(wandb.run.dir, save_str))
wandb.save(os.path.join(wandb.run.dir, save_str))
wandb.log(logs)
self.epoch += 1
self.val_step = 0
class Autoencoder(nn.Module):
def __init__(self):
super(Autoencoder, self).__init__()
self.encoder = nn.Sequential( # like the Composition layer you built
nn.Conv2d(3, 8, 3, stride=2, padding=1), #128x128x8
nn.ReLU(),
nn.Conv2d(8, 16, 3, stride=2, padding=1), #64x64x16
nn.ReLU(),
nn.Conv2d(16, 32, 3, stride=2, padding=1),#32x32x32
nn.ReLU(),
nn.Conv2d(32, 64, 3, stride=2, padding=1), #16x16x64
nn.ReLU(),
nn.Conv2d(64, 128, 3, stride=2, padding=1), #8x8x128
nn.ReLU(),
nn.Conv2d(128, 256, 3, stride=2, padding=1), #4x4x256
nn.ReLU(),
nn.Conv2d(256, 512, 3, stride=2, padding=1), #4x4x512
)
self.decoder = nn.Sequential(
nn.ConvTranspose2d(512, 256, 3, stride=2, padding=1, output_padding=1),
nn.ReLU(),
nn.ConvTranspose2d(256, 128, 3, stride=2, padding=1, output_padding=1),
nn.ReLU(),
nn.ConvTranspose2d(128, 64, 3, stride=2, padding=1, output_padding=1),
nn.ReLU(),
nn.ConvTranspose2d(64, 32, 3, stride=2, padding=1, output_padding=1),
nn.ReLU(),
nn.ConvTranspose2d(32, 16, 3, stride=2, padding=1, output_padding=1),
nn.ReLU(),
nn.ConvTranspose2d(16, 8, 3, stride=2, padding=1, output_padding=1),
nn.ReLU(),
nn.ConvTranspose2d(8, 3, 3, stride=2, padding=1, output_padding=1),
nn.Sigmoid()
)
def forward(self, x):
x = self.encoder(x)
x = self.decoder(x)
return x
def getOD(model, image):
results = model(image, size=288).pandas().xywhn[0]
cols = ["xcenter", "ycenter", "width", "height", "confidence", "class", "name"]
df = | pd.DataFrame([[0.5, 0.5, 1, 1, 0.001, 0, "OD"]], columns=cols) | pandas.DataFrame |
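# Standalone sketch of the reparameterisation step in VAE2.sample above: z is drawn
# with Normal(mu, std).rsample(), which keeps the draw differentiable with respect to
# mu and log_var, and the Monte-Carlo KL term is log q(z) - log p(z) as in training_step.
# The shapes are arbitrary; requires torch.
import torch

mu = torch.zeros(4, 256, requires_grad=True)
log_var = torch.zeros(4, 256, requires_grad=True)
std = torch.exp(log_var / 2)
p = torch.distributions.Normal(torch.zeros_like(mu), torch.ones_like(std))
q = torch.distributions.Normal(mu, std)
z = q.rsample()                                  # differentiable sample
kl = (q.log_prob(z) - p.log_prob(z)).mean()      # Monte-Carlo KL estimate
kl.backward()
print(z.shape, mu.grad is not None)              # torch.Size([4, 256]) True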
import datetime
import inspect
import numpy.testing as npt
import os.path
import pandas as pd
import sys
from tabulate import tabulate
import unittest
# #find parent directory and import model
# parentddir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
# sys.path.append(parentddir)
from ..earthworm_exe import Earthworm
test = {}
class TestEarthworm(unittest.TestCase):
"""
Unit tests for earthworm model.
"""
print("earthworm unittests conducted at " + str(datetime.datetime.today()))
def setUp(self):
"""
setup the test as needed
e.g. pandas to open stir qaqc csv
Read qaqc csv and create pandas DataFrames for inputs and expected outputs
:return:
"""
pass
def tearDown(self):
"""
teardown called after each test
e.g. maybe write test results to some text file
:return:
"""
pass
def create_earthworm_object(self):
# create empty pandas dataframes to create empty object for testing
df_empty = pd.DataFrame()
# create an empty earthworm object
earthworm_empty = Earthworm(df_empty, df_empty)
return earthworm_empty
def test_earthworm_fugacity_unit(self):
"""
Test the only real earthworm method.
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
earthworm_empty = self.create_earthworm_object()
try:
expected_results = pd.Series([0.73699363, 1.908571, 5.194805], dtype='float')
earthworm_empty.k_ow = pd.Series([10.0, 100.0, 1000.0], dtype='float')
earthworm_empty.l_f_e = | pd.Series([0.01, 0.02, 0.03], dtype='float') | pandas.Series |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# This file contains dummy data for the model unit tests
import numpy as np
import pandas as pd
AIR_FCST_LINEAR_95 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("1961-01-01 00:00:00"),
1: pd.Timestamp("1961-02-01 00:00:00"),
2: pd.Timestamp("1961-03-01 00:00:00"),
3: pd.Timestamp("1961-04-01 00:00:00"),
4: pd.Timestamp("1961-05-01 00:00:00"),
5: pd.Timestamp("1961-06-01 00:00:00"),
6: pd.Timestamp("1961-07-01 00:00:00"),
7: pd.Timestamp("1961-08-01 00:00:00"),
8: pd.Timestamp("1961-09-01 00:00:00"),
9: pd.Timestamp("1961-10-01 00:00:00"),
10: pd.Timestamp("1961-11-01 00:00:00"),
11: pd.Timestamp("1961-12-01 00:00:00"),
12: pd.Timestamp("1962-01-01 00:00:00"),
13: pd.Timestamp("1962-02-01 00:00:00"),
14: pd.Timestamp("1962-03-01 00:00:00"),
15: pd.Timestamp("1962-04-01 00:00:00"),
16: pd.Timestamp("1962-05-01 00:00:00"),
17: pd.Timestamp("1962-06-01 00:00:00"),
18: pd.Timestamp("1962-07-01 00:00:00"),
19: pd.Timestamp("1962-08-01 00:00:00"),
20: pd.Timestamp("1962-09-01 00:00:00"),
21: pd.Timestamp("1962-10-01 00:00:00"),
22: pd.Timestamp("1962-11-01 00:00:00"),
23: pd.Timestamp("1962-12-01 00:00:00"),
24: pd.Timestamp("1963-01-01 00:00:00"),
25: pd.Timestamp("1963-02-01 00:00:00"),
26: pd.Timestamp("1963-03-01 00:00:00"),
27: pd.Timestamp("1963-04-01 00:00:00"),
28: pd.Timestamp("1963-05-01 00:00:00"),
29: pd.Timestamp("1963-06-01 00:00:00"),
},
"fcst": {
0: 472.9444444444443,
1: 475.60162835249025,
2: 478.2588122605362,
3: 480.9159961685822,
4: 483.57318007662815,
5: 486.23036398467417,
6: 488.88754789272014,
7: 491.5447318007661,
8: 494.20191570881207,
9: 496.85909961685803,
10: 499.516283524904,
11: 502.17346743295,
12: 504.830651340996,
13: 507.48783524904195,
14: 510.1450191570879,
15: 512.8022030651339,
16: 515.4593869731799,
17: 518.1165708812258,
18: 520.7737547892718,
19: 523.4309386973177,
20: 526.0881226053638,
21: 528.7453065134097,
22: 531.4024904214557,
23: 534.0596743295017,
24: 536.7168582375476,
25: 539.3740421455936,
26: 542.0312260536396,
27: 544.6884099616856,
28: 547.3455938697316,
29: 550.0027777777775,
},
"fcst_lower": {
0: 380.6292037661305,
1: 383.26004701147235,
2: 385.8905370924373,
3: 388.52067431512216,
4: 391.1504589893095,
5: 393.7798914284503,
6: 396.4089719496461,
7: 399.0377008736321,
8: 401.66607852475926,
9: 404.2941052309762,
10: 406.9217813238114,
11: 409.54910713835505,
12: 412.1760830132403,
13: 414.80270929062544,
14: 417.42898631617453,
15: 420.0549144390392,
16: 422.68049401183924,
17: 425.3057253906438,
18: 427.93060893495215,
19: 430.555145007674,
20: 433.1793339751107,
21: 435.8031762069345,
22: 438.42667207616984,
23: 441.0498219591729,
24: 443.6726262356114,
25: 446.2950852884452,
26: 448.91719950390507,
27: 451.53896927147304,
28: 454.1603949838614,
29: 456.78147703699216,
},
"fcst_upper": {
0: 565.2596851227581,
1: 567.9432096935082,
2: 570.6270874286351,
3: 573.3113180220422,
4: 575.9959011639468,
5: 578.680836540898,
6: 581.3661238357942,
7: 584.0517627279,
8: 586.7377528928648,
9: 589.4240940027398,
10: 592.1107857259966,
11: 594.797827727545,
12: 597.4852196687516,
13: 600.1729612074585,
14: 602.8610519980012,
15: 605.5494916912286,
16: 608.2382799345206,
17: 610.9274163718079,
18: 613.6169006435915,
19: 616.3067323869615,
20: 618.9969112356168,
21: 621.6874368198849,
22: 624.3783087667415,
23: 627.0695266998305,
24: 629.7610902394838,
25: 632.4529990027421,
26: 635.145252603374,
27: 637.8378506518982,
28: 640.5307927556019,
29: 643.2240785185628,
},
}
)
AIR_FCST_LINEAR_99 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("1961-01-01 00:00:00"),
1: pd.Timestamp("1961-02-01 00:00:00"),
2: pd.Timestamp("1961-03-01 00:00:00"),
3: pd.Timestamp("1961-04-01 00:00:00"),
4: pd.Timestamp("1961-05-01 00:00:00"),
5: pd.Timestamp("1961-06-01 00:00:00"),
6: pd.Timestamp("1961-07-01 00:00:00"),
7: pd.Timestamp("1961-08-01 00:00:00"),
8: pd.Timestamp("1961-09-01 00:00:00"),
9: pd.Timestamp("1961-10-01 00:00:00"),
10: pd.Timestamp("1961-11-01 00:00:00"),
11: pd.Timestamp("1961-12-01 00:00:00"),
12: pd.Timestamp("1962-01-01 00:00:00"),
13: pd.Timestamp("1962-02-01 00:00:00"),
14: pd.Timestamp("1962-03-01 00:00:00"),
15: pd.Timestamp("1962-04-01 00:00:00"),
16: pd.Timestamp("1962-05-01 00:00:00"),
17: pd.Timestamp("1962-06-01 00:00:00"),
18: pd.Timestamp("1962-07-01 00:00:00"),
19: pd.Timestamp("1962-08-01 00:00:00"),
20: pd.Timestamp("1962-09-01 00:00:00"),
21: pd.Timestamp("1962-10-01 00:00:00"),
22: pd.Timestamp("1962-11-01 00:00:00"),
23: pd.Timestamp("1962-12-01 00:00:00"),
24: pd.Timestamp("1963-01-01 00:00:00"),
25: pd.Timestamp("1963-02-01 00:00:00"),
26: pd.Timestamp("1963-03-01 00:00:00"),
27: pd.Timestamp("1963-04-01 00:00:00"),
28: pd.Timestamp("1963-05-01 00:00:00"),
29: pd.Timestamp("1963-06-01 00:00:00"),
},
"fcst": {
0: 472.9444444444443,
1: 475.60162835249025,
2: 478.2588122605362,
3: 480.9159961685822,
4: 483.57318007662815,
5: 486.23036398467417,
6: 488.88754789272014,
7: 491.5447318007661,
8: 494.20191570881207,
9: 496.85909961685803,
10: 499.516283524904,
11: 502.17346743295,
12: 504.830651340996,
13: 507.48783524904195,
14: 510.1450191570879,
15: 512.8022030651339,
16: 515.4593869731799,
17: 518.1165708812258,
18: 520.7737547892718,
19: 523.4309386973177,
20: 526.0881226053638,
21: 528.7453065134097,
22: 531.4024904214557,
23: 534.0596743295017,
24: 536.7168582375476,
25: 539.3740421455936,
26: 542.0312260536396,
27: 544.6884099616856,
28: 547.3455938697316,
29: 550.0027777777775,
},
"fcst_lower": {
0: 351.01805478037915,
1: 353.64044896268456,
2: 356.2623766991775,
3: 358.883838394139,
4: 361.50483445671773,
5: 364.12536530090745,
6: 366.74543134552374,
7: 369.3650330141812,
8: 371.98417073526997,
9: 374.6028449419319,
10: 377.2210560720369,
11: 379.83880456815905,
12: 382.45609087755207,
13: 385.07291545212513,
14: 387.68927874841813,
15: 390.3051812275768,
16: 392.92062335532785,
17: 395.5356056019535,
18: 398.15012844226646,
19: 400.764192355584,
20: 403.37779782570226,
21: 405.99094534087044,
22: 408.60363539376465,
23: 411.2158684814615,
24: 413.82764510541136,
25: 416.4389657714128,
26: 419.04983098958445,
27: 421.66024127433906,
28: 424.2701971443558,
29: 426.8796991225531,
},
"fcst_upper": {
0: 594.8708341085095,
1: 597.562807742296,
2: 600.255247821895,
3: 602.9481539430253,
4: 605.6415256965386,
5: 608.3353626684409,
6: 611.0296644399166,
7: 613.724430587351,
8: 616.4196606823541,
9: 619.1153542917842,
10: 621.8115109777711,
11: 624.508130297741,
12: 627.2052118044398,
13: 629.9027550459588,
14: 632.6007595657577,
15: 635.299224902691,
16: 637.998150591032,
17: 640.6975361604982,
18: 643.3973811362772,
19: 646.0976850390515,
20: 648.7984473850253,
21: 651.4996676859489,
22: 654.2013454491467,
23: 656.903480177542,
24: 659.6060713696838,
25: 662.3091185197744,
26: 665.0126211176946,
27: 667.716578649032,
28: 670.4209905951075,
29: 673.1258564330019,
},
}
)
PEYTON_FCST_LINEAR_95 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2013-05-01 00:00:00"),
1: pd.Timestamp("2013-05-02 00:00:00"),
2: pd.Timestamp("2013-05-03 00:00:00"),
3: pd.Timestamp("2013-05-04 00:00:00"),
4: pd.Timestamp("2013-05-05 00:00:00"),
5: pd.Timestamp("2013-05-06 00:00:00"),
6: pd.Timestamp("2013-05-07 00:00:00"),
7: pd.Timestamp("2013-05-08 00:00:00"),
8: pd.Timestamp("2013-05-09 00:00:00"),
9: pd.Timestamp("2013-05-10 00:00:00"),
10: pd.Timestamp("2013-05-11 00:00:00"),
11: pd.Timestamp("2013-05-12 00:00:00"),
12: pd.Timestamp("2013-05-13 00:00:00"),
13: pd.Timestamp("2013-05-14 00:00:00"),
14: pd.Timestamp("2013-05-15 00:00:00"),
15: pd.Timestamp("2013-05-16 00:00:00"),
16: pd.Timestamp("2013-05-17 00:00:00"),
17: pd.Timestamp("2013-05-18 00:00:00"),
18: pd.Timestamp("2013-05-19 00:00:00"),
19: pd.Timestamp("2013-05-20 00:00:00"),
20: pd.Timestamp("2013-05-21 00:00:00"),
21: pd.Timestamp("2013-05-22 00:00:00"),
22: pd.Timestamp("2013-05-23 00:00:00"),
23: pd.Timestamp("2013-05-24 00:00:00"),
24: pd.Timestamp("2013-05-25 00:00:00"),
25: pd.Timestamp("2013-05-26 00:00:00"),
26: pd.Timestamp("2013-05-27 00:00:00"),
27: pd.Timestamp("2013-05-28 00:00:00"),
28: pd.Timestamp("2013-05-29 00:00:00"),
29: pd.Timestamp("2013-05-30 00:00:00"),
},
"fcst": {
0: 8.479624727157459,
1: 8.479984673362159,
2: 8.480344619566859,
3: 8.48070456577156,
4: 8.48106451197626,
5: 8.48142445818096,
6: 8.481784404385662,
7: 8.482144350590362,
8: 8.482504296795062,
9: 8.482864242999762,
10: 8.483224189204464,
11: 8.483584135409163,
12: 8.483944081613863,
13: 8.484304027818565,
14: 8.484663974023265,
15: 8.485023920227965,
16: 8.485383866432667,
17: 8.485743812637367,
18: 8.486103758842066,
19: 8.486463705046766,
20: 8.486823651251468,
21: 8.487183597456168,
22: 8.487543543660868,
23: 8.48790348986557,
24: 8.48826343607027,
25: 8.48862338227497,
26: 8.48898332847967,
27: 8.489343274684371,
28: 8.489703220889071,
29: 8.490063167093771,
},
"fcst_lower": {
0: 7.055970485245664,
1: 7.056266316358524,
2: 7.056561800026597,
3: 7.056856936297079,
4: 7.057151725217398,
5: 7.05744616683524,
6: 7.057740261198534,
7: 7.058034008355445,
8: 7.058327408354395,
9: 7.058620461244044,
10: 7.0589131670733005,
11: 7.059205525891312,
12: 7.059497537747475,
13: 7.059789202691431,
14: 7.0600805207730595,
15: 7.060371492042489,
16: 7.060662116550093,
17: 7.060952394346479,
18: 7.06124232548251,
19: 7.0615319100092835,
20: 7.061821147978145,
21: 7.062110039440677,
22: 7.062398584448709,
23: 7.062686783054313,
24: 7.0629746353098,
25: 7.063262141267724,
26: 7.063549300980883,
27: 7.063836114502315,
28: 7.0641225818852975,
29: 7.064408703183352,
},
"fcst_upper": {
0: 9.903278969069254,
1: 9.903703030365794,
2: 9.90412743910712,
3: 9.904552195246042,
4: 9.904977298735123,
5: 9.90540274952668,
6: 9.90582854757279,
7: 9.906254692825279,
8: 9.90668118523573,
9: 9.90710802475548,
10: 9.907535211335626,
11: 9.907962744927016,
12: 9.908390625480251,
13: 9.9088188529457,
14: 9.90924742727347,
15: 9.909676348413441,
16: 9.91010561631524,
17: 9.910535230928254,
18: 9.910965192201623,
19: 9.91139550008425,
20: 9.91182615452479,
21: 9.912257155471659,
22: 9.912688502873028,
23: 9.913120196676825,
24: 9.91355223683074,
25: 9.913984623282214,
26: 9.914417355978456,
27: 9.914850434866427,
28: 9.915283859892844,
29: 9.91571763100419,
},
}
)
PEYTON_FCST_LINEAR_99 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2013-05-01 00:00:00"),
1: | pd.Timestamp("2013-05-02 00:00:00") | pandas.Timestamp |
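# Standalone sketch of how the fcst_lower/fcst_upper columns in the dummy frames above
# relate to fcst: both the 95% and 99% frames are symmetric intervals around the same
# point forecast, roughly fcst +/- z * se with z = 1.96 and 2.576. The standard error
# used here is invented.
import pandas as pd

fcst = pd.Series([472.94, 475.60, 478.26])
se = pd.Series([47.1, 47.1, 47.1])  # assumed standard error
band_95 = pd.DataFrame({"fcst": fcst,
                        "fcst_lower": fcst - 1.96 * se,
                        "fcst_upper": fcst + 1.96 * se})
band_99 = pd.DataFrame({"fcst": fcst,
                        "fcst_lower": fcst - 2.576 * se,
                        "fcst_upper": fcst + 2.576 * se})
print(band_95.round(2))
print(band_99.round(2))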
"""
Calculates an exchange-flow oriented salt budget from TEF terms, and
explores dynamical scaling:
does Qe behave as expected relative to dSbar_dx and K?
"""
# imports
import matplotlib.pyplot as plt
import numpy as np
import pickle
import pandas as pd
from datetime import datetime, timedelta
import os; import sys
sys.path.append(os.path.abspath('../alpha'))
import Lfun
import zrfun
import zfun
import tef_fun
import flux_fun
from importlib import reload
reload(flux_fun)
from time import time
from warnings import filterwarnings
filterwarnings('ignore') # skip some warning messages
# associated with lines like QQp[QQ<=0] = np.nan
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-g', '--gridname', type=str, default='cas6')
parser.add_argument('-t', '--tag', type=str, default='v3')
parser.add_argument('-x', '--ex_name', type=str, default='lo8b')
parser.add_argument('-y', '--year', type=int, default=2017)
#parser.add_argument('-v', '--volume', type=str, default='Puget Sound')
args = parser.parse_args()
#which_vol = args.volume
year_str = str(args.year)
# Get Ldir
Ldir = Lfun.Lstart(args.gridname, args.tag)
gtagex = args.gridname + '_' + args.tag + '_' + args.ex_name
# select input/output location
run_name = gtagex+'_'+year_str+'.01.01_'+year_str+'.12.31'
indir00 = Ldir['LOo'] + 'tef/'
indir0 = indir00 + run_name + '/'
indir = indir0 + 'flux/'
outdir = indir00 + 'sill_dyn_plots/'
Lfun.make_dir(outdir)
# get section definitions
sect_df = tef_fun.get_sect_df()
# get volumes
voldir = indir00 + 'volumes_' + Ldir['gridname'] + '/'
v_df = pd.read_pickle(voldir + 'volumes.p')
plt.close('all')
#for which_vol in ['Salish Sea', 'Puget Sound', 'Hood Canal', 'South Sound', 'Strait of Georgia']:
for which_vol in ['Puget Sound']:
# load low passed segment volume and net salt DataFrames
v_lp_df = pd.read_pickle(indir + 'daily_segment_volume.p')
sv_lp_df = pd.read_pickle(indir + 'daily_segment_net_salt.p')
# Note that we trim off the first list item because we are doing our
# dynamical calculations at something like the middle of a sill,
# e.g. ai2 where we can sensibly calculate gradients like dSbar_dx.
if which_vol == 'Salish Sea':
seg_list = list(v_lp_df.columns)
seg_list = seg_list[1:]
sect_sign_dict = {'jdf2':1}
elif which_vol == 'Puget Sound':
seg_list = (flux_fun.ssA + flux_fun.ssM + flux_fun.ssT
+ flux_fun.ssS + flux_fun.ssW + flux_fun.ssH)
# seg_list = seg_list[1:]
# sect_sign_dict = {'ai2':1}
seg_list = seg_list[3:]
sect_sign_dict = {'ai4':1}
elif which_vol == 'Hood Canal':
seg_list = flux_fun.ssH
seg_list = seg_list[1:]
sect_sign_dict = {'hc2':1}
elif which_vol == 'South Sound':
seg_list = flux_fun.ssT + flux_fun.ssS
seg_list = seg_list[1:]
sect_sign_dict = {'tn2':1}
elif which_vol == 'Strait of Georgia':
seg_list = flux_fun.ssG
seg_list = seg_list[1:]
sect_sign_dict = {'sji2':1}
v_lp_df = v_lp_df[seg_list]
sv_lp_df = sv_lp_df[seg_list]
river_list = []
for seg_name in seg_list:
seg = flux_fun.segs[seg_name]
river_list = river_list + seg['R']
riv_df = pd.read_pickle(Ldir['LOo'] + 'river/'
+ Ldir['gtag'] + '_'+year_str+'.01.01_'+year_str+'.12.31.p')
riv_df.index += timedelta(days=0.5)
riv_df = riv_df[river_list]
tef_df_dict = {}
for sn in sect_sign_dict.keys():
in_sign = sect_sign_dict[sn]
tef_df_dict[sn] = flux_fun.get_fluxes(indir0, sn, in_sign=in_sign)
vol_df, salt_df, vol_rel_err, salt_rel_err, salt_rel_err_qe = flux_fun.get_budgets(
sv_lp_df, v_lp_df, riv_df, tef_df_dict, seg_list)
# getting gradients across the seaward section
if which_vol == 'Salish Sea':
sea_sect = 'jdf1'; land_sect = 'jdf3'
sea_seg = 'J1'; land_seg = 'J2'
sill_name = 'Western Strait of Juan de Fuca'
elif which_vol == 'Puget Sound':
# sea_sect = 'ai1'; land_sect = 'ai3'
# sea_seg = 'A1'; land_seg = 'A2'
sea_sect = 'ai3'; land_sect = 'mb1'
sea_seg = 'A3'; land_seg = 'M1'
sill_name = 'Admiralty Inlet South'
elif which_vol == 'South Sound':
sea_sect = 'tn1'; land_sect = 'tn3'
sea_seg = 'T1'; land_seg = 'T2'
sill_name = 'Tacoma Narrows'
elif which_vol == 'Hood Canal':
sea_sect = 'hc1'; land_sect = 'hc3'
sea_seg = 'H1'; land_seg = 'H2'
sill_name = 'Hood Canal'
elif which_vol == 'Strait of Georgia':
sea_sect = 'sji1'; land_sect = 'sog1'
sea_seg = 'G1'; land_seg = 'G2'
sill_name = 'San Juan Islands'
df1 = flux_fun.get_fluxes(indir0, sea_sect)
df3 = flux_fun.get_fluxes(indir0, land_sect)
# get DX for dSbar_dx
sea_lon = (sect_df.loc[sea_sect,'x0'] + sect_df.loc[sea_sect,'x1'])/2
sea_lat = (sect_df.loc[sea_sect,'y0'] + sect_df.loc[sea_sect,'y1'])/2
land_lon = (sect_df.loc[land_sect,'x0'] + sect_df.loc[land_sect,'x1'])/2
land_lat = (sect_df.loc[land_sect,'y0'] + sect_df.loc[land_sect,'y1'])/2
mean_lon = (sea_lon + land_lon)/2
mean_lat = (sea_lat + land_lat)/2
sea_x, sea_y = zfun.ll2xy(sea_lon, sea_lat, mean_lon, mean_lat)
land_x, land_y = zfun.ll2xy(land_lon, land_lat, mean_lon, mean_lat)
DX = np.sqrt((sea_x-land_x)**2 + (sea_y-land_y)**2)
# various things for the dynamical scalings
dSbar_dx = ((df1['Sin']+df1['Sout'])/2-(df3['Sin']+df3['Sout'])/2)/DX
DF = df1['Ftide']-df3['Ftide'] # Net loss of tidal energy flux in region
A0 = v_df.loc[sea_seg,'area m2'] + v_df.loc[land_seg,'area m2']
V0 = v_df.loc[sea_seg,'volume m3'] + v_df.loc[land_seg,'volume m3']
H0 = V0/A0 # average depth of surrounding segments
B0 = V0/(H0*DX)
# dynamical scalings
a = 2.5 * 0.028 # the 2.5 is a fudge factor to get Qe to match Qe_pred
Cd = 2.6e-3
h = H0/2 # m
rho = 1027 # kg m-3
g = 9.8 # m2 s-1
beta = 7.7e-4
dyn_df = pd.DataFrame(index=salt_df.index)
Km = (a**3 * Cd**2 * h**3 * DF / (rho*A0))**(1/3)
Ks = Km/2.2
dyn_df['Km'] = Km
dyn_df['Ks'] = Ks
dyn_df['dSbar_dx'] = dSbar_dx
Ue = g * beta * dSbar_dx * H0**3 / (48 * Km)
dyn_df['Ue'] = Ue
dyn_df['Qe_pred'] = (Ue * B0 * H0 / 4)
dyn_df['Qe'] = salt_df['Qe']
dyn_df['DS_pred'] = H0**2 * dSbar_dx * Ue / (12 * Ks)
dyn_df['DS'] = salt_df['DS']
dyn_df['QeDS_pred'] = dyn_df['Qe_pred'] * dyn_df['DS_pred']
dyn_df['QeDS'] = salt_df['QeDS']
dyn_df['Qe_tide_pred'] = (vol_df['Qtide'])/7
for cn in dyn_df.columns:
dyn_df[cn] = | pd.to_numeric(dyn_df[cn]) | pandas.to_numeric |
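# Standalone sketch of the exchange-flow scalings in the dyn_df block above, evaluated
# for one set of invented scalar inputs so the dependence on dSbar_dx and the mixing
# coefficients is explicit. The formulas are copied from the snippet; the numbers are not.
a, Cd = 2.5 * 0.028, 2.6e-3
rho, g, beta = 1027.0, 9.8, 7.7e-4
H0, B0, A0 = 100.0, 5.0e3, 5.0e8   # invented depth [m], width [m], surface area [m2]
h = H0 / 2
DF = 5.0e9                         # invented net tidal energy loss [W]
dSbar_dx = 1.0e-5                  # invented along-channel salinity gradient [psu/m]
Km = (a**3 * Cd**2 * h**3 * DF / (rho * A0)) ** (1 / 3)
Ks = Km / 2.2
Ue = g * beta * dSbar_dx * H0**3 / (48 * Km)
Qe_pred = Ue * B0 * H0 / 4
DS_pred = H0**2 * dSbar_dx * Ue / (12 * Ks)
print(f"Km={Km:.4f} m2/s  Ue={Ue:.3f} m/s  Qe_pred={Qe_pred:.0f} m3/s  DS_pred={DS_pred:.3f} psu")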
from datetime import datetime
import numpy as np
import pytest
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
Index,
MultiIndex,
Series,
qcut,
)
import pandas._testing as tm
def cartesian_product_for_groupers(result, args, names, fill_value=np.NaN):
"""Reindex to a cartesian production for the groupers,
preserving the nature (Categorical) of each grouper
"""
def f(a):
if isinstance(a, (CategoricalIndex, Categorical)):
categories = a.categories
a = Categorical.from_codes(
np.arange(len(categories)), categories=categories, ordered=a.ordered
)
return a
index = MultiIndex.from_product(map(f, args), names=names)
return result.reindex(index, fill_value=fill_value).sort_index()
_results_for_groupbys_with_missing_categories = {
# This maps the builtin groupby functions to their expected outputs for
# missing categories when they are called on a categorical grouper with
# observed=False. Some functions are expected to return NaN, some zero.
# These expected values can be used across several tests (i.e. they are
# the same for SeriesGroupBy and DataFrameGroupBy) but they should only be
# hardcoded in one place.
"all": np.NaN,
"any": np.NaN,
"count": 0,
"corrwith": np.NaN,
"first": np.NaN,
"idxmax": np.NaN,
"idxmin": np.NaN,
"last": np.NaN,
"mad": np.NaN,
"max": np.NaN,
"mean": np.NaN,
"median": np.NaN,
"min": np.NaN,
"nth": np.NaN,
"nunique": 0,
"prod": np.NaN,
"quantile": np.NaN,
"sem": np.NaN,
"size": 0,
"skew": np.NaN,
"std": np.NaN,
"sum": 0,
"var": np.NaN,
}
def test_apply_use_categorical_name(df):
cats = qcut(df.C, 4)
def get_stats(group):
return {
"min": group.min(),
"max": group.max(),
"count": group.count(),
"mean": group.mean(),
}
result = df.groupby(cats, observed=False).D.apply(get_stats)
assert result.index.names[0] == "C"
def test_basic():
cats = Categorical(
["a", "a", "a", "b", "b", "b", "c", "c", "c"],
categories=["a", "b", "c", "d"],
ordered=True,
)
data = DataFrame({"a": [1, 1, 1, 2, 2, 2, 3, 4, 5], "b": cats})
exp_index = CategoricalIndex(list("abcd"), name="b", ordered=True)
expected = DataFrame({"a": [1, 2, 4, np.nan]}, index=exp_index)
result = data.groupby("b", observed=False).mean()
tm.assert_frame_equal(result, expected)
cat1 = Categorical(["a", "a", "b", "b"], categories=["a", "b", "z"], ordered=True)
cat2 = Categorical(["c", "d", "c", "d"], categories=["c", "d", "y"], ordered=True)
df = DataFrame({"A": cat1, "B": cat2, "values": [1, 2, 3, 4]})
# single grouper
gb = df.groupby("A", observed=False)
exp_idx = CategoricalIndex(["a", "b", "z"], name="A", ordered=True)
expected = DataFrame({"values": Series([3, 7, 0], index=exp_idx)})
result = gb.sum()
tm.assert_frame_equal(result, expected)
# GH 8623
x = DataFrame(
[[1, "<NAME>"], [2, "<NAME>"], [1, "<NAME>"]],
columns=["person_id", "person_name"],
)
x["person_name"] = Categorical(x.person_name)
g = x.groupby(["person_id"], observed=False)
result = g.transform(lambda x: x)
tm.assert_frame_equal(result, x[["person_name"]])
result = x.drop_duplicates("person_name")
expected = x.iloc[[0, 1]]
tm.assert_frame_equal(result, expected)
def f(x):
return x.drop_duplicates("person_name").iloc[0]
result = g.apply(f)
expected = x.iloc[[0, 1]].copy()
expected.index = Index([1, 2], name="person_id")
expected["person_name"] = expected["person_name"].astype("object")
tm.assert_frame_equal(result, expected)
# GH 9921
# Monotonic
df = DataFrame({"a": [5, 15, 25]})
c = pd.cut(df.a, bins=[0, 10, 20, 30, 40])
result = df.a.groupby(c, observed=False).transform(sum)
tm.assert_series_equal(result, df["a"])
tm.assert_series_equal(
df.a.groupby(c, observed=False).transform(lambda xs: np.sum(xs)), df["a"]
)
tm.assert_frame_equal(df.groupby(c, observed=False).transform(sum), df[["a"]])
tm.assert_frame_equal(
df.groupby(c, observed=False).transform(lambda xs: np.max(xs)), df[["a"]]
)
# Filter
tm.assert_series_equal(df.a.groupby(c, observed=False).filter(np.all), df["a"])
tm.assert_frame_equal(df.groupby(c, observed=False).filter(np.all), df)
# Non-monotonic
df = DataFrame({"a": [5, 15, 25, -5]})
c = pd.cut(df.a, bins=[-10, 0, 10, 20, 30, 40])
result = df.a.groupby(c, observed=False).transform(sum)
tm.assert_series_equal(result, df["a"])
tm.assert_series_equal(
df.a.groupby(c, observed=False).transform(lambda xs: np.sum(xs)), df["a"]
)
tm.assert_frame_equal(df.groupby(c, observed=False).transform(sum), df[["a"]])
tm.assert_frame_equal(
df.groupby(c, observed=False).transform(lambda xs: np.sum(xs)), df[["a"]]
)
# GH 9603
df = DataFrame({"a": [1, 0, 0, 0]})
c = pd.cut(df.a, [0, 1, 2, 3, 4], labels=Categorical(list("abcd")))
result = df.groupby(c, observed=False).apply(len)
exp_index = CategoricalIndex(c.values.categories, ordered=c.values.ordered)
expected = Series([1, 0, 0, 0], index=exp_index)
expected.index.name = "a"
tm.assert_series_equal(result, expected)
# more basic
levels = ["foo", "bar", "baz", "qux"]
codes = np.random.randint(0, 4, size=100)
cats = Categorical.from_codes(codes, levels, ordered=True)
data = DataFrame(np.random.randn(100, 4))
result = data.groupby(cats, observed=False).mean()
expected = data.groupby(np.asarray(cats), observed=False).mean()
exp_idx = CategoricalIndex(levels, categories=cats.categories, ordered=True)
expected = expected.reindex(exp_idx)
tm.assert_frame_equal(result, expected)
grouped = data.groupby(cats, observed=False)
desc_result = grouped.describe()
idx = cats.codes.argsort()
ord_labels = np.asarray(cats).take(idx)
ord_data = data.take(idx)
exp_cats = Categorical(
ord_labels, ordered=True, categories=["foo", "bar", "baz", "qux"]
)
expected = ord_data.groupby(exp_cats, sort=False, observed=False).describe()
tm.assert_frame_equal(desc_result, expected)
# GH 10460
expc = Categorical.from_codes(np.arange(4).repeat(8), levels, ordered=True)
exp = CategoricalIndex(expc)
tm.assert_index_equal((desc_result.stack().index.get_level_values(0)), exp)
exp = Index(["count", "mean", "std", "min", "25%", "50%", "75%", "max"] * 4)
tm.assert_index_equal((desc_result.stack().index.get_level_values(1)), exp)
def test_level_get_group(observed):
# GH15155
df = DataFrame(
data=np.arange(2, 22, 2),
index=MultiIndex(
levels=[CategoricalIndex(["a", "b"]), range(10)],
codes=[[0] * 5 + [1] * 5, range(10)],
names=["Index1", "Index2"],
),
)
g = df.groupby(level=["Index1"], observed=observed)
# expected should equal df.loc[["a"]]
# GH15166
expected = DataFrame(
data=np.arange(2, 12, 2),
index=MultiIndex(
levels=[CategoricalIndex(["a", "b"]), range(5)],
codes=[[0] * 5, range(5)],
names=["Index1", "Index2"],
),
)
result = g.get_group("a")
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("ordered", [True, False])
def test_apply(ordered):
# GH 10138
dense = Categorical(list("abc"), ordered=ordered)
# 'b' is in the categories but not in the list
missing = Categorical(list("aaa"), categories=["a", "b"], ordered=ordered)
values = np.arange(len(dense))
df = DataFrame({"missing": missing, "dense": dense, "values": values})
grouped = df.groupby(["missing", "dense"], observed=True)
# missing category 'b' should still exist in the output index
idx = MultiIndex.from_arrays([missing, dense], names=["missing", "dense"])
expected = DataFrame([0, 1, 2.0], index=idx, columns=["values"])
# GH#21636 tracking down the xfail, in some builds np.mean(df.loc[[0]])
# is coming back as Series([0., 1., 0.], index=["missing", "dense", "values"])
# when we expect Series(0., index=["values"])
result = grouped.apply(lambda x: np.mean(x))
tm.assert_frame_equal(result, expected)
# we coerce back to ints
expected = expected.astype("int")
result = grouped.mean()
tm.assert_frame_equal(result, expected)
result = grouped.agg(np.mean)
tm.assert_frame_equal(result, expected)
# but for transform we should still get back the original index
idx = MultiIndex.from_arrays([missing, dense], names=["missing", "dense"])
expected = Series(1, index=idx)
result = grouped.apply(lambda x: 1)
tm.assert_series_equal(result, expected)
def test_observed(observed):
# multiple groupers, don't re-expand the output space
# of the grouper
# gh-14942 (implement)
# gh-10132 (back-compat)
# gh-8138 (back-compat)
# gh-8869
cat1 = Categorical(["a", "a", "b", "b"], categories=["a", "b", "z"], ordered=True)
cat2 = Categorical(["c", "d", "c", "d"], categories=["c", "d", "y"], ordered=True)
df = DataFrame({"A": cat1, "B": cat2, "values": [1, 2, 3, 4]})
df["C"] = ["foo", "bar"] * 2
# multiple groupers with a non-cat
gb = df.groupby(["A", "B", "C"], observed=observed)
exp_index = MultiIndex.from_arrays(
[cat1, cat2, ["foo", "bar"] * 2], names=["A", "B", "C"]
)
expected = DataFrame({"values": Series([1, 2, 3, 4], index=exp_index)}).sort_index()
result = gb.sum()
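# With observed=False the result index expands to the full cartesian product of
# the grouping categories, with unobserved combinations filled with 0.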
if not observed:
expected = cartesian_product_for_groupers(
expected, [cat1, cat2, ["foo", "bar"]], list("ABC"), fill_value=0
)
tm.assert_frame_equal(result, expected)
gb = df.groupby(["A", "B"], observed=observed)
exp_index = MultiIndex.from_arrays([cat1, cat2], names=["A", "B"])
expected = DataFrame({"values": [1, 2, 3, 4]}, index=exp_index)
result = gb.sum()
if not observed:
expected = cartesian_product_for_groupers(
expected, [cat1, cat2], list("AB"), fill_value=0
)
tm.assert_frame_equal(result, expected)
# https://github.com/pandas-dev/pandas/issues/8138
d = {
"cat": Categorical(
["a", "b", "a", "b"], categories=["a", "b", "c"], ordered=True
),
"ints": [1, 1, 2, 2],
"val": [10, 20, 30, 40],
}
df = DataFrame(d)
# Grouping on a single column
groups_single_key = df.groupby("cat", observed=observed)
result = groups_single_key.mean()
exp_index = CategoricalIndex(
list("ab"), name="cat", categories=list("abc"), ordered=True
)
expected = DataFrame({"ints": [1.5, 1.5], "val": [20.0, 30]}, index=exp_index)
if not observed:
index = CategoricalIndex(
list("abc"), name="cat", categories=list("abc"), ordered=True
)
expected = expected.reindex(index)
tm.assert_frame_equal(result, expected)
# Grouping on two columns
groups_double_key = df.groupby(["cat", "ints"], observed=observed)
result = groups_double_key.agg("mean")
expected = DataFrame(
{
"val": [10, 30, 20, 40],
"cat": Categorical(
["a", "a", "b", "b"], categories=["a", "b", "c"], ordered=True
),
"ints": [1, 2, 1, 2],
}
).set_index(["cat", "ints"])
if not observed:
expected = cartesian_product_for_groupers(
expected, [df.cat.values, [1, 2]], ["cat", "ints"]
)
tm.assert_frame_equal(result, expected)
# GH 10132
for key in [("a", 1), ("b", 2), ("b", 1), ("a", 2)]:
c, i = key
result = groups_double_key.get_group(key)
expected = df[(df.cat == c) & (df.ints == i)]
tm.assert_frame_equal(result, expected)
# gh-8869
# with as_index
d = {
"foo": [10, 8, 4, 8, 4, 1, 1],
"bar": [10, 20, 30, 40, 50, 60, 70],
"baz": ["d", "c", "e", "a", "a", "d", "c"],
}
df = DataFrame(d)
cat = pd.cut(df["foo"], np.linspace(0, 10, 3))
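# np.linspace(0, 10, 3) gives bin edges [0, 5, 10], i.e. the two intervals
# (0, 5] and (5, 10].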
df["range"] = cat
groups = df.groupby(["range", "baz"], as_index=False, observed=observed)
result = groups.agg("mean")
groups2 = df.groupby(["range", "baz"], as_index=True, observed=observed)
expected = groups2.agg("mean").reset_index()
tm.assert_frame_equal(result, expected)
def test_observed_codes_remap(observed):
d = {"C1": [3, 3, 4, 5], "C2": [1, 2, 3, 4], "C3": [10, 100, 200, 34]}
df = DataFrame(d)
values = pd.cut(df["C1"], [1, 2, 3, 6])
values.name = "cat"
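# The C1 values only land in the (2, 3] and (3, 6] bins, leaving (1, 2]
# unobserved.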
groups_double_key = df.groupby([values, "C2"], observed=observed)
idx = MultiIndex.from_arrays([values, [1, 2, 3, 4]], names=["cat", "C2"])
expected = DataFrame({"C1": [3, 3, 4, 5], "C3": [10, 100, 200, 34]}, index=idx)
if not observed:
expected = cartesian_product_for_groupers(
expected, [values.values, [1, 2, 3, 4]], ["cat", "C2"]
)
result = groups_double_key.agg("mean")
tm.assert_frame_equal(result, expected)
def test_observed_perf():
# we create a cartesian product, so this is
# non-performant if we don't use observed values
# gh-14942
df = DataFrame(
{
"cat": np.random.randint(0, 255, size=30000),
"int_id": np.random.randint(0, 255, size=30000),
"other_id": np.random.randint(0, 10000, size=30000),
"foo": 0,
}
)
df["cat"] = df.cat.astype(str).astype("category")
grouped = df.groupby(["cat", "int_id", "other_id"], observed=True)
result = grouped.count()
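# With observed=True the index holds only combinations that actually occur, so
# each index level's cardinality matches the corresponding column's nunique().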
assert result.index.levels[0].nunique() == df.cat.nunique()
assert result.index.levels[1].nunique() == df.int_id.nunique()
assert result.index.levels[2].nunique() == df.other_id.nunique()
def test_observed_groups(observed):
# gh-20583
# test that we have the appropriate groups
cat = Categorical(["a", "c", "a"], categories=["a", "b", "c"])
df = DataFrame({"cat": cat, "vals": [1, 2, 3]})
g = df.groupby("cat", observed=observed)
result = g.groups
if observed:
expected = {"a": Index([0, 2], dtype="int64"), "c": Index([1], dtype="int64")}
else:
expected = {
"a": Index([0, 2], dtype="int64"),
"b": Index([], dtype="int64"),
"c": Index([1], dtype="int64"),
}
tm.assert_dict_equal(result, expected)
def test_observed_groups_with_nan(observed):
# GH 24740
df = DataFrame(
{
"cat": Categorical(["a", np.nan, "a"], categories=["a", "b", "d"]),
"vals": [1, 2, 3],
}
)
g = df.groupby("cat", observed=observed)
result = g.groups
if observed:
expected = {"a": Index([0, 2], dtype="int64")}
else:
expected = {
"a": Index([0, 2], dtype="int64"),
"b": Index([], dtype="int64"),
"d": Index([], dtype="int64"),
}
tm.assert_dict_equal(result, expected)
def test_observed_nth():
# GH 26385
cat = Categorical(["a", np.nan, np.nan], categories=["a", "b", "c"])
ser = Series([1, 2, 3])
df = DataFrame({"cat": cat, "ser": ser})
result = df.groupby("cat", observed=False)["ser"].nth(0)
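# With observed=False the result is reindexed over all categories; 'b' and 'c'
# have no rows, so their first values are NaN.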
index = Categorical(["a", "b", "c"], categories=["a", "b", "c"])
expected = Series([1, np.nan, np.nan], index=index, name="ser")
expected.index.name = "cat"
tm.assert_series_equal(result, expected)
def test_dataframe_categorical_with_nan(observed):
# GH 21151
s1 = Categorical([np.nan, "a", np.nan, "a"], categories=["a", "b", "c"])
s2 = Series([1, 2, 3, 4])
df = DataFrame({"s1": s1, "s2": s2})
result = df.groupby("s1", observed=observed).first().reset_index()
if observed:
expected = DataFrame(
{"s1": Categorical(["a"], categories=["a", "b", "c"]), "s2": [2]}
)
else:
expected = DataFrame(
{
"s1": Categorical(["a", "b", "c"], categories=["a", "b", "c"]),
"s2": [2, np.nan, np.nan],
}
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("ordered", [True, False])
@pytest.mark.parametrize("observed", [True, False])
@pytest.mark.parametrize("sort", [True, False])
def test_dataframe_categorical_ordered_observed_sort(ordered, observed, sort):
# GH 25871: Fix groupby sorting on ordered Categoricals
# GH 25167: Groupby with observed=True doesn't sort
# Build a dataframe with cat having one unobserved category ('missing'),
# and a Series with identical values
label = Categorical(
["d", "a", "b", "a", "d", "b"],
categories=["a", "b", "missing", "d"],
ordered=ordered,
)
val = Series(["d", "a", "b", "a", "d", "b"])
df = DataFrame({"label": label, "val": val})
# aggregate on the Categorical
result = df.groupby("label", observed=observed, sort=sort)["val"].aggregate("first")
# If ordering works, we expect index labels equal to aggregation results,
# except for 'observed=False': label 'missing' has aggregation None
label = Series(result.index.array, dtype="object")
aggr = Series(result.array)
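# With observed=False the unobserved 'missing' label aggregates to NaN; put the
# label back so the element-wise comparison below stays meaningful.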
if not observed:
aggr[aggr.isna()] = "missing"
if not all(label == aggr):
msg = (
"Labels and aggregation results not consistently sorted\n"
f"for (ordered={ordered}, observed={observed}, sort={sort})\n"
f"Result:\n{result}"
)
assert False, msg
def test_datetime():
# GH9049: ensure backward compatibility
levels = pd.date_range("2014-01-01", periods=4)
codes = np.random.randint(0, 4, size=100)
cats = Categorical.from_codes(codes, levels, ordered=True)
data = DataFrame(np.random.randn(100, 4))
result = data.groupby(cats, observed=False).mean()
expected = data.groupby(np.asarray(cats), observed=False).mean()
expected = expected.reindex(levels)
expected.index = CategoricalIndex(
expected.index, categories=expected.index, ordered=True
)
tm.assert_frame_equal(result, expected)
grouped = data.groupby(cats, observed=False)
desc_result = grouped.describe()
idx = cats.codes.argsort()
ord_labels = cats.take(idx)
ord_data = data.take(idx)
expected = ord_data.groupby(ord_labels, observed=False).describe()
tm.assert_frame_equal(desc_result, expected)
tm.assert_index_equal(desc_result.index, expected.index)
tm.assert_index_equal(
desc_result.index.get_level_values(0), expected.index.get_level_values(0)
)
# GH 10460
expc = Categorical.from_codes(np.arange(4).repeat(8), levels, ordered=True)
exp = CategoricalIndex(expc)
tm.assert_index_equal((desc_result.stack().index.get_level_values(0)), exp)
exp = Index(["count", "mean", "std", "min", "25%", "50%", "75%", "max"] * 4)
tm.assert_index_equal((desc_result.stack().index.get_level_values(1)), exp)
def test_categorical_index():
s = np.random.RandomState(12345)
levels = ["foo", "bar", "baz", "qux"]
codes = s.randint(0, 4, size=20)
cats = Categorical.from_codes(codes, levels, ordered=True)
df = DataFrame(np.repeat(np.arange(20), 4).reshape(-1, 4), columns=list("abcd"))
df["cats"] = cats
# with a cat index
result = df.set_index("cats").groupby(level=0, observed=False).sum()
expected = df[list("abcd")].groupby(cats.codes, observed=False).sum()
expected.index = CategoricalIndex(
Categorical.from_codes([0, 1, 2, 3], levels, ordered=True), name="cats"
)
tm.assert_frame_equal(result, expected)
# with a cat column, should produce a cat index
result = df.groupby("cats", observed=False).sum()
expected = df[list("abcd")].groupby(cats.codes, observed=False).sum()
expected.index = CategoricalIndex(
Categorical.from_codes([0, 1, 2, 3], levels, ordered=True), name="cats"
)
tm.assert_frame_equal(result, expected)
def test_describe_categorical_columns():
# GH 11558
cats = CategoricalIndex(
["qux", "foo", "baz", "bar"],
categories=["foo", "bar", "baz", "qux"],
ordered=True,
)
df = DataFrame(np.random.randn(20, 4), columns=cats)
result = df.groupby([1, 2, 3, 4] * 5).describe()
tm.assert_index_equal(result.stack().columns, cats)
tm.assert_categorical_equal(result.stack().columns.values, cats.values)
def test_unstack_categorical():
# GH11558 (example is taken from the original issue)
df = DataFrame(
{"a": range(10), "medium": ["A", "B"] * 5, "artist": list("XYXXY") * 2}
)
df["medium"] = df["medium"].astype("category")
gcat = df.groupby(["artist", "medium"], observed=False)["a"].count().unstack()
result = gcat.describe()
exp_columns = CategoricalIndex(["A", "B"], ordered=False, name="medium")
tm.assert_index_equal(result.columns, exp_columns)
tm.assert_categorical_equal(result.columns.values, exp_columns.values)
result = gcat["A"] + gcat["B"]
expected = Series([6, 4], index=Index(["X", "Y"], name="artist"))
tm.assert_series_equal(result, expected)
def test_bins_unequal_len():
# GH3011
series = Series([np.nan, np.nan, 1, 1, 2, 2, 3, 3, 4, 4])
bins = pd.cut(series.dropna().values, 4)
# len(bins) != len(series) here
msg = r"Length of grouper \(8\) and axis \(10\) must be same length"
with pytest.raises(ValueError, match=msg):
series.groupby(bins).mean()
def test_as_index():
# GH13204
df = DataFrame(
{
"cat": Categorical([1, 2, 2], [1, 2, 3]),
"A": [10, 11, 11],
"B": [101, 102, 103],
}
)
result = df.groupby(["cat", "A"], as_index=False, observed=True).sum()
expected = DataFrame(
{
"cat": Categorical([1, 2], categories=df.cat.cat.categories),
"A": [10, 11],
"B": [101, 205],
},
columns=["cat", "A", "B"],
)
tm.assert_frame_equal(result, expected)
# function grouper
f = lambda r: df.loc[r, "A"]
result = df.groupby(["cat", f], as_index=False, observed=True).sum()
expected = DataFrame(
{
"cat": Categorical([1, 2], categories=df.cat.cat.categories),
import datetime
from datetime import timedelta
from distutils.version import LooseVersion
from io import BytesIO
import os
import re
from warnings import catch_warnings, simplefilter
import numpy as np
import pytest
from pandas.compat import is_platform_little_endian, is_platform_windows
import pandas.util._test_decorators as td
from pandas.core.dtypes.common import is_categorical_dtype
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
DatetimeIndex,
Index,
Int64Index,
MultiIndex,
RangeIndex,
Series,
Timestamp,
bdate_range,
concat,
date_range,
isna,
timedelta_range,
)
from pandas.tests.io.pytables.common import (
_maybe_remove,
create_tempfile,
ensure_clean_path,
ensure_clean_store,
safe_close,
safe_remove,
tables,
)
import pandas.util.testing as tm
from pandas.io.pytables import (
ClosedFileError,
HDFStore,
PossibleDataLossError,
Term,
read_hdf,
)
from pandas.io import pytables as pytables # noqa: E402 isort:skip
from pandas.io.pytables import TableIterator # noqa: E402 isort:skip
_default_compressor = "blosc"
ignore_natural_naming_warning = pytest.mark.filterwarnings(
"ignore:object name:tables.exceptions.NaturalNameWarning"
)
@pytest.mark.single
class TestHDFStore:
def test_format_kwarg_in_constructor(self, setup_path):
# GH 13291
with ensure_clean_path(setup_path) as path:
with pytest.raises(ValueError):
HDFStore(path, format="table")
def test_context(self, setup_path):
path = create_tempfile(setup_path)
try:
with HDFStore(path) as tbl:
raise ValueError("blah")
except ValueError:
pass
finally:
safe_remove(path)
try:
with HDFStore(path) as tbl:
tbl["a"] = tm.makeDataFrame()
with HDFStore(path) as tbl:
assert len(tbl) == 1
assert type(tbl["a"]) == DataFrame
finally:
safe_remove(path)
def test_conv_read_write(self, setup_path):
path = create_tempfile(setup_path)
try:
def roundtrip(key, obj, **kwargs):
obj.to_hdf(path, key, **kwargs)
return read_hdf(path, key)
o = tm.makeTimeSeries()
tm.assert_series_equal(o, roundtrip("series", o))
o = tm.makeStringSeries()
tm.assert_series_equal(o, roundtrip("string_series", o))
o = tm.makeDataFrame()
tm.assert_frame_equal(o, roundtrip("frame", o))
# table
df = DataFrame(dict(A=range(5), B=range(5)))
df.to_hdf(path, "table", append=True)
result = read_hdf(path, "table", where=["index>2"])
tm.assert_frame_equal(df[df.index > 2], result)
finally:
safe_remove(path)
def test_long_strings(self, setup_path):
# GH6166
df = DataFrame(
{"a": tm.rands_array(100, size=10)}, index=tm.rands_array(100, size=10)
)
with ensure_clean_store(setup_path) as store:
store.append("df", df, data_columns=["a"])
result = store.select("df")
tm.assert_frame_equal(df, result)
def test_api(self, setup_path):
# GH4584
# API issue when to_hdf doesn't accept append AND format args
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, "df", append=True, format="table")
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
# append to False
df.iloc[:10].to_hdf(path, "df", append=False, format="table")
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, "df", append=True)
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
# append to False
df.iloc[:10].to_hdf(path, "df", append=False, format="table")
df.iloc[10:].to_hdf(path, "df", append=True)
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.to_hdf(path, "df", append=False, format="fixed")
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df", append=False, format="f")
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df", append=False)
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df")
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_store(setup_path) as store:
path = store._path
df = tm.makeDataFrame()
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=True, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
# append to False
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
# formats
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format=None)
tm.assert_frame_equal(store.select("df"), df)
with ensure_clean_path(setup_path) as path:
# Invalid.
df = tm.makeDataFrame()
with pytest.raises(ValueError):
df.to_hdf(path, "df", append=True, format="f")
with pytest.raises(ValueError):
df.to_hdf(path, "df", append=True, format="fixed")
with pytest.raises(TypeError):
df.to_hdf(path, "df", append=True, format="foo")
with pytest.raises(TypeError):
df.to_hdf(path, "df", append=False, format="bar")
# File path doesn't exist
path = ""
with pytest.raises(FileNotFoundError):
read_hdf(path, "df")
def test_api_default_format(self, setup_path):
# default_format option
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
pd.set_option("io.hdf.default_format", "fixed")
_maybe_remove(store, "df")
store.put("df", df)
assert not store.get_storer("df").is_table
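# append() requires the 'table' format, so it raises while the default
# format is 'fixed'.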
with pytest.raises(ValueError):
store.append("df2", df)
pd.set_option("io.hdf.default_format", "table")
_maybe_remove(store, "df")
store.put("df", df)
assert store.get_storer("df").is_table
_maybe_remove(store, "df2")
store.append("df2", df)
assert store.get_storer("df").is_table
pd.set_option("io.hdf.default_format", None)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
pd.set_option("io.hdf.default_format", "fixed")
df.to_hdf(path, "df")
with HDFStore(path) as store:
assert not store.get_storer("df").is_table
with pytest.raises(ValueError):
df.to_hdf(path, "df2", append=True)
pd.set_option("io.hdf.default_format", "table")
df.to_hdf(path, "df3")
with HDFStore(path) as store:
assert store.get_storer("df3").is_table
df.to_hdf(path, "df4", append=True)
with HDFStore(path) as store:
assert store.get_storer("df4").is_table
pd.set_option("io.hdf.default_format", None)
def test_keys(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeStringSeries()
store["c"] = tm.makeDataFrame()
assert len(store) == 3
expected = {"/a", "/b", "/c"}
assert set(store.keys()) == expected
assert set(store) == expected
def test_keys_ignore_hdf_softlink(self, setup_path):
# GH 20523
# Puts a softlink into HDF file and rereads
with ensure_clean_store(setup_path) as store:
df = DataFrame(dict(A=range(5), B=range(5)))
store.put("df", df)
assert store.keys() == ["/df"]
store._handle.create_soft_link(store._handle.root, "symlink", "df")
# Should ignore the softlink
assert store.keys() == ["/df"]
def test_iter_empty(self, setup_path):
with ensure_clean_store(setup_path) as store:
# GH 12221
assert list(store) == []
def test_repr(self, setup_path):
with ensure_clean_store(setup_path) as store:
repr(store)
store.info()
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeStringSeries()
store["c"] = tm.makeDataFrame()
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
store["df"] = df
# make a random group in hdf space
store._handle.create_group(store._handle.root, "bah")
assert store.filename in repr(store)
assert store.filename in str(store)
store.info()
# storers
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
store.append("df", df)
s = store.get_storer("df")
repr(s)
str(s)
@ignore_natural_naming_warning
def test_contains(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeDataFrame()
store["foo/bar"] = tm.makeDataFrame()
assert "a" in store
assert "b" in store
assert "c" not in store
assert "foo/bar" in store
assert "/foo/bar" in store
assert "/foo/b" not in store
assert "bar" not in store
# gh-2694: tables.NaturalNameWarning
with catch_warnings(record=True):
store["node())"] = tm.makeDataFrame()
assert "node())" in store
def test_versioning(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeDataFrame()
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df[:10])
store.append("df1", df[10:])
assert store.root.a._v_attrs.pandas_version == "0.15.2"
assert store.root.b._v_attrs.pandas_version == "0.15.2"
assert store.root.df1._v_attrs.pandas_version == "0.15.2"
# write a file and wipe its versioning
_maybe_remove(store, "df2")
store.append("df2", df)
# this is an error because its table_type is appendable, but no
# version info
store.get_node("df2")._v_attrs.pandas_version = None
with pytest.raises(Exception):
store.select("df2")
def test_mode(self, setup_path):
df = tm.makeTimeDataFrame()
def check(mode):
with ensure_clean_path(setup_path) as path:
# constructor
if mode in ["r", "r+"]:
with pytest.raises(IOError):
HDFStore(path, mode=mode)
else:
store = HDFStore(path, mode=mode)
assert store._handle.mode == mode
store.close()
with ensure_clean_path(setup_path) as path:
# context
if mode in ["r", "r+"]:
with pytest.raises(IOError):
with HDFStore(path, mode=mode) as store: # noqa
pass
else:
with HDFStore(path, mode=mode) as store:
assert store._handle.mode == mode
with ensure_clean_path(setup_path) as path:
# conv write
if mode in ["r", "r+"]:
with pytest.raises(IOError):
df.to_hdf(path, "df", mode=mode)
df.to_hdf(path, "df", mode="w")
else:
df.to_hdf(path, "df", mode=mode)
# conv read
if mode in ["w"]:
with pytest.raises(ValueError):
read_hdf(path, "df", mode=mode)
else:
result = read_hdf(path, "df", mode=mode)
tm.assert_frame_equal(result, df)
def check_default_mode():
# read_hdf uses default mode
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", mode="w")
result = read_hdf(path, "df")
tm.assert_frame_equal(result, df)
check("r")
check("r+")
check("a")
check("w")
check_default_mode()
def test_reopen_handle(self, setup_path):
with ensure_clean_path(setup_path) as path:
store = HDFStore(path, mode="a")
store["a"] = tm.makeTimeSeries()
# invalid mode change
with pytest.raises(PossibleDataLossError):
store.open("w")
store.close()
assert not store.is_open
# truncation ok here
store.open("w")
assert store.is_open
assert len(store) == 0
store.close()
assert not store.is_open
store = HDFStore(path, mode="a")
store["a"] = tm.makeTimeSeries()
# reopen as read
store.open("r")
assert store.is_open
assert len(store) == 1
assert store._mode == "r"
store.close()
assert not store.is_open
# reopen as append
store.open("a")
assert store.is_open
assert len(store) == 1
assert store._mode == "a"
store.close()
assert not store.is_open
# reopen as append (again)
store.open("a")
assert store.is_open
assert len(store) == 1
assert store._mode == "a"
store.close()
assert not store.is_open
def test_open_args(self, setup_path):
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
# create an in memory store
store = HDFStore(
path, mode="a", driver="H5FD_CORE", driver_core_backing_store=0
)
store["df"] = df
store.append("df2", df)
tm.assert_frame_equal(store["df"], df)
tm.assert_frame_equal(store["df2"], df)
store.close()
# the file should not have actually been written
assert not os.path.exists(path)
def test_flush(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store.flush()
store.flush(fsync=True)
def test_get(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
left = store.get("a")
right = store["a"]
tm.assert_series_equal(left, right)
left = store.get("/a")
right = store["/a"]
tm.assert_series_equal(left, right)
with pytest.raises(KeyError, match="'No object named b in the file'"):
store.get("b")
@pytest.mark.parametrize(
"where, expected",
[
(
"/",
{
"": ({"first_group", "second_group"}, set()),
"/first_group": (set(), {"df1", "df2"}),
"/second_group": ({"third_group"}, {"df3", "s1"}),
"/second_group/third_group": (set(), {"df4"}),
},
),
(
"/second_group",
{
"/second_group": ({"third_group"}, {"df3", "s1"}),
"/second_group/third_group": (set(), {"df4"}),
},
),
],
)
def test_walk(self, where, expected, setup_path):
# GH10143
objs = {
"df1": pd.DataFrame([1, 2, 3]),
"df2": pd.DataFrame([4, 5, 6]),
"df3": pd.DataFrame([6, 7, 8]),
"df4": pd.DataFrame([9, 10, 11]),
"s1": pd.Series([10, 9, 8]),
# Next 3 items aren't pandas objects and should be ignored
"a1": np.array([[1, 2, 3], [4, 5, 6]]),
"tb1": np.array([(1, 2, 3), (4, 5, 6)], dtype="i,i,i"),
"tb2": np.array([(7, 8, 9), (10, 11, 12)], dtype="i,i,i"),
}
with ensure_clean_store("walk_groups.hdf", mode="w") as store:
store.put("/first_group/df1", objs["df1"])
store.put("/first_group/df2", objs["df2"])
store.put("/second_group/df3", objs["df3"])
store.put("/second_group/s1", objs["s1"])
store.put("/second_group/third_group/df4", objs["df4"])
# Create non-pandas objects
store._handle.create_array("/first_group", "a1", objs["a1"])
store._handle.create_table("/first_group", "tb1", obj=objs["tb1"])
store._handle.create_table("/second_group", "tb2", obj=objs["tb2"])
assert len(list(store.walk(where=where))) == len(expected)
for path, groups, leaves in store.walk(where=where):
assert path in expected
expected_groups, expected_frames = expected[path]
assert expected_groups == set(groups)
assert expected_frames == set(leaves)
for leaf in leaves:
frame_path = "/".join([path, leaf])
obj = store.get(frame_path)
if "df" in leaf:
tm.assert_frame_equal(obj, objs[leaf])
else:
tm.assert_series_equal(obj, objs[leaf])
def test_getattr(self, setup_path):
with ensure_clean_store(setup_path) as store:
s = tm.makeTimeSeries()
store["a"] = s
# test attribute access
result = store.a
tm.assert_series_equal(result, s)
result = getattr(store, "a")
tm.assert_series_equal(result, s)
df = tm.makeTimeDataFrame()
store["df"] = df
result = store.df
tm.assert_frame_equal(result, df)
# errors
for x in ["d", "mode", "path", "handle", "complib"]:
with pytest.raises(AttributeError):
getattr(store, x)
# not stores
for x in ["mode", "path", "handle", "complib"]:
getattr(store, "_{x}".format(x=x))
def test_put(self, setup_path):
with ensure_clean_store(setup_path) as store:
ts = tm.makeTimeSeries()
df = tm.makeTimeDataFrame()
store["a"] = ts
store["b"] = df[:10]
store["foo/bar/bah"] = df[:10]
store["foo"] = df[:10]
store["/foo"] = df[:10]
store.put("c", df[:10], format="table")
# not OK, not a table
with pytest.raises(ValueError):
store.put("b", df[10:], append=True)
# node does not currently exist, test _is_table_type returns False
# in this case
_maybe_remove(store, "f")
with pytest.raises(ValueError):
store.put("f", df[10:], append=True)
# can't put to a table (use append instead)
with pytest.raises(ValueError):
store.put("c", df[10:], append=True)
# overwrite table
store.put("c", df[:10], format="table", append=False)
tm.assert_frame_equal(df[:10], store["c"])
def test_put_string_index(self, setup_path):
with ensure_clean_store(setup_path) as store:
index = Index(
["I am a very long string index: {i}".format(i=i) for i in range(20)]
)
s = Series(np.arange(20), index=index)
df = DataFrame({"A": s, "B": s})
store["a"] = s
tm.assert_series_equal(store["a"], s)
store["b"] = df
tm.assert_frame_equal(store["b"], df)
# mixed length
index = Index(
["abcdefghijklmnopqrstuvwxyz1234567890"]
+ ["I am a very long string index: {i}".format(i=i) for i in range(20)]
)
s = Series(np.arange(21), index=index)
df = DataFrame({"A": s, "B": s})
store["a"] = s
tm.assert_series_equal(store["a"], s)
store["b"] = df
tm.assert_frame_equal(store["b"], df)
def test_put_compression(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame()
store.put("c", df, format="table", complib="zlib")
tm.assert_frame_equal(store["c"], df)
# can't compress if format='fixed'
with pytest.raises(ValueError):
store.put("b", df, format="fixed", complib="zlib")
@td.skip_if_windows_python_3
def test_put_compression_blosc(self, setup_path):
df = tm.makeTimeDataFrame()
with ensure_clean_store(setup_path) as store:
# can't compress if format='fixed'
with pytest.raises(ValueError):
store.put("b", df, format="fixed", complib="blosc")
store.put("c", df, format="table", complib="blosc")
tm.assert_frame_equal(store["c"], df)
def test_complibs_default_settings(self, setup_path):
# GH15943
df = tm.makeDataFrame()
# Set complevel and check if complib is automatically set to
# default value
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df", complevel=9)
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 9
assert node.filters.complib == "zlib"
# Set complib and check to see if compression is disabled
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df", complib="zlib")
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
# Check if not setting complib or complevel results in no compression
with ensure_clean_path(setup_path) as tmpfile:
df.to_hdf(tmpfile, "df")
result = pd.read_hdf(tmpfile, "df")
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
# Check if file-defaults can be overridden on a per table basis
with ensure_clean_path(setup_path) as tmpfile:
store = pd.HDFStore(tmpfile)
store.append("dfc", df, complevel=9, complib="blosc")
store.append("df", df)
store.close()
with tables.open_file(tmpfile, mode="r") as h5file:
for node in h5file.walk_nodes(where="/df", classname="Leaf"):
assert node.filters.complevel == 0
assert node.filters.complib is None
for node in h5file.walk_nodes(where="/dfc", classname="Leaf"):
assert node.filters.complevel == 9
assert node.filters.complib == "blosc"
def test_complibs(self, setup_path):
# GH14478
df = tm.makeDataFrame()
# Building list of all complibs and complevels tuples
all_complibs = tables.filters.all_complibs
# Remove lzo if it's not available on this platform
if not tables.which_lib_version("lzo"):
all_complibs.remove("lzo")
# Remove bzip2 if it's not available on this platform
if not tables.which_lib_version("bzip2"):
all_complibs.remove("bzip2")
all_levels = range(0, 10)
all_tests = [(lib, lvl) for lib in all_complibs for lvl in all_levels]
for (lib, lvl) in all_tests:
with ensure_clean_path(setup_path) as tmpfile:
gname = "foo"
# Write and read file to see if data is consistent
df.to_hdf(tmpfile, gname, complib=lib, complevel=lvl)
result = pd.read_hdf(tmpfile, gname)
tm.assert_frame_equal(result, df)
# Open file and check metadata
# for correct amount of compression
h5table = tables.open_file(tmpfile, mode="r")
for node in h5table.walk_nodes(where="/" + gname, classname="Leaf"):
assert node.filters.complevel == lvl
if lvl == 0:
assert node.filters.complib is None
else:
assert node.filters.complib == lib
h5table.close()
def test_put_integer(self, setup_path):
# non-date, non-string index
df = DataFrame(np.random.randn(50, 100))
self._check_roundtrip(df, tm.assert_frame_equal, setup_path)
@td.xfail_non_writeable
def test_put_mixed_type(self, setup_path):
df = tm.makeTimeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "df")
# PerformanceWarning
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
store.put("df", df)
expected = store.get("df")
tm.assert_frame_equal(expected, df)
@pytest.mark.filterwarnings(
"ignore:object name:tables.exceptions.NaturalNameWarning"
)
def test_append(self, setup_path):
with ensure_clean_store(setup_path) as store:
# this is allowed, but you almost always don't want to do it
# tables.NaturalNameWarning):
with catch_warnings(record=True):
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df[:10])
store.append("df1", df[10:])
tm.assert_frame_equal(store["df1"], df)
_maybe_remove(store, "df2")
store.put("df2", df[:10], format="table")
store.append("df2", df[10:])
tm.assert_frame_equal(store["df2"], df)
_maybe_remove(store, "df3")
store.append("/df3", df[:10])
store.append("/df3", df[10:])
tm.assert_frame_equal(store["df3"], df)
# this is allowed, but you almost always don't want to do it
# tables.NaturalNameWarning
_maybe_remove(store, "/df3 foo")
store.append("/df3 foo", df[:10])
store.append("/df3 foo", df[10:])
tm.assert_frame_equal(store["df3 foo"], df)
# dtype issues - mixed type in a single object column
df = DataFrame(data=[[1, 2], [0, 1], [1, 2], [0, 0]])
df["mixed_column"] = "testing"
df.loc[2, "mixed_column"] = np.nan
_maybe_remove(store, "df")
store.append("df", df)
tm.assert_frame_equal(store["df"], df)
# uints - test storage of uints
uint_data = DataFrame(
{
"u08": Series(
np.random.randint(0, high=255, size=5), dtype=np.uint8
),
"u16": Series(
np.random.randint(0, high=65535, size=5), dtype=np.uint16
),
"u32": Series(
np.random.randint(0, high=2 ** 30, size=5), dtype=np.uint32
),
"u64": Series(
[2 ** 58, 2 ** 59, 2 ** 60, 2 ** 61, 2 ** 62],
dtype=np.uint64,
),
},
index=np.arange(5),
)
_maybe_remove(store, "uints")
store.append("uints", uint_data)
tm.assert_frame_equal(store["uints"], uint_data)
# uints - test storage of uints in indexable columns
_maybe_remove(store, "uints")
# 64-bit indices not yet supported
store.append("uints", uint_data, data_columns=["u08", "u16", "u32"])
tm.assert_frame_equal(store["uints"], uint_data)
def test_append_series(self, setup_path):
with ensure_clean_store(setup_path) as store:
# basic
ss = tm.makeStringSeries()
ts = tm.makeTimeSeries()
ns = Series(np.arange(100))
store.append("ss", ss)
result = store["ss"]
tm.assert_series_equal(result, ss)
assert result.name is None
store.append("ts", ts)
result = store["ts"]
tm.assert_series_equal(result, ts)
assert result.name is None
ns.name = "foo"
store.append("ns", ns)
result = store["ns"]
tm.assert_series_equal(result, ns)
assert result.name == ns.name
# select on the values
expected = ns[ns > 60]
result = store.select("ns", "foo>60")
tm.assert_series_equal(result, expected)
# select on the index and values
expected = ns[(ns > 70) & (ns.index < 90)]
result = store.select("ns", "foo>70 and index<90")
tm.assert_series_equal(result, expected)
# multi-index
mi = DataFrame(np.random.randn(5, 1), columns=["A"])
mi["B"] = np.arange(len(mi))
mi["C"] = "foo"
mi.loc[3:5, "C"] = "bar"
mi.set_index(["C", "B"], inplace=True)
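# stack() adds the column name as a third index level; dropping it leaves a
# two-level MultiIndex Series to append.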
s = mi.stack()
s.index = s.index.droplevel(2)
store.append("mi", s)
tm.assert_series_equal(store["mi"], s)
def test_store_index_types(self, setup_path):
# GH5386
# test storing various index types
with ensure_clean_store(setup_path) as store:
def check(format, index):
df = DataFrame(np.random.randn(10, 2), columns=list("AB"))
df.index = index(len(df))
_maybe_remove(store, "df")
store.put("df", df, format=format)
tm.assert_frame_equal(df, store["df"])
for index in [
tm.makeFloatIndex,
tm.makeStringIndex,
tm.makeIntIndex,
tm.makeDateIndex,
]:
check("table", index)
check("fixed", index)
# period index currently broken for table
# see GH7796 FIXME
check("fixed", tm.makePeriodIndex)
# check('table',tm.makePeriodIndex)
# unicode
index = tm.makeUnicodeIndex
check("table", index)
check("fixed", index)
@pytest.mark.skipif(
not is_platform_little_endian(), reason="platform is not little endian"
)
def test_encoding(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(dict(A="foo", B="bar"), index=range(5))
df.loc[2, "A"] = np.nan
df.loc[3, "B"] = np.nan
_maybe_remove(store, "df")
store.append("df", df, encoding="ascii")
tm.assert_frame_equal(store["df"], df)
expected = df.reindex(columns=["A"])
result = store.select("df", Term("columns=A", encoding="ascii"))
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"val",
[
[b"E\xc9, 17", b"", b"a", b"b", b"c"],
[b"E\xc9, 17", b"a", b"b", b"c"],
[b"EE, 17", b"", b"a", b"b", b"c"],
[b"E\xc9, 17", b"\xf8\xfc", b"a", b"b", b"c"],
[b"", b"a", b"b", b"c"],
[b"\xf8\xfc", b"a", b"b", b"c"],
[b"A\xf8\xfc", b"", b"a", b"b", b"c"],
[np.nan, b"", b"b", b"c"],
[b"A\xf8\xfc", np.nan, b"", b"b", b"c"],
],
)
@pytest.mark.parametrize("dtype", ["category", object])
def test_latin_encoding(self, setup_path, dtype, val):
enc = "latin-1"
nan_rep = ""
key = "data"
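# Decode the raw bytes up front; the store should round-trip the resulting
# unicode values through latin-1 with "" acting as the NaN sentinel.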
val = [x.decode(enc) if isinstance(x, bytes) else x for x in val]
ser = pd.Series(val, dtype=dtype)
with ensure_clean_path(setup_path) as store:
ser.to_hdf(store, key, format="table", encoding=enc, nan_rep=nan_rep)
retr = read_hdf(store, key)
s_nan = ser.replace(nan_rep, np.nan)
if is_categorical_dtype(s_nan):
assert is_categorical_dtype(retr)
tm.assert_series_equal(
s_nan, retr, check_dtype=False, check_categorical=False
)
else:
tm.assert_series_equal(s_nan, retr)
# FIXME: don't leave commented-out
# fails:
# for x in examples:
# roundtrip(s, nan_rep=b'\xf8\xfc')
def test_append_some_nans(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(
{
"A": Series(np.random.randn(20)).astype("int32"),
"A1": np.random.randn(20),
"A2": np.random.randn(20),
"B": "foo",
"C": "bar",
"D": Timestamp("20010101"),
"E": datetime.datetime(2001, 1, 2, 0, 0),
},
index=np.arange(20),
)
# some nans
_maybe_remove(store, "df1")
df.loc[0:15, ["A1", "B", "D", "E"]] = np.nan
store.append("df1", df[:10])
store.append("df1", df[10:])
tm.assert_frame_equal(store["df1"], df)
# first column
df1 = df.copy()
df1.loc[:, "A1"] = np.nan
_maybe_remove(store, "df1")
store.append("df1", df1[:10])
store.append("df1", df1[10:])
tm.assert_frame_equal(store["df1"], df1)
# 2nd column
df2 = df.copy()
df2.loc[:, "A2"] = np.nan
_maybe_remove(store, "df2")
store.append("df2", df2[:10])
store.append("df2", df2[10:])
tm.assert_frame_equal(store["df2"], df2)
# datetimes
df3 = df.copy()
df3.loc[:, "E"] = np.nan
_maybe_remove(store, "df3")
store.append("df3", df3[:10])
store.append("df3", df3[10:])
tm.assert_frame_equal(store["df3"], df3)
def test_append_all_nans(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(
{"A1": np.random.randn(20), "A2": np.random.randn(20)},
index=np.arange(20),
)
df.loc[0:15, :] = np.nan
# nan some entire rows (dropna=True)
_maybe_remove(store, "df")
store.append("df", df[:10], dropna=True)
store.append("df", df[10:], dropna=True)
tm.assert_frame_equal(store["df"], df[-4:])
# nan some entire rows (dropna=False)
_maybe_remove(store, "df2")
store.append("df2", df[:10], dropna=False)
store.append("df2", df[10:], dropna=False)
tm.assert_frame_equal(store["df2"], df)
# tests the option io.hdf.dropna_table
pd.set_option("io.hdf.dropna_table", False)
_maybe_remove(store, "df3")
store.append("df3", df[:10])
store.append("df3", df[10:])
tm.assert_frame_equal(store["df3"], df)
pd.set_option("io.hdf.dropna_table", True)
_maybe_remove(store, "df4")
store.append("df4", df[:10])
store.append("df4", df[10:])
tm.assert_frame_equal(store["df4"], df[-4:])
# nan some entire rows (strings are still written!)
df = DataFrame(
{
"A1": np.random.randn(20),
"A2": np.random.randn(20),
"B": "foo",
"C": "bar",
},
index=np.arange(20),
)
df.loc[0:15, :] = np.nan
_maybe_remove(store, "df")
store.append("df", df[:10], dropna=True)
store.append("df", df[10:], dropna=True)
tm.assert_frame_equal(store["df"], df)
_maybe_remove(store, "df2")
store.append("df2", df[:10], dropna=False)
store.append("df2", df[10:], dropna=False)
tm.assert_frame_equal(store["df2"], df)
# nan some entire rows (but since we have dates they are still
# written!)
df = DataFrame(
{
"A1": np.random.randn(20),
"A2": np.random.randn(20),
"B": "foo",
"C": "bar",
"D": Timestamp("20010101"),
"E": datetime.datetime(2001, 1, 2, 0, 0),
},
index=np.arange(20),
)
df.loc[0:15, :] = np.nan
_maybe_remove(store, "df")
store.append("df", df[:10], dropna=True)
store.append("df", df[10:], dropna=True)
tm.assert_frame_equal(store["df"], df)
_maybe_remove(store, "df2")
store.append("df2", df[:10], dropna=False)
store.append("df2", df[10:], dropna=False)
tm.assert_frame_equal(store["df2"], df)
# Test to make sure defaults are to not drop.
# Corresponding to Issue 9382
df_with_missing = DataFrame(
{"col1": [0, np.nan, 2], "col2": [1, np.nan, np.nan]}
)
with ensure_clean_path(setup_path) as path:
df_with_missing.to_hdf(path, "df_with_missing", format="table")
reloaded = read_hdf(path, "df_with_missing")
tm.assert_frame_equal(df_with_missing, reloaded)
def test_read_missing_key_close_store(self, setup_path):
# GH 25766
with ensure_clean_path(setup_path) as path:
df = pd.DataFrame({"a": range(2), "b": range(2)})
df.to_hdf(path, "k1")
with pytest.raises(KeyError, match="'No object named k2 in the file'"):
pd.read_hdf(path, "k2")
# smoke test to test that file is properly closed after
# read with KeyError before another write
df.to_hdf(path, "k2")
def test_read_missing_key_opened_store(self, setup_path):
# GH 28699
with ensure_clean_path(setup_path) as path:
df = pd.DataFrame({"a": range(2), "b": range(2)})
df.to_hdf(path, "k1")
store = pd.HDFStore(path, "r")
with pytest.raises(KeyError, match="'No object named k2 in the file'"):
pd.read_hdf(store, "k2")
# Test that the file is still open after a KeyError and that we can
# still read from it.
pd.read_hdf(store, "k1")
def test_append_frame_column_oriented(self, setup_path):
with ensure_clean_store(setup_path) as store:
# column oriented
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
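# axes=["columns"] makes columns the appendable axis, so the second append adds
# the remaining columns rather than new rows.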
store.append("df1", df.iloc[:, :2], axes=["columns"])
store.append("df1", df.iloc[:, 2:])
tm.assert_frame_equal(store["df1"], df)
result = store.select("df1", "columns=A")
expected = df.reindex(columns=["A"])
tm.assert_frame_equal(expected, result)
# selection on the non-indexable
result = store.select("df1", ("columns=A", "index=df.index[0:4]"))
expected = df.reindex(columns=["A"], index=df.index[0:4])
tm.assert_frame_equal(expected, result)
# this isn't supported
with pytest.raises(TypeError):
store.select("df1", "columns=A and index>df.index[4]")
def test_append_with_different_block_ordering(self, setup_path):
# GH 4096; using same frames, but different block orderings
with ensure_clean_store(setup_path) as store:
for i in range(10):
df = DataFrame(np.random.randn(10, 2), columns=list("AB"))
df["index"] = range(10)
df["index"] += i * 10
df["int64"] = Series([1] * len(df), dtype="int64")
df["int16"] = Series([1] * len(df), dtype="int16")
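# Deleting/re-adding 'int64' and popping 'A' perturbs the internal block
# ordering from iteration to iteration without changing the data.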
if i % 2 == 0:
del df["int64"]
df["int64"] = Series([1] * len(df), dtype="int64")
if i % 3 == 0:
a = df.pop("A")
df["A"] = a
df.set_index("index", inplace=True)
store.append("df", df)
# test a different ordering but with more fields (like an invalid
# combination)
with ensure_clean_store(setup_path) as store:
df = DataFrame(np.random.randn(10, 2), columns=list("AB"), dtype="float64")
df["int64"] = Series([1] * len(df), dtype="int64")
df["int16"] = Series([1] * len(df), dtype="int16")
store.append("df", df)
# store additional fields in different blocks
df["int16_2"] = Series([1] * len(df), dtype="int16")
with pytest.raises(ValueError):
store.append("df", df)
# store multiple additional fields in different blocks
df["float_3"] = Series([1.0] * len(df), dtype="float64")
with pytest.raises(ValueError):
store.append("df", df)
def test_append_with_strings(self, setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
def check_col(key, name, size):
assert (
getattr(store.get_storer(key).table.description, name).itemsize
== size
)
# avoid truncation on elements
df = DataFrame([[123, "asdqwerty"], [345, "dggnhebbsdfbdfb"]])
store.append("df_big", df)
tm.assert_frame_equal(store.select("df_big"), df)
check_col("df_big", "values_block_1", 15)
# appending smaller string ok
df2 = DataFrame([[124, "asdqy"], [346, "dggnhefbdfb"]])
store.append("df_big", df2)
expected = concat([df, df2])
tm.assert_frame_equal(store.select("df_big"), expected)
check_col("df_big", "values_block_1", 15)
# avoid truncation on elements
df = DataFrame([[123, "asdqwerty"], [345, "dggnhebbsdfbdfb"]])
store.append("df_big2", df, min_itemsize={"values": 50})
tm.assert_frame_equal(store.select("df_big2"), df)
check_col("df_big2", "values_block_1", 50)
# bigger string on next append
store.append("df_new", df)
df_new = DataFrame(
[[124, "abcdefqhij"], [346, "abcdefghijklmnopqrtsuvwxyz"]]
)
with pytest.raises(ValueError):
store.append("df_new", df_new)
# min_itemsize on Series index (GH 11412)
df = tm.makeMixedDataFrame().set_index("C")
store.append("ss", df["B"], min_itemsize={"index": 4})
tm.assert_series_equal(store.select("ss"), df["B"])
# same as above, with data_columns=True
store.append(
"ss2", df["B"], data_columns=True, min_itemsize={"index": 4}
)
tm.assert_series_equal(store.select("ss2"), df["B"])
# min_itemsize in index without appending (GH 10381)
store.put("ss3", df, format="table", min_itemsize={"index": 6})
# just make sure there is a longer string:
df2 = df.copy().reset_index().assign(C="longer").set_index("C")
store.append("ss3", df2)
tm.assert_frame_equal(store.select("ss3"), pd.concat([df, df2]))
# same as above, with a Series
store.put("ss4", df["B"], format="table", min_itemsize={"index": 6})
store.append("ss4", df2["B"])
tm.assert_series_equal(
store.select("ss4"), pd.concat([df["B"], df2["B"]])
)
# with nans
_maybe_remove(store, "df")
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df.loc[1:4, "string"] = np.nan
df["string2"] = "bar"
df.loc[4:8, "string2"] = np.nan
df["string3"] = "bah"
df.loc[1:, "string3"] = np.nan
store.append("df", df)
result = store.select("df")
tm.assert_frame_equal(result, df)
with ensure_clean_store(setup_path) as store:
def check_col(key, name, size):
assert getattr(
store.get_storer(key).table.description, name
).itemsize == size
df = DataFrame(dict(A="foo", B="bar"), index=range(10))
# a min_itemsize that creates a data_column
_maybe_remove(store, "df")
store.append("df", df, min_itemsize={"A": 200})
check_col("df", "A", 200)
assert store.get_storer("df").data_columns == ["A"]
# a min_itemsize that creates a data_column2
_maybe_remove(store, "df")
store.append("df", df, data_columns=["B"], min_itemsize={"A": 200})
check_col("df", "A", 200)
assert store.get_storer("df").data_columns == ["B", "A"]
# a min_itemsize that creates a data_column2
_maybe_remove(store, "df")
store.append("df", df, data_columns=["B"], min_itemsize={"values": 200})
check_col("df", "B", 200)
check_col("df", "values_block_0", 200)
assert store.get_storer("df").data_columns == ["B"]
# infer the .typ on subsequent appends
_maybe_remove(store, "df")
store.append("df", df[:5], min_itemsize=200)
store.append("df", df[5:], min_itemsize=200)
tm.assert_frame_equal(store["df"], df)
# invalid min_itemsize keys
df = DataFrame(["foo", "foo", "foo", "barh", "barh", "barh"], columns=["A"])
_maybe_remove(store, "df")
with pytest.raises(ValueError):
store.append("df", df, min_itemsize={"foo": 20, "foobar": 20})
def test_append_with_empty_string(self, setup_path):
with ensure_clean_store(setup_path) as store:
# with all empty strings (GH 12242)
df = DataFrame({"x": ["a", "b", "c", "d", "e", "f", ""]})
store.append("df", df[:-1], min_itemsize={"x": 1})
store.append("df", df[-1:], min_itemsize={"x": 1})
tm.assert_frame_equal(store.select("df"), df)
def test_to_hdf_with_min_itemsize(self, setup_path):
with ensure_clean_path(setup_path) as path:
# min_itemsize in index with to_hdf (GH 10381)
df = tm.makeMixedDataFrame().set_index("C")
df.to_hdf(path, "ss3", format="table", min_itemsize={"index": 6})
# just make sure there is a longer string:
df2 = df.copy().reset_index().assign(C="longer").set_index("C")
df2.to_hdf(path, "ss3", append=True, format="table")
tm.assert_frame_equal(pd.read_hdf(path, "ss3"), pd.concat([df, df2]))
# same as above, with a Series
df["B"].to_hdf(path, "ss4", format="table", min_itemsize={"index": 6})
df2["B"].to_hdf(path, "ss4", append=True, format="table")
tm.assert_series_equal(
pd.read_hdf(path, "ss4"), pd.concat([df["B"], df2["B"]])
)
@pytest.mark.parametrize(
"format", [pytest.param("fixed", marks=td.xfail_non_writeable), "table"]
)
def test_to_hdf_errors(self, format, setup_path):
data = ["\ud800foo"]
ser = pd.Series(data, index=pd.Index(data))
with ensure_clean_path(setup_path) as path:
# GH 20835
ser.to_hdf(path, "table", format=format, errors="surrogatepass")
result = pd.read_hdf(path, "table", errors="surrogatepass")
tm.assert_series_equal(result, ser)
def test_append_with_data_columns(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame()
df.iloc[0, df.columns.get_loc("B")] = 1.0
_maybe_remove(store, "df")
store.append("df", df[:2], data_columns=["B"])
store.append("df", df[2:])
tm.assert_frame_equal(store["df"], df)
# check that we have indices created
assert store._handle.root.df.table.cols.index.is_indexed is True
assert store._handle.root.df.table.cols.B.is_indexed is True
# data column searching
result = store.select("df", "B>0")
expected = df[df.B > 0]
tm.assert_frame_equal(result, expected)
# data column searching (with an indexable and a data_columns)
result = store.select("df", "B>0 and index>df.index[3]")
df_new = df.reindex(index=df.index[4:])
expected = df_new[df_new.B > 0]
tm.assert_frame_equal(result, expected)
# data column selection with a string data_column
df_new = df.copy()
df_new["string"] = "foo"
df_new.loc[1:4, "string"] = np.nan
df_new.loc[5:6, "string"] = "bar"
_maybe_remove(store, "df")
store.append("df", df_new, data_columns=["string"])
result = store.select("df", "string='foo'")
expected = df_new[df_new.string == "foo"]
tm.assert_frame_equal(result, expected)
# using min_itemsize and a data column
def check_col(key, name, size):
assert (
getattr(store.get_storer(key).table.description, name).itemsize
== size
)
with ensure_clean_store(setup_path) as store:
_maybe_remove(store, "df")
store.append(
"df", df_new, data_columns=["string"], min_itemsize={"string": 30}
)
check_col("df", "string", 30)
_maybe_remove(store, "df")
store.append("df", df_new, data_columns=["string"], min_itemsize=30)
check_col("df", "string", 30)
_maybe_remove(store, "df")
store.append(
"df", df_new, data_columns=["string"], min_itemsize={"values": 30}
)
check_col("df", "string", 30)
with ensure_clean_store(setup_path) as store:
df_new["string2"] = "foobarbah"
df_new["string_block1"] = "foobarbah1"
df_new["string_block2"] = "foobarbah2"
_maybe_remove(store, "df")
store.append(
"df",
df_new,
data_columns=["string", "string2"],
min_itemsize={"string": 30, "string2": 40, "values": 50},
)
check_col("df", "string", 30)
check_col("df", "string2", 40)
check_col("df", "values_block_1", 50)
with ensure_clean_store(setup_path) as store:
# multiple data columns
df_new = df.copy()
df_new.iloc[0, df_new.columns.get_loc("A")] = 1.0
df_new.iloc[0, df_new.columns.get_loc("B")] = -1.0
df_new["string"] = "foo"
sl = df_new.columns.get_loc("string")
df_new.iloc[1:4, sl] = np.nan
df_new.iloc[5:6, sl] = "bar"
df_new["string2"] = "foo"
sl = df_new.columns.get_loc("string2")
df_new.iloc[2:5, sl] = np.nan
df_new.iloc[7:8, sl] = "bar"
_maybe_remove(store, "df")
store.append("df", df_new, data_columns=["A", "B", "string", "string2"])
result = store.select(
"df", "string='foo' and string2='foo' and A>0 and B<0"
)
expected = df_new[
(df_new.string == "foo")
& (df_new.string2 == "foo")
& (df_new.A > 0)
& (df_new.B < 0)
]
tm.assert_frame_equal(result, expected, check_index_type=False)
# yield an empty frame
result = store.select("df", "string='foo' and string2='cool'")
expected = df_new[(df_new.string == "foo") & (df_new.string2 == "cool")]
tm.assert_frame_equal(result, expected, check_index_type=False)
with ensure_clean_store(setup_path) as store:
# doc example
df_dc = df.copy()
df_dc["string"] = "foo"
df_dc.loc[4:6, "string"] = np.nan
df_dc.loc[7:9, "string"] = "bar"
df_dc["string2"] = "cool"
df_dc["datetime"] = Timestamp("20010102")
df_dc = df_dc._convert(datetime=True)
df_dc.loc[3:5, ["A", "B", "datetime"]] = np.nan
_maybe_remove(store, "df_dc")
store.append(
"df_dc", df_dc, data_columns=["B", "C", "string", "string2", "datetime"]
)
result = store.select("df_dc", "B>0")
expected = df_dc[df_dc.B > 0]
tm.assert_frame_equal(result, expected, check_index_type=False)
result = store.select("df_dc", ["B > 0", "C > 0", "string == foo"])
expected = df_dc[(df_dc.B > 0) & (df_dc.C > 0) & (df_dc.string == "foo")]
tm.assert_frame_equal(result, expected, check_index_type=False)
with ensure_clean_store(setup_path) as store:
# doc example part 2
np.random.seed(1234)
index = date_range("1/1/2000", periods=8)
df_dc = DataFrame(
np.random.randn(8, 3), index=index, columns=["A", "B", "C"]
)
df_dc["string"] = "foo"
df_dc.loc[4:6, "string"] = np.nan
df_dc.loc[7:9, "string"] = "bar"
df_dc.loc[:, ["B", "C"]] = df_dc.loc[:, ["B", "C"]].abs()
df_dc["string2"] = "cool"
# on-disk operations
store.append("df_dc", df_dc, data_columns=["B", "C", "string", "string2"])
result = store.select("df_dc", "B>0")
expected = df_dc[df_dc.B > 0]
tm.assert_frame_equal(result, expected)
result = store.select("df_dc", ["B > 0", "C > 0", 'string == "foo"'])
expected = df_dc[(df_dc.B > 0) & (df_dc.C > 0) & (df_dc.string == "foo")]
tm.assert_frame_equal(result, expected)
def test_create_table_index(self, setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
def col(t, column):
return getattr(store.get_storer(t).table.cols, column)
# data columns
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df["string2"] = "bar"
store.append("f", df, data_columns=["string", "string2"])
assert col("f", "index").is_indexed is True
assert col("f", "string").is_indexed is True
assert col("f", "string2").is_indexed is True
# specify index=columns
store.append(
"f2", df, index=["string"], data_columns=["string", "string2"]
)
assert col("f2", "index").is_indexed is False
assert col("f2", "string").is_indexed is True
assert col("f2", "string2").is_indexed is False
# try to index a non-table
_maybe_remove(store, "f2")
store.put("f2", df)
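# put() without format="table" writes a fixed-format store, which cannot be indexed,
# hence the TypeError below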
with pytest.raises(TypeError):
store.create_table_index("f2")
def test_append_hierarchical(self, setup_path):
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["foo", "bar"],
)
df = DataFrame(np.random.randn(10, 3), index=index, columns=["A", "B", "C"])
with ensure_clean_store(setup_path) as store:
store.append("mi", df)
result = store.select("mi")
tm.assert_frame_equal(result, df)
# GH 3748
result = store.select("mi", columns=["A", "B"])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(result, expected)
with ensure_clean_path("test.hdf") as path:
df.to_hdf(path, "df", format="table")
result = read_hdf(path, "df", columns=["A", "B"])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(result, expected)
def test_column_multiindex(self, setup_path):
# GH 4710
# recreate multi-indexes properly
index = MultiIndex.from_tuples(
[("A", "a"), ("A", "b"), ("B", "a"), ("B", "b")], names=["first", "second"]
)
df = DataFrame(np.arange(12).reshape(3, 4), columns=index)
expected = df.copy()
if isinstance(expected.index, RangeIndex):
expected.index = Int64Index(expected.index)
with ensure_clean_store(setup_path) as store:
store.put("df", df)
tm.assert_frame_equal(
store["df"], expected, check_index_type=True, check_column_type=True
)
store.put("df1", df, format="table")
tm.assert_frame_equal(
store["df1"], expected, check_index_type=True, check_column_type=True
)
with pytest.raises(ValueError):
store.put("df2", df, format="table", data_columns=["A"])
with pytest.raises(ValueError):
store.put("df3", df, format="table", data_columns=True)
# appending multi-column on existing table (see GH 6167)
with ensure_clean_store(setup_path) as store:
store.append("df2", df)
store.append("df2", df)
tm.assert_frame_equal(store["df2"], concat((df, df)))
# non_index_axes name
df = DataFrame(
np.arange(12).reshape(3, 4), columns=Index(list("ABCD"), name="foo")
)
expected = df.copy()
if isinstance(expected.index, RangeIndex):
expected.index = Int64Index(expected.index)
with ensure_clean_store(setup_path) as store:
store.put("df1", df, format="table")
tm.assert_frame_equal(
store["df1"], expected, check_index_type=True, check_column_type=True
)
def test_store_multiindex(self, setup_path):
# validate multi-index names
# GH 5527
with ensure_clean_store(setup_path) as store:
def make_index(names=None):
return MultiIndex.from_tuples(
[
(datetime.datetime(2013, 12, d), s, t)
for d in range(1, 3)
for s in range(2)
for t in range(3)
],
names=names,
)
# no names
_maybe_remove(store, "df")
df = DataFrame(np.zeros((12, 2)), columns=["a", "b"], index=make_index())
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
# partial names
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", None, None]),
)
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
# series
_maybe_remove(store, "s")
s = Series(np.zeros(12), index=make_index(["date", None, None]))
store.append("s", s)
xp = Series(np.zeros(12), index=make_index(["date", "level_1", "level_2"]))
tm.assert_series_equal(store.select("s"), xp)
# dup with column
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", "a", "t"]),
)
with pytest.raises(ValueError):
store.append("df", df)
# dup within level
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", "date", "date"]),
)
with pytest.raises(ValueError):
store.append("df", df)
# fully named
_maybe_remove(store, "df")
df = DataFrame(
np.zeros((12, 2)),
columns=["a", "b"],
index=make_index(["date", "s", "t"]),
)
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
def test_select_columns_in_where(self, setup_path):
# GH 6169
# recreate multi-indexes when columns is passed
# in the `where` argument
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["foo_name", "bar_name"],
)
# With a DataFrame
df = DataFrame(np.random.randn(10, 3), index=index, columns=["A", "B", "C"])
with ensure_clean_store(setup_path) as store:
store.put("df", df, format="table")
expected = df[["A"]]
tm.assert_frame_equal(store.select("df", columns=["A"]), expected)
tm.assert_frame_equal(store.select("df", where="columns=['A']"), expected)
# With a Series
s = Series(np.random.randn(10), index=index, name="A")
with ensure_clean_store(setup_path) as store:
store.put("s", s, format="table")
tm.assert_series_equal(store.select("s", where="columns=['A']"), s)
def test_mi_data_columns(self, setup_path):
# GH 14435
idx = pd.MultiIndex.from_arrays(
[date_range("2000-01-01", periods=5), range(5)], names=["date", "id"]
)
df = pd.DataFrame({"a": [1.1, 1.2, 1.3, 1.4, 1.5]}, index=idx)
with ensure_clean_store(setup_path) as store:
store.append("df", df, data_columns=True)
actual = store.select("df", where="id == 1")
expected = df.iloc[[1], :]
tm.assert_frame_equal(actual, expected)
def test_pass_spec_to_storer(self, setup_path):
df = tm.makeDataFrame()
with ensure_clean_store(setup_path) as store:
store.put("df", df)
with pytest.raises(TypeError):
store.select("df", columns=["A"])
with pytest.raises(TypeError):
store.select("df", where=[("columns=A")])
@td.xfail_non_writeable
def test_append_misc(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
store.append("df", df, chunksize=1)
result = store.select("df")
tm.assert_frame_equal(result, df)
store.append("df1", df, expectedrows=10)
result = store.select("df1")
tm.assert_frame_equal(result, df)
# more chunksize in append tests
def check(obj, comparator):
for c in [10, 200, 1000]:
with ensure_clean_store(setup_path, mode="w") as store:
store.append("obj", obj, chunksize=c)
result = store.select("obj")
comparator(result, obj)
df = tm.makeDataFrame()
df["string"] = "foo"
df["float322"] = 1.0
df["float322"] = df["float322"].astype("float32")
df["bool"] = df["float322"] > 0
df["time1"] = Timestamp("20130101")
df["time2"] = Timestamp("20130102")
check(df, tm.assert_frame_equal)
# empty frame, GH4273
with ensure_clean_store(setup_path) as store:
# 0 len
df_empty = DataFrame(columns=list("ABC"))
store.append("df", df_empty)
with pytest.raises(KeyError, match="'No object named df in the file'"):
store.select("df")
# repeated append of 0/non-zero frames
df = DataFrame(np.random.rand(10, 3), columns=list("ABC"))
store.append("df", df)
tm.assert_frame_equal(store.select("df"), df)
store.append("df", df_empty)
tm.assert_frame_equal(store.select("df"), df)
# store
df = DataFrame(columns=list("ABC"))
store.put("df2", df)
tm.assert_frame_equal(store.select("df2"), df)
def test_append_raise(self, setup_path):
with ensure_clean_store(setup_path) as store:
# test append with invalid input to get good error messages
# list in column
df = tm.makeDataFrame()
df["invalid"] = [["a"]] * len(df)
assert df.dtypes["invalid"] == np.object_
with pytest.raises(TypeError):
store.append("df", df)
# multiple invalid columns
df["invalid2"] = [["a"]] * len(df)
df["invalid3"] = [["a"]] * len(df)
with pytest.raises(TypeError):
store.append("df", df)
# datetime with embedded nans as object
df = tm.makeDataFrame()
s = Series(datetime.datetime(2001, 1, 2), index=df.index)
s = s.astype(object)
s[0:5] = np.nan
df["invalid"] = s
assert df.dtypes["invalid"] == np.object_
with pytest.raises(TypeError):
store.append("df", df)
# directly ndarray
with pytest.raises(TypeError):
store.append("df", np.arange(10))
# series directly
with pytest.raises(TypeError):
store.append("df", Series(np.arange(10)))
# appending an incompatible table
df = tm.makeDataFrame()
store.append("df", df)
df["foo"] = "foo"
with pytest.raises(ValueError):
store.append("df", df)
def test_table_index_incompatible_dtypes(self, setup_path):
df1 = DataFrame({"a": [1, 2, 3]})
df2 = DataFrame({"a": [4, 5, 6]}, index=date_range("1/1/2000", periods=3))
with ensure_clean_store(setup_path) as store:
store.put("frame", df1, format="table")
with pytest.raises(TypeError):
store.put("frame", df2, format="table", append=True)
def test_table_values_dtypes_roundtrip(self, setup_path):
with ensure_clean_store(setup_path) as store:
df1 = DataFrame({"a": [1, 2, 3]}, dtype="f8")
store.append("df_f8", df1)
tm.assert_series_equal(df1.dtypes, store["df_f8"].dtypes)
df2 = DataFrame({"a": [1, 2, 3]}, dtype="i8")
store.append("df_i8", df2)
tm.assert_series_equal(df2.dtypes, store["df_i8"].dtypes)
# incompatible dtype
with pytest.raises(ValueError):
store.append("df_i8", df1)
# check creation/storage/retrieval of float32 (a bit hacky to
# actually create them though)
df1 = DataFrame(np.array([[1], [2], [3]], dtype="f4"), columns=["A"])
store.append("df_f4", df1)
tm.assert_series_equal(df1.dtypes, store["df_f4"].dtypes)
assert df1.dtypes[0] == "float32"
# check with mixed dtypes
df1 = DataFrame(
{
c: Series(np.random.randint(5), dtype=c)
for c in ["float32", "float64", "int32", "int64", "int16", "int8"]
}
)
df1["string"] = "foo"
df1["float322"] = 1.0
df1["float322"] = df1["float322"].astype("float32")
df1["bool"] = df1["float32"] > 0
df1["time1"] = Timestamp("20130101")
df1["time2"] = Timestamp("20130102")
store.append("df_mixed_dtypes1", df1)
result = store.select("df_mixed_dtypes1").dtypes.value_counts()
result.index = [str(i) for i in result.index]
expected = Series(
{
"float32": 2,
"float64": 1,
"int32": 1,
"bool": 1,
"int16": 1,
"int8": 1,
"int64": 1,
"object": 1,
"datetime64[ns]": 2,
}
)
result = result.sort_index()
expected = expected.sort_index()
tm.assert_series_equal(result, expected)
def test_table_mixed_dtypes(self, setup_path):
# frame
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(setup_path) as store:
store.append("df1_mixed", df)
tm.assert_frame_equal(store.select("df1_mixed"), df)
def test_unimplemented_dtypes_table_columns(self, setup_path):
with ensure_clean_store(setup_path) as store:
dtypes = [("date", datetime.date(2001, 1, 2))]
# currently not supported dtypes ####
for n, f in dtypes:
df = tm.makeDataFrame()
df[n] = f
with pytest.raises(TypeError):
store.append("df1_{n}".format(n=n), df)
# frame
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["datetime1"] = datetime.date(2001, 1, 2)
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(setup_path) as store:
# this fails because we have a date in the object block
with pytest.raises(TypeError):
store.append("df_unimplemented", df)
@td.xfail_non_writeable
@pytest.mark.skipif(
LooseVersion(np.__version__) == LooseVersion("1.15.0"),
reason=(
"Skipping pytables test when numpy version is "
"exactly equal to 1.15.0: gh-22098"
),
)
def test_calendar_roundtrip_issue(self, setup_path):
# 8591
# doc example from tseries holiday section
weekmask_egypt = "Sun Mon Tue Wed Thu"
holidays = [
"2012-05-01",
datetime.datetime(2013, 5, 1),
np.datetime64("2014-05-01"),
]
bday_egypt = pd.offsets.CustomBusinessDay(
holidays=holidays, weekmask=weekmask_egypt
)
dt = datetime.datetime(2013, 4, 30)
dts = date_range(dt, periods=5, freq=bday_egypt)
s = Series(dts.weekday, dts).map(Series("Mon Tue Wed Thu Fri Sat Sun".split()))
with ensure_clean_store(setup_path) as store:
store.put("fixed", s)
result = store.select("fixed")
tm.assert_series_equal(result, s)
store.append("table", s)
result = store.select("table")
tm.assert_series_equal(result, s)
def test_roundtrip_tz_aware_index(self, setup_path):
# GH 17618
time = pd.Timestamp("2000-01-01 01:00:00", tz="US/Eastern")
df = pd.DataFrame(data=[0], index=[time])
with ensure_clean_store(setup_path) as store:
store.put("frame", df, format="fixed")
recons = store["frame"]
tm.assert_frame_equal(recons, df)
assert recons.index[0].value == 946706400000000000
def test_append_with_timedelta(self, setup_path):
# GH 3577
# append timedelta
df = DataFrame(
dict(
A=Timestamp("20130101"),
B=[
Timestamp("20130101") + timedelta(days=i, seconds=10)
for i in range(10)
],
)
)
df["C"] = df["A"] - df["B"]
df.loc[3:5, "C"] = np.nan
with ensure_clean_store(setup_path) as store:
# table
_maybe_remove(store, "df")
store.append("df", df, data_columns=True)
result = store.select("df")
tm.assert_frame_equal(result, df)
result = store.select("df", where="C<100000")
tm.assert_frame_equal(result, df)
result = store.select("df", where="C<pd.Timedelta('-3D')")
tm.assert_frame_equal(result, df.iloc[3:])
result = store.select("df", "C<'-3D'")
tm.assert_frame_equal(result, df.iloc[3:])
# a bit hacky here as we don't really deal with the NaT properly
result = store.select("df", "C<'-500000s'")
result = result.dropna(subset=["C"])
tm.assert_frame_equal(result, df.iloc[6:])
result = store.select("df", "C<'-3.5D'")
result = result.iloc[1:]
tm.assert_frame_equal(result, df.iloc[4:])
# fixed
_maybe_remove(store, "df2")
store.put("df2", df)
result = store.select("df2")
tm.assert_frame_equal(result, df)
def test_remove(self, setup_path):
with ensure_clean_store(setup_path) as store:
ts = tm.makeTimeSeries()
df = tm.makeDataFrame()
store["a"] = ts
store["b"] = df
_maybe_remove(store, "a")
assert len(store) == 1
tm.assert_frame_equal(df, store["b"])
_maybe_remove(store, "b")
assert len(store) == 0
# nonexistence
with pytest.raises(
KeyError, match="'No object named a_nonexistent_store in the file'"
):
store.remove("a_nonexistent_store")
# pathing
store["a"] = ts
store["b/foo"] = df
_maybe_remove(store, "foo")
_maybe_remove(store, "b/foo")
assert len(store) == 1
store["a"] = ts
store["b/foo"] = df
_maybe_remove(store, "b")
assert len(store) == 1
# __delitem__
store["a"] = ts
store["b"] = df
del store["a"]
del store["b"]
assert len(store) == 0
def test_invalid_terms(self, setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df.loc[0:4, "string"] = "bar"
store.put("df", df, format="table")
# some invalid terms
with pytest.raises(TypeError):
Term()
# more invalid
with pytest.raises(ValueError):
store.select("df", "df.index[3]")
with pytest.raises(SyntaxError):
store.select("df", "index>")
# from the docs
with ensure_clean_path(setup_path) as path:
dfq = DataFrame(
np.random.randn(10, 4),
columns=list("ABCD"),
index=date_range("20130101", periods=10),
)
dfq.to_hdf(path, "dfq", format="table", data_columns=True)
# check ok
read_hdf(
path, "dfq", where="index>Timestamp('20130104') & columns=['A', 'B']"
)
read_hdf(path, "dfq", where="A>0 or C>0")
# catch the invalid reference
with ensure_clean_path(setup_path) as path:
dfq = DataFrame(
np.random.randn(10, 4),
columns=list("ABCD"),
index=date_range("20130101", periods=10),
)
dfq.to_hdf(path, "dfq", format="table")
with pytest.raises(ValueError):
read_hdf(path, "dfq", where="A>0 or C>0")
def test_same_name_scoping(self, setup_path):
with ensure_clean_store(setup_path) as store:
import pandas as pd
df = DataFrame(
np.random.randn(20, 2), index=pd.date_range("20130101", periods=20)
)
store.put("df", df, format="table")
expected = df[df.index > pd.Timestamp("20130105")]
import datetime # noqa
result = store.select("df", "index>datetime.datetime(2013,1,5)")
tm.assert_frame_equal(result, expected)
from datetime import datetime # noqa
# technically an error, but allow it
result = store.select("df", "index>datetime.datetime(2013,1,5)")
tm.assert_frame_equal(result, expected)
result = store.select("df", "index>datetime(2013,1,5)")
tm.assert_frame_equal(result, expected)
def test_series(self, setup_path):
s = tm.makeStringSeries()
self._check_roundtrip(s, tm.assert_series_equal, path=setup_path)
ts = tm.makeTimeSeries()
self._check_roundtrip(ts, tm.assert_series_equal, path=setup_path)
ts2 = Series(ts.index, Index(ts.index, dtype=object))
self._check_roundtrip(ts2, tm.assert_series_equal, path=setup_path)
ts3 = Series(ts.values, Index(np.asarray(ts.index, dtype=object), dtype=object))
self._check_roundtrip(
ts3, tm.assert_series_equal, path=setup_path, check_index_type=False
)
def test_float_index(self, setup_path):
# GH #454
index = np.random.randn(10)
s = Series(np.random.randn(10), index=index)
self._check_roundtrip(s, tm.assert_series_equal, path=setup_path)
@td.xfail_non_writeable
def test_tuple_index(self, setup_path):
# GH #492
col = np.arange(10)
idx = [(0.0, 1.0), (2.0, 3.0), (4.0, 5.0)]
data = np.random.randn(30).reshape((3, 10))
DF = DataFrame(data, index=idx, columns=col)
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
self._check_roundtrip(DF, tm.assert_frame_equal, path=setup_path)
@td.xfail_non_writeable
@pytest.mark.filterwarnings("ignore::pandas.errors.PerformanceWarning")
def test_index_types(self, setup_path):
with catch_warnings(record=True):
values = np.random.randn(2)
func = lambda l, r: tm.assert_series_equal(
l, r, check_dtype=True, check_index_type=True, check_series_type=True
)
with catch_warnings(record=True):
ser = Series(values, [0, "y"])
self._check_roundtrip(ser, func, path=setup_path)
with catch_warnings(record=True):
ser = Series(values, [datetime.datetime.today(), 0])
self._check_roundtrip(ser, func, path=setup_path)
with catch_warnings(record=True):
ser = Series(values, ["y", 0])
self._check_roundtrip(ser, func, path=setup_path)
with catch_warnings(record=True):
ser = Series(values, [datetime.date.today(), "a"])
self._check_roundtrip(ser, func, path=setup_path)
with catch_warnings(record=True):
ser = Series(values, [0, "y"])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [datetime.datetime.today(), 0])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, ["y", 0])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [datetime.date.today(), "a"])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [1.23, "b"])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [1, 1.53])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(values, [1, 5])
self._check_roundtrip(ser, func, path=setup_path)
ser = Series(
values, [datetime.datetime(2012, 1, 1), datetime.datetime(2012, 1, 2)]
)
self._check_roundtrip(ser, func, path=setup_path)
def test_timeseries_preepoch(self, setup_path):
dr = bdate_range("1/1/1940", "1/1/1960")
ts = Series(np.random.randn(len(dr)), index=dr)
try:
self._check_roundtrip(ts, tm.assert_series_equal, path=setup_path)
except OverflowError:
pytest.skip("known failer on some windows platforms")
@td.xfail_non_writeable
@pytest.mark.parametrize(
"compression", [False, pytest.param(True, marks=td.skip_if_windows_python_3)]
)
def test_frame(self, compression, setup_path):
df = tm.makeDataFrame()
# put in some random NAs
df.values[0, 0] = np.nan
df.values[5, 3] = np.nan
self._check_roundtrip_table(
df, tm.assert_frame_equal, path=setup_path, compression=compression
)
self._check_roundtrip(
df, tm.assert_frame_equal, path=setup_path, compression=compression
)
tdf = tm.makeTimeDataFrame()
self._check_roundtrip(
tdf, tm.assert_frame_equal, path=setup_path, compression=compression
)
with ensure_clean_store(setup_path) as store:
# not consolidated
df["foo"] = np.random.randn(len(df))
store["df"] = df
recons = store["df"]
assert recons._data.is_consolidated()
# empty
self._check_roundtrip(df[:0], tm.assert_frame_equal, path=setup_path)
@td.xfail_non_writeable
def test_empty_series_frame(self, setup_path):
s0 = Series(dtype=object)
s1 = Series(name="myseries", dtype=object)
df0 = DataFrame()
df1 = DataFrame(index=["a", "b", "c"])
df2 = DataFrame(columns=["d", "e", "f"])
self._check_roundtrip(s0, tm.assert_series_equal, path=setup_path)
self._check_roundtrip(s1, tm.assert_series_equal, path=setup_path)
self._check_roundtrip(df0, tm.assert_frame_equal, path=setup_path)
self._check_roundtrip(df1, tm.assert_frame_equal, path=setup_path)
self._check_roundtrip(df2, tm.assert_frame_equal, path=setup_path)
@td.xfail_non_writeable
@pytest.mark.parametrize(
"dtype", [np.int64, np.float64, np.object, "m8[ns]", "M8[ns]"]
)
def test_empty_series(self, dtype, setup_path):
s = Series(dtype=dtype)
self._check_roundtrip(s, tm.assert_series_equal, path=setup_path)
def test_can_serialize_dates(self, setup_path):
rng = [x.date() for x in bdate_range("1/1/2000", "1/30/2000")]
frame = DataFrame(np.random.randn(len(rng), 4), index=rng)
self._check_roundtrip(frame, tm.assert_frame_equal, path=setup_path)
def test_store_hierarchical(self, setup_path):
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["foo", "bar"],
)
frame = DataFrame(np.random.randn(10, 3), index=index, columns=["A", "B", "C"])
self._check_roundtrip(frame, tm.assert_frame_equal, path=setup_path)
self._check_roundtrip(frame.T, tm.assert_frame_equal, path=setup_path)
self._check_roundtrip(frame["A"], tm.assert_series_equal, path=setup_path)
# check that the names are stored
with ensure_clean_store(setup_path) as store:
store["frame"] = frame
recons = store["frame"]
tm.assert_frame_equal(recons, frame)
def test_store_index_name(self, setup_path):
df = tm.makeDataFrame()
df.index.name = "foo"
with ensure_clean_store(setup_path) as store:
store["frame"] = df
recons = store["frame"]
tm.assert_frame_equal(recons, df)
def test_store_index_name_with_tz(self, setup_path):
# GH 13884
df = pd.DataFrame({"A": [1, 2]})
df.index = pd.DatetimeIndex([1234567890123456787, 1234567890123456788])
df.index = df.index.tz_localize("UTC")
df.index.name = "foo"
with ensure_clean_store(setup_path) as store:
store.put("frame", df, format="table")
recons = store["frame"]
tm.assert_frame_equal(recons, df)
@pytest.mark.parametrize("table_format", ["table", "fixed"])
def test_store_index_name_numpy_str(self, table_format, setup_path):
# GH #13492
idx = pd.Index(
pd.to_datetime([datetime.date(2000, 1, 1), datetime.date(2000, 1, 2)]),
name="cols\u05d2",
)
idx1 = pd.Index(
pd.to_datetime([datetime.date(2010, 1, 1), datetime.date(2010, 1, 2)]),
name="rows\u05d0",
)
df = pd.DataFrame(np.arange(4).reshape(2, 2), columns=idx, index=idx1)
# This used to fail, returning numpy strings instead of python strings.
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", format=table_format)
df2 = read_hdf(path, "df")
tm.assert_frame_equal(df, df2, check_names=True)
assert type(df2.index.name) == str
assert type(df2.columns.name) == str
def test_store_series_name(self, setup_path):
df = tm.makeDataFrame()
series = df["A"]
with ensure_clean_store(setup_path) as store:
store["series"] = series
recons = store["series"]
tm.assert_series_equal(recons, series)
@td.xfail_non_writeable
@pytest.mark.parametrize(
"compression", [False, pytest.param(True, marks=td.skip_if_windows_python_3)]
)
def test_store_mixed(self, compression, setup_path):
def _make_one():
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["int1"] = 1
df["int2"] = 2
return df._consolidate()
df1 = _make_one()
df2 = _make_one()
self._check_roundtrip(df1, tm.assert_frame_equal, path=setup_path)
self._check_roundtrip(df2, tm.assert_frame_equal, path=setup_path)
with ensure_clean_store(setup_path) as store:
store["obj"] = df1
tm.assert_frame_equal(store["obj"], df1)
store["obj"] = df2
tm.assert_frame_equal(store["obj"], df2)
# check that can store Series of all of these types
self._check_roundtrip(
df1["obj1"],
tm.assert_series_equal,
path=setup_path,
compression=compression,
)
self._check_roundtrip(
df1["bool1"],
tm.assert_series_equal,
path=setup_path,
compression=compression,
)
self._check_roundtrip(
df1["int1"],
tm.assert_series_equal,
path=setup_path,
compression=compression,
)
@pytest.mark.filterwarnings(
"ignore:\\nduplicate:pandas.io.pytables.DuplicateWarning"
)
def test_select_with_dups(self, setup_path):
# single dtypes
df = DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"])
df.index = date_range("20130101 9:30", periods=10, freq="T")
with ensure_clean_store(setup_path) as store:
store.append("df", df)
result = store.select("df")
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=df.columns)
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=["A"])
expected = df.loc[:, ["A"]]
tm.assert_frame_equal(result, expected)
# dups across dtypes
df = concat(
[
DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"]),
DataFrame(
np.random.randint(0, 10, size=20).reshape(10, 2), columns=["A", "C"]
),
],
axis=1,
)
df.index = date_range("20130101 9:30", periods=10, freq="T")
with ensure_clean_store(setup_path) as store:
store.append("df", df)
result = store.select("df")
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=df.columns)
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
expected = df.loc[:, ["A"]]
result = store.select("df", columns=["A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
expected = df.loc[:, ["B", "A"]]
result = store.select("df", columns=["B", "A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
# duplicates on both index and columns
with ensure_clean_store(setup_path) as store:
store.append("df", df)
store.append("df", df)
expected = df.loc[:, ["B", "A"]]
expected = concat([expected, expected])
result = store.select("df", columns=["B", "A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
def test_overwrite_node(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeDataFrame()
ts = tm.makeTimeSeries()
store["a"] = ts
tm.assert_series_equal(store["a"], ts)
def test_select(self, setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
# select with columns=
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df")
store.append("df", df)
result = store.select("df", columns=["A", "B"])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# equivalently
result = store.select("df", [("columns=['A', 'B']")])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# with a data column
_maybe_remove(store, "df")
store.append("df", df, data_columns=["A"])
result = store.select("df", ["A > 0"], columns=["A", "B"])
expected = df[df.A > 0].reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# all as data columns
_maybe_remove(store, "df")
store.append("df", df, data_columns=True)
result = store.select("df", ["A > 0"], columns=["A", "B"])
expected = df[df.A > 0].reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# with a data column, but different columns
_maybe_remove(store, "df")
store.append("df", df, data_columns=["A"])
result = store.select("df", ["A > 0"], columns=["C", "D"])
expected = df[df.A > 0].reindex(columns=["C", "D"])
tm.assert_frame_equal(expected, result)
def test_select_dtypes(self, setup_path):
with ensure_clean_store(setup_path) as store:
# with a Timestamp data column (GH #2637)
df = DataFrame(
dict(ts=bdate_range("2012-01-01", periods=300), A=np.random.randn(300))
)
_maybe_remove(store, "df")
store.append("df", df, data_columns=["ts", "A"])
result = store.select("df", "ts>=Timestamp('2012-02-01')")
expected = df[df.ts >= Timestamp("2012-02-01")]
tm.assert_frame_equal(expected, result)
# bool columns (GH #2849)
df = DataFrame(np.random.randn(5, 2), columns=["A", "B"])
df["object"] = "foo"
df.loc[4:5, "object"] = "bar"
df["boolv"] = df["A"] > 0
_maybe_remove(store, "df")
store.append("df", df, data_columns=True)
expected = df[df.boolv == True].reindex(columns=["A", "boolv"]) # noqa
for v in [True, "true", 1]:
result = store.select(
"df", "boolv == {v!s}".format(v=v), columns=["A", "boolv"]
)
tm.assert_frame_equal(expected, result)
expected = df[df.boolv == False].reindex(columns=["A", "boolv"]) # noqa
for v in [False, "false", 0]:
result = store.select(
"df", "boolv == {v!s}".format(v=v), columns=["A", "boolv"]
)
tm.assert_frame_equal(expected, result)
# integer index
df = DataFrame(dict(A=np.random.rand(20), B=np.random.rand(20)))
_maybe_remove(store, "df_int")
store.append("df_int", df)
result = store.select("df_int", "index<10 and columns=['A']")
expected = df.reindex(index=list(df.index)[0:10], columns=["A"])
tm.assert_frame_equal(expected, result)
# float index
df = DataFrame(
dict(
A=np.random.rand(20),
B=np.random.rand(20),
index=np.arange(20, dtype="f8"),
)
)
_maybe_remove(store, "df_float")
store.append("df_float", df)
result = store.select("df_float", "index<10.0 and columns=['A']")
expected = df.reindex(index=list(df.index)[0:10], columns=["A"])
tm.assert_frame_equal(expected, result)
with ensure_clean_store(setup_path) as store:
# floats w/o NaN
df = DataFrame(dict(cols=range(11), values=range(11)), dtype="float64")
df["cols"] = (df["cols"] + 10).apply(str)
store.append("df1", df, data_columns=True)
result = store.select("df1", where="values>2.0")
expected = df[df["values"] > 2.0]
tm.assert_frame_equal(expected, result)
# floats with NaN
df.iloc[0] = np.nan
expected = df[df["values"] > 2.0]
store.append("df2", df, data_columns=True, index=False)
result = store.select("df2", where="values>2.0")
tm.assert_frame_equal(expected, result)
# https://github.com/PyTables/PyTables/issues/282
# bug in selection when 0th row has a np.nan and an index
# store.append('df3',df,data_columns=True)
# result = store.select(
# 'df3', where='values>2.0')
# tm.assert_frame_equal(expected, result)
# NaN not in the first position; a float column with NaN is ok too
df = DataFrame(dict(cols=range(11), values=range(11)), dtype="float64")
df["cols"] = (df["cols"] + 10).apply(str)
df.iloc[1] = np.nan
expected = df[df["values"] > 2.0]
store.append("df4", df, data_columns=True)
result = store.select("df4", where="values>2.0")
tm.assert_frame_equal(expected, result)
# test selection with comparison against numpy scalar
# GH 11283
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
expected = df[df["A"] > 0]
store.append("df", df, data_columns=True)
np_zero = np.float64(0) # noqa
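# the where string resolves `np_zero` by name from the enclosing Python scope,
# so the query compares column A against the numpy scalar defined above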
result = store.select("df", where=["A>np_zero"])
tm.assert_frame_equal(expected, result)
def test_select_with_many_inputs(self, setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(
dict(
ts=bdate_range("2012-01-01", periods=300),
A=np.random.randn(300),
B=range(300),
users=["a"] * 50
+ ["b"] * 50
+ ["c"] * 100
+ ["a{i:03d}".format(i=i) for i in range(100)],
)
)
_maybe_remove(store, "df")
store.append("df", df, data_columns=["ts", "A", "B", "users"])
# regular select
result = store.select("df", "ts>=Timestamp('2012-02-01')")
expected = df[df.ts >= Timestamp("2012-02-01")]
tm.assert_frame_equal(expected, result)
# small selector
result = store.select(
"df", "ts>=Timestamp('2012-02-01') & users=['a','b','c']"
)
expected = df[
(df.ts >= Timestamp("2012-02-01")) & df.users.isin(["a", "b", "c"])
]
tm.assert_frame_equal(expected, result)
# big selector along the columns
selector = ["a", "b", "c"] + ["a{i:03d}".format(i=i) for i in range(60)]
result = store.select(
"df", "ts>=Timestamp('2012-02-01') and users=selector"
)
expected = df[(df.ts >= Timestamp("2012-02-01")) & df.users.isin(selector)]
tm.assert_frame_equal(expected, result)
selector = range(100, 200)
result = store.select("df", "B=selector")
expected = df[df.B.isin(selector)]
tm.assert_frame_equal(expected, result)
assert len(result) == 100
# big selector along the index
selector = Index(df.ts[0:100].values)
result = store.select("df", "ts=selector")
expected = df[df.ts.isin(selector.values)]
tm.assert_frame_equal(expected, result)
assert len(result) == 100
def test_select_iterator(self, setup_path):
# single table
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame(500)
_maybe_remove(store, "df")
store.append("df", df)
expected = store.select("df")
results = list(store.select("df", iterator=True))
result = concat(results)
tm.assert_frame_equal(expected, result)
results = list(store.select("df", chunksize=100))
assert len(results) == 5
result = concat(results)
tm.assert_frame_equal(expected, result)
results = list(store.select("df", chunksize=150))
result = concat(results)
tm.assert_frame_equal(result, expected)
with ensure_clean_path(setup_path) as path:
df = tm.makeTimeDataFrame(500)
df.to_hdf(path, "df_non_table")
with pytest.raises(TypeError):
read_hdf(path, "df_non_table", chunksize=100)
with pytest.raises(TypeError):
read_hdf(path, "df_non_table", iterator=True)
with ensure_clean_path(setup_path) as path:
df = tm.makeTimeDataFrame(500)
df.to_hdf(path, "df", format="table")
results = list(read_hdf(path, "df", chunksize=100))
result = concat(results)
assert len(results) == 5
tm.assert_frame_equal(result, df)
tm.assert_frame_equal(result, read_hdf(path, "df"))
# multiple tables
with ensure_clean_store(setup_path) as store:
df1 = tm.makeTimeDataFrame(500)
store.append("df1", df1, data_columns=True)
df2 = tm.makeTimeDataFrame(500).rename(columns="{}_2".format)
df2["foo"] = "bar"
store.append("df2", df2)
df = concat([df1, df2], axis=1)
# full selection
expected = store.select_as_multiple(["df1", "df2"], selector="df1")
results = list(
store.select_as_multiple(["df1", "df2"], selector="df1", chunksize=150)
)
result = concat(results)
tm.assert_frame_equal(expected, result)
def test_select_iterator_complete_8014(self, setup_path):
# GH 8014
# using iterator and where clause
chunksize = 1e4
# no iterator
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[0]
end_dt = expected.index[-1]
# select w/o iteration and no where clause works
result = store.select("df")
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, single term, begin
# of range, works
where = "index >= '{beg_dt}'".format(beg_dt=beg_dt)
result = store.select("df", where=where)
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, single term, end
# of range, works
where = "index <= '{end_dt}'".format(end_dt=end_dt)
result = store.select("df", where=where)
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, inclusive range,
# works
where = "index >= '{beg_dt}' & index <= '{end_dt}'".format(
beg_dt=beg_dt, end_dt=end_dt
)
result = store.select("df", where=where)
tm.assert_frame_equal(expected, result)
# with iterator, full range
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[0]
end_dt = expected.index[-1]
# select w/iterator and no where clause works
results = list(store.select("df", chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, single term, begin of range
where = "index >= '{beg_dt}'".format(beg_dt=beg_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, single term, end of range
where = "index <= '{end_dt}'".format(end_dt=end_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, inclusive range
where = "index >= '{beg_dt}' & index <= '{end_dt}'".format(
beg_dt=beg_dt, end_dt=end_dt
)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
def test_select_iterator_non_complete_8014(self, setup_path):
# GH 8014
# using iterator and where clause
chunksize = 1e4
# with iterator, non complete range
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[1]
end_dt = expected.index[-2]
# select w/iterator and where clause, single term, begin of range
where = "index >= '{beg_dt}'".format(beg_dt=beg_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[expected.index >= beg_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, single term, end of range
where = "index <= '{end_dt}'".format(end_dt=end_dt)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[expected.index <= end_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, inclusive range
where = "index >= '{beg_dt}' & index <= '{end_dt}'".format(
beg_dt=beg_dt, end_dt=end_dt
)
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[
(expected.index >= beg_dt) & (expected.index <= end_dt)
]
tm.assert_frame_equal(rexpected, result)
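# A minimal standalone sketch of the chunked-select pattern exercised by the tests
# above (the store path and frame below are illustrative, not taken from the tests):
#
#   import numpy as np
#   import pandas as pd
#   df = pd.DataFrame({"A": np.random.randn(1000)},
#                     index=pd.date_range("2012-01-01", periods=1000, freq="T"))
#   with pd.HDFStore("example.h5", mode="w") as store:
#       store.append("df", df)
#       chunks = store.select("df", where="index >= '2012-01-01 08:00'", chunksize=100)
#       result = pd.concat(list(chunks))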
#!/usr/bin/env python3
import os
import glob
import pandas as pd
from sklearn import metrics
import argparse
def oneline(dir, fname):
    try:
        # use a context manager so the file is always closed, and decode so the
        # success and failure paths both return the same type (str)
        with open(os.path.join(dir, fname), 'rb') as fh:
            return fh.readline().rstrip().decode()
    except OSError:
        return ''
def parse_predictions_file(f, threshold, val="y", pred="pred_avg"):
    df = pd.read_csv(f)
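    # A plausible continuation (hypothetical; it assumes the label and prediction
    # columns follow the `val`/`pred` defaults above): binarize the averaged
    # prediction at `threshold` and score it with sklearn.metrics, e.g.
    #
    #   y_true = df[val]
    #   y_hat = (df[pred] >= threshold).astype(int)
    #   return metrics.accuracy_score(y_true, y_hat), metrics.f1_score(y_true, y_hat)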
import os
import logging
import datetime
from pathlib import Path
from collections import OrderedDict
import numpy as np
import pytest
from pandas import DataFrame
import astropy.units as u
from astropy.io import fits
from astropy.table import Table
from astropy.time import TimeDelta
import sunpy.io
import sunpy.net.attrs as a
import sunpy.timeseries
from sunpy.data.test import get_test_filepath, rootdir, test_data_filenames
from sunpy.net import Fido
from sunpy.time import parse_time
from sunpy.util import SunpyUserWarning
from sunpy.util.datatype_factory_base import NoMatchError
from sunpy.util.metadata import MetaDict
eve_filepath = get_test_filepath('EVE_L0CS_DIODES_1m_truncated.txt')
eve_many_filepath = [f for f in test_data_filenames()
if f.parents[0].relative_to(f.parents[1]).name == 'eve']
goes_filepath = get_test_filepath('go1520110607.fits')
psp_filepath = get_test_filepath('psp_fld_l2_mag_rtn_1min_20200104_v02.cdf')
swa_filepath = get_test_filepath('solo_L1_swa-pas-mom_20200706_V01.cdf')
fermi_gbm_filepath = get_test_filepath('gbm.fits')
@pytest.mark.filterwarnings('ignore:Unknown units')
def test_factory_concatenate_same_source():
# Test making a TimeSeries that is the concatenation of multiple files
ts_from_list = sunpy.timeseries.TimeSeries(eve_many_filepath, source='EVE', concatenate=True)
assert isinstance(ts_from_list, sunpy.timeseries.sources.eve.EVESpWxTimeSeries)
ts_from_folder = sunpy.timeseries.TimeSeries(
eve_many_filepath[0].parent, source='EVE', concatenate=True)
assert isinstance(ts_from_folder, sunpy.timeseries.sources.eve.EVESpWxTimeSeries)
# test that the two methods produce identical dataframes
assert ts_from_list == ts_from_folder
# test the frames have correct headings/keys (correct concatenation axis)
assert ts_from_list.columns == sunpy.timeseries.TimeSeries(
eve_many_filepath[0], source='EVE', concatenate=True).columns
@pytest.mark.filterwarnings('ignore:Unknown units')
def test_factory_concatenate_different_source():
# Test making a TimeSeries that is the concatenation of multiple files
ts_from_list = sunpy.timeseries.TimeSeries(eve_many_filepath, source='EVE', concatenate=True)
assert isinstance(ts_from_list, sunpy.timeseries.sources.eve.EVESpWxTimeSeries)
ts_from_folder = sunpy.timeseries.TimeSeries(
eve_many_filepath[0].parent, source='EVE', concatenate=True)
assert isinstance(ts_from_folder, sunpy.timeseries.sources.eve.EVESpWxTimeSeries)
# text the two methods get identical dataframes
assert ts_from_list == ts_from_folder
# test the frames have correct headings/keys (correct concatenation axis)
assert ts_from_list.columns == sunpy.timeseries.TimeSeries(
eve_many_filepath[0], source='EVE', concatenate=True).columns
@pytest.mark.filterwarnings('ignore:Unknown units')
def test_factory_generate_list_of_ts():
# Test making a list TimeSeries from multiple files
ts_list = sunpy.timeseries.TimeSeries(eve_many_filepath, source='EVE')
assert isinstance(ts_list, list)
for ts in ts_list:
assert isinstance(ts, sunpy.timeseries.sources.eve.EVESpWxTimeSeries)
@pytest.mark.filterwarnings('ignore:Unknown units')
def test_factory_generate_from_glob():
# Test making a TimeSeries from a glob
ts_from_glob = sunpy.timeseries.TimeSeries(os.path.join(
rootdir, "eve", "*"), source='EVE', concatenate=True)
assert isinstance(ts_from_glob, sunpy.timeseries.sources.eve.EVESpWxTimeSeries)
@pytest.mark.filterwarnings('ignore:Unknown units')
def test_factory_generate_from_pathlib():
# Test making a TimeSeries from a pathlib.Path
ts_from_pathlib = sunpy.timeseries.TimeSeries(Path(fermi_gbm_filepath),
source="GBMSummary")
assert isinstance(ts_from_pathlib, sunpy.timeseries.sources.fermi_gbm.GBMSummaryTimeSeries)
@pytest.mark.remote_data
def test_from_url():
# This is the same PSP file we have in our test data, but accessed from a URL
url = ('https://spdf.gsfc.nasa.gov/pub/data/psp/fields/l2/mag_rtn_1min/2020/'
'psp_fld_l2_mag_rtn_1min_20200104_v02.cdf')
ts = sunpy.timeseries.TimeSeries(url)
assert isinstance(ts[0], sunpy.timeseries.GenericTimeSeries)
assert isinstance(ts[1], sunpy.timeseries.GenericTimeSeries)
def test_read_cdf():
ts_psp = sunpy.timeseries.TimeSeries(psp_filepath)
assert len(ts_psp) == 2
ts = ts_psp[0]
assert ts.columns == ['psp_fld_l2_mag_RTN_1min_0',
'psp_fld_l2_mag_RTN_1min_1',
'psp_fld_l2_mag_RTN_1min_2']
assert ts.quantity('psp_fld_l2_mag_RTN_1min_0').unit == u.nT
assert len(ts.quantity('psp_fld_l2_mag_RTN_1min_0')) == 118
ts = ts_psp[1]
assert ts.columns == ['psp_fld_l2_quality_flags']
assert ts.quantity('psp_fld_l2_quality_flags').unit == u.dimensionless_unscaled
assert len(ts.quantity('psp_fld_l2_quality_flags')) == 1440
@pytest.mark.remote_data
def test_read_cdf_empty_variable():
# This tests that:
# - A CDF file with an empty column can be read
# - Unknown unit handling works as expected
result = sunpy.net.Fido.search(a.Time('2020-01-01', '2020-01-02'),
a.cdaweb.Dataset('AC_H6_SWI'))
filename = Fido.fetch(result[0, 0])
# Temporarily reset sunpy.io.cdf registry of known unit conversions
import sunpy.io.cdf as sunpy_cdf
known_units = sunpy_cdf._known_units
sunpy_cdf._known_units = {}
with pytest.warns(SunpyUserWarning, match='Assigning dimensionless units'):
ts = sunpy.timeseries.TimeSeries(filename)
assert ts.quantity('nH').unit == u.dimensionless_unscaled
# Put back known unit registry, and check that units are recognised
sunpy_cdf._known_units = known_units
ts = sunpy.timeseries.TimeSeries(filename)
assert ts.quantity('nH').unit == u.cm**-3
# Reset again to check that registering units via astropy works too
sunpy_cdf._known_units = {}
u.add_enabled_units([u.def_unit('#/cm^3', represents=u.cm**-3)])
ts = sunpy.timeseries.TimeSeries(filename)
assert ts.quantity('nH').unit == u.cm**-3
sunpy_cdf._known_units = known_units
def test_read_empty_cdf(caplog):
with caplog.at_level(logging.DEBUG, logger='sunpy'):
ts_empty = sunpy.timeseries.TimeSeries(swa_filepath)
assert ts_empty == []
assert "No data found in file" in caplog.text
assert "solo_L1_swa-pas-mom_20200706_V01.cdf" in caplog.text
def test_meta_from_fits_header():
# Generate the data and the corresponding dates
base = parse_time(datetime.datetime.today())
times = base - TimeDelta(np.arange(24*60)*u.minute)
intensity = np.sin(np.arange(0, 12 * np.pi, ((12 * np.pi) / (24*60))))
units = {'intensity': u.W/u.m**2}
data = DataFrame(intensity, index=times, columns=['intensity'])
# Use a FITS file HDU using sunpy.io
hdulist = sunpy.io.read_file(goes_filepath)
meta = hdulist[0].header
meta_md = MetaDict(OrderedDict(meta))
ts_hdu_meta = sunpy.timeseries.TimeSeries(data, meta, units)
ts_md_meta = sunpy.timeseries.TimeSeries(data, meta_md, units)
assert ts_hdu_meta == ts_md_meta
# Use a FITS file HDU using astropy.io
hdulist = fits.open(goes_filepath)
meta = hdulist[0].header
hdulist.close()
meta_md = MetaDict(sunpy.io.header.FileHeader(meta))
ts_hdu_meta = sunpy.timeseries.TimeSeries(data, meta, units)
ts_md_meta = sunpy.timeseries.TimeSeries(data, meta_md, units)
assert ts_hdu_meta == ts_md_meta
def test_generic_construction_basic():
# Generate the data and the corresponding dates
base = parse_time(datetime.datetime.today())
times = base - TimeDelta(np.arange(24 * 60)*u.minute)
intensity = np.sin(np.arange(0, 12 * np.pi, ((12 * np.pi) / (24*60))))
# Create the data DataFrame, header MetaDict and units OrderedDict
data = DataFrame(intensity, index=times, columns=['intensity'])
units = OrderedDict([('intensity', u.W/u.m**2)])
meta = MetaDict({'key': 'value'})
# Create normal TS from dataframe and check
ts_generic = sunpy.timeseries.TimeSeries(data, meta, units)
assert isinstance(ts_generic, sunpy.timeseries.timeseriesbase.GenericTimeSeries)
assert ts_generic.columns == ['intensity']
assert ts_generic.units == units
assert ts_generic.meta.metadata[0][2] == meta
# Create TS using a tuple of values
ts_tuple = sunpy.timeseries.TimeSeries(((data, meta, units),))
assert isinstance(ts_tuple, sunpy.timeseries.timeseriesbase.GenericTimeSeries)
assert ts_generic == ts_tuple
def test_generic_construction_basic_omitted_details():
# Generate the data and the corresponding dates
base = parse_time(datetime.datetime.today())
times = base - TimeDelta(np.arange(24 * 60)*u.minute)
intensity = np.sin(np.arange(0, 12 * np.pi, ((12 * np.pi) / (24*60))))
# Create the data DataFrame, header MetaDict and units OrderedDict
data = DataFrame(intensity, index=times, columns=['intensity'])
units = OrderedDict([('intensity', u.W/u.m**2)])
meta = MetaDict({'key': 'value'})
# Create TS omitting units input arguments
with pytest.warns(SunpyUserWarning, match='Unknown units for intensity'):
ts_1 = sunpy.timeseries.TimeSeries(data, meta)
assert isinstance(ts_1, sunpy.timeseries.timeseriesbase.GenericTimeSeries)
assert ts_1.columns == ['intensity']
assert ts_1.units == OrderedDict([('intensity', u.dimensionless_unscaled)])
assert ts_1.meta.metadata[0][2] == meta
ts_2 = sunpy.timeseries.TimeSeries(data, units)
assert isinstance(ts_2, sunpy.timeseries.timeseriesbase.GenericTimeSeries)
assert ts_2.columns == ['intensity']
assert ts_2.units == units
assert ts_2.meta.metadata[0][2] == MetaDict()
def test_generic_construction_basic_different_meta_types():
# Generate the data and the corresponding dates
base = parse_time(datetime.datetime.today())
times = base - TimeDelta(np.arange(24 * 60)*u.minute)
intensity = np.sin(np.arange(0, 12 * np.pi, ((12 * np.pi) / (24*60))))
tr = sunpy.time.TimeRange(times[0], times[-1])
# Create the data DataFrame, header MetaDict and units OrderedDict
data = DataFrame(intensity, index=times, columns=['intensity'])
units = OrderedDict([('intensity', u.W/u.m**2)])
meta_md = MetaDict({'key': 'value'})
meta_di = {'key': 'value'}
meta_od = OrderedDict({'key': 'value'})
meta_obj = sunpy.timeseries.TimeSeriesMetaData(timerange=tr, colnames=['GOES'],
meta=MetaDict({'key': 'value'}))
# Create TS using different dictionary meta types
ts_md = sunpy.timeseries.TimeSeries(data, meta_md, units)
ts_di = sunpy.timeseries.TimeSeries(data, meta_di, units)
ts_od = sunpy.timeseries.TimeSeries(data, meta_od, units)
ts_obj = sunpy.timeseries.TimeSeries(data, meta_obj, units)
assert ts_md == ts_di == ts_od == ts_obj
assert ts_md.meta.metadata[0][2] == ts_di.meta.metadata[0][2] == ts_od.meta.metadata[0][2] == ts_obj.meta.metadata[0][2]
def test_generic_construction_ts_list():
# Generate the data and the corresponding dates
base = parse_time(datetime.datetime.today())
times = base - TimeDelta(np.arange(24 * 60)*u.minute)
intensity1 = np.sin(np.arange(0, 12 * np.pi, ((12 * np.pi) / (24*60))))
intensity2 = np.sin(np.arange(0, 12 * np.pi, ((12 * np.pi) / (24*60))))
# Create the data DataFrame, header MetaDict and units OrderedDict
data = DataFrame(intensity1, index=times, columns=['intensity'])
data2 = DataFrame(intensity2, index=times, columns=['intensity2'])
units = OrderedDict([('intensity', u.W/u.m**2)])
units2 = OrderedDict([('intensity2', u.W/u.m**2)])
meta = MetaDict({'key': 'value'})
meta2 = MetaDict({'key2': 'value2'})
# Create TS individually
ts_1 = sunpy.timeseries.TimeSeries(data, meta, units)
ts_2 = sunpy.timeseries.TimeSeries(data2, meta2, units2)
# Create a TS list by passing multiple data, meta, units sets in one call
ts_list = sunpy.timeseries.TimeSeries(data, meta, units, data2, meta2, units2)
assert isinstance(ts_list, list)
assert len(ts_list) == 2
assert ts_list[0] == ts_1
assert ts_list[1] == ts_2
# Create TS using a tuple
ts_list2 = sunpy.timeseries.TimeSeries(((data, meta, units), (data2, meta2, units2)))
assert ts_list == ts_list2
def test_generic_construction_concatenation():
# Generate the data and the corresponding dates
base = parse_time(datetime.datetime.today())
times = base - TimeDelta(np.arange(24 * 60)*u.minute)
intensity1 = np.sin(np.arange(0, 12 * np.pi, ((12 * np.pi) / (24*60))))
intensity2 = np.sin(np.arange(0, 12 * np.pi, ((12 * np.pi) / (24*60))))
# Create the data DataFrame, header MetaDict and units OrderedDict
data = DataFrame(intensity1, index=times, columns=['intensity'])
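# A plausible continuation (hypothetical; it mirrors the earlier construction tests
# and uses the documented GenericTimeSeries.concatenate method):
#
#   data2 = DataFrame(intensity2, index=times, columns=['intensity2'])
#   units = OrderedDict([('intensity', u.W/u.m**2), ('intensity2', u.W/u.m**2)])
#   meta = MetaDict({'key': 'value'})
#   ts_1 = sunpy.timeseries.TimeSeries(data, meta, units)
#   ts_2 = sunpy.timeseries.TimeSeries(data2, meta, units)
#   assert ts_1.concatenate(ts_2).columns == ['intensity', 'intensity2']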
import sys
import unittest
import subprocess
import time
import logging
import numpy as np
import pandas as pd
import swifter
from tqdm.auto import tqdm
from psutil import cpu_count
LOG = logging.getLogger(__name__)
LOG.setLevel(logging.INFO)
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
formatter = logging.Formatter("%(asctime)-8s.%(msecs)03d %(levelname)-8s %(name)s:%(lineno)-3s %(message)s")
ch.setFormatter(formatter)
LOG.addHandler(ch)
def math_vec_square(x):
return x ** 2
def math_foo(x, compare_to=1):
return x ** 2 if x < compare_to else x ** (1 / 2)
def math_vec_multiply(row):
return row["x"] * row["y"]
def math_agg_foo(row):
return row.sum() - row.min()
def text_foo(row):
if row["letter"] == "A":
return row["value"] * 3
elif row["letter"] == "B":
return row["value"] ** 3
elif row["letter"] == "C":
return row["value"] / 3
elif row["letter"] == "D":
return row["value"] ** (1 / 3)
elif row["letter"] == "E":
return row["value"]
class TestSwifter(unittest.TestCase):
def assertSeriesEqual(self, a, b, msg):
try:
pd.testing.assert_series_equal(a, b)
except AssertionError as e:
raise self.failureException(msg) from e
def assertDataFrameEqual(self, a, b, msg):
try:
pd.testing.assert_frame_equal(a, b)
except AssertionError as e:
raise self.failureException(msg) from e
def setUp(self):
LOG.info(f"Version {swifter.__version__}")
self.addTypeEqualityFunc(pd.Series, self.assertSeriesEqual)
self.addTypeEqualityFunc(pd.DataFrame, self.assertDataFrameEqual)
self.ncores = cpu_count()
def test_set_npartitions(self):
LOG.info("test_set_npartitions")
for swifter_df, set_npartitions, expected in zip(
[
pd.DataFrame().swifter,
pd.Series().swifter,
pd.DataFrame(
{"x": np.arange(0, 10)}, index=pd.date_range("2019-01-1", "2020-01-1", periods=10)
).swifter.rolling("1d"),
pd.DataFrame(
{"x": np.arange(0, 10)}, index=pd.date_range("2019-01-1", "2020-01-1", periods=10)
).swifter.resample("3T"),
],
[None, 1000, 1001, 1002],
[cpu_count() * 2, 1000, 1001, 1002],
):
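# npartitions=None is expected to fall back to swifter's default of cpu_count() * 2
# partitions; explicit values should be kept verbatim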
before = swifter_df._npartitions
swifter_df.set_npartitions(set_npartitions)
actual = swifter_df._npartitions
self.assertEqual(actual, expected)
if set_npartitions is not None:
self.assertNotEqual(before, actual)
def test_set_dask_threshold(self):
LOG.info("test_set_dask_threshold")
expected = 1000
for swifter_df in [
pd.DataFrame().swifter,
pd.Series().swifter,
pd.DataFrame(
{"x": np.arange(0, 10)}, index=pd.date_range("2019-01-1", "2020-01-1", periods=10)
).swifter.rolling("1d"),
pd.DataFrame(
{"x": np.arange(0, 10)}, index=pd.date_range("2019-01-1", "2020-01-1", periods=10)
).swifter.resample("3T"),
]:
before = swifter_df._dask_threshold
swifter_df.set_dask_threshold(expected)
actual = swifter_df._dask_threshold
self.assertEqual(actual, expected)
self.assertNotEqual(before, actual)
def test_set_dask_scheduler(self):
LOG.info("test_set_dask_scheduler")
expected = "my-scheduler"
for swifter_df in [
pd.DataFrame().swifter,
pd.Series().swifter,
pd.DataFrame(
{"x": np.arange(0, 10)}, index=pd.date_range("2019-01-1", "2020-01-1", periods=10)
).swifter.rolling("1d"),
pd.DataFrame(
{"x": np.arange(0, 10)}, index=pd.date_range("2019-01-1", "2020-01-1", periods=10)
).swifter.resample("3T"),
]:
before = swifter_df._scheduler
swifter_df.set_dask_scheduler(expected)
actual = swifter_df._scheduler
self.assertEqual(actual, expected)
self.assertNotEqual(before, actual)
def test_disable_progress_bar(self):
LOG.info("test_disable_progress_bar")
expected = False
for swifter_df in [
pd.DataFrame().swifter,
pd.Series().swifter,
pd.DataFrame(
{"x": np.arange(0, 10)}, index=pd.date_range("2019-01-1", "2020-01-1", periods=10)
).swifter.rolling("1d"),
pd.DataFrame(
{"x": np.arange(0, 10)}, index=pd.date_range("2019-01-1", "2020-01-1", periods=10)
).swifter.resample("3T"),
]:
before = swifter_df._progress_bar
swifter_df.progress_bar(expected)
actual = swifter_df._progress_bar
self.assertEqual(actual, expected)
self.assertNotEqual(before, actual)
def test_allow_dask_on_strings(self):
LOG.info("test_allow_dask_on_strings")
expected = True
swifter_df = pd.DataFrame().swifter
before = swifter_df._allow_dask_on_strings
swifter_df.allow_dask_on_strings(expected)
actual = swifter_df._allow_dask_on_strings
self.assertEqual(actual, expected)
self.assertNotEqual(before, actual)
def test_stdout_redirected(self):
LOG.info("test_stdout_redirected")
print_messages = subprocess.check_output(
[
sys.executable,
"-c",
"import pandas as pd; import numpy as np; import swifter; "
+ "df = pd.DataFrame({'x': np.random.normal(size=4)}, dtype='float32'); "
+ "df.swifter.progress_bar(enable=False).apply(lambda x: print(x.values))",
],
stderr=subprocess.STDOUT,
)
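# with the progress bar disabled, the applied print() call should be the only thing
# written to stdout, i.e. exactly one line of output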
self.assertEqual(len(print_messages.decode("utf-8").rstrip("\n").split("\n")), 1)
def test_apply_on_empty_series(self):
LOG.info("test_apply_on_empty_series")
series = pd.Series()
pd_val = series.apply(math_foo, compare_to=1)
swifter_val = series.swifter.apply(math_foo, compare_to=1)
self.assertEqual(pd_val, swifter_val) # equality test
def test_apply_on_empty_dataframe(self):
LOG.info("test_apply_on_empty_dataframe")
df = pd.DataFrame(columns=["x", "y"])
pd_val = df.apply(math_vec_multiply, axis=1)
swifter_val = df.swifter.apply(math_vec_multiply, axis=1)
self.assertEqual(pd_val, swifter_val) # equality test
def test_applymap_on_empty_dataframe(self):
LOG.info("test_applymap_on_empty_dataframe")
df = pd.DataFrame(columns=["x", "y"])
pd_val = df.applymap(math_vec_square)
swifter_val = df.swifter.applymap(math_vec_square)
self.assertEqual(pd_val, swifter_val) # equality test
def test_rolling_apply_on_empty_dataframe(self):
LOG.info("test_rolling_apply_on_empty_dataframe")
df = pd.DataFrame(columns=["x", "y"])
pd_val = df.rolling(1).apply(math_agg_foo, raw=True)
swifter_val = df.swifter.rolling(1).apply(math_agg_foo, raw=True)
self.assertEqual(pd_val, swifter_val) # equality test
def test_resample_apply_on_empty_dataframe(self):
LOG.info("test_resample_apply_on_empty_dataframe")
df = pd.DataFrame(columns=["x", "y"], index=pd.date_range(start="2020/01/01", periods=0))
pd_val = df.resample("1d").apply(math_agg_foo)
swifter_val = df.swifter.resample("1d").apply(math_agg_foo)
self.assertEqual(pd_val, swifter_val) # equality test
def test_nonvectorized_math_apply_on_small_series(self):
LOG.info("test_nonvectorized_math_apply_on_small_series")
df = pd.DataFrame({"x": np.random.normal(size=1000)})
series = df["x"]
tqdm.pandas(desc="Pandas Vec math apply ~ Series")
pd_val = series.progress_apply(math_foo, compare_to=1)
swifter_val = series.swifter.progress_bar(desc="Vec math apply ~ Series").apply(math_foo, compare_to=1)
self.assertEqual(pd_val, swifter_val) # equality test
def test_nonvectorized_math_apply_on_small_series_no_progress_bar(self):
LOG.info("test_nonvectorized_math_apply_on_small_series_no_progress_bar")
df = pd.DataFrame({"x": np.random.normal(size=1000)})
series = df["x"]
pd_val = series.apply(math_foo, compare_to=1)
swifter_val = series.swifter.progress_bar(enable=False).apply(math_foo, compare_to=1)
self.assertEqual(pd_val, swifter_val) # equality test
def test_vectorized_math_apply_on_large_series(self):
LOG.info("test_vectorized_math_apply_on_large_series")
df = pd.DataFrame({"x": np.random.normal(size=1_000_000)})
series = df["x"]
tqdm.pandas(desc="Pandas Vec math apply ~ Series")
start_pd = time.time()
pd_val = series.progress_apply(math_vec_square)
end_pd = time.time()
pd_time = end_pd - start_pd
start_swifter = time.time()
swifter_val = series.swifter.progress_bar(desc="Vec math apply ~ Series").apply(math_vec_square, axis=0)
end_swifter = time.time()
swifter_time = end_swifter - start_swifter
self.assertEqual(pd_val, swifter_val) # equality test
if self.ncores > 1: # speed test
self.assertLess(swifter_time, pd_time)
def test_nonvectorized_math_apply_on_large_series(self):
LOG.info("test_nonvectorized_math_apply_on_large_series")
df = pd.DataFrame({"x": np.random.normal(size=10_000_000)})
series = df["x"]
tqdm.pandas(desc="Pandas Nonvec math apply ~ Series")
start_pd = time.time()
pd_val = series.progress_apply(math_foo, compare_to=1)
end_pd = time.time()
pd_time = end_pd - start_pd
start_swifter = time.time()
swifter_val = series.swifter.progress_bar(desc="Nonvec math apply ~ Series").apply(math_foo, compare_to=1)
end_swifter = time.time()
swifter_time = end_swifter - start_swifter
self.assertEqual(pd_val, swifter_val) # equality test
if self.ncores > 1: # speed test
self.assertLess(swifter_time, pd_time)
def test_nonvectorized_math_apply_on_small_dataframe(self):
LOG.info("test_nonvectorized_math_apply_on_small_dataframe")
df = pd.DataFrame({"x": np.random.normal(size=1000), "y": np.random.uniform(size=1000)})
tqdm.pandas(desc="Pandas Nonvec math apply ~ DF")
pd_val = df.progress_apply(math_agg_foo)
swifter_val = df.swifter.progress_bar(desc="Vec math apply ~ DF").apply(math_agg_foo)
self.assertEqual(pd_val, swifter_val) # equality test
def test_nonvectorized_math_apply_on_small_dataframe_no_progress_bar(self):
LOG.info("test_nonvectorized_math_apply_on_small_dataframe_no_progress_bar")
df = pd.DataFrame({"x": np.random.normal(size=1000), "y": np.random.uniform(size=1000)})
pd_val = df.apply(math_agg_foo)
swifter_val = df.swifter.progress_bar(enable=False).apply(math_agg_foo)
self.assertEqual(pd_val, swifter_val) # equality test
def test_vectorized_math_apply_on_large_dataframe(self):
LOG.info("test_vectorized_math_apply_on_large_dataframe")
df = pd.DataFrame({"x": np.random.normal(size=1_000_000), "y": np.random.uniform(size=1_000_000)})
tqdm.pandas(desc="Pandas Vec math apply ~ DF")
start_pd = time.time()
pd_val = df.progress_apply(math_vec_multiply, axis=1)
end_pd = time.time()
pd_time = end_pd - start_pd
start_swifter = time.time()
swifter_val = df.swifter.progress_bar(desc="Vec math apply ~ DF").apply(math_vec_multiply, axis=1)
end_swifter = time.time()
swifter_time = end_swifter - start_swifter
self.assertEqual(pd_val, swifter_val) # equality test
if self.ncores > 1: # speed test
self.assertLess(swifter_time, pd_time)
def test_nonvectorized_math_apply_on_large_dataframe_broadcast(self):
LOG.info("test_nonvectorized_math_apply_on_large_dataframe_broadcast")
df = pd.DataFrame({"x": np.random.normal(size=1_000_000), "y": np.random.uniform(size=1_000_000)})
tqdm.pandas(desc="Pandas Nonvec math apply + broadcast ~ DF")
start_pd = time.time()
pd_val = df.progress_apply(math_agg_foo, axis=1, result_type="broadcast")
end_pd = time.time()
pd_time = end_pd - start_pd
start_swifter = time.time()
swifter_val = df.swifter.progress_bar(desc="Nonvec math apply + broadcast ~ DF").apply(
math_agg_foo, axis=1, result_type="broadcast"
)
end_swifter = time.time()
swifter_time = end_swifter - start_swifter
self.assertEqual(pd_val, swifter_val) # equality test
if self.ncores > 1: # speed test
self.assertLess(swifter_time, pd_time)
def test_nonvectorized_math_apply_on_large_dataframe_reduce(self):
LOG.info("test_nonvectorized_math_apply_on_large_dataframe_reduce")
df = pd.DataFrame({"x": np.random.normal(size=1_000_000), "y": np.random.uniform(size=1_000_000)})
tqdm.pandas(desc="Pandas Nonvec math apply + reduce ~ DF")
start_pd = time.time()
pd_val = df.progress_apply(math_agg_foo, axis=1, result_type="reduce")
end_pd = time.time()
pd_time = end_pd - start_pd
start_swifter = time.time()
swifter_val = df.swifter.progress_bar(desc="Nonvec math apply + reduce ~ DF").apply(
math_agg_foo, axis=1, result_type="reduce"
)
end_swifter = time.time()
swifter_time = end_swifter - start_swifter
self.assertEqual(pd_val, swifter_val) # equality test
if self.ncores > 1: # speed test
self.assertLess(swifter_time, pd_time)
def test_nonvectorized_text_apply_on_large_dataframe(self):
LOG.info("test_nonvectorized_text_apply_on_large_dataframe")
df = pd.DataFrame({"letter": ["A", "B", "C", "D", "E"] * 200_000, "value": np.random.normal(size=1_000_000)})
tqdm.pandas(desc="Pandas Nonvec text apply ~ DF")
start_pd = time.time()
pd_val = df.progress_apply(text_foo, axis=1)
end_pd = time.time()
pd_time = end_pd - start_pd
start_swifter = time.time()
swifter_val = (
df.swifter.allow_dask_on_strings(True).progress_bar(desc="Nonvec text apply ~ DF").apply(text_foo, axis=1)
)
end_swifter = time.time()
swifter_time = end_swifter - start_swifter
self.assertEqual(pd_val, swifter_val) # equality test
if self.ncores > 1: # speed test
self.assertLess(swifter_time, pd_time)
def test_nonvectorized_math_apply_on_small_rolling_dataframe(self):
LOG.info("test_nonvectorized_math_apply_on_small_rolling_dataframe")
df = pd.DataFrame({"x": np.arange(0, 1000)}, index= | pd.date_range("2019-01-1", "2020-01-1", periods=1000) | pandas.date_range |
# Name: train model file
# Command: python rtraindecay.py 0 0.5
# Meaning of the command above: train the model on the data without user 0, using decay rate 0.5
# Author: <NAME>
# ======================
import pandas as pd
import sys
import numpy as np
import torch
from mynet import network
from copy import deepcopy
initdecay = float(sys.argv[2]) # the decaying rate
print("decaying:",initdecay)
def mytokenizer(row):
return row['text'].lower().split()
# use the decay rate to build the input vector for the current row from the previous one (idea from Dr.Ware)
def decaytransform(text,word2id,st2int,lastone, decay=initdecay):
textsplit = text.lower().split()
tmp = lastone*decay
for x in textsplit:
tmp[word2id[x]] = 1
eachvec = tmp
return eachvec
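# A small worked example of the decay update above (illustrative only; assumes a toy
# vocabulary word2id = {'a': 0, 'b': 1, 'c': 2} and decay = 0.5):
#   start:       [0.0, 0.0, 0.0]
#   after "a b": [1.0, 1.0, 0.0]   (every word seen in the text is set to 1)
#   after "c":   [0.5, 0.5, 1.0]   (previous entries decay by 0.5, then 'c' is set to 1)
# Older utterances therefore fade geometrically while the newest words keep full weight.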
def builddata(df,word2id, st2int,batch_size=3): # build input text data into vector and convert label to id
trainset =[]
stack = []
for index, row in df.iterrows():
        if row['usr'] not in stack: # first row for this user: start the decay from a zero vector
inputsvec = decaytransform(row['text'],word2id,st2int,np.zeros(len(word2id)))
stack.append(row['usr'])
else:
inputsvec = decaytransform(row['text'],word2id,st2int,trainset[-1]['inputs'])
labelid = st2int[row['label']] # get label id
trainset.append({"inputs":inputsvec,"label":labelid}) # get current input vector and label id
trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size,shuffle=True, num_workers=2)
return trainloader
def trainit(trainloader,net,epochs=100): # training
optimizer = torch.optim.Adam(net.parameters(), lr=1e-6) # initiate optimizer
    minloss = float("inf")  # track the smallest running loss seen so far
for epoch in range(epochs): # loop over the dataset multiple times
print("epoch:",epoch)
running_loss = 0.0
for i, data in enumerate(trainloader):
# get the inputs; data is a list of [inputs, labels]
inputs = data["inputs"]
inputs = inputs.float()
label = data["label"]
label = label
optimizer.zero_grad()
# forward + backward + optimize
outputs, loss= net(inputs,labels=label)
loss.backward()
            torch.nn.utils.clip_grad_norm_(net.parameters(), 0.1)
optimizer.step()
running_loss += loss.item()
            if i % 50 == 49:    # print the running loss every 50 mini-batches
print('[%d, %5d] loss: %.3f' %
(epoch + 1, i + 1, running_loss / 2000))
#running_loss = 0.0
if running_loss <minloss:
minloss = running_loss
print("minloss:",minloss) # keep updating the min loss
#torch.save(the_model.state_dict(), PATH)
bestmodel = deepcopy(net.state_dict()) # get the best model with minimum loss
torch.save(bestmodel, "model_clssave/bestmodel"+str(testusr)+"_"+str(initdecay)+".pt")
print('Finished Training')
testusr = sys.argv[1] # get the test user
obs_df = | pd.read_csv("clssave/obtext.csv") | pandas.read_csv |
import string
import pandas as pd
import sqlite3
import re
from urllib.request import urlopen
from datetime import datetime
from bs4 import BeautifulSoup
from fundamentus import get_data
from tqdm import tqdm
from exception_util import exception, create_logger, retry
# Create instances of loggers
cvm_logger = create_logger('cvm_logger')
result_logger = create_logger('result_logger')
cvm2symbol_logger = create_logger('cvm2symbol_logger')
price_logger = create_logger('price_logger')
@retry()
@exception(cvm_logger)
def cvm():
"""
Get registration of all companies listed in CVM.
    This function is a crawler that collects the registration information of all companies from the cvmweb page.
Parameters
----------
None
Returns
-------
DataFrame
The dataframe with fields ['cnpj', 'name', 'type', 'cvm_code', 'situation']
"""
# Define url base
url = 'http://cvmweb.cvm.gov.br/SWB/Sistemas/SCW/CPublica/CiaAb/FormBuscaCiaAbOrdAlf.aspx?LetraInicial='
# Get alphanum uppercase set to use in page index
    alphanum = string.ascii_uppercase + ''.join(list(map(str, range(10))))
# Attribute values to identify table lines of interest
colors = ['Cornsilk','#FAEFCA']
# Loop through index pages and append registration information to data list
data = list()
#for letra_inicial in tqdm(alphanum, desc='Reading companies', unit='tabs'):
for letra_inicial in alphanum:
# get html
with urlopen(url+f'{letra_inicial}') as html:
soup = BeautifulSoup(html, 'html.parser')
try:
# loop through table lines retrieving fields values
for row in soup.find_all('tr', bgcolor=True):
row_tup = tuple()
# check the attribute matching
if row['bgcolor'] in colors:
for field in row.find_all('td'):
row_tup += (field.get_text(),)
data.append(row_tup)
except:
continue
# Store data in dataframe
columns = ['cnpj', 'name', 'type', 'cvm_code', 'situation']
df = pd.DataFrame(data, columns=columns)
df['cvm_code'] = df['cvm_code'].apply(int)
return df
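# Hypothetical usage sketch (not part of the original script): registrations = cvm() returns one
# row per listed company, so registrations[['name', 'cvm_code']].head() would show company names
# next to their integer CVM codes.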
@retry()
@exception(cvm2symbol_logger)
def cvm2symbol(cvm_codes, cvm_prices_and_liq):
"""
Get most relevant symbol price with cvm_code information
    This function is a crawler that collects the symbols of the companies listed in CVM and, among the symbols sharing the same cvm_code, keeps the one with the best liquidity.
Parameters
----------
cvm_codes : list or numpy.array
List of cvm_codes
cvm_prices_and_liq : DataFrame or numpy.ndarray
Table indexed by symbol name and with price and liq info
Returns
-------
DataFrame
The dataframe with fields ['cvm_code', 'symbol', 'price', 'date']
"""
# Define cvm symbols source url
url = 'http://bvmf.bmfbovespa.com.br/pt-br/mercados/acoes/empresas/ExecutaAcaoConsultaInfoEmp.asp?CodCVM='
    # Get the symbols by appending each cvm_code to the url query
cvm_symbol = []
for code in tqdm(cvm_codes, desc='Reading prices', unit='codes'):
#for code in cvm_codes:
with urlopen(url+f'{code}') as html:
soup = BeautifulSoup(html, 'html.parser')
liq = .0
symbol = None
# Take the symbol with best liq for this cvm_code
for row in soup.find_all('a', "LinkCodNeg"):
tmp_symbol = row.get_text().strip()
# Evaluate if symbol exists
if tmp_symbol in cvm_prices_and_liq.index:
tmp_liq = convertNum(cvm_prices_and_liq.loc[tmp_symbol].liq)
if tmp_liq < liq:
continue
liq = tmp_liq
symbol = tmp_symbol
# Skip when no symbol
if symbol:
cvm_symbol.append((code, symbol, pd.to_datetime(cvm_prices_and_liq.loc[symbol].date)))
return pd.DataFrame(cvm_symbol, columns=['cvm_code', 'symbol', 'date'])
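# Illustrative sketch: if cvm_code 906 maps to both 'BBDC3' and 'BBDC4', the symbol kept is the
# one with the larger liq value in cvm_prices_and_liq (see the test data in __main__ below).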
@retry()
@exception(result_logger)
def get_result():
"""
Get a table with cotacao and liq info from fundamentus page.
Parameters
----------
None
Returns
-------
DataFrame
The dataframe with fields ['symbol', 'price', 'liq', 'date']
"""
global lista, dia
# Use get_data from fundamentus to get list of stats by symbol
resultado = []
lista, dia = dict(get_data()), datetime.strftime(datetime.today(), '%d')
# Save day of update
date = datetime.strftime(datetime.today(), '%d-%m-%y %H:%M:%S')
    # Select just the cotacao and liq fields.
#for key, value in tqdm(lista.items(), desc='Retrieving info', unit='registers'):
for key, value in lista.items():
resultado.append((key, value['cotacao'], value['Liq.2m.'], date))
return pd.DataFrame(resultado, columns=['symbol', 'price', 'liq', 'date'])
@retry()
@exception(price_logger)
def get_price(conn, cvm_prices_and_liq):
"""
Get price from CVM companies by symbol.
    This function gets prices from a table of symbols and prices, then merges them with the cvm code information.
Parameters
----------
    conn : sqlalchemy connection
        SQLAlchemy connection to the database
cvm_prices_and_liq : DataFrame or numpy.ndarray
Table indexed by symbol name and with price and liq info
Returns
-------
DataFrame
The dataframe with fields ['cvm_code', 'symbol', 'price', 'date']
"""
# Get registers table
print('Reading companies symbols.')
columns = ['cvm_code', 'symbol', 'date']
result = conn.execute(f'SELECT * FROM cvm_dfps.cvm2symbol;').fetchall()
reg = pd.DataFrame(result, columns=columns)
cvm_price = []
#for i in tqdm(range(reg.shape[0]), desc='Reading prices', unit='codes'):
for i in range(reg.shape[0]):
symbol = reg['symbol'][i]
# Evaluate if symbol exists
if symbol not in cvm_prices_and_liq.index:
            print(f'{symbol} failed')
continue
# Append to n-list
cvm_price.append((reg['cvm_code'][i], symbol, convertNum(cvm_prices_and_liq.loc[symbol].price), pd.to_datetime(cvm_prices_and_liq.loc[symbol].date)))
return pd.DataFrame(cvm_price, columns=['cvm_code', 'symbol', 'price', 'date'])
def convertNum(number_string):
    return float(re.sub(',', '.', re.sub(r'\.', '', number_string)))
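# Worked example of the conversion above (Brazilian formatting, where '.' separates thousands
# and ',' marks decimals): convertNum('1.234,56') -> 1234.56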
if __name__ == '__main__':
# test cvm2symbol
lst = ['906', '9512']
data = [
['BBDC4', '2.00', '5', pd.to_datetime('10-04-2018')],
['BBDC3', '3.00', '6', pd.to_datetime('10-04-2018')],
['PETR3', '5.00', '5', pd.to_datetime('10-04-2018')],
['PETR4', '6.00', '6', pd.to_datetime('10-04-2018')],
]
df = | pd.DataFrame(data, columns=['symbol', 'price', 'liq', 'date']) | pandas.DataFrame |
import os
import sys
module_path = os.path.abspath(os.path.join('.'))
if module_path not in sys.path:
sys.path.append(module_path)
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
hippocampusAreas = {
'CA1' : 1,
'CA2' : 2,
'CA3' : 3,
'DG' : 4,
'Outer' : 0
}
def loadFromFile(fileName = "") :
if fileName == "" :
return
unprocessedDataSet = pd.read_csv(fileName)
    # convert labels to numbers
unprocessedDataSet.loc[unprocessedDataSet['Y'] == 'CA1', ['Y']] = 1
unprocessedDataSet.loc[unprocessedDataSet['Y'] == 'CA2', ['Y']] = 2
unprocessedDataSet.loc[unprocessedDataSet['Y'] == 'CA3', ['Y']] = 3
unprocessedDataSet.loc[unprocessedDataSet['Y'] == 'DG', ['Y']] = 4
unprocessedDataSet.loc[unprocessedDataSet['Y'] == 'Outer', ['Y']] = 0
#remove bogus records
unprocessedDataSet = unprocessedDataSet.replace([np.inf, -np.inf], np.nan)
unprocessedDataSet = unprocessedDataSet.dropna();
nulls = np.where(pd.isnull(unprocessedDataSet))
#Main Dataset
hipAreaData = unprocessedDataSet.drop(unprocessedDataSet.index[nulls[0]])
#Get all Non hippocampal pixels
isOuterData = hipAreaData['Y']==0
outerData = hipAreaData[isOuterData]
#Get all Hippocampal pixels: CA1, CA2, CA3, DG
isCA1Data = hipAreaData['Y']==1
CA1Data = hipAreaData[isCA1Data]
isCA2Data = hipAreaData['Y']==2
CA2Data = hipAreaData[isCA2Data]
isCA3Data = hipAreaData['Y']==3
CA3Data = hipAreaData[isCA3Data]
isDgData = hipAreaData['Y']==4
dgData = hipAreaData[isDgData]
#stack all hippocampall pixels together
data = [CA1Data, CA2Data, CA3Data, dgData]
xDevDataset = pd.concat(data)
#For every image, get its amount of hippocampal pixels
unique, counts = np.unique(xDevDataset.values[:,0], return_counts=True)
#Obtain -randomly- the same amount of non-hippocampal pixels for every image
#to create balanced dataset
outer_values = pd.DataFrame()
for img_name, count in zip(unique, counts) :
img_rows = outerData['Source'] == img_name
out_values = outerData.loc[ img_rows ]
o_values = out_values.sample(n = count, random_state = 2)
print(img_name+"; Total TP="+str(count)+"; total out_values="+str(out_values.shape[0])+"; Selected out_val="+str(o_values.shape[0]))
outer_values = outer_values.append(o_values)
print("Hippocampal pixel dataset shape, Rows={}, Columns={}".format(*xDevDataset.shape))
print("Non-Hippocampal pixel dataset shape, Rows={}, Columns={}".format(*outer_values.shape))
outerData = outer_values
return CA1Data, CA2Data, CA3Data, dgData, outerData
def splitDataSet(dataset, devSize=0.70, testSize=0.20, valSize=0.10) :
#remove bogus records
dataset = dataset.replace([np.inf, -np.inf], np.nan)
dataset = dataset.dropna();
#shuffle (consider using - sklearn.utils.shuffle(nd) - it's 3x faster)
dataset = dataset.sample(frac=1, random_state=99).reset_index(drop=True)
#Construct training dataset
devDataset = dataset.sample(frac=devSize, random_state=99)
#Split training dataset in data and label
yDevDataset = devDataset[['Y']]
xDevDataset = devDataset.drop('Y',axis=1)
if (devSize + testSize < 1) :
#Avoid duplicates
remaining = dataset.loc[~dataset.index.isin(devDataset.index), :]
#get test quantity
testQuantity = int(dataset.shape[0] * testSize)
#Create test set and Validation set
testDataset = remaining.sample(n=testQuantity, random_state=99)
valDataset = remaining.loc[~remaining.index.isin(testDataset.index), :]
#Split Test dataset in data and label
yTestDataset = testDataset[['Y']]
xTestDataset = testDataset.drop('Y',axis=1)
#Split Validation dataset in data and label
yValDataset = valDataset[['Y']]
xValDataset = valDataset.drop('Y',axis=1)
else:
testDataset = dataset.loc[~dataset.index.isin(devDataset.index), :]
yValDataset = np.array([])
xValDataset = np.array([])
return (xDevDataset, yDevDataset), (xTestDataset, yTestDataset), (xValDataset, yValDataset)
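# Rough sketch of the resulting split with the default fractions (70/20/10): a class with 1000
# rows yields ~700 training, ~200 test and ~100 validation rows, each returned as an (X, y) pair
# with the 'Y' label column separated from the features.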
def getDataSet(fileName = ""):
ca1Data, ca2Data, ca3Data, dgData, outerData = loadFromFile(fileName)
#Split dataset in 70%, 20%, 10% proportion
ca1DevDataset, ca1TestDataset, ca1ValDataset = splitDataSet(ca1Data)
ca2DevDataset, ca2TestDataset, ca2ValDataset = splitDataSet(ca2Data)
ca3DevDataset, ca3TestDataset, ca3ValDataset = splitDataSet(ca3Data)
dgDevDataset, dgTestDataset, dgValDataset = splitDataSet(dgData)
outerDevDataset, outerTestDataset, outerValDataset = splitDataSet(outerData)
#Stack Hippocampal pixel together to create the dataset properly
frames = [ca1DevDataset[0], ca2DevDataset[0], ca3DevDataset[0], dgDevDataset[0], outerDevDataset[0]]
xDevDataset = pd.concat(frames)
frames = [ca1DevDataset[1], ca2DevDataset[1], ca3DevDataset[1], dgDevDataset[1], outerDevDataset[1]]
yDevDataset = pd.concat(frames)
frames = [ca1TestDataset[0], ca2TestDataset[0], ca3TestDataset[0], dgTestDataset[0], outerTestDataset[0]]
xTestDataset = | pd.concat(frames) | pandas.concat |
# ---
# jupyter:
# jupytext:
# formats: ipynb,py
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.9.1+dev
# kernelspec:
# display_name: Python [conda env:biovectors]
# language: python
# name: conda-env-biovectors-py
# ---
# # Get Publication Times for Pubmed Abstracts
# +
import csv
from pathlib import Path
import time
import pandas as pd
import requests
import tqdm
# -
# Write the api caller function
def call_entrez(pubmed_ids):
while True:
try:
url = "https://eutils.ncbi.nlm.nih.gov/entrez/eutils/esummary.fcgi?db=pubmed&retmode=json&id="
id_str = ",".join(map(str, pubmed_ids))
response = requests.get(f"{url}{id_str}")
assert response.status_code == 200
response = response.json()
return response["result"]
except Exception as e:
print(e)
print("Had an error will try again in thirty minutes!!")
time.sleep(1800)
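# Sketch of the expected response shape (an assumption based on the NCBI esummary JSON format):
# call_entrez(['12345678']) returns the 'result' dict, which holds a 'uids' list plus one summary
# record per PMID with fields such as 'pubdate' and 'sortpubdate'.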
pmid_df = pd.read_csv("output/pmid.tsv", sep="\t", names=["pmid"])
print(pmid_df.shape)
pmid_df.head()
# +
if Path("output/pmid_to_pub_date.tsv").exists():
    # Start from a checkpoint in case something goes wrong
parsed_ids = | pd.read_csv("output/pmid_to_pub_date.tsv", sep="\t") | pandas.read_csv |
## Tune the clip threshold and the confidence using EPR
from COSLIR import *
import pandas as pd
res = np.load('data/human/hSTR_656.npy')
threshold_list = [0, 1e-3, 3e-3, 6e-3, 1e-2]
conf_list = [0.5, 0.6, 0.7]
bootstrap_num, dim, _ = res.shape
print('bootstrap, dim', bootstrap_num, dim)
# load data
X = np.genfromtxt('data/hSTRING/ExpressionData5.csv', delimiter=',')
X = X[1:,1:]
Y = np.genfromtxt('data/hSTRING/ExpressionData6.csv', delimiter=',')
Y = Y[1:,1:]
X = X.T
Y = Y.T
print('sample size: X, ', X.shape, 'Y, ', Y.shape)
Expre = | pd.read_csv('data/hSTRING/ExpressionData1.csv') | pandas.read_csv |
# pylint: disable=E1101,E1103,W0232
from datetime import datetime, timedelta
from pandas.compat import range, lrange, lzip, u, zip
import operator
import re
import nose
import warnings
import os
import numpy as np
from numpy.testing import assert_array_equal
from pandas import period_range, date_range
from pandas.core.index import (Index, Float64Index, Int64Index, MultiIndex,
InvalidIndexError, NumericIndex)
from pandas.tseries.index import DatetimeIndex
from pandas.tseries.tdi import TimedeltaIndex
from pandas.tseries.period import PeriodIndex
from pandas.core.series import Series
from pandas.util.testing import (assert_almost_equal, assertRaisesRegexp,
assert_copy)
from pandas import compat
from pandas.compat import long
import pandas.util.testing as tm
import pandas.core.config as cf
from pandas.tseries.index import _to_m8
import pandas.tseries.offsets as offsets
import pandas as pd
from pandas.lib import Timestamp
class Base(object):
""" base class for index sub-class tests """
_holder = None
_compat_props = ['shape', 'ndim', 'size', 'itemsize', 'nbytes']
def verify_pickle(self,index):
unpickled = self.round_trip_pickle(index)
self.assertTrue(index.equals(unpickled))
def test_pickle_compat_construction(self):
# this is testing for pickle compat
if self._holder is None:
return
# need an object to create with
self.assertRaises(TypeError, self._holder)
def test_numeric_compat(self):
idx = self.create_index()
tm.assertRaisesRegexp(TypeError,
"cannot perform __mul__",
lambda : idx * 1)
tm.assertRaisesRegexp(TypeError,
"cannot perform __mul__",
lambda : 1 * idx)
div_err = "cannot perform __truediv__" if compat.PY3 else "cannot perform __div__"
tm.assertRaisesRegexp(TypeError,
div_err,
lambda : idx / 1)
tm.assertRaisesRegexp(TypeError,
div_err,
lambda : 1 / idx)
tm.assertRaisesRegexp(TypeError,
"cannot perform __floordiv__",
lambda : idx // 1)
tm.assertRaisesRegexp(TypeError,
"cannot perform __floordiv__",
lambda : 1 // idx)
def test_boolean_context_compat(self):
# boolean context compat
idx = self.create_index()
def f():
if idx:
pass
tm.assertRaisesRegexp(ValueError,'The truth value of a',f)
def test_ndarray_compat_properties(self):
idx = self.create_index()
self.assertTrue(idx.T.equals(idx))
self.assertTrue(idx.transpose().equals(idx))
values = idx.values
for prop in self._compat_props:
self.assertEqual(getattr(idx, prop), getattr(values, prop))
# test for validity
idx.nbytes
idx.values.nbytes
class TestIndex(Base, tm.TestCase):
_holder = Index
_multiprocess_can_split_ = True
def setUp(self):
self.indices = dict(
unicodeIndex = tm.makeUnicodeIndex(100),
strIndex = tm.makeStringIndex(100),
dateIndex = tm.makeDateIndex(100),
intIndex = tm.makeIntIndex(100),
floatIndex = tm.makeFloatIndex(100),
boolIndex = Index([True,False]),
empty = Index([]),
tuples = MultiIndex.from_tuples(lzip(['foo', 'bar', 'baz'],
[1, 2, 3]))
)
for name, ind in self.indices.items():
setattr(self, name, ind)
def create_index(self):
return Index(list('abcde'))
def test_wrong_number_names(self):
def testit(ind):
ind.names = ["apple", "banana", "carrot"]
for ind in self.indices.values():
assertRaisesRegexp(ValueError, "^Length", testit, ind)
def test_set_name_methods(self):
new_name = "This is the new name for this index"
indices = (self.dateIndex, self.intIndex, self.unicodeIndex,
self.empty)
for ind in indices:
original_name = ind.name
new_ind = ind.set_names([new_name])
self.assertEqual(new_ind.name, new_name)
self.assertEqual(ind.name, original_name)
res = ind.rename(new_name, inplace=True)
# should return None
self.assertIsNone(res)
self.assertEqual(ind.name, new_name)
self.assertEqual(ind.names, [new_name])
#with assertRaisesRegexp(TypeError, "list-like"):
# # should still fail even if it would be the right length
# ind.set_names("a")
with assertRaisesRegexp(ValueError, "Level must be None"):
ind.set_names("a", level=0)
# rename in place just leaves tuples and other containers alone
name = ('A', 'B')
ind = self.intIndex
ind.rename(name, inplace=True)
self.assertEqual(ind.name, name)
self.assertEqual(ind.names, [name])
def test_hash_error(self):
with tm.assertRaisesRegexp(TypeError,
"unhashable type: %r" %
type(self.strIndex).__name__):
hash(self.strIndex)
def test_new_axis(self):
new_index = self.dateIndex[None, :]
self.assertEqual(new_index.ndim, 2)
tm.assert_isinstance(new_index, np.ndarray)
def test_copy_and_deepcopy(self):
from copy import copy, deepcopy
for func in (copy, deepcopy):
idx_copy = func(self.strIndex)
self.assertIsNot(idx_copy, self.strIndex)
self.assertTrue(idx_copy.equals(self.strIndex))
new_copy = self.strIndex.copy(deep=True, name="banana")
self.assertEqual(new_copy.name, "banana")
new_copy2 = self.intIndex.copy(dtype=int)
self.assertEqual(new_copy2.dtype.kind, 'i')
def test_duplicates(self):
idx = Index([0, 0, 0])
self.assertFalse(idx.is_unique)
def test_sort(self):
self.assertRaises(TypeError, self.strIndex.sort)
def test_mutability(self):
self.assertRaises(TypeError, self.strIndex.__setitem__, 0, 'foo')
def test_constructor(self):
# regular instance creation
tm.assert_contains_all(self.strIndex, self.strIndex)
tm.assert_contains_all(self.dateIndex, self.dateIndex)
# casting
arr = np.array(self.strIndex)
index = Index(arr)
tm.assert_contains_all(arr, index)
self.assert_numpy_array_equal(self.strIndex, index)
# copy
arr = np.array(self.strIndex)
index = Index(arr, copy=True, name='name')
tm.assert_isinstance(index, Index)
self.assertEqual(index.name, 'name')
assert_array_equal(arr, index)
arr[0] = "SOMEBIGLONGSTRING"
self.assertNotEqual(index[0], "SOMEBIGLONGSTRING")
# what to do here?
# arr = np.array(5.)
# self.assertRaises(Exception, arr.view, Index)
def test_constructor_corner(self):
# corner case
self.assertRaises(TypeError, Index, 0)
def test_constructor_from_series(self):
expected = DatetimeIndex([Timestamp('20110101'),Timestamp('20120101'),Timestamp('20130101')])
s = Series([Timestamp('20110101'),Timestamp('20120101'),Timestamp('20130101')])
result = Index(s)
self.assertTrue(result.equals(expected))
result = DatetimeIndex(s)
self.assertTrue(result.equals(expected))
# GH 6273
# create from a series, passing a freq
s = Series(pd.to_datetime(['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990', '5-1-1990']))
result = DatetimeIndex(s, freq='MS')
expected = DatetimeIndex(['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990', '5-1-1990'],freq='MS')
self.assertTrue(result.equals(expected))
df = pd.DataFrame(np.random.rand(5,3))
df['date'] = ['1-1-1990', '2-1-1990', '3-1-1990', '4-1-1990', '5-1-1990']
result = DatetimeIndex(df['date'], freq='MS')
# GH 6274
# infer freq of same
result = pd.infer_freq(df['date'])
self.assertEqual(result,'MS')
def test_constructor_ndarray_like(self):
# GH 5460#issuecomment-44474502
# it should be possible to convert any object that satisfies the numpy
# ndarray interface directly into an Index
class ArrayLike(object):
def __init__(self, array):
self.array = array
def __array__(self, dtype=None):
return self.array
for array in [np.arange(5),
np.array(['a', 'b', 'c']),
date_range('2000-01-01', periods=3).values]:
expected = pd.Index(array)
result = pd.Index(ArrayLike(array))
self.assertTrue(result.equals(expected))
def test_index_ctor_infer_periodindex(self):
xp = period_range('2012-1-1', freq='M', periods=3)
rs = Index(xp)
assert_array_equal(rs, xp)
tm.assert_isinstance(rs, PeriodIndex)
def test_constructor_simple_new(self):
idx = Index([1, 2, 3, 4, 5], name='int')
result = idx._simple_new(idx, 'int')
self.assertTrue(result.equals(idx))
idx = Index([1.1, np.nan, 2.2, 3.0], name='float')
result = idx._simple_new(idx, 'float')
self.assertTrue(result.equals(idx))
idx = Index(['A', 'B', 'C', np.nan], name='obj')
result = idx._simple_new(idx, 'obj')
self.assertTrue(result.equals(idx))
def test_copy(self):
i = Index([], name='Foo')
i_copy = i.copy()
self.assertEqual(i_copy.name, 'Foo')
def test_view(self):
i = Index([], name='Foo')
i_view = i.view()
self.assertEqual(i_view.name, 'Foo')
def test_legacy_pickle_identity(self):
# GH 8431
pth = tm.get_data_path()
s1 = pd.read_pickle(os.path.join(pth,'s1-0.12.0.pickle'))
s2 = pd.read_pickle(os.path.join(pth,'s2-0.12.0.pickle'))
self.assertFalse(s1.index.identical(s2.index))
self.assertFalse(s1.index.equals(s2.index))
def test_astype(self):
casted = self.intIndex.astype('i8')
# it works!
casted.get_loc(5)
# pass on name
self.intIndex.name = 'foobar'
casted = self.intIndex.astype('i8')
self.assertEqual(casted.name, 'foobar')
def test_compat(self):
self.strIndex.tolist()
def test_equals(self):
# same
self.assertTrue(Index(['a', 'b', 'c']).equals(Index(['a', 'b', 'c'])))
# different length
self.assertFalse(Index(['a', 'b', 'c']).equals(Index(['a', 'b'])))
# same length, different values
self.assertFalse(Index(['a', 'b', 'c']).equals(Index(['a', 'b', 'd'])))
# Must also be an Index
self.assertFalse(Index(['a', 'b', 'c']).equals(['a', 'b', 'c']))
def test_insert(self):
# GH 7256
# validate neg/pos inserts
result = Index(['b', 'c', 'd'])
#test 0th element
self.assertTrue(Index(['a', 'b', 'c', 'd']).equals(
result.insert(0, 'a')))
#test Nth element that follows Python list behavior
self.assertTrue(Index(['b', 'c', 'e', 'd']).equals(
result.insert(-1, 'e')))
#test loc +/- neq (0, -1)
self.assertTrue(result.insert(1, 'z').equals(
result.insert(-2, 'z')))
#test empty
null_index = Index([])
self.assertTrue(Index(['a']).equals(
null_index.insert(0, 'a')))
def test_delete(self):
idx = Index(['a', 'b', 'c', 'd'], name='idx')
expected = Index(['b', 'c', 'd'], name='idx')
result = idx.delete(0)
self.assertTrue(result.equals(expected))
self.assertEqual(result.name, expected.name)
expected = Index(['a', 'b', 'c'], name='idx')
result = idx.delete(-1)
self.assertTrue(result.equals(expected))
self.assertEqual(result.name, expected.name)
with tm.assertRaises((IndexError, ValueError)):
            # either depending on the numpy version
result = idx.delete(5)
def test_identical(self):
# index
i1 = Index(['a', 'b', 'c'])
i2 = Index(['a', 'b', 'c'])
self.assertTrue(i1.identical(i2))
i1 = i1.rename('foo')
self.assertTrue(i1.equals(i2))
self.assertFalse(i1.identical(i2))
i2 = i2.rename('foo')
self.assertTrue(i1.identical(i2))
i3 = Index([('a', 'a'), ('a', 'b'), ('b', 'a')])
i4 = Index([('a', 'a'), ('a', 'b'), ('b', 'a')], tupleize_cols=False)
self.assertFalse(i3.identical(i4))
def test_is_(self):
ind = Index(range(10))
self.assertTrue(ind.is_(ind))
self.assertTrue(ind.is_(ind.view().view().view().view()))
self.assertFalse(ind.is_(Index(range(10))))
self.assertFalse(ind.is_(ind.copy()))
self.assertFalse(ind.is_(ind.copy(deep=False)))
self.assertFalse(ind.is_(ind[:]))
self.assertFalse(ind.is_(ind.view(np.ndarray).view(Index)))
self.assertFalse(ind.is_(np.array(range(10))))
# quasi-implementation dependent
self.assertTrue(ind.is_(ind.view()))
ind2 = ind.view()
ind2.name = 'bob'
self.assertTrue(ind.is_(ind2))
self.assertTrue(ind2.is_(ind))
# doesn't matter if Indices are *actually* views of underlying data,
self.assertFalse(ind.is_(Index(ind.values)))
arr = np.array(range(1, 11))
ind1 = Index(arr, copy=False)
ind2 = Index(arr, copy=False)
self.assertFalse(ind1.is_(ind2))
def test_asof(self):
d = self.dateIndex[0]
self.assertIs(self.dateIndex.asof(d), d)
self.assertTrue(np.isnan(self.dateIndex.asof(d - timedelta(1))))
d = self.dateIndex[-1]
self.assertEqual(self.dateIndex.asof(d + timedelta(1)), d)
d = self.dateIndex[0].to_datetime()
tm.assert_isinstance(self.dateIndex.asof(d), Timestamp)
def test_asof_datetime_partial(self):
idx = pd.date_range('2010-01-01', periods=2, freq='m')
expected = Timestamp('2010-01-31')
result = idx.asof('2010-02')
self.assertEqual(result, expected)
def test_nanosecond_index_access(self):
s = Series([Timestamp('20130101')]).values.view('i8')[0]
r = DatetimeIndex([s + 50 + i for i in range(100)])
x = Series(np.random.randn(100), index=r)
first_value = x.asof(x.index[0])
# this does not yet work, as parsing strings is done via dateutil
#self.assertEqual(first_value, x['2013-01-01 00:00:00.000000050+0000'])
self.assertEqual(first_value, x[Timestamp(np.datetime64('2013-01-01 00:00:00.000000050+0000', 'ns'))])
def test_argsort(self):
result = self.strIndex.argsort()
expected = np.array(self.strIndex).argsort()
self.assert_numpy_array_equal(result, expected)
def test_comparators(self):
index = self.dateIndex
element = index[len(index) // 2]
element = _to_m8(element)
arr = np.array(index)
def _check(op):
arr_result = op(arr, element)
index_result = op(index, element)
self.assertIsInstance(index_result, np.ndarray)
self.assert_numpy_array_equal(arr_result, index_result)
_check(operator.eq)
_check(operator.ne)
_check(operator.gt)
_check(operator.lt)
_check(operator.ge)
_check(operator.le)
def test_booleanindex(self):
boolIdx = np.repeat(True, len(self.strIndex)).astype(bool)
boolIdx[5:30:2] = False
subIndex = self.strIndex[boolIdx]
for i, val in enumerate(subIndex):
self.assertEqual(subIndex.get_loc(val), i)
subIndex = self.strIndex[list(boolIdx)]
for i, val in enumerate(subIndex):
self.assertEqual(subIndex.get_loc(val), i)
def test_fancy(self):
sl = self.strIndex[[1, 2, 3]]
for i in sl:
self.assertEqual(i, sl[sl.get_loc(i)])
def test_empty_fancy(self):
empty_farr = np.array([], dtype=np.float_)
empty_iarr = np.array([], dtype=np.int_)
empty_barr = np.array([], dtype=np.bool_)
# pd.DatetimeIndex is excluded, because it overrides getitem and should
# be tested separately.
for idx in [self.strIndex, self.intIndex, self.floatIndex]:
empty_idx = idx.__class__([])
values = idx.values
self.assertTrue(idx[[]].identical(empty_idx))
self.assertTrue(idx[empty_iarr].identical(empty_idx))
self.assertTrue(idx[empty_barr].identical(empty_idx))
# np.ndarray only accepts ndarray of int & bool dtypes, so should
# Index.
self.assertRaises(IndexError, idx.__getitem__, empty_farr)
def test_getitem(self):
arr = np.array(self.dateIndex)
exp = self.dateIndex[5]
exp = _to_m8(exp)
self.assertEqual(exp, arr[5])
def test_shift(self):
shifted = self.dateIndex.shift(0, timedelta(1))
self.assertIs(shifted, self.dateIndex)
shifted = self.dateIndex.shift(5, timedelta(1))
self.assert_numpy_array_equal(shifted, self.dateIndex + timedelta(5))
shifted = self.dateIndex.shift(1, 'B')
self.assert_numpy_array_equal(shifted, self.dateIndex + offsets.BDay())
shifted.name = 'shifted'
self.assertEqual(shifted.name, shifted.shift(1, 'D').name)
def test_intersection(self):
first = self.strIndex[:20]
second = self.strIndex[:10]
intersect = first.intersection(second)
self.assertTrue(tm.equalContents(intersect, second))
# Corner cases
inter = first.intersection(first)
self.assertIs(inter, first)
# non-iterable input
assertRaisesRegexp(TypeError, "iterable", first.intersection, 0.5)
idx1 = Index([1, 2, 3, 4, 5], name='idx')
# if target has the same name, it is preserved
idx2 = Index([3, 4, 5, 6, 7], name='idx')
expected2 = Index([3, 4, 5], name='idx')
result2 = idx1.intersection(idx2)
self.assertTrue(result2.equals(expected2))
self.assertEqual(result2.name, expected2.name)
# if target name is different, it will be reset
idx3 = Index([3, 4, 5, 6, 7], name='other')
expected3 = Index([3, 4, 5], name=None)
result3 = idx1.intersection(idx3)
self.assertTrue(result3.equals(expected3))
self.assertEqual(result3.name, expected3.name)
# non monotonic
idx1 = Index([5, 3, 2, 4, 1], name='idx')
idx2 = Index([4, 7, 6, 5, 3], name='idx')
result2 = idx1.intersection(idx2)
self.assertTrue(tm.equalContents(result2, expected2))
self.assertEqual(result2.name, expected2.name)
idx3 = Index([4, 7, 6, 5, 3], name='other')
result3 = idx1.intersection(idx3)
self.assertTrue(tm.equalContents(result3, expected3))
self.assertEqual(result3.name, expected3.name)
# non-monotonic non-unique
idx1 = Index(['A','B','A','C'])
idx2 = Index(['B','D'])
expected = Index(['B'], dtype='object')
result = idx1.intersection(idx2)
self.assertTrue(result.equals(expected))
def test_union(self):
first = self.strIndex[5:20]
second = self.strIndex[:10]
everything = self.strIndex[:20]
union = first.union(second)
self.assertTrue(tm.equalContents(union, everything))
# Corner cases
union = first.union(first)
self.assertIs(union, first)
union = first.union([])
self.assertIs(union, first)
union = Index([]).union(first)
self.assertIs(union, first)
# non-iterable input
assertRaisesRegexp(TypeError, "iterable", first.union, 0.5)
# preserve names
first.name = 'A'
second.name = 'A'
union = first.union(second)
self.assertEqual(union.name, 'A')
second.name = 'B'
union = first.union(second)
self.assertIsNone(union.name)
def test_add(self):
# - API change GH 8226
with tm.assert_produces_warning():
self.strIndex + self.strIndex
firstCat = self.strIndex.union(self.dateIndex)
secondCat = self.strIndex.union(self.strIndex)
if self.dateIndex.dtype == np.object_:
appended = np.append(self.strIndex, self.dateIndex)
else:
appended = np.append(self.strIndex, self.dateIndex.astype('O'))
self.assertTrue(tm.equalContents(firstCat, appended))
self.assertTrue(tm.equalContents(secondCat, self.strIndex))
tm.assert_contains_all(self.strIndex, firstCat)
tm.assert_contains_all(self.strIndex, secondCat)
tm.assert_contains_all(self.dateIndex, firstCat)
def test_append_multiple(self):
index = Index(['a', 'b', 'c', 'd', 'e', 'f'])
foos = [index[:2], index[2:4], index[4:]]
result = foos[0].append(foos[1:])
self.assertTrue(result.equals(index))
# empty
result = index.append([])
self.assertTrue(result.equals(index))
def test_append_empty_preserve_name(self):
left = Index([], name='foo')
right = Index([1, 2, 3], name='foo')
result = left.append(right)
self.assertEqual(result.name, 'foo')
left = Index([], name='foo')
right = Index([1, 2, 3], name='bar')
result = left.append(right)
self.assertIsNone(result.name)
def test_add_string(self):
# from bug report
index = Index(['a', 'b', 'c'])
index2 = index + 'foo'
self.assertNotIn('a', index2)
self.assertIn('afoo', index2)
def test_iadd_string(self):
index = pd.Index(['a', 'b', 'c'])
# doesn't fail test unless there is a check before `+=`
self.assertIn('a', index)
index += '_x'
self.assertIn('a_x', index)
def test_difference(self):
first = self.strIndex[5:20]
second = self.strIndex[:10]
answer = self.strIndex[10:20]
first.name = 'name'
# different names
result = first.difference(second)
self.assertTrue(tm.equalContents(result, answer))
self.assertEqual(result.name, None)
# same names
second.name = 'name'
result = first.difference(second)
self.assertEqual(result.name, 'name')
# with empty
result = first.difference([])
self.assertTrue(tm.equalContents(result, first))
self.assertEqual(result.name, first.name)
        # with everything
result = first.difference(first)
self.assertEqual(len(result), 0)
self.assertEqual(result.name, first.name)
# non-iterable input
assertRaisesRegexp(TypeError, "iterable", first.diff, 0.5)
def test_symmetric_diff(self):
# smoke
idx1 = Index([1, 2, 3, 4], name='idx1')
idx2 = Index([2, 3, 4, 5])
result = idx1.sym_diff(idx2)
expected = Index([1, 5])
self.assertTrue(tm.equalContents(result, expected))
self.assertIsNone(result.name)
# __xor__ syntax
expected = idx1 ^ idx2
self.assertTrue(tm.equalContents(result, expected))
self.assertIsNone(result.name)
# multiIndex
idx1 = MultiIndex.from_tuples(self.tuples)
idx2 = MultiIndex.from_tuples([('foo', 1), ('bar', 3)])
result = idx1.sym_diff(idx2)
expected = MultiIndex.from_tuples([('bar', 2), ('baz', 3), ('bar', 3)])
self.assertTrue(tm.equalContents(result, expected))
# nans:
# GH #6444, sorting of nans. Make sure the number of nans is right
# and the correct non-nan values are there. punt on sorting.
idx1 = Index([1, 2, 3, np.nan])
idx2 = Index([0, 1, np.nan])
result = idx1.sym_diff(idx2)
# expected = Index([0.0, np.nan, 2.0, 3.0, np.nan])
nans = pd.isnull(result)
self.assertEqual(nans.sum(), 2)
self.assertEqual((~nans).sum(), 3)
[self.assertIn(x, result) for x in [0.0, 2.0, 3.0]]
# other not an Index:
idx1 = Index([1, 2, 3, 4], name='idx1')
idx2 = np.array([2, 3, 4, 5])
expected = Index([1, 5])
result = idx1.sym_diff(idx2)
self.assertTrue(tm.equalContents(result, expected))
self.assertEqual(result.name, 'idx1')
result = idx1.sym_diff(idx2, result_name='new_name')
self.assertTrue(tm.equalContents(result, expected))
self.assertEqual(result.name, 'new_name')
# other isn't iterable
with tm.assertRaises(TypeError):
Index(idx1,dtype='object') - 1
def test_pickle(self):
self.verify_pickle(self.strIndex)
self.strIndex.name = 'foo'
self.verify_pickle(self.strIndex)
self.verify_pickle(self.dateIndex)
def test_is_numeric(self):
self.assertFalse(self.dateIndex.is_numeric())
self.assertFalse(self.strIndex.is_numeric())
self.assertTrue(self.intIndex.is_numeric())
self.assertTrue(self.floatIndex.is_numeric())
def test_is_object(self):
self.assertTrue(self.strIndex.is_object())
self.assertTrue(self.boolIndex.is_object())
self.assertFalse(self.intIndex.is_object())
self.assertFalse(self.dateIndex.is_object())
self.assertFalse(self.floatIndex.is_object())
def test_is_all_dates(self):
self.assertTrue(self.dateIndex.is_all_dates)
self.assertFalse(self.strIndex.is_all_dates)
self.assertFalse(self.intIndex.is_all_dates)
def test_summary(self):
self._check_method_works(Index.summary)
# GH3869
ind = Index(['{other}%s', "~:{range}:0"], name='A')
result = ind.summary()
# shouldn't be formatted accidentally.
self.assertIn('~:{range}:0', result)
self.assertIn('{other}%s', result)
def test_format(self):
self._check_method_works(Index.format)
index = Index([datetime.now()])
formatted = index.format()
expected = [str(index[0])]
self.assertEqual(formatted, expected)
# 2845
index = Index([1, 2.0+3.0j, np.nan])
formatted = index.format()
expected = [str(index[0]), str(index[1]), u('NaN')]
self.assertEqual(formatted, expected)
# is this really allowed?
index = Index([1, 2.0+3.0j, None])
formatted = index.format()
expected = [str(index[0]), str(index[1]), u('NaN')]
self.assertEqual(formatted, expected)
self.strIndex[:0].format()
def test_format_with_name_time_info(self):
# bug I fixed 12/20/2011
inc = timedelta(hours=4)
dates = Index([dt + inc for dt in self.dateIndex], name='something')
formatted = dates.format(name=True)
self.assertEqual(formatted[0], 'something')
def test_format_datetime_with_time(self):
t = Index([datetime(2012, 2, 7), datetime(2012, 2, 7, 23)])
result = t.format()
expected = ['2012-02-07 00:00:00', '2012-02-07 23:00:00']
self.assertEqual(len(result), 2)
self.assertEqual(result, expected)
def test_format_none(self):
values = ['a', 'b', 'c', None]
idx = Index(values)
idx.format()
self.assertIsNone(idx[3])
def test_take(self):
indexer = [4, 3, 0, 2]
result = self.dateIndex.take(indexer)
expected = self.dateIndex[indexer]
self.assertTrue(result.equals(expected))
def _check_method_works(self, method):
method(self.empty)
method(self.dateIndex)
method(self.unicodeIndex)
method(self.strIndex)
method(self.intIndex)
method(self.tuples)
def test_get_indexer(self):
idx1 = Index([1, 2, 3, 4, 5])
idx2 = Index([2, 4, 6])
r1 = idx1.get_indexer(idx2)
assert_almost_equal(r1, [1, 3, -1])
r1 = idx2.get_indexer(idx1, method='pad')
assert_almost_equal(r1, [-1, 0, 0, 1, 1])
rffill1 = idx2.get_indexer(idx1, method='ffill')
assert_almost_equal(r1, rffill1)
r1 = idx2.get_indexer(idx1, method='backfill')
assert_almost_equal(r1, [0, 0, 1, 1, 2])
rbfill1 = idx2.get_indexer(idx1, method='bfill')
assert_almost_equal(r1, rbfill1)
def test_slice_locs(self):
for dtype in [int, float]:
idx = Index(np.array([0, 1, 2, 5, 6, 7, 9, 10], dtype=dtype))
n = len(idx)
self.assertEqual(idx.slice_locs(start=2), (2, n))
self.assertEqual(idx.slice_locs(start=3), (3, n))
self.assertEqual(idx.slice_locs(3, 8), (3, 6))
self.assertEqual(idx.slice_locs(5, 10), (3, n))
self.assertEqual(idx.slice_locs(5.0, 10.0), (3, n))
self.assertEqual(idx.slice_locs(4.5, 10.5), (3, 8))
self.assertEqual(idx.slice_locs(end=8), (0, 6))
self.assertEqual(idx.slice_locs(end=9), (0, 7))
idx2 = idx[::-1]
self.assertEqual(idx2.slice_locs(8, 2), (2, 6))
self.assertEqual(idx2.slice_locs(8.5, 1.5), (2, 6))
self.assertEqual(idx2.slice_locs(7, 3), (2, 5))
self.assertEqual(idx2.slice_locs(10.5, -1), (0, n))
def test_slice_locs_dup(self):
idx = Index(['a', 'a', 'b', 'c', 'd', 'd'])
self.assertEqual(idx.slice_locs('a', 'd'), (0, 6))
self.assertEqual(idx.slice_locs(end='d'), (0, 6))
self.assertEqual(idx.slice_locs('a', 'c'), (0, 4))
self.assertEqual(idx.slice_locs('b', 'd'), (2, 6))
idx2 = idx[::-1]
self.assertEqual(idx2.slice_locs('d', 'a'), (0, 6))
self.assertEqual(idx2.slice_locs(end='a'), (0, 6))
self.assertEqual(idx2.slice_locs('d', 'b'), (0, 4))
self.assertEqual(idx2.slice_locs('c', 'a'), (2, 6))
for dtype in [int, float]:
idx = Index(np.array([10, 12, 12, 14], dtype=dtype))
self.assertEqual(idx.slice_locs(12, 12), (1, 3))
self.assertEqual(idx.slice_locs(11, 13), (1, 3))
idx2 = idx[::-1]
self.assertEqual(idx2.slice_locs(12, 12), (1, 3))
self.assertEqual(idx2.slice_locs(13, 11), (1, 3))
def test_slice_locs_na(self):
idx = Index([np.nan, 1, 2])
self.assertRaises(KeyError, idx.slice_locs, start=1.5)
self.assertRaises(KeyError, idx.slice_locs, end=1.5)
self.assertEqual(idx.slice_locs(1), (1, 3))
self.assertEqual(idx.slice_locs(np.nan), (0, 3))
idx = Index([np.nan, np.nan, 1, 2])
self.assertRaises(KeyError, idx.slice_locs, np.nan)
def test_drop(self):
n = len(self.strIndex)
dropped = self.strIndex.drop(self.strIndex[lrange(5, 10)])
expected = self.strIndex[lrange(5) + lrange(10, n)]
self.assertTrue(dropped.equals(expected))
self.assertRaises(ValueError, self.strIndex.drop, ['foo', 'bar'])
dropped = self.strIndex.drop(self.strIndex[0])
expected = self.strIndex[1:]
self.assertTrue(dropped.equals(expected))
ser = Index([1, 2, 3])
dropped = ser.drop(1)
expected = Index([2, 3])
self.assertTrue(dropped.equals(expected))
def test_tuple_union_bug(self):
import pandas
import numpy as np
aidx1 = np.array([(1, 'A'), (2, 'A'), (1, 'B'), (2, 'B')],
dtype=[('num', int), ('let', 'a1')])
aidx2 = np.array([(1, 'A'), (2, 'A'), (1, 'B'), (2, 'B'), (1, 'C'), (2,
'C')], dtype=[('num', int), ('let', 'a1')])
idx1 = pandas.Index(aidx1)
idx2 = pandas.Index(aidx2)
# intersection broken?
int_idx = idx1.intersection(idx2)
# needs to be 1d like idx1 and idx2
expected = idx1[:4] # pandas.Index(sorted(set(idx1) & set(idx2)))
self.assertEqual(int_idx.ndim, 1)
self.assertTrue(int_idx.equals(expected))
# union broken
union_idx = idx1.union(idx2)
expected = idx2
self.assertEqual(union_idx.ndim, 1)
self.assertTrue(union_idx.equals(expected))
def test_is_monotonic_incomparable(self):
index = Index([5, datetime.now(), 7])
self.assertFalse(index.is_monotonic)
self.assertFalse(index.is_monotonic_decreasing)
def test_get_set_value(self):
values = np.random.randn(100)
date = self.dateIndex[67]
assert_almost_equal(self.dateIndex.get_value(values, date),
values[67])
self.dateIndex.set_value(values, date, 10)
self.assertEqual(values[67], 10)
def test_isin(self):
values = ['foo', 'bar', 'quux']
idx = Index(['qux', 'baz', 'foo', 'bar'])
result = idx.isin(values)
expected = np.array([False, False, True, True])
self.assert_numpy_array_equal(result, expected)
# empty, return dtype bool
idx = Index([])
result = idx.isin(values)
self.assertEqual(len(result), 0)
self.assertEqual(result.dtype, np.bool_)
def test_isin_nan(self):
self.assert_numpy_array_equal(
Index(['a', np.nan]).isin([np.nan]), [False, True])
self.assert_numpy_array_equal(
Index(['a', pd.NaT]).isin([pd.NaT]), [False, True])
self.assert_numpy_array_equal(
Index(['a', np.nan]).isin([float('nan')]), [False, False])
self.assert_numpy_array_equal(
Index(['a', np.nan]).isin([pd.NaT]), [False, False])
# Float64Index overrides isin, so must be checked separately
self.assert_numpy_array_equal(
Float64Index([1.0, np.nan]).isin([np.nan]), [False, True])
self.assert_numpy_array_equal(
Float64Index([1.0, np.nan]).isin([float('nan')]), [False, True])
self.assert_numpy_array_equal(
Float64Index([1.0, np.nan]).isin([pd.NaT]), [False, True])
def test_isin_level_kwarg(self):
def check_idx(idx):
values = idx.tolist()[-2:] + ['nonexisting']
expected = np.array([False, False, True, True])
self.assert_numpy_array_equal(expected, idx.isin(values, level=0))
self.assert_numpy_array_equal(expected, idx.isin(values, level=-1))
self.assertRaises(IndexError, idx.isin, values, level=1)
self.assertRaises(IndexError, idx.isin, values, level=10)
self.assertRaises(IndexError, idx.isin, values, level=-2)
self.assertRaises(KeyError, idx.isin, values, level=1.0)
self.assertRaises(KeyError, idx.isin, values, level='foobar')
idx.name = 'foobar'
self.assert_numpy_array_equal(expected,
idx.isin(values, level='foobar'))
self.assertRaises(KeyError, idx.isin, values, level='xyzzy')
self.assertRaises(KeyError, idx.isin, values, level=np.nan)
check_idx(Index(['qux', 'baz', 'foo', 'bar']))
# Float64Index overrides isin, so must be checked separately
check_idx(Float64Index([1.0, 2.0, 3.0, 4.0]))
def test_boolean_cmp(self):
values = [1, 2, 3, 4]
idx = Index(values)
res = (idx == values)
self.assert_numpy_array_equal(res,np.array([True,True,True,True],dtype=bool))
def test_get_level_values(self):
result = self.strIndex.get_level_values(0)
self.assertTrue(result.equals(self.strIndex))
def test_slice_keep_name(self):
idx = Index(['a', 'b'], name='asdf')
self.assertEqual(idx.name, idx[1:].name)
def test_join_self(self):
# instance attributes of the form self.<name>Index
indices = 'unicode', 'str', 'date', 'int', 'float'
kinds = 'outer', 'inner', 'left', 'right'
for index_kind in indices:
res = getattr(self, '{0}Index'.format(index_kind))
for kind in kinds:
joined = res.join(res, how=kind)
self.assertIs(res, joined)
def test_indexing_doesnt_change_class(self):
idx = Index([1, 2, 3, 'a', 'b', 'c'])
self.assertTrue(idx[1:3].identical(
pd.Index([2, 3], dtype=np.object_)))
self.assertTrue(idx[[0,1]].identical(
pd.Index([1, 2], dtype=np.object_)))
def test_outer_join_sort(self):
left_idx = Index(np.random.permutation(15))
right_idx = tm.makeDateIndex(10)
with tm.assert_produces_warning(RuntimeWarning):
joined = left_idx.join(right_idx, how='outer')
# right_idx in this case because DatetimeIndex has join precedence over
# Int64Index
expected = right_idx.astype(object).union(left_idx.astype(object))
tm.assert_index_equal(joined, expected)
def test_nan_first_take_datetime(self):
idx = Index([pd.NaT, Timestamp('20130101'), Timestamp('20130102')])
res = idx.take([-1, 0, 1])
exp = Index([idx[-1], idx[0], idx[1]])
tm.assert_index_equal(res, exp)
def test_reindex_preserves_name_if_target_is_list_or_ndarray(self):
# GH6552
idx = pd.Index([0, 1, 2])
dt_idx = pd.date_range('20130101', periods=3)
idx.name = None
self.assertEqual(idx.reindex([])[0].name, None)
self.assertEqual(idx.reindex(np.array([]))[0].name, None)
self.assertEqual(idx.reindex(idx.tolist())[0].name, None)
self.assertEqual(idx.reindex(idx.tolist()[:-1])[0].name, None)
self.assertEqual(idx.reindex(idx.values)[0].name, None)
self.assertEqual(idx.reindex(idx.values[:-1])[0].name, None)
# Must preserve name even if dtype changes.
self.assertEqual(idx.reindex(dt_idx.values)[0].name, None)
self.assertEqual(idx.reindex(dt_idx.tolist())[0].name, None)
idx.name = 'foobar'
self.assertEqual(idx.reindex([])[0].name, 'foobar')
self.assertEqual(idx.reindex(np.array([]))[0].name, 'foobar')
self.assertEqual(idx.reindex(idx.tolist())[0].name, 'foobar')
self.assertEqual(idx.reindex(idx.tolist()[:-1])[0].name, 'foobar')
self.assertEqual(idx.reindex(idx.values)[0].name, 'foobar')
self.assertEqual(idx.reindex(idx.values[:-1])[0].name, 'foobar')
# Must preserve name even if dtype changes.
self.assertEqual(idx.reindex(dt_idx.values)[0].name, 'foobar')
self.assertEqual(idx.reindex(dt_idx.tolist())[0].name, 'foobar')
def test_reindex_preserves_type_if_target_is_empty_list_or_array(self):
# GH7774
idx = pd.Index(list('abc'))
def get_reindex_type(target):
return idx.reindex(target)[0].dtype.type
self.assertEqual(get_reindex_type([]), np.object_)
self.assertEqual(get_reindex_type(np.array([])), np.object_)
self.assertEqual(get_reindex_type(np.array([], dtype=np.int64)),
np.object_)
def test_reindex_doesnt_preserve_type_if_target_is_empty_index(self):
# GH7774
idx = pd.Index(list('abc'))
def get_reindex_type(target):
return idx.reindex(target)[0].dtype.type
self.assertEqual(get_reindex_type(pd.Int64Index([])), np.int64)
self.assertEqual(get_reindex_type(pd.Float64Index([])), np.float64)
self.assertEqual(get_reindex_type(pd.DatetimeIndex([])), np.datetime64)
reindexed = idx.reindex(pd.MultiIndex([pd.Int64Index([]),
pd.Float64Index([])],
[[], []]))[0]
self.assertEqual(reindexed.levels[0].dtype.type, np.int64)
self.assertEqual(reindexed.levels[1].dtype.type, np.float64)
class Numeric(Base):
def test_numeric_compat(self):
idx = self._holder(np.arange(5,dtype='int64'))
        didx = self._holder(np.arange(5, dtype='int64') ** 2)
result = idx * 1
tm.assert_index_equal(result, idx)
result = 1 * idx
tm.assert_index_equal(result, idx)
result = idx * idx
tm.assert_index_equal(result, didx)
result = idx / 1
tm.assert_index_equal(result, idx)
result = idx // 1
tm.assert_index_equal(result, idx)
result = idx * np.array(5,dtype='int64')
tm.assert_index_equal(result, self._holder(np.arange(5,dtype='int64')*5))
result = idx * np.arange(5,dtype='int64')
tm.assert_index_equal(result, didx)
result = idx * Series(np.arange(5,dtype='int64'))
tm.assert_index_equal(result, didx)
result = idx * Series(np.arange(5,dtype='float64')+0.1)
tm.assert_index_equal(result,
Float64Index(np.arange(5,dtype='float64')*(np.arange(5,dtype='float64')+0.1)))
# invalid
self.assertRaises(TypeError, lambda : idx * date_range('20130101',periods=5))
self.assertRaises(ValueError, lambda : idx * self._holder(np.arange(3)))
self.assertRaises(ValueError, lambda : idx * np.array([1,2]))
def test_explicit_conversions(self):
# GH 8608
        # add/sub are overridden explicitly for Float/Int Index
idx = self._holder(np.arange(5,dtype='int64'))
# float conversions
arr = np.arange(5,dtype='int64')*3.2
expected = Float64Index(arr)
fidx = idx * 3.2
tm.assert_index_equal(fidx,expected)
fidx = 3.2 * idx
tm.assert_index_equal(fidx,expected)
# interops with numpy arrays
expected = Float64Index(arr)
a = np.zeros(5,dtype='float64')
result = fidx - a
tm.assert_index_equal(result,expected)
expected = Float64Index(-arr)
a = np.zeros(5,dtype='float64')
result = a - fidx
tm.assert_index_equal(result,expected)
def test_ufunc_compat(self):
idx = self._holder(np.arange(5,dtype='int64'))
result = np.sin(idx)
expected = Float64Index(np.sin(np.arange(5,dtype='int64')))
tm.assert_index_equal(result, expected)
class TestFloat64Index(Numeric, tm.TestCase):
_holder = Float64Index
_multiprocess_can_split_ = True
def setUp(self):
self.mixed = Float64Index([1.5, 2, 3, 4, 5])
self.float = Float64Index(np.arange(5) * 2.5)
def create_index(self):
return Float64Index(np.arange(5,dtype='float64'))
def test_hash_error(self):
with tm.assertRaisesRegexp(TypeError,
"unhashable type: %r" %
type(self.float).__name__):
hash(self.float)
def test_repr_roundtrip(self):
for ind in (self.mixed, self.float):
tm.assert_index_equal(eval(repr(ind)), ind)
def check_is_index(self, i):
self.assertIsInstance(i, Index)
self.assertNotIsInstance(i, Float64Index)
def check_coerce(self, a, b, is_float_index=True):
self.assertTrue(a.equals(b))
if is_float_index:
self.assertIsInstance(b, Float64Index)
else:
self.check_is_index(b)
def test_constructor(self):
# explicit construction
index = Float64Index([1,2,3,4,5])
self.assertIsInstance(index, Float64Index)
self.assertTrue((index.values == np.array([1,2,3,4,5],dtype='float64')).all())
index = Float64Index(np.array([1,2,3,4,5]))
self.assertIsInstance(index, Float64Index)
index = Float64Index([1.,2,3,4,5])
self.assertIsInstance(index, Float64Index)
index = Float64Index(np.array([1.,2,3,4,5]))
self.assertIsInstance(index, Float64Index)
self.assertEqual(index.dtype, float)
index = Float64Index(np.array([1.,2,3,4,5]),dtype=np.float32)
self.assertIsInstance(index, Float64Index)
self.assertEqual(index.dtype, np.float64)
index = Float64Index(np.array([1,2,3,4,5]),dtype=np.float32)
self.assertIsInstance(index, Float64Index)
self.assertEqual(index.dtype, np.float64)
# nan handling
result = Float64Index([np.nan, np.nan])
self.assertTrue(pd.isnull(result.values).all())
result = Float64Index(np.array([np.nan]))
self.assertTrue(pd.isnull(result.values).all())
result = Index(np.array([np.nan]))
self.assertTrue(pd.isnull(result.values).all())
def test_constructor_invalid(self):
# invalid
self.assertRaises(TypeError, Float64Index, 0.)
self.assertRaises(TypeError, Float64Index, ['a','b',0.])
self.assertRaises(TypeError, Float64Index, [Timestamp('20130101')])
def test_constructor_coerce(self):
self.check_coerce(self.mixed,Index([1.5, 2, 3, 4, 5]))
self.check_coerce(self.float,Index(np.arange(5) * 2.5))
self.check_coerce(self.float,Index(np.array(np.arange(5) * 2.5, dtype=object)))
def test_constructor_explicit(self):
# these don't auto convert
self.check_coerce(self.float,Index((np.arange(5) * 2.5), dtype=object),
is_float_index=False)
self.check_coerce(self.mixed,Index([1.5, 2, 3, 4, 5],dtype=object),
is_float_index=False)
def test_astype(self):
result = self.float.astype(object)
self.assertTrue(result.equals(self.float))
self.assertTrue(self.float.equals(result))
self.check_is_index(result)
i = self.mixed.copy()
i.name = 'foo'
result = i.astype(object)
self.assertTrue(result.equals(i))
self.assertTrue(i.equals(result))
self.check_is_index(result)
def test_equals(self):
i = Float64Index([1.0,2.0])
self.assertTrue(i.equals(i))
self.assertTrue(i.identical(i))
i2 = Float64Index([1.0,2.0])
self.assertTrue(i.equals(i2))
i = Float64Index([1.0,np.nan])
self.assertTrue(i.equals(i))
self.assertTrue(i.identical(i))
i2 = Float64Index([1.0,np.nan])
self.assertTrue(i.equals(i2))
def test_get_loc_na(self):
idx = Float64Index([np.nan, 1, 2])
self.assertEqual(idx.get_loc(1), 1)
self.assertEqual(idx.get_loc(np.nan), 0)
idx = Float64Index([np.nan, 1, np.nan])
self.assertEqual(idx.get_loc(1), 1)
self.assertRaises(KeyError, idx.slice_locs, np.nan)
def test_contains_nans(self):
i = Float64Index([1.0, 2.0, np.nan])
self.assertTrue(np.nan in i)
def test_contains_not_nans(self):
i = Float64Index([1.0, 2.0, np.nan])
self.assertTrue(1.0 in i)
def test_doesnt_contain_all_the_things(self):
i = Float64Index([np.nan])
self.assertFalse(i.isin([0]).item())
self.assertFalse(i.isin([1]).item())
self.assertTrue(i.isin([np.nan]).item())
def test_nan_multiple_containment(self):
i = Float64Index([1.0, np.nan])
np.testing.assert_array_equal(i.isin([1.0]), np.array([True, False]))
np.testing.assert_array_equal(i.isin([2.0, np.pi]),
np.array([False, False]))
np.testing.assert_array_equal(i.isin([np.nan]),
np.array([False, True]))
np.testing.assert_array_equal(i.isin([1.0, np.nan]),
np.array([True, True]))
i = Float64Index([1.0, 2.0])
np.testing.assert_array_equal(i.isin([np.nan]),
np.array([False, False]))
def test_astype_from_object(self):
index = Index([1.0, np.nan, 0.2], dtype='object')
result = index.astype(float)
expected = Float64Index([1.0, np.nan, 0.2])
tm.assert_equal(result.dtype, expected.dtype)
tm.assert_index_equal(result, expected)
class TestInt64Index(Numeric, tm.TestCase):
_holder = Int64Index
_multiprocess_can_split_ = True
def setUp(self):
self.index = Int64Index(np.arange(0, 20, 2))
def create_index(self):
return Int64Index(np.arange(5,dtype='int64'))
def test_too_many_names(self):
def testit():
self.index.names = ["roger", "harold"]
assertRaisesRegexp(ValueError, "^Length", testit)
def test_constructor(self):
# pass list, coerce fine
index = Int64Index([-5, 0, 1, 2])
expected = np.array([-5, 0, 1, 2], dtype=np.int64)
self.assert_numpy_array_equal(index, expected)
# from iterable
index = Int64Index(iter([-5, 0, 1, 2]))
self.assert_numpy_array_equal(index, expected)
# scalar raise Exception
self.assertRaises(TypeError, Int64Index, 5)
# copy
arr = self.index.values
new_index = Int64Index(arr, copy=True)
self.assert_numpy_array_equal(new_index, self.index)
val = arr[0] + 3000
# this should not change index
arr[0] = val
self.assertNotEqual(new_index[0], val)
def test_constructor_corner(self):
arr = np.array([1, 2, 3, 4], dtype=object)
index = Int64Index(arr)
self.assertEqual(index.values.dtype, np.int64)
self.assertTrue(index.equals(arr))
# preventing casting
arr = np.array([1, '2', 3, '4'], dtype=object)
with tm.assertRaisesRegexp(TypeError, 'casting'):
Int64Index(arr)
arr_with_floats = [0, 2, 3, 4, 5, 1.25, 3, -1]
with tm.assertRaisesRegexp(TypeError, 'casting'):
Int64Index(arr_with_floats)
def test_hash_error(self):
with tm.assertRaisesRegexp(TypeError,
"unhashable type: %r" %
type(self.index).__name__):
hash(self.index)
def test_copy(self):
i = Int64Index([], name='Foo')
i_copy = i.copy()
self.assertEqual(i_copy.name, 'Foo')
def test_view(self):
i = Int64Index([], name='Foo')
i_view = i.view()
self.assertEqual(i_view.name, 'Foo')
def test_coerce_list(self):
# coerce things
arr = Index([1, 2, 3, 4])
tm.assert_isinstance(arr, Int64Index)
# but not if explicit dtype passed
arr = Index([1, 2, 3, 4], dtype=object)
tm.assert_isinstance(arr, Index)
def test_dtype(self):
self.assertEqual(self.index.dtype, np.int64)
def test_is_monotonic(self):
self.assertTrue(self.index.is_monotonic)
self.assertTrue(self.index.is_monotonic_increasing)
self.assertFalse(self.index.is_monotonic_decreasing)
index = Int64Index([4, 3, 2, 1])
self.assertFalse(index.is_monotonic)
self.assertTrue(index.is_monotonic_decreasing)
index = Int64Index([1])
self.assertTrue(index.is_monotonic)
self.assertTrue(index.is_monotonic_increasing)
self.assertTrue(index.is_monotonic_decreasing)
def test_is_monotonic_na(self):
examples = [Index([np.nan]),
                    Index([np.nan, 1])  # api: pandas.core.index.Index
import pandas as pd
import numpy as np
import matplotlib.pyplot as pl
import os
from scipy import stats
from tqdm import tqdm
import mdtraj as md
########################################################
def get_3drobot_native(data_flag):
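    # Collect the native-structure energy (first row of each per-decoy loss CSV, see the
    # native.pdb assertion below) for every target in the 3DRobot set and print the array
    # together with its mean / min / max / standard deviation.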
root_dir = '/home/hyang/bio/erf/data/decoys/3DRobot_set'
pdb_list = pd.read_csv(f'{root_dir}/pdb_no_missing_residue.csv')['pdb'].values
energy_native = []
for pdb_id in pdb_list:
df = pd.read_csv(f'{root_dir}/decoy_loss_{data_flag}/{pdb_id}_decoy_loss.csv')
energy_native.append(df['loss'].values[0])
energy_native = np.array(energy_native)
print(energy_native, np.mean(energy_native), np.min(energy_native), np.max(energy_native), np.std(energy_native))
def plot_3drobot(data_flag):
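    # For each target: plot decoy energy vs. RMSD with the native structure highlighted,
    # count how often the native has the lowest energy, record its rank among all decoys,
    # and histogram those ranks across targets at the end.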
root_dir = '/home/hyang/bio/erf/data/decoys/3DRobot_set'
# pdb_list = pd.read_csv('pdb_local_rot.txt')['pdb'].values
# pdb_list = pd.read_csv('pdb_profile_diff.txt')['pdb'].values
# pdb_list = pd.read_csv(f'{root_dir}/pdb_profile_diff_match.txt')['pdb'].values
pdb_list = pd.read_csv(f'{root_dir}/pdb_no_missing_residue.csv')['pdb'].values
# data_flag = 'exp005_v2'
# data_flag = 'exp5'
# data_flag = 'exp6'
# data_flag = 'exp12'
# data_flag = 'exp14'
# data_flag = 'exp17'
# data_flag = 'exp21'
# data_flag = 'exp24'
# data_flag = 'exp29'
# data_flag = 'exp33'
# data_flag = 'exp35'
# data_flag = 'exp50'
# data_flag = 'exp50_relax'
# data_flag = 'exp49'
# data_flag = 'exp49_relax'
# data_flag = 'exp54'
# data_flag = 'exp61'
# data_flag = 'rosetta'
# data_flag = 'rosetta_relax'
# data_flag = 'rosetta_cen'
# if not os.path.exists(f'{root_dir}/fig_3drobot_{data_flag}'):
# os.system(f'mkdir -p {root_dir}/fig_3drobot_{data_flag}')
correct = 0
rank = []
for pdb_id in pdb_list:
df = pd.read_csv(f'{root_dir}/decoy_loss_{data_flag}/{pdb_id}_decoy_loss.csv')
decoy_name = df['NAME'].values
assert(decoy_name[0] == 'native.pdb')
ind = (df['loss'] != 999)
loss = df['loss'][ind].values
rmsd = df['RMSD'][ind].values
if np.argmin(loss) == 0:
correct += 1
num = np.arange(loss.shape[0]) + 1
rank_i = num[np.argsort(loss) == 0][0]
rank.append(rank_i)
if rank_i > 1:
print(pdb_id, rmsd[np.argmin(loss)])
fig = pl.figure()
pl.plot(rmsd, loss, 'bo')
pl.plot([rmsd[0]], [loss[0]], 'rs', markersize=12)
pl.title(f'{pdb_id}')
pl.xlabel('RMSD')
pl.ylabel('energy score')
# pl.savefig(f'{root_dir}/fig_3drobot_{data_flag}/{pdb_id}_score.pdf')
pl.savefig(f'{root_dir}/decoy_loss_{data_flag}/{pdb_id}_score.pdf')
pl.close(fig)
rank = np.array(rank)
print(rank)
fig = pl.figure()
pl.hist(rank, bins=np.arange(21)+0.5)
pl.savefig(f'{root_dir}/decoy_loss_{data_flag}/rank.pdf')
pl.close(fig)
########################################################
def plot_casp11_loss():
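    # CASP11 version of the ranking plot: mark the Zhang-Server, QUARK and native models,
    # track the rank of the native structure, and compare the best decoy TM-score between
    # well-ranked and poorly-ranked targets.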
# pdb_list = pd.read_csv('pdb_list_new.txt')['pdb'].values
pdb_list = pd.read_csv('pdb_no_need_copy_native.txt')['pdb'].values
flist = pd.read_csv('list_casp11.txt')['fname'].values
casp_dict = {x.split('#')[1][:5]: x.split('_')[0] for x in flist}
df_tm = pd.read_csv('casp11_decoy.csv')
tm_score_dict = {x: y for x, y in zip(df_tm['Target'], df_tm['Decoys'])}
# data_flag = 'exp3_v2'
# data_flag = 'exp5'
# data_flag = 'exp7'
# data_flag = 'exp13'
# data_flag = 'exp15'
# data_flag = 'exp21'
# data_flag = 'exp24'
# data_flag = 'exp29'
# data_flag = 'exp33'
# data_flag = 'exp35'
data_flag = 'exp61'
if not os.path.exists(f'fig_casp11_{data_flag}'):
os.system(f'mkdir fig_casp11_{data_flag}')
correct = 0
rank = []
tm_score = []
for pdb_id in pdb_list:
data_path = f'data_casp11_{data_flag}/{pdb_id}_decoy_loss.csv'
if not os.path.exists(data_path):
continue
df = pd.read_csv(data_path)
decoy_name = df['NAME'].values
# ind = (df['loss'] != 999)
# loss = df['loss'][ind].values
tm_score.append(tm_score_dict[pdb_id])
loss = df['loss'].values
num = np.arange(loss.shape[0])
i = (decoy_name == f'{pdb_id}.native.pdb')
if num[i] == np.argmin(loss):
# print(num.shape[0] - num[i])
correct += 1
rank.append(num[np.argsort(loss) == num[i]][0] + 1)
fig = pl.figure()
pl.plot(num, loss, 'bo')
i = (decoy_name == f'{pdb_id}.Zhang-Server_model1.pdb')
pl.plot([num[i]], [loss[i]], 'g^', markersize=12, label='zhang')
i = (decoy_name == f'{pdb_id}.QUARK_model1.pdb')
pl.plot([num[i]], [loss[i]], 'c*', markersize=12, label='quark')
i = (decoy_name == f'{pdb_id}.native.pdb')
pl.plot([num[i]], [loss[i]], 'rs', markersize=12, label='native')
pdb_id = casp_dict[pdb_id]
pl.title(f'{pdb_id}')
pl.xlabel('num')
pl.ylabel('energy score')
pl.savefig(f'fig_casp11_{data_flag}/{pdb_id}_score.pdf')
pl.close(fig)
rank = np.array(rank)
tm_score = np.array(tm_score)
pl.figure()
pl.hist(rank, bins=np.arange(21)+0.5)
# pl.figure()
# pl.plot(tm_score, rank, 'bo')
a = (rank <= 5)
b = (rank > 5)
pl.figure()
    pl.hist(tm_score[a], bins=np.arange(9)*0.1+0.2, label='rank<=5', histtype='stepfilled')
    pl.hist(tm_score[b], bins=np.arange(9)*0.1+0.2, label='rank>5', histtype='step')
pl.xlabel('Best TM-score in decoys')
pl.ylabel('Num')
pl.legend(loc=2)
########################################################
def plot_casp11(data_flag):
# plot RMSD vs. loss for CASP11
root_dir = '/home/hyang/bio/erf/data/decoys/casp11'
pdb_list = pd.read_csv(f'{root_dir}/casp11_rmsd/casp11_rmsd.txt')['pdb']
flist = pd.read_csv(f'{root_dir}/list_casp11.txt')['fname'].values
casp_dict = {x.split('#')[1][:5]: x.split('_')[0] for x in flist}
# data_flag = 'exp3_v2'
# data_flag = 'exp5'
# data_flag = 'exp7'
# data_flag = 'exp13'
# data_flag = 'exp21'
# data_flag = 'exp24'
# data_flag = 'exp29'
# data_flag = 'exp33'
# data_flag = 'exp35'
# data_flag = 'exp61'
for pdb_id in pdb_list:
data_path = f'{root_dir}/decoy_loss_{data_flag}/{pdb_id}_decoy_loss.csv'
if not os.path.exists(data_path):
continue
df = pd.read_csv(data_path)
decoy_name = df['NAME'].values
# ind = (df['loss'] != 999)
# loss = df['loss'][ind].values
loss = df['loss'].values
df2 = pd.read_csv(f'{root_dir}/casp11_rmsd/{pdb_id}_rmsd.csv')
rmsd = df2['rmsd'].values
assert(rmsd.shape[0] == loss.shape[0])
fig = pl.figure()
pl.plot(rmsd, loss, 'bo')
i = (decoy_name == f'{pdb_id}.Zhang-Server_model1.pdb')
pl.plot([rmsd[i]], [loss[i]], 'g^', markersize=12, label='zhang')
i = (decoy_name == f'{pdb_id}.QUARK_model1.pdb')
pl.plot([rmsd[i]], [loss[i]], 'c*', markersize=12, label='quark')
i = (decoy_name == f'{pdb_id}.native.pdb')
pl.plot([rmsd[i]], [loss[i]], 'rs', markersize=12, label='native')
pdb_id = casp_dict[pdb_id]
pl.title(f'{pdb_id}')
a = max(12, rmsd.max())
pl.xlim(-1, a)
pl.xlabel('RMSD')
pl.ylabel('energy score')
pl.savefig(f'{root_dir}/decoy_loss_{data_flag}/rmsd_{pdb_id}_score.pdf')
pl.close(fig)
########################################################
def prepare_casp13():
# prepare casp13 decoys
df = pd.read_csv('flist.txt')
pdb_count = df['pdb'].value_counts()
pdb_list = []
for pdb, count in zip(pdb_count.index, pdb_count.values):
if count > 1:
pdb_list.append(pdb)
else:
pdb_list.append(pdb + '-D1')
pdb_list = np.array(pdb_list)
pdb_list.sort()
df2 = pd.DataFrame({'pdb': pdb_list})
df2.to_csv('pdb_list.txt', index=False)
def plot_casp13(data_flag, casp_id='casp13', casp_score_type='GDT_TS'):
# plot results of casp13 / casp14 decoys
root_dir = f'/home/hyang/bio/erf/data/decoys/{casp_id}'
if casp_id == 'casp13':
pdb_list = pd.read_csv(f'{root_dir}/pdb_list_domain.txt')['pdb'].values
pdb_ids = [x.split('-')[0] for x in pdb_list]
else:
pdb_list = pd.read_csv(f'{root_dir}/pdb_list.txt')['pdb'].values
pdb_ids = pdb_list
# data_flag = 'exp61'
# if not os.path.exists(f'fig_casp13_{data_flag}'):
# os.system(f'mkdir fig_casp13_{data_flag}')
pearsonr_list = []
pearsonp_list = []
used_pdb_list = []
casp_score_max = []
casp_score_min = []
rank_1 = 0
for pdb_id, pdb_casp_name in zip(pdb_ids, pdb_list):
data_path = f'{root_dir}/decoy_loss_{data_flag}/{pdb_id}_decoy_loss.csv'
if not os.path.exists(data_path):
continue
df = pd.read_csv(data_path)
decoy_name = df['pdb'].values
# ind = (df['loss'] != 999)
# loss = df['loss'][ind].values
loss = df['loss'].values
if not os.path.exists(f'{root_dir}/casp_score/{pdb_casp_name}.txt'):
continue
        df2 = pd.read_csv(f'{root_dir}/casp_score/{pdb_casp_name}.txt', sep=r'\s+')
casp_model = df2['Model']
if (casp_id == 'casp13') & (pdb_casp_name.endswith('-D1')):
casp_model = df2['Model'].apply(lambda x: x[:-3])
if casp_score_type == 'GDT_TS':
casp_score_data = df2['GDT_TS'].values
elif casp_score_type == 'RMSD_CA':
casp_score_data = df2['RMS_CA'].values
else:
raise ValueError('casp score type should be GDT_TS / RMSD_CA')
casp_dict = {x: y for x, y in zip(casp_model, casp_score_data)}
casp_score = []
for x in decoy_name:
try:
casp_score.append(casp_dict[x])
except KeyError:
casp_score.append(-1)
casp_score = np.array(casp_score)
idx = (casp_score > 0) & (loss > 0)
casp_score_good = casp_score[idx]
loss_good = loss[idx]
decoy_name_good = decoy_name[idx]
# if np.argmax(casp_score_good) == np.argmin(loss_good):
# rank_1 += 1
top5_idx = np.argpartition(loss_good, 5)[:5]
best_gdt_idx = np.argmax(casp_score_good)
if best_gdt_idx in top5_idx:
print(best_gdt_idx, top5_idx)
rank_1 += 1
print(pdb_casp_name, decoy_name_good[best_gdt_idx], decoy_name_good[top5_idx])
pearsonr = stats.pearsonr(casp_score_good, loss_good)
pearsonr_list.append(pearsonr[0])
pearsonp_list.append(pearsonr[1])
used_pdb_list.append(pdb_id)
casp_score_max.append(casp_score[idx].max())
casp_score_min.append(casp_score[idx].min())
df_i = pd.DataFrame({'pdb': decoy_name_good, casp_score_type: casp_score_good, 'energy': loss_good})
df_i.to_csv(f'{root_dir}/decoy_loss_{data_flag}/{pdb_id}_casp_score_{casp_score_type}_energy.csv', index=False)
fig = pl.figure()
# pl.plot(100.0, loss[0], 'rs')
pl.plot(casp_score[idx], loss[idx], 'bo')
pl.title(f'{pdb_id}')
# a = max(12, rmsd.max())
# pl.xlim(-1, a)
pl.xlabel(f'CASP {casp_score_type}')
pl.ylabel('energy score')
pl.savefig(f'{root_dir}/decoy_loss_{data_flag}/{pdb_id}_{casp_score_type}.pdf')
pl.close(fig)
fig = pl.figure()
# pl.plot(100.0, loss[0], 'rs')
pl.plot(casp_score_good, loss_good, 'bo')
for i in range(loss_good.shape[0]):
pl.text(casp_score_good[i], loss_good[i], decoy_name_good[i].split('S')[1][:-3], fontsize=6)
pl.title(f'{pdb_id}')
y_min = loss_good.min()
y_max = loss_good.max()
pl.ylim(y_min - (y_max - y_min) * 0.01, y_min + (y_max - y_min) * 0.15)
# a = max(12, rmsd.max())
pl.xlim(0, 100)
pl.xlabel(f'CASP {casp_score_type}')
pl.ylabel('energy score')
pl.savefig(f'{root_dir}/decoy_loss_{data_flag}/{pdb_id}_{casp_score_type}_zoom.pdf')
pl.close(fig)
print(f'rank_1 = {rank_1}')
df = pd.DataFrame({'pdb': used_pdb_list, 'pearsonr': pearsonr_list, 'pearsonp': pearsonp_list,
'casp_score_max': casp_score_max, 'casp_score_min': casp_score_min})
df.to_csv(f'{root_dir}/decoy_loss_{data_flag}/pearsonr_{casp_score_type}.txt', index=False)
fig = pl.figure()
if casp_score_type == 'GDT_TS':
pearsonr_bins = np.arange(11)*0.1-1
elif casp_score_type == 'RMSD_CA':
pearsonr_bins = np.arange(11)*0.1
else:
raise ValueError('casp score type should be gdt_ts / rmsd_ca')
pl.hist(df['pearsonr'], bins=pearsonr_bins)
pl.xlabel(r'Pearson $\rho$')
pl.ylabel('N')
pl.savefig(f'{root_dir}/decoy_loss_{data_flag}/pearsonr_{casp_score_type}.pdf')
pl.close(fig)
# casp_score_max = df['casp_score_max'].values
# fig = pl.figure()
# idx = (casp_score_max >= 50)
# pl.hist(df['pearsonr'][idx], bins=np.arange(11)*0.1-1)
# pl.xlabel(r'Pearson $\rho$')
# pl.ylabel('N')
# pl.savefig(f'{root_dir}/decoy_loss_{data_flag}/pearsonr_1.pdf')
# pl.close(fig)
# fig = pl.figure()
# idx = (casp_score_max < 50)
# pl.xlabel(r'Pearson $\rho$')
# pl.ylabel('N')
# pl.hist(df['pearsonr'][idx], bins=np.arange(11)*0.1-1)
# pl.savefig(f'{root_dir}/decoy_loss_{data_flag}/pearsonr_2.pdf')
# pl.close(fig)
########################################################
def plot_ru(decoy_set, decoy_loss_dir):
# decoy_set = '4state_reduced'
# decoy_set = 'lattice_ssfit'
# decoy_set = 'lmds'
# decoy_set = 'lmds_v2'
root_dir = f'/home/hyang/bio/erf/data/decoys/rudecoy/multiple/{decoy_set}'
# decoy_loss_dir = 'exp61'
if not os.path.exists(f'{root_dir}/{decoy_loss_dir}'):
os.system(f'mkdir -p {root_dir}/{decoy_loss_dir}')
pdb_id_list = pd.read_csv(f'{root_dir}/list', header=None, names=['pdb'])['pdb'].values
for pdb_id in pdb_id_list:
df = pd.read_csv(f'{root_dir}/{decoy_loss_dir}/{pdb_id}_decoy_loss.csv')
pdb_list = df['pdb'].values
loss = df['loss'].values
rmsd = df['score'].values
native_name = f'{pdb_id}.pdb'
i_native = np.arange(pdb_list.shape[0])[(pdb_list == native_name)]
i = np.argmin(loss)
print(i_native, i, pdb_list[i])
fig = pl.figure()
pl.plot(rmsd, loss, 'bo')
pl.plot([rmsd[i_native]], [loss[i_native]], 'rs', markersize=12)
pl.title(f'{pdb_id}')
pl.xlabel('RMSD')
pl.ylabel('energy score')
pl.savefig(f'{root_dir}/{decoy_loss_dir}/{pdb_id}_score.pdf')
pl.close(fig)
########################################################
def plot_md_trj(decoy_loss_dir):
# plot the MD trajectory data
root_dir = f'/home/hyang/bio/openmm/data'
if not os.path.exists(f'{root_dir}/{decoy_loss_dir}'):
os.system(f'mkdir -p {root_dir}/{decoy_loss_dir}')
pdb_id_list = pd.read_csv(f'{root_dir}/list', header=None, names=['pdb'])['pdb'].values
for pdb_id in pdb_id_list:
df = pd.read_csv(f'{root_dir}/{decoy_loss_dir}/{pdb_id}_decoy_loss.csv')
loss = df['loss'].values
rmsd = df['rmsd'].values
pdb = df['pdb'].values
# plot RMSD vs. Energy
fig = pl.figure()
idx = np.zeros(pdb.shape)
for i in range(pdb.shape[0]):
if pdb[i].startswith('T300'):
idx[i] = 1
elif pdb[i].startswith('T500'):
idx[i] = 2
pl.plot([rmsd[0]], [loss[0]], 'gs', markersize=12)
pl.plot([rmsd[1]], [loss[1]], 'g^', markersize=12)
pl.plot(rmsd[idx == 1], loss[idx == 1], 'g.', label='md_T300')
pl.plot(rmsd[idx == 2], loss[idx == 2], 'c.', label='md_T500')
pl.title(f'{pdb_id}')
pl.xlabel('RMSD')
pl.ylabel('energy score')
pl.savefig(f'{root_dir}/{decoy_loss_dir}/{pdb_id}_score.pdf')
pl.close(fig)
# plot RMSD vs. time & Energy vs. time
fig = pl.figure()
idx = np.zeros(pdb.shape)
for i in range(pdb.shape[0]):
if pdb[i].startswith('T300'):
idx[i] = 1
elif pdb[i].startswith('T500'):
idx[i] = 2
pl.subplot(211)
pl.plot(rmsd[idx == 1], 'g', label='md_T300')
pl.plot(rmsd[idx == 2], 'c', label='md_T500')
pl.ylabel('RMSD')
pl.legend()
pl.title(f'{pdb_id}')
pl.subplot(212)
pl.plot(loss[idx == 1], 'g')
pl.plot(loss[idx == 2], 'c')
pl.ylabel('energy score')
pl.xlabel('time-steps')
pl.savefig(f'{root_dir}/{decoy_loss_dir}/{pdb_id}_rmsd_energy_time.pdf')
pl.close(fig)
def plot_md_trj2():
# plot the MD trajectory data
root_dir = '/home/hyang/bio/erf/data/decoys/md/cullpdb_val_deep/'
pdb_id_list = pd.read_csv(f'{root_dir}/list', header=None, names=['pdb'])['pdb'].values
# pdb_id_list = ['3KXT']
for pdb_id in pdb_id_list:
df1 = pd.read_csv(f'{root_dir}/{pdb_id}_T300_energy_rmsd.csv')
loss1 = df1['energy'].values
rmsd1 = df1['rmsd'].values
df2 = pd.read_csv(f'{root_dir}/{pdb_id}_T500_energy_rmsd.csv')
loss2 = df2['energy'].values
rmsd2 = df2['rmsd'].values
# plot RMSD vs. Energy
fig = pl.figure()
pl.plot([rmsd1[0]], [loss1[0]], 'gs', markersize=12)
pl.plot(rmsd1, loss1, 'g.', label='T300')
pl.plot(rmsd2, loss2, 'c.', label='T500')
pl.title(f'{pdb_id}')
pl.xlabel('RMSD')
pl.ylabel('energy score')
pl.savefig(f'{root_dir}/{pdb_id}_score.pdf')
pl.close(fig)
# plot RMSD vs. time & Energy vs. time
fig = pl.figure()
pl.subplot(211)
pl.plot(rmsd1, 'g', label='md_T300')
pl.plot(rmsd2, 'c', label='md_T500')
pl.ylabel('RMSD')
pl.legend()
pl.title(f'{pdb_id}')
pl.subplot(212)
pl.plot(loss1, 'g')
pl.plot(loss2, 'c')
pl.ylabel('energy score')
pl.xlabel('time-steps')
pl.savefig(f'{root_dir}/{pdb_id}_rmsd_energy_time.pdf')
pl.close(fig)
def plot_md_trj3():
# plot the MD trajectory data
root_dir = '/home/hyang/bio/erf/data/decoys/md/BPTI'
df = pd.read_csv(f'{root_dir}/BPTI_energy_rmsd.csv')
loss1 = df['energy'].values
rmsd1 = df['rmsd'].values
# plot RMSD vs. Energy
fig = pl.figure()
pl.plot(rmsd1, loss1, 'g.', markersize=0.01)
pl.title('BPTI')
pl.xlabel('RMSD')
pl.ylabel('energy score')
pl.savefig(f'{root_dir}/BPTI_score.jpg')
pl.close(fig)
# plot RMSD vs. time & Energy vs. time
fig = pl.figure()
pl.subplot(211)
pl.plot(rmsd1, 'b.', markersize=0.01)
pl.ylabel('RMSD')
pl.title('BPTI')
pl.subplot(212)
pl.plot(loss1, 'g.', markersize=0.01)
pl.ylabel('energy score')
pl.xlabel('time-steps')
pl.savefig(f'{root_dir}/BPTI_rmsd_energy_time.jpg')
pl.close(fig)
def plot_bd_trj():
# plot the mixed Langevin dynamics trajectory data
root_dir = '/home/hyang/bio/erf/data/fold/exp205dynamics_val_deep501/'
pdb_selected = pd.read_csv(f'/home/hyang/bio/erf/data/fold/cullpdb_val_deep/sample.csv')['pdb'].values
pdb_selected = np.append(np.array(['1BPI_A']), pdb_selected)
for pdb_id in pdb_selected:
df1 = pd.read_csv(f'{root_dir}/{pdb_id}_energy.csv')
loss1 = df1['sample_energy'].values
rmsd1 = df1['sample_rmsd'].values
# plot RMSD vs. Energy
fig = pl.figure()
pl.plot([rmsd1[0]], [loss1[0]], 'gs', markersize=12)
pl.plot(rmsd1, loss1, 'go')
pl.title(f'{pdb_id}')
pl.xlabel('RMSD')
pl.ylabel('energy score')
pl.savefig(f'{root_dir}/{pdb_id}_score.pdf')
pl.close(fig)
# plot RMSD vs. time & Energy vs. time
fig = pl.figure()
pl.subplot(211)
pl.plot(rmsd1, 'go')
pl.ylabel('RMSD')
pl.title(f'{pdb_id}')
pl.subplot(212)
pl.plot(loss1, 'bs')
pl.ylabel('energy score')
pl.xlabel('time-steps')
pl.savefig(f'{root_dir}/{pdb_id}_rmsd_energy_time.pdf')
pl.close(fig)
def plot_openmm2():
root_dir = f'/home/hyang/bio/openmm/data'
decoy_loss_dir = 'exp63_65'
if not os.path.exists(f'{root_dir}/{decoy_loss_dir}'):
os.system(f'mkdir -p {root_dir}/{decoy_loss_dir}')
pdb_id_list = pd.read_csv(f'{root_dir}/list', header=None, names=['pdb'])['pdb'].values
for pdb_id in pdb_id_list:
fig = pl.figure()
        df = pd.read_csv(f'{root_dir}/exp61/{pdb_id}_decoy_loss.csv')  # api: pandas.read_csv
import urllib
import pytest
import pandas as pd
from pandas import testing as pdt
from anonympy import __version__
from anonympy.pandas import dfAnonymizer
from anonympy.pandas.utils_pandas import load_dataset
@pytest.fixture(scope="module")
def anonym_small():
df = load_dataset('small')
anonym = dfAnonymizer(df)
return anonym
@pytest.fixture(scope="module")
def anonym_big():
try:
df = load_dataset('big')
anonym = dfAnonymizer(df)
except urllib.error.HTTPError:
anonym = None
return anonym
def test_anonym_obj(anonym_small, anonym_big):
assert isinstance(anonym_small, dfAnonymizer), "should have\
returned `dfAnonymizer` object"
if anonym_big is None:
assert False, "Failed to fetch the DataFrame"
assert isinstance(anonym_big, dfAnonymizer), "should have returned\
`dfAnonymizer` object"
def test_numeric_noise(anonym_small):
output = anonym_small.numeric_noise('age', seed=42, inplace=False)
expected = pd.Series([38, 47], dtype='int64')
pdt.assert_series_equal(expected, output, check_names=False)
output = anonym_small.numeric_noise(['age', 'salary'],
seed=42,
inplace=False)
expected = pd.DataFrame({'age': [38, 47],
'salary': [59239.79912097112, 49323.30756879504]})
pdt.assert_frame_equal(expected, output)
def test_numeric_binning(anonym_small):
output = anonym_small.numeric_binning('salary', bins=2, inplace=False)
dtype = pd.CategoricalDtype([
pd.Interval(49315.0, 54279.0, closed='right'),
pd.Interval(54279.0, 59234.0, closed='right')],
ordered=True)
expected = pd.Series([
pd.Interval(54279.0, 59234.0, closed='right'),
pd.Interval(49315.0, 54279.0, closed='right')],
dtype=dtype)
pdt.assert_series_equal(expected, output, check_names=False)
output = anonym_small.numeric_binning(['age', 'salary'],
bins=2,
inplace=False)
dtype2 = pd.CategoricalDtype([
pd.Interval(33.0, 40.0, closed='right'),
pd.Interval(40.0, 48.0, closed='right')],
ordered=True)
ser2 = pd.Series([
pd.Interval(33.0, 40.0, closed='right'),
pd.Interval(40.0, 48.0, closed='right')],
dtype=dtype2)
expected = pd.DataFrame({'age': ser2, 'salary': expected})
    pdt.assert_frame_equal(expected, output)  # api: pandas.testing.assert_frame_equal
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pytest
import mock
import pyarrow
import pandas as pd
from IPython.display import Image
from . import get_fixture_path
from ..encoders import (
registry as full_registry,
DataEncoderRegistry,
JsonEncoder,
TextEncoder,
PandasArrowDataframeEncoder,
)
from ..exceptions import (
ScrapbookDataException,
ScrapbookInvalidEncoder,
ScrapbookMissingEncoder,
)
from ..scraps import Scrap
try:
from json.decoder import JSONDecodeError
except ImportError:
JSONDecodeError = ValueError
@pytest.mark.parametrize(
"test_input,expected",
[
(
Scrap(name="foo", data={"foo": "bar", "baz": 1}, encoder="json"),
Scrap(name="foo", data={"foo": "bar", "baz": 1}, encoder="json"),
),
(
Scrap(name="foo", data='{"foo":"bar","baz":1}', encoder="json"),
Scrap(name="foo", data={"foo": "bar", "baz": 1}, encoder="json"),
),
(
Scrap(name="foo", data=["foo", "bar", 1, 2, 3], encoder="json"),
Scrap(name="foo", data=["foo", "bar", 1, 2, 3], encoder="json"),
),
(
Scrap(name="foo", data='["foo","bar",1,2,3]', encoder="json"),
Scrap(name="foo", data=["foo", "bar", 1, 2, 3], encoder="json"),
),
(
Scrap(name="foo", data=u'["😍"]', encoder="json"),
Scrap(name="foo", data=["😍"], encoder="json"),
),
],
)
def test_json_decode(test_input, expected):
assert JsonEncoder().decode(test_input) == expected
@pytest.mark.parametrize(
"test_input",
[
Scrap(name="foo", data="", encoder="json"),
Scrap(name="foo", data='{"inavlid","json"}', encoder="json"),
Scrap(name="foo", data="😍", encoder="json"),
],
)
def test_json_decode_failures(test_input):
# If it can't decode, leaves the string as expected
assert JsonEncoder().decode(test_input) == test_input
@pytest.mark.parametrize(
"test_input,expected",
[
(
Scrap(name="foo", data={"foo": "bar", "baz": 1}, encoder="json"),
Scrap(name="foo", data={"foo": "bar", "baz": 1}, encoder="json"),
),
(
Scrap(name="foo", data='{"foo":"bar","baz":1}', encoder="json"),
Scrap(name="foo", data={"foo": "bar", "baz": 1}, encoder="json"),
),
(
Scrap(name="foo", data=["foo", "bar", 1, 2, 3], encoder="json"),
Scrap(name="foo", data=["foo", "bar", 1, 2, 3], encoder="json"),
),
(
Scrap(name="foo", data='["foo","bar",1,2,3]', encoder="json"),
Scrap(name="foo", data=["foo", "bar", 1, 2, 3], encoder="json"),
),
(
Scrap(name="foo", data=u'["😍"]', encoder="json"),
Scrap(name="foo", data=["😍"], encoder="json"),
),
],
)
def test_json_encode(test_input, expected):
assert JsonEncoder().encode(test_input) == expected
@pytest.mark.parametrize(
"test_input",
[
Scrap(name="foo", data="", encoder="json"),
Scrap(name="foo", data='{"inavlid","json"}', encoder="json"),
Scrap(name="foo", data="😍", encoder="json"),
],
)
def test_json_encode_failures(test_input):
with pytest.raises(JSONDecodeError):
JsonEncoder().encode(test_input)
class Dummy(object):
def __str__(self):
return "foo"
@pytest.mark.parametrize(
"test_input,expected",
[
(
Scrap(name="foo", data={"foo": "bar", "baz": 1}, encoder="text"),
Scrap(name="foo", data=str({"foo": "bar", "baz": 1}), encoder="text"),
),
(
Scrap(name="foo", data='{"foo":"bar","baz":1}', encoder="text"),
Scrap(name="foo", data='{"foo":"bar","baz":1}', encoder="text"),
),
(
Scrap(name="foo", data=["foo", "bar", 1, 2, 3], encoder="text"),
Scrap(name="foo", data="['foo', 'bar', 1, 2, 3]", encoder="text"),
),
(
Scrap(name="foo", data='["foo","bar",1,2,3]', encoder="text"),
Scrap(name="foo", data='["foo","bar",1,2,3]', encoder="text"),
),
(
Scrap(name="foo", data=Dummy(), encoder="text"),
Scrap(name="foo", data="foo", encoder="text"),
),
(Scrap(name="foo", data="😍", encoder="text"), Scrap(name="foo", data="😍", encoder="text")),
],
)
def test_text_decode(test_input, expected):
assert TextEncoder().decode(test_input) == expected
@pytest.mark.parametrize(
"test_input,expected",
[
(
Scrap(name="foo", data={"foo": "bar", "baz": 1}, encoder="text"),
Scrap(name="foo", data=str({"foo": "bar", "baz": 1}), encoder="text"),
),
(
Scrap(name="foo", data='{"foo":"bar","baz":1}', encoder="text"),
Scrap(name="foo", data='{"foo":"bar","baz":1}', encoder="text"),
),
(
Scrap(name="foo", data=["foo", "bar", 1, 2, 3], encoder="text"),
Scrap(name="foo", data="['foo', 'bar', 1, 2, 3]", encoder="text"),
),
(
Scrap(name="foo", data='["foo","bar",1,2,3]', encoder="text"),
Scrap(name="foo", data='["foo","bar",1,2,3]', encoder="text"),
),
(
Scrap(name="foo", data=Dummy(), encoder="text"),
Scrap(name="foo", data="foo", encoder="text"),
),
(Scrap(name="foo", data="😍", encoder="text"), Scrap(name="foo", data="😍", encoder="text")),
],
)
def test_text_encode(test_input, expected):
assert TextEncoder().encode(test_input) == expected
@pytest.mark.parametrize(
"test_input",
[
(
Scrap(
name="foo",
data=pd.DataFrame(
data={
"foo": pd.Series(["bar"], dtype='str'),
"baz": pd.Series([1], dtype='int'),
}
),
encoder="pandas",
)
),
(
Scrap(
name="foo",
data=pd.DataFrame(
data={
"foo": pd.Series(["😍", "emoji"], dtype='str'),
"baz": pd.Series(["no", "unicode"], dtype='str'),
}
),
encoder="pandas",
)
),
# Nested lists of lists of strings are ok
(
Scrap(
name="foo",
data=pd.DataFrame(data={"foo": pd.Series([[["foo", "bar"]]], dtype='object')}),
encoder="pandas",
)
),
# String objects are ok
(
Scrap(
name="foo",
data=pd.DataFrame(data={"foo": pd.Series(["bar"], dtype='object')}),
encoder="pandas",
)
),
],
)
def test_pandas_encode_and_decode(test_input):
scrap = PandasArrowDataframeEncoder().encode(test_input)
scrap_back = PandasArrowDataframeEncoder().decode(scrap)
pd.testing.assert_frame_equal(scrap_back.data, test_input.data)
assert scrap.name == test_input.name
assert scrap_back.name == test_input.name
assert scrap.encoder == test_input.encoder
assert scrap_back.encoder == test_input.encoder
@pytest.mark.parametrize(
"test_input,exception_type",
[
# Dicts can't convert
(
Scrap(
name="foo",
data=pd.DataFrame(data={"foo": pd.Series([{"foo": "bar"}], dtype='object')}),
encoder="pandas",
),
pyarrow.lib.ArrowNotImplementedError,
),
# Sets can't convert
(
Scrap(
name="foo",
data=pd.DataFrame(data={"foo": | pd.Series([{"foo", "bar"}], dtype='object') | pandas.Series |
# Package imports
import pandas as pd
import requests
import datetime
from unidecode import unidecode as UnicodeFormatter
import os
import bcolors
# Local imports
import path_configuration
import url_configuration
import progress_calculator
class GrandPrix(object):
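    # Per-race data extractor: for every Grand Prix of a season it pulls drivers,
    # constructors, pit stops, results, lap-by-lap times/positions, driver standings and
    # race status from the Ergast API, plus weather channels from the F1 site for 2018+,
    # and writes each table to CSV using paths from path_configuration.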
Url = None
Path = None
Requests = None
def __init__(self):
self.Url = url_configuration.Url_builder()
self.Path = path_configuration.Path()
self.Requests = requests
def import_grand_prix(self):
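        # Read the season schedule (currently 2019.csv) and, for every race whose date is
        # already in the past, call the individual CSV extractors below.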
content = os.listdir(self.Path.get_season_path())
content.sort()
"""for year in content:
DataFrame = pd.read_csv(Path.get_season_path()+year)
print(DataFrame)"""
DataFrame = pd.read_csv(self.Path.get_season_path()+'2019.csv')
Date = list(DataFrame['Date'])
GrandPrix = list(DataFrame['Grand Prix'])
Round = list(DataFrame['Round'])
Date_obj = []
# DATE OBJ
for date in Date:
Date_obj.append(datetime.datetime.strptime(date, '%Y-%m-%d'))
Progress = progress_calculator.ProgressBar(Round)
        # WHILE LOOP - OVER THE GPs OF THE YEAR
i = 0
while i < Round.__len__():
            # ONLY PROCESS RACES THAT HAVE ALREADY TAKEN PLACE
if Date_obj[i] < datetime.datetime.now():
# METHOD CALLS
print(bcolors.PASS + 'STARTING EXTRACTOR, GETTING FROM', GrandPrix[i], 'DATE:', Date[i] + bcolors.END)
self.drivers_csv(Round[i], Date_obj[i].year, GrandPrix[i])
self.contructors_csv(Round[i], Date_obj[i].year, GrandPrix[i])
self.pitstops_times_csv(Round[i], Date_obj[i].year, GrandPrix[i])
self.result_csv(Round[i], Date_obj[i].year, GrandPrix[i])
self.by_lap_csv(Round[i], Date_obj[i].year, GrandPrix[i])
self.current_driver_standings(Round[i], Date_obj[i].year, GrandPrix[i])
self.status(Round[i], Date_obj[i].year, GrandPrix[i])
if Date_obj[i].year > 2017:
url = self.Url.f1_url(Date_obj[i].year, Date_obj[i].date(), GrandPrix[i])
self.load_data_from_f1(url, Date_obj[i].year, GrandPrix[i])
Progress.get_progress_bar()
i = i + 1
def drivers_csv(self, round, year, gp_name):
print(bcolors.ITALIC + 'GETTING DRIVERS BY RACE...', gp_name + bcolors.END)
url = self.Url.url_driver(round, year)
page = self.Requests.get(url)
json = page.json()
json = json['MRData']
json = json['DriverTable']
Drivers = json['Drivers']
DriversID = []
DriversInitials = []
DriversName = []
YearsOld = []
for driver in Drivers:
DriversID.append(driver['driverId'])
DriversInitials.append(driver['code'])
DriversName.append(UnicodeFormatter(driver['givenName']+' '+driver['familyName']))
YearsOld.append(
datetime.datetime.now().year - datetime.datetime.strptime(driver['dateOfBirth'], '%Y-%m-%d').year
)
Drivers_Dict = {'Driver ID': DriversID, 'Driver Initials': DriversInitials,
'Driver Name': DriversName, 'Years Old': YearsOld}
Drivers_Data = pd.DataFrame(data=Drivers_Dict)
Path = self.Path.grandprix_path(year, gp_name, 'Drivers')
Drivers_Data.to_csv(Path)
def contructors_csv(self, round, year, gp_name):
print(bcolors.ITALIC + 'GETTING CONSTRUCTORS BY RACE...', gp_name + bcolors.END)
url = self.Url.url_constructor(round, year)
page = self.Requests.get(url)
json = page.json()
json = json['MRData']
json = json['ConstructorTable']
Constructors = json['Constructors']
ConstructorID = []
ConstructorName = []
for constructor in Constructors:
ConstructorID.append(constructor['constructorId'])
ConstructorName.append(constructor['name'])
Constructors_Dict = {"Constructor ID": ConstructorID, "Constructor Name": ConstructorName}
Constructor_Data = pd.DataFrame(data=Constructors_Dict)
Path = self.Path.grandprix_path(year, gp_name, 'Constructors')
Constructor_Data.to_csv(Path)
def pitstops_times_csv(self, round, year, gp_name):
print(bcolors.ITALIC + 'GETTING PITSTOPS BY RACE...', gp_name + bcolors.END)
url = self.Url.url_pitstops_time(round, year)
page = self.Requests.get(url)
json = page.json()
json = json['MRData']
json = json['RaceTable']
Race = json['Races'][0]
PitStops = Race['PitStops']
DriverID = []
Corresponding_Lap = []
Driver_Stop_Number = []
PitStop_Time = []
for pitstop in PitStops:
DriverID.append(pitstop['driverId'])
Corresponding_Lap.append(pitstop['lap'])
Driver_Stop_Number.append(pitstop['stop'])
PitStop_Time.append(pitstop['duration'])
PitStop_Dict = {'Pit Stop Lap': Corresponding_Lap, 'Driver ID': DriverID, 'Pit Stop Number': Driver_Stop_Number,
'Pit Stop Time': PitStop_Time}
PitStop_Data = pd.DataFrame(data=PitStop_Dict)
Path = self.Path.grandprix_path(year, gp_name, 'PitStop')
PitStop_Data.to_csv(Path)
def result_csv(self, round, year, gp_name):
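        # Writes two CSVs per race: the starting grid (InitialPositions) and the final
        # classification (Result) with time to leader, status, fastest-lap rank and average
        # speed; retired drivers get a null position.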
print(bcolors.ITALIC + 'GETTING RESULT BY RACE...', gp_name + bcolors.END)
url = self.Url.url_results(round, year)
page = self.Requests.get(url)
json = page.json()
json = json['MRData']
json = json['RaceTable']
Race = json['Races'][0]
Results = Race['Results']
DriverPosition = []
DriverGridPosition = []
DriverID = []
ConstructorID = []
TimeToLeader = []
RaceStatus = []
FastestLapRank = []
AverageSpeed = []
for result in Results:
# DRIVER POSITION
if result['positionText'] == 'R':
DriverPosition.append(None)
else:
DriverPosition.append(result['positionText'])
# GRID
DriverGridPosition.append(result['grid'])
# DRIVER ID
DriverID.append(result['Driver']['driverId'])
# CONSTRUCTOR ID
ConstructorID.append(result['Constructor']['constructorId'])
# TIME TO LEADER
if result['position'] == '1':
TimeToLeader.append("0")
elif result['status'] != 'Finished':
Check = result['status']
if Check[0] == '+':
TimeToLeader.append(result['status'])
else:
TimeToLeader.append(None)
else:
TimeToLeader.append(result['Time']['time'])
# RACE STATUS
if result['status'][0] == '+':
RaceStatus.append('Finished')
else:
RaceStatus.append(result['status'])
            # CASE: THE DRIVER DROPPED OUT OF THE RACE WITHOUT COMPLETING A LAP
if 'FastestLap' not in result:
# RANK FASTEST LAP
FastestLapRank.append(None)
# AVERAGE SPEED
AverageSpeed.append(None)
else:
# RANK FASTEST LAP
FastestLapRank.append(result['FastestLap']['rank'])
# AVERAGE SPEED
AverageSpeed.append(result['FastestLap']['AverageSpeed']['speed'])
Initial_Ps_Dict = {'Positions': DriverGridPosition, 'DriverID': DriverID}
Initial_Ps_Data = pd.DataFrame(data=Initial_Ps_Dict)
Initial_Ps_Data = Initial_Ps_Data.set_index('Positions')
Path = self.Path.grandprix_path(year, gp_name, 'InitialPositions')
Initial_Ps_Data.to_csv(Path)
Result_Dict = {'Positions': DriverPosition, 'DriverID': DriverID, 'ConstructorID': ConstructorID,
'Time to Leader': TimeToLeader, 'Status': RaceStatus,
'Fastest Rank': FastestLapRank, 'Average Speed': AverageSpeed}
Result_Data = pd.DataFrame(data=Result_Dict)
Result_Data = Result_Data.set_index('Positions')
Path = self.Path.grandprix_path(year, gp_name, 'Result')
Result_Data.to_csv(Path)
def by_lap_csv(self, round, year, gp_name):
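        # Query the Ergast lap-by-lap endpoint one lap at a time until an empty response,
        # building driver-by-lap tables of lap times and positions (None once a driver has
        # dropped out of the race).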
print(bcolors.ITALIC + 'GETTING LAP TIMES AND POSITIONS BY RACE...', gp_name + bcolors.END)
# Progress Calculator
Progress = progress_calculator.ProgressBar(True)
# URL
url_1, url_2 = self.Url.url_lapbylap(round, year)
# LAP COUNTER
Lap_Counter = 1
# LAP VALIDATOR
Lap_v = True
# DRIVER LIST
driver_list = list(pd.read_csv(self.Path.grandprix_path(year, gp_name, 'Drivers'))['Driver ID'].values)
# DRIVERS DICT
Lap_Times_Dict = {}
Lap_Positions_Dict = {}
# START VALUES
Lap_Times_Dict['Driver ID'] = driver_list
Lap_Positions_Dict['Driver ID'] = driver_list
while Lap_v:
# PROGRESS
Progress.get_progress_counter(Lap_Counter)
# DRIVERS LIST
Lap_Times = []
Lap_Positions = []
page = self.Requests.get(url_1 + str(Lap_Counter) + url_2)
json = page.json()
json = json['MRData']
if int(json['total']) == 0:
Lap_v = False
else:
jtemp = json['RaceTable']
jtemp = jtemp['Races'][0]
jtemp = jtemp['Laps'][0]
Laps = jtemp['Timings']
for driver in driver_list:
Driver_Out_Checker = True
for lap in Laps:
if driver == lap['driverId']:
Driver_Out_Checker = False
Lap_Times.append(lap['time'])
Lap_Positions.append(lap['position'])
if Driver_Out_Checker:
Lap_Times.append(None)
Lap_Positions.append(None)
Lap_Times_Dict[Lap_Counter] = Lap_Times
Lap_Positions_Dict[Lap_Counter] = Lap_Positions
Lap_Counter = Lap_Counter + 1
Lap_Times_Data = pd.DataFrame(data=Lap_Times_Dict)
Lap_Times_Data = Lap_Times_Data.set_index('Driver ID')
Path = self.Path.grandprix_path(year, gp_name, 'TimesByLap')
Lap_Times_Data.to_csv(Path)
Lap_Positions_Data = pd.DataFrame(data=Lap_Positions_Dict)
Lap_Positions_Data = Lap_Positions_Data.set_index('Driver ID')
Path = self.Path.grandprix_path(year, gp_name, 'PositionsByLap')
Lap_Positions_Data.to_csv(Path)
def current_driver_standings(self, round, year, gp_name):
print(bcolors.ITALIC + 'GETTING DRIVER STANDINGS FROM ERGAST...', gp_name + bcolors.END)
url = self.Url.url_driver_standings(round, year)
# LOAD JSON
page = requests.get(url)
json = page.json()
json = json['MRData']
json = json['StandingsTable']
json = json['StandingsLists'][0]
DriverStandings = json['DriverStandings']
# STARTING LISTS
DriverPosition = []
DriverPoints = []
DriverWins = []
DriverID = []
ConstructorID = []
for driver in DriverStandings:
DriverPosition.append(driver['position'])
DriverPoints.append(driver['points'])
DriverWins.append(driver['wins'])
DriverID.append(driver['Driver']['driverId'])
ConstructorID.append(driver['Constructors'][-1]['constructorId'])
DriverStandingsDict = {'Position': DriverPosition, 'DriverID': DriverID, 'ConstructorID': ConstructorID,
'Wins': DriverWins, 'Points': DriverPoints}
DriverStandingsData = pd.DataFrame(data=DriverStandingsDict)
DriverStandingsData = DriverStandingsData.set_index('Position')
Path = self.Path.standings_path(year)
DriverStandingsData.to_csv(Path)
def status(self, round, year, gp_name):
print(bcolors.ITALIC + 'GETTING STATUS FROM ERGAST...', gp_name + bcolors.END)
url = self.Url.url_status(round, year)
# LOAD JSON
page = requests.get(url)
json = page.json()
json = json['MRData']
json = json['StatusTable']
Status = json['Status']
# STARTING LISTS
StatusID = []
StatusDescription = []
StatusOccurrences = []
for state in Status:
StatusID.append(state['statusId'])
StatusDescription.append(state['status'])
StatusOccurrences.append(state['count'])
StatusDict = {'StatusID': StatusID, 'Status Description': StatusDescription,
'Status Occurrences': StatusOccurrences}
StatusData = pd.DataFrame(data=StatusDict)
StatusData = StatusData.set_index('StatusID')
Path = self.Path.grandprix_path(year, gp_name, 'RaceStatus')
StatusData.to_csv(Path)
def load_data_from_f1(self, url, year, gp_name):
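        # Parse the weather "graph" JSON from the F1 site: each channel is a flat list that
        # alternates timestamp and value, which for_loop_by_time() splits into two lists
        # before the per-channel CSVs (track/air temperature, rain flag, wind speed and
        # direction, humidity, pressure) are written.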
print(bcolors.ITALIC + 'GETTING SOME DATA FROM F1...', gp_name + bcolors.END)
page = requests.get(url)
json = page.json()
def for_loop_by_time(json):
Time = []
Something = []
i = 0
for value in json:
if i == 0:
Time.append(value)
i = 1
else:
Something.append(value)
i = 0
return Time, Something
def weather(json):
json = json['Weather']
json = json['graph']
weather_data = json['data']
def temperature(json):
def temp_df(json, description):
Time, Temp = for_loop_by_time(json)
TrackTempDict = {"Time": Time, description: Temp}
TrackTempData = pd.DataFrame(data=TrackTempDict)
TrackTempData = TrackTempData.set_index('Time')
return TrackTempData
def track_temp(json):
print(bcolors.ITALIC + 'GETTING TRACK TEMP FROM F1...', gp_name + bcolors.END)
json = json['pTrack']
TrackTempData = temp_df(json, "Track Temperature")
Path = self.Path.grandprix_path(year, gp_name, 'TrackTemp')
TrackTempData.to_csv(Path)
def air_temp(json):
print(bcolors.ITALIC + 'GETTING AIR TEMP FROM F1...', gp_name + bcolors.END)
json = json['pAir']
TrackTempData = temp_df(json, "Air Temperature")
Path = self.Path.grandprix_path(year, gp_name, 'AirTemp')
TrackTempData.to_csv(Path)
track_temp(json)
air_temp(json)
def is_raining(json):
print(bcolors.ITALIC + 'GETTING WEATHER FROM F1...', gp_name + bcolors.END)
json = json['pRaining']
Time, Raining = for_loop_by_time(json)
TrackTemp = {"Time": Time, "Is Raining": Raining}
TrackTempData = pd.DataFrame(data=TrackTemp)
TrackTempData = TrackTempData.set_index('Time')
Path = self.Path.grandprix_path(year, gp_name, 'Raining')
TrackTempData.to_csv(Path)
def wind_speed(json):
print(bcolors.ITALIC + 'GETTING WIND SPEED FROM F1...', gp_name + bcolors.END)
json = json['pWind Speed']
Time, Wind_Speed = for_loop_by_time(json)
TrackTemp = {"Time": Time, "Wind Speed": Wind_Speed}
TrackTempData = pd.DataFrame(data=TrackTemp)
TrackTempData = TrackTempData.set_index('Time')
Path = self.Path.grandprix_path(year, gp_name, 'Wind_Speed')
TrackTempData.to_csv(Path)
def wind_direction(json):
print(bcolors.ITALIC + 'GETTING WIND DIRECTION FROM F1...', gp_name + bcolors.END)
json = json['pWind Dir']
Time, Wind_Direction = for_loop_by_time(json)
TrackTemp = {"Time": Time, "Wind Direction": Wind_Direction}
TrackTempData = pd.DataFrame(data=TrackTemp)
TrackTempData = TrackTempData.set_index('Time')
Path = self.Path.grandprix_path(year, gp_name, 'Wind_Direction')
TrackTempData.to_csv(Path)
def humidity(json):
print(bcolors.ITALIC + 'GETTING HUMIDITY FROM F1...', gp_name + bcolors.END)
json = json['pHumidity']
Time, Humidity = for_loop_by_time(json)
TrackTemp = {"Time": Time, "Humidity": Humidity}
TrackTempData = pd.DataFrame(data=TrackTemp)
TrackTempData = TrackTempData.set_index('Time')
Path = self.Path.grandprix_path(year, gp_name, 'Humidity')
TrackTempData.to_csv(Path)
def air_pressure(json):
print(bcolors.ITALIC + 'GETTING AIR PRESSURE FROM F1...', gp_name + bcolors.END)
json = json['pPressure']
Time, Air_Pressure = for_loop_by_time(json)
TrackTemp = {"Time": Time, "Air Pressure": Air_Pressure}
                TrackTempData = pd.DataFrame(data=TrackTemp)  # api: pandas.DataFrame
# Import necessary libraries
import json
import joblib
import pandas as pd
import streamlit as st
# Machine Learning
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from sklearn.linear_model import LinearRegression, LogisticRegression
from sklearn.tree import DecisionTreeRegressor, DecisionTreeClassifier
# Custom classes
from ..utils import isNumerical
import os
def app():
"""This application helps in running machine learning models without having to write explicit code
by the user. It runs some basic models and let's the user select the X and y variables.
"""
# Load the data
if 'main_data.csv' not in os.listdir('data'):
st.markdown("Please upload data through `Upload Data` page!")
else:
data = pd.read_csv('data/main_data.csv')
# Create the model parameters dictionary
params = {}
# Use two column technique
# col1, col2 = st.beta_columns(2)
col1, col2 = st.columns(2)
# Design column 1
y_var = col1.radio("Select the variable to be predicted (y)", options=data.columns)
# Design column 2
X_var = col2.multiselect("Select the variables to be used for prediction (X)", options=data.columns)
# Check if len of x is not zero
if len(X_var) == 0:
st.error("You have to put in some X variable and it cannot be left empty.")
# Check if y not in X
if y_var in X_var:
st.error("Warning! Y variable cannot be present in your X-variable.")
# Option to select predition type
pred_type = st.radio("Select the type of process you want to run.",
options=["Regression", "Classification"],
help="Write about reg and classification")
# Add to model parameters
params = {
'X': X_var,
'y': y_var,
'pred_type': pred_type,
}
# if st.button("Run Models"):
st.write(f"**Variable to be predicted:** {y_var}")
st.write(f"**Variable to be used for prediction:** {X_var}")
# Divide the data into test and train set
X = data[X_var]
y = data[y_var]
# Perform data imputation
# st.write("THIS IS WHERE DATA IMPUTATION WILL HAPPEN")
# Perform encoding
X = pd.get_dummies(X)
# Check if y needs to be encoded
if not isNumerical(y):
le = LabelEncoder()
y = le.fit_transform(y)
# Print all the classes
st.write("The classes and the class allotted to them is the following:-")
classes = list(le.classes_)
for i in range(len(classes)):
st.write(f"{classes[i]} --> {i}")
# Perform train test splits
st.markdown("#### Train Test Splitting")
size = st.slider("Percentage of value division",
min_value=0.1,
max_value=0.9,
step = 0.1,
value=0.8,
help="This is the value which will be used to divide the data for training and testing. Default = 80%")
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=size, random_state=42)
st.write("Number of training samples:", X_train.shape[0])
st.write("Number of testing samples:", X_test.shape[0])
# Save the model params as a json file
with open('data/metadata/model_params.json', 'w') as json_file:
json.dump(params, json_file)
''' RUNNING THE MACHINE LEARNING MODELS '''
if pred_type == "Regression":
st.write("Running Regression Models on Sample")
            # Table to store models and their R2 scores
model_r2 = []
# Linear regression model
lr_model = LinearRegression()
lr_model.fit(X_train, y_train)
lr_r2 = lr_model.score(X_test, y_test)
model_r2.append(['Linear Regression', lr_r2])
# Decision Tree model
dt_model = DecisionTreeRegressor()
dt_model.fit(X_train, y_train)
dt_r2 = dt_model.score(X_test, y_test)
model_r2.append(['Decision Tree Regression', dt_r2])
# Save one of the models
if dt_r2 > lr_r2:
# save decision tree
joblib.dump(dt_model, 'data/metadata/model_reg.sav')
else:
joblib.dump(lr_model, 'data/metadata/model_reg.sav')
# Make a dataframe of results
results = pd.DataFrame(model_r2, columns=['Models', 'R2 Score']).sort_values(by='R2 Score', ascending=False)
st.dataframe(results)
if pred_type == "Classification":
st.write("Running Classfication Models on Sample")
            # Table to store models and their accuracy
model_acc = []
            # Logistic regression model
lc_model = LogisticRegression()
lc_model.fit(X_train, y_train)
lc_acc = lc_model.score(X_test, y_test)
            model_acc.append(['Logistic Regression', lc_acc])
# Decision Tree model
dtc_model = DecisionTreeClassifier()
dtc_model.fit(X_train, y_train)
dtc_acc = dtc_model.score(X_test, y_test)
            model_acc.append(['Decision Tree Classifier', dtc_acc])
# Save one of the models
if dtc_acc > lc_acc:
# save decision tree
joblib.dump(dtc_model, 'data/metadata/model_classification.sav')
else:
                joblib.dump(lc_model, 'data/metadata/model_classification.sav')
# Make a dataframe of results
            results = pd.DataFrame(model_acc, columns=['Models', 'Accuracy'])  # api: pandas.DataFrame
# -*- coding: utf-8 -*-
"""
Created on Sat Apr 21 09:52:43 2018
@author: <NAME> 2016EEZ8350
"""
import os
import re
import logging
import numpy as np
import pandas as pd
from glob import glob1
from PlotUtils import *
class Timit:
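    # Thin index over a local TIMIT corpus tree: parses TIMITDIC.TXT, SPKRINFO.TXT and
    # PROMPTS.TXT into DataFrames, then walks the dr*/speaker directories to collect
    # wav/phn/wrd triples, dropping utterances whose phone or word boundaries overlap.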
_COMMENT_RE = re.compile("^;");
_SPKR_COLS = ['id','sex','dr','use','recdate','birthdate',
'ht','race','edu','comments'];
_DR_RE = re.compile('^dr\d+$');
_DR_PREFIX = 'dr';
def __init__(self,root,verbose=False):
assert os.path.exists(root), "Root folder does not exist"
logging.info('Initializing Timit corpus from '+root);
self._root = root;
vocab = os.path.join(root,"doc","TIMITDIC.TXT")
spkrinfo = os.path.join(root,"doc","SPKRINFO.TXT")
prompts = os.path.join(root,"doc","PROMPTS.TXT")
self.init_dictionary(vocab);
self.init_spkrinfo(spkrinfo);
self.init_sentences(prompts);
self.init_files(verbose=verbose);
self.silence_phoneme = 'h#';
def get_silence_phoneme(self):
return self.silence_phoneme
def _is_comment(self,line):
return self._COMMENT_RE.search(line)!=None
def init_dictionary(self,vocab):
logging.info('Start parsing dictionary')
assert os.path.exists(vocab), "Missing vocab dict: "+vocab
f = open(vocab, 'r');
linecnt = 0; rows = [];
for line in list(f):
linecnt+=1;
if self._is_comment(line): continue
rline=re.sub("/","",line);
rline=re.sub("\d+","",rline);
wlist = rline.split();
if len(wlist)<2:
msg = 'Incomplete dict entry @%d : %s'
logging.warn(msg,linecnt,line); continue
rows.append([wlist[0], ' '.join(wlist[1:])])
f.close();
df = pd.DataFrame(data=rows,columns=["word","phnseq"]);
assert df.shape[0]>0, "Invalid dictionary no valid entry found"
self._vocab = vocab; self._vocabdf = df;
df.set_index('word',inplace=True);
logging.info("Read %d words from dictionary",df.shape[0])
def init_spkrinfo(self,spkrinfo):
logging.info('Start parsing speaker information')
assert os.path.exists(spkrinfo), "Missing speaker info: "+spkrinfo
f = open(spkrinfo,"r"); linecnt=0; rows=[];
for line in list(f):
linecnt+=1;
if self._is_comment(line): continue
wlist = line.split();
if len(wlist)<9:
msg = 'Incomplete speaker entry @%d : %s'
logging.warn(msg,linecnt,line); continue
row = wlist[0:9]; row.append(' '.join(wlist[9:]));
row[0]=row[0].lower();
rows.append(row);
f.close()
assert len(rows)>0, "No valid speaker entry found"
df = pd.DataFrame(data=rows,columns=self._SPKR_COLS);
df.set_index('id',inplace=True);
self._spkrinfo = spkrinfo; self._spkrdf = df;
logging.info('Read information for %d speakers',df.shape[0]);
def init_sentences(self,prompts):
assert os.path.exists(prompts), "Missing sentence files: "+prompts
f = open(prompts,"r"); linecnt=0; rows=[];
for line in list(f):
linecnt+=1;
if self._is_comment(line): continue
r = re.compile('\(.+\)');
if not r.search(line):
msg = 'sentence id not found @%d %s';
logging.warn(msg,linecnt,line);
continue;
wlist = line.split();
i = re.sub('[()]',"",wlist[-1]);
c = re.sub('[()\d]',"",wlist[-1]);
row = [i,c,' '.join(wlist[0:-1])];
rows.append(row);
f.close();
assert len(rows)>0, "No valid sentence found"
logging.info('Read %d sentences',len(rows));
df = pd.DataFrame(data=rows,columns=['id','type','sentence']);
df.set_index('id',inplace=True);
self._sentfile = prompts; self._sentdf = df;
def get_dialect_regions(self):
assert hasattr(self,'_spkrdf'), "Speaker info is not initialized"
return ['dr'+x for x in self._spkrdf.dr.unique()];
def has_speaker(self,spkr):
assert hasattr(self,'_spkrdf'), "Speaker info is not initialized"
return spkr in self._spkrdf.index
def get_speaker_use(self,spkr):
if self.has_speaker(spkr):
return self._spkrdf.loc[spkr]['use']
def get_region_id(self,name):
if self._DR_RE.search(name): return name[2:];
def init_files(self,verbose=False):
dirs = glob1(self._root,'dr*');
# May need this for linux but windows is case insensitive
#dirs+=glob1(self._root,'DR*');
rows = []; f = open('timit_corpus_parsing.log',mode='w');
assert len(dirs)>0, "No dialect region directory division found dr*"
logging.info("Start initializing corpus files")
for drd in dirs:
logging.info("Parsing files for dialect dir %s",drd);
drid = int(self.get_region_id(drd));
drp = os.path.join(self._root,drd);
# First character is 'f' - female, 'm'- male
spkrdirs = glob1(drp,'[fmFM]*');
for spkd in spkrdirs:
sex = spkd[0]; spkr = spkd[1:];
spkp = os.path.join(drp,spkd);
# Get waves and check for wrd and phn files
wavfiles = glob1(spkp,'*.wav');
for wav in wavfiles:
senid = wav[0:-4]; wavp = os.path.join(spkp,wav);
phn = senid+'.phn'; wrd = senid+'.wrd';
phnp = os.path.join(spkp,phn);
wrdp = os.path.join(spkp,wrd);
if not (os.path.exists(phnp) and os.path.exists(wrdp)):
logging.warn('Could not find wrd or phn file '+spkp);
continue;
row = [drid,spkr,sex,senid,wavp,phnp,wrdp]
# Check for overlap in wrd and phn both and report
if self.has_overlap(phnp):
msg = "Phone boundaries overlap. "+ \
"Dropping entry %s" % str(row)
if verbose: logging.warn(msg);
f.write(msg+"\n");
elif self.has_overlap(wrdp):
msg = "Word boundaries overlap. "+\
"Dropping entry %s" % str(row)
if verbose: logging.warn(msg);
f.write(msg+"\n");
else:
spkr_use = self.get_speaker_use(spkr);
train = True if spkr_use=='TRN' else False
test = not train; valid = False;
duration = self._get_duration(phnp);
row+=[duration,train,valid,test];
rows.append(row);
assert len(rows)>0, "No valid data found in dataset "+self._root;
cols = ['dr','spkrid','sex','senid','wav','phn','wrd','duration',
'training','validation','testing'];
        df = pd.DataFrame(rows, columns=cols)  # api: pandas.DataFrame
from typing import List, Tuple
import pandas as pd
class AgodaCancellationPreprocessor:
SATURDAY = 5
def __init__(self, full_data: pd.DataFrame):
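        # Build per-customer dictionaries from the training data: number of recorded
        # cancellations, average days from booking to cancellation, and average days from
        # cancellation to check-in, for later use as features.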
self.number_of_times_customer_canceled = dict()
for id, cancellation in full_data[
["h_customer_id", "cancellation_datetime"]].itertuples(
index=False):
if cancellation == 0:
if id in self.number_of_times_customer_canceled:
self.number_of_times_customer_canceled[id] += 1
else:
self.number_of_times_customer_canceled[id] = 1
self.average_cancellation_days_from_booking = dict()
self.average_cancellation_days_to_checkin = dict()
dates = pd.DataFrame([])
dates["cancellation_datetime"] = pd.to_datetime(
full_data["cancellation_datetime"])
dates["booking_datetime"] = pd.to_datetime(
full_data["booking_datetime"])
dates["checkin_date"] = pd.to_datetime(full_data["checkin_date"])
for id, cancellation, booking_date, checkin_date in pd.concat(
[full_data[
"h_customer_id"], dates["cancellation_datetime"],
dates["booking_datetime"], dates["checkin_date"]],
axis=1).itertuples(
index=False):
if cancellation == 0:
if id in self.average_cancellation_days_from_booking:
self.average_cancellation_days_from_booking[id] += (
cancellation - booking_date).days / \
self.number_of_times_customer_canceled[
id]
self.average_cancellation_days_to_checkin[id] += (
checkin_date - cancellation).days / \
self.number_of_times_customer_canceled[
id]
else:
self.average_cancellation_days_from_booking[id] = (
cancellation - booking_date).days / \
self.number_of_times_customer_canceled[
id]
self.average_cancellation_days_to_checkin[id] = (
checkin_date - cancellation).days / \
self.number_of_times_customer_canceled[
id]
def preprocess(self, full_data: pd.DataFrame) -> Tuple[
pd.DataFrame, List[pd.DataFrame]]:
# take features that need no processing.
features = full_data[["h_booking_id",
"hotel_star_rating",
"guest_is_not_the_customer",
"no_of_adults",
"no_of_children",
"no_of_extra_bed",
"no_of_room",
"original_selling_amount",
"is_user_logged_in",
"is_first_booking",
"request_nonesmoke",
"request_latecheckin",
"request_highfloor",
"request_largebed",
"request_twinbeds",
"request_airport",
"request_earlycheckin"]].copy()
# add time related features
booking_date = pd.to_datetime(full_data["booking_datetime"])
        checkin_date = pd.to_datetime(full_data["checkin_date"])  # api: pandas.to_datetime
#!/usr/bin/env python
import os,sys
import pandas as pd
import argparse
daismdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0,daismdir)
import daism.modules.simulation as simulation
import daism.modules.training as training
import daism.modules.prediction as prediction
#--------------------------------------
#--------------------------------------
# main()
parser = argparse.ArgumentParser(description='DAISM-XMBD deconvolution.')
subparsers = parser.add_subparsers(dest='subcommand', help='Select one of the following sub-commands')
# create the parser for the "one-stop DAISM-DNN" command
parser_a = subparsers.add_parser('DAISM', help='one-stop DAISM-XMBD',description="one-stop DAISM-XMBD")
parser_a.add_argument("-platform", type=str, help="Platform of calibration data, [R]: RNA-seq TPM, [S]: single cell RNA-seq", default="S")
parser_a.add_argument("-caliexp", type=str, help="Calibration samples expression file", default=None)
parser_a.add_argument("-califra", type=str, help="Calibration samples ground truth file", default=None)
parser_a.add_argument("-aug", type=str, help="Purified samples expression (h5ad)", default=None)
parser_a.add_argument("-N", type=int, help="Simulation samples number", default=16000)
parser_a.add_argument("-testexp", type=str, help="Test samples expression file", default=None)
parser_a.add_argument("-net", type=str, help="Network architecture used for training", default="coarse")
parser_a.add_argument("-outdir", type=str, help="Output result file directory", default="../output/")
# create the parser for the "DAISM simulation" command
parser_b = subparsers.add_parser('DAISM_simulation', help='training set simulation using DAISM strategy',description='training set simulation using DAISM strategy.')
parser_b.add_argument("-platform", type=str, help="Platform of calibration data, [R]: RNA-seq TPM, [S]: single cell RNA-seq", default="S")
parser_b.add_argument("-caliexp", type=str, help="Calibration samples expression file", default=None)
parser_b.add_argument("-califra", type=str, help="Calibration samples ground truth file", default=None)
parser_b.add_argument("-aug", type=str, help="Purified samples expression (h5ad)", default=None)
parser_b.add_argument("-testexp", type=str, help="Test samples expression file", default=None)
parser_b.add_argument("-N", type=int, help="Simulation samples number", default=16000)
parser_b.add_argument("-outdir", type=str, help="Output result file directory", default="../output/")
# create the parser for the "Generic simulation" command
parser_c = subparsers.add_parser('Generic_simulation', help='training set simulation using purified cells only',description='training set simulation using purified cells only.')
parser_c.add_argument("-platform", type=str, help="Platform of calibration data, [R]: RNA-seq TPM, [S]: single cell RNA-seq", default="S")
parser_c.add_argument("-aug", type=str, help="Purified samples expression (h5ad)", default=None)
parser_c.add_argument("-testexp", type=str, help="Test samples expression file", default=None)
parser_c.add_argument("-N", type=int, help="Simulation samples number", default=16000)
parser_c.add_argument("-outdir", type=str, help="Output result file directory", default="../output/")
# create the parser for the "training" command
parser_d = subparsers.add_parser('training', help='train DNN model',description='train DNN model.')
parser_d.add_argument("-trainexp", type=str, help="Simulated samples expression file", default=None)
parser_d.add_argument("-trainfra", type=str, help="Simulated samples ground truth file", default=None)
parser_d.add_argument("-net", type=str, help="Network architecture used for training", default="coarse")
parser_d.add_argument("-outdir", type=str, help="Output result file directory", default="../output/")
# create the parser for the "prediction" command
parser_e = subparsers.add_parser('prediction', help='predict using a trained model',description='predict using a trained model.')
parser_e.add_argument("-testexp", type=str, help="Test samples expression file", default=None)
parser_e.add_argument("-model", type=str, help="Deep-learing model file trained by DAISM", default="../output/DAISM_model.pkl")
parser_e.add_argument("-celltype", type=str, help="Model celltypes", default="../output/DAISM_model_celltypes.txt")
parser_e.add_argument("-feature", type=str, help="Model feature", default="../output/DAISM_model_feature.txt")
parser_e.add_argument("-net", type=str, help="Network architecture used for training", default="coarse")
parser_e.add_argument("-outdir", type=str, help="Output result file directory", default="../output/")
class Options:
random_seed = 777
min_f = 0.01
max_f = 0.99
lr = 1e-4
batchsize = 64
num_epoches = 500
ncuda = 0
def main():
# parse some argument lists
inputArgs = parser.parse_args()
    if not os.path.exists(inputArgs.outdir):
os.mkdir(inputArgs.outdir)
#### DAISM modules ####
if (inputArgs.subcommand=='DAISM'):
# Load calibration data
caliexp = pd.read_csv(inputArgs.caliexp, sep="\t", index_col=0)
califra = pd.read_csv(inputArgs.califra, sep="\t", index_col=0)
# Load test data
test_sample = pd.read_csv(inputArgs.testexp, sep="\t", index_col=0)
# Preprocess purified data
mode = "daism"
commongenes,caliexp,C_all = simulation.preprocess_purified(inputArgs.aug,inputArgs.platform,mode,test_sample,caliexp,califra)
# Create training dataset
mixsam, mixfra, celltypes, feature = simulation.daism_simulation(caliexp,califra,C_all,Options.random_seed,inputArgs.N,inputArgs.platform,Options.min_f,Options.max_f)
# Save signature genes and celltype labels
if os.path.exists(inputArgs.outdir+"/output/")==False:
os.mkdir(inputArgs.outdir+"/output/")
pd.DataFrame(feature).to_csv(inputArgs.outdir+'/output/DAISM_feature.txt',sep='\t')
pd.DataFrame(celltypes).to_csv(inputArgs.outdir+'/output/DAISM_celltypes.txt',sep='\t')
print('Writing training data...')
# Save training data
mixsam.to_csv(inputArgs.outdir+'/output/DAISM_mixsam.txt',sep='\t')
mixfra.to_csv(inputArgs.outdir+'/output/DAISM_mixfra.txt',sep='\t')
# Training model
model = training.dnn_training(mixsam,mixfra,Options.random_seed,inputArgs.outdir+"/output/",Options.num_epoches,Options.lr,Options.batchsize,Options.ncuda,inputArgs.net)
# Save signature genes and celltype labels
pd.DataFrame(list(mixfra.index)).to_csv(inputArgs.outdir+'/output/DAISM_model_celltypes.txt',sep='\t')
pd.DataFrame(list(mixsam.index)).to_csv(inputArgs.outdir+'/output/DAISM_model_feature.txt',sep='\t')
# Prediction
result = prediction.dnn_prediction(model, test_sample, list(mixfra.index), list(mixsam.index),Options.ncuda)
# Save predicted result
result.to_csv(inputArgs.outdir+'/output/DAISM_result.txt',sep='\t')
############################
#### simulation modules ####
############################
#### DAISM simulation modules ####
if (inputArgs.subcommand=='DAISM_simulation'):
# Load calibration data
caliexp = pd.read_csv(inputArgs.caliexp, sep="\t", index_col=0)
califra = | pd.read_csv(inputArgs.califra, sep="\t", index_col=0) | pandas.read_csv |
import json
import pandas as pd
from collections import OrderedDict
import numpy as np
from baseline.utils import listify
__all__ = ["log2json", "order_json"]
def log2json(log_file):
s = []
with open(log_file) as f:
for line in f:
x = line.replace("'", '"')
s.append(json.loads(x))
return s
def order_json(j):
new = OrderedDict()
for key in sorted(j.keys()):
if isinstance(j[key], dict):
value = order_json(j[key])
elif isinstance(j[key], list):
value = sorted(j[key])
else:
value = j[key]
new[key] = value
return new
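# Illustrative sketch (not part of the original module): order_json sorts keys
# recursively, which makes serialized configs stable for hashing and diffing.
# The dict below is made up for demonstration.
def _order_json_example():
    cfg = {"b": [3, 1, 2], "a": {"y": 1, "x": 2}}
    ordered = order_json(cfg)
    # keys come back alphabetically at every level and lists are sorted
    assert list(ordered.keys()) == ["a", "b"]
    assert list(ordered["a"].keys()) == ["x", "y"]
    assert ordered["b"] == [1, 2, 3]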
def sort_ascending(metric):
return metric == "avg_loss" or metric == "perplexity"
def df_summary_exp(df):
return df.groupby("sha1").agg([len, np.mean, np.std, np.min, np.max]) \
.rename(columns={'len': 'num_exps', 'amean': 'mean', 'amin': 'min', 'amax': 'max'})
def df_get_results(result_frame, dataset, num_exps, num_exps_per_config, metric, sort):
datasets = result_frame.dataset.unique()
if dataset not in datasets:
return None
dsr = result_frame[result_frame.dataset == dataset]
if dsr.empty:
return None
df = pd.DataFrame()
if num_exps_per_config is not None:
for gname, rframe in result_frame.groupby("sha1"):
rframe = rframe.copy()
rframe['date'] = | pd.to_datetime(rframe.date) | pandas.to_datetime |
from __future__ import annotations
from daproperties.stats.rv_discrete import rv_discrete
from daproperties.stats.rv_continuous import rv_continuous
from daproperties.stats.rv_mixed import rv_mixed
import numpy as np
import pandas as pd
from sklearn.model_selection import GridSearchCV
from sklearn.neighbors import KernelDensity
def rv_from_discrete(x:np.ndarray, **kwargs) -> rv_discrete:
df = pd.DataFrame(x)
pmf = df.value_counts(ascending=True, normalize=True, sort=False)
xk = list(pmf.index.values)
pk = pmf.values
return rv_discrete(xk = xk, pk = pk, **kwargs)
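# Illustrative sketch (synthetic data, not part of the original module): builds
# an empirical pmf from integer-coded samples via the helper above.
def _rv_from_discrete_example() -> rv_discrete:
    samples = np.array([[0], [0], [1], [2], [2], [2]])
    # observed frequencies (2/6, 1/6, 3/6) become the pmf weights
    return rv_from_discrete(samples)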
def rv_from_continuous(x:np.ndarray, bandwidth_range=np.logspace(-1,1,20), **kwargs) -> rv_continuous:
params = {"bandwidth": bandwidth_range}
kde = None
grid = GridSearchCV(KernelDensity(), params, verbose=1, n_jobs=-1)
grid.fit(x)
kde = grid.best_estimator_
shape = (x.shape[1],)
coverage = _value_range_from_data(x)
return rv_continuous(kde, shape, coverage, **kwargs)
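# Illustrative sketch (synthetic data): the KDE bandwidth is chosen by grid
# search over `bandwidth_range`, so even small samples trigger a CV loop.
def _rv_from_continuous_example() -> rv_continuous:
    rng = np.random.default_rng(0)
    samples = rng.normal(size=(200, 2))
    return rv_from_continuous(samples, bandwidth_range=np.logspace(-1, 0, 5))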
def rv_from_mixed(x:np.ndarray, y:np.ndarray, rv_x:rv_continuous=None, rv_y:rv_discrete=None, bandwidth_range=np.logspace(-1,1,20), **kwargs) -> rv_mixed:
params = {"bandwidth": bandwidth_range}
cond_kdes = {}
labels = | pd.DataFrame(y) | pandas.DataFrame |
"""
This module contains a collection of functions which make plots (saved as png files) using matplotlib, generated from
some model fits and cross-validation evaluation within a MAST-ML run.
This module also contains a method to create python notebooks containing plotted data and the relevant source code from
this module, to enable the user to make their own modifications to the created plots in a straightforward way (useful for
tweaking plots for a presentation or publication).
"""
import math
import statistics
import os
import copy
import pandas as pd
import itertools
import warnings
import logging
from collections.abc import Iterable
from os.path import join
from collections import OrderedDict
from math import log, floor, ceil
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score
from sklearn.ensemble._forest import _generate_sample_indices, _get_n_samples_bootstrap
from mpl_toolkits.axes_grid1 import make_axes_locatable
# Ignore the harmless warning about the gelsd driver on mac.
warnings.filterwarnings(action="ignore", module="scipy",
message="^internal gelsd")
# Ignore matplotlib deprecation warning (set as all warnings for now)
warnings.filterwarnings(action="ignore")
import numpy as np
from sklearn.metrics import confusion_matrix, roc_curve, auc, precision_recall_curve
import matplotlib
from matplotlib import pyplot as plt
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure, figaspect
from matplotlib.animation import FuncAnimation
from matplotlib.font_manager import FontProperties
from scipy.stats import gaussian_kde, norm
from mpl_toolkits.axes_grid1.inset_locator import mark_inset
from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes
# Needed imports for ipynb_maker
#from mastml.utils import nice_range
#from mastml.metrics import nice_names
import inspect
import textwrap
from pandas import DataFrame, Series
import nbformat
from functools import wraps
import forestci as fci
from forestci.calibration import calibrateEB
import copy
matplotlib.rc('font', size=18, family='sans-serif') # set all font to bigger
matplotlib.rc('figure', autolayout=True) # turn on autolayout
# adding dpi as a constant global so it can be changed later
DPI = 250
#logger = logging.getLogger() # only used inside ipynb_maker I guess
# HEADERENDER don't delete this line, it's used by ipynb maker
logger = logging.getLogger('mastml') # the real logger
def ipynb_maker(plot_func):
"""
This method creates Jupyter Notebooks so user can modify and regenerate the plots produced by MAST-ML.
Args:
plot_func: (plot_helper method), a plotting method contained in plot_helper.py which contains the
@ipynb_maker decorator
Returns:
(plot_helper method), the same plot_func as used as input, but after having written the Jupyter notebook with source code to create plot
"""
from mastml import plot_helper # Strange self-import but it works, as had cyclic import issues with ipynb_maker as its own module
@wraps(plot_func)
def wrapper(*args, **kwargs):
# convert everything to kwargs for easier display
# from geniuses at https://stackoverflow.com/a/831164
#kwargs.update(dict(zip(plot_func.func_code.co_varnames, args)))
sig = inspect.signature(plot_func)
binding = sig.bind(*args, **kwargs)
all_args = binding.arguments
        # derive the notebook savepath from either a 'savepath' or an 'outdir' argument
if 'savepath' in all_args:
ipynb_savepath = all_args['savepath']
knows_savepath = True
basename = os.path.basename(ipynb_savepath) # fix absolute path problem
elif 'outdir' in all_args:
knows_savepath = False
basename = plot_func.__name__
ipynb_savepath = os.path.join(all_args['outdir'], basename)
else:
raise Exception('you must have an "outdir" or "savepath" argument to use ipynb_maker')
readme = textwrap.dedent(f"""\
This notebook was automatically generated from your MAST-ML run so you can recreate the
plots. Some things are a bit different from the usual way of creating plots - we are
using the [object oriented
interface](https://matplotlib.org/tutorials/introductory/lifecycle.html) instead of
pyplot to create the `fig` and `ax` instances.
""")
# get source of the top of plot_helper.py
header = ""
with open(plot_helper.__file__) as f:
for line in f.readlines():
if 'HEADERENDER' in line:
break
header += line
core_funcs = [plot_helper.stat_to_string, plot_helper.plot_stats, plot_helper.make_fig_ax,
plot_helper.get_histogram_bins, plot_helper.nice_names, plot_helper.nice_range,
plot_helper.nice_mean, plot_helper.nice_std, plot_helper.rounder, plot_helper._set_tick_labels,
plot_helper._set_tick_labels_different, plot_helper._nice_range_helper, plot_helper._nearest_pow_ten,
plot_helper._three_sigfigs, plot_helper._n_sigfigs, plot_helper._int_if_int, plot_helper._round_up,
plot_helper.prediction_intervals]
func_strings = '\n\n'.join(inspect.getsource(func) for func in core_funcs)
plot_func_string = inspect.getsource(plot_func)
# remove first line that has this decorator on it (!!!)
plot_func_string = '\n'.join(plot_func_string.split('\n')[1:])
# put the arguments and their values in the code
arg_assignments = []
arg_names = []
for key, var in all_args.items():
if isinstance(var, DataFrame):
# this is amazing
arg_assignments.append(f"{key} = pd.read_csv(StringIO('''\n{var.to_csv(index=False)}'''))")
elif isinstance(var, Series):
arg_assignments.append(f"{key} = pd.Series(pd.read_csv(StringIO('''\n{var.to_csv(index=False)}''')).iloc[:,0])")
else:
arg_assignments.append(f'{key} = {repr(var)}')
arg_names.append(key)
args_block = ("from numpy import array\n" +
"from collections import OrderedDict\n" +
"from io import StringIO\n" +
"from sklearn.gaussian_process import GaussianProcessRegressor # Need for error plots\n" +
"from sklearn.gaussian_process.kernels import * # Need for error plots\n" +
"from sklearn.ensemble import RandomForestRegressor # Need for error plots\n" +
'\n'.join(arg_assignments))
arg_names = ', '.join(arg_names)
if knows_savepath:
if '.png' not in basename:
basename += '.png'
main = textwrap.dedent(f"""\
import pandas as pd
from IPython.display import Image, display
{plot_func.__name__}({arg_names})
display(Image(filename='{basename}'))
""")
else:
main = textwrap.dedent(f"""\
import pandas as pd
from IPython.display import Image, display
plot_paths = plot_predicted_vs_true(train_quad, test_quad, outdir, label)
for plot_path in plot_paths:
display(Image(filename=plot_path))
""")
nb = nbformat.v4.new_notebook()
readme_cell = nbformat.v4.new_markdown_cell(readme)
text_cells = [header, func_strings, plot_func_string, args_block, main]
cells = [readme_cell] + [nbformat.v4.new_code_cell(cell_text) for cell_text in text_cells]
nb['cells'] = cells
nbformat.write(nb, ipynb_savepath + '.ipynb')
return plot_func(*args, **kwargs)
return wrapper
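# Illustrative sketch (hypothetical helper, not used elsewhere in MAST-ML): any
# plotting function whose signature contains `savepath` or `outdir` can be
# decorated so a companion notebook is written next to the saved image.
@ipynb_maker
def _example_decorated_plot(y_true, y_pred, savepath):
    fig, ax = make_fig_ax()
    ax.scatter(y_true, y_pred, color='blue', edgecolors='black', s=100)
    fig.savefig(savepath, dpi=DPI, bbox_inches='tight')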
def make_train_test_plots(run, path, is_classification, label, model, train_X, test_X, groups=None):
"""
General plotting method used to execute sequence of specific plots of train-test data analysis
Args:
run: (dict), a particular split_result from masml_driver
path: (str), path to save the generated plots and analysis of split_result designated in 'run'
is_classification: (bool), whether or not the analysis is a classification task
label: (str), name of the y data variable being fit
model: (scikit-learn model object), a scikit-learn model/estimator
train_X: (numpy array), array of X features used in training
test_X: (numpy array), array of X features used in testing
groups: (numpy array), array of group names
Returns:
None
"""
y_train_true, y_train_pred, y_test_true = \
run['y_train_true'], run['y_train_pred'], run['y_test_true']
y_test_pred, train_metrics, test_metrics = \
run['y_test_pred'], run['train_metrics'], run['test_metrics']
train_groups, test_groups = run['train_groups'], run['test_groups']
if is_classification:
# Need these class prediction probabilities for ROC curve analysis
y_train_pred_proba = run['y_train_pred_proba']
y_test_pred_proba = run['y_test_pred_proba']
title = 'train_confusion_matrix'
plot_confusion_matrix(y_train_true, y_train_pred,
join(path, title+'.png'), train_metrics,
title=title)
title = 'test_confusion_matrix'
plot_confusion_matrix(y_test_true, y_test_pred,
join(path, title+'.png'), test_metrics,
title=title)
        title = 'train_roc_curve'
        plot_roc_curve(y_train_true, y_train_pred_proba, join(path, title+'.png'))
        title = 'test_roc_curve'
        plot_roc_curve(y_test_true, y_test_pred_proba, join(path, title+'.png'))
        title = 'train_precision_recall_curve'
        plot_precision_recall_curve(y_train_true, y_train_pred_proba, join(path, title+'.png'))
        title = 'test_precision_recall_curve'
        plot_precision_recall_curve(y_test_true, y_test_pred_proba, join(path, title+'.png'))
else: # is_regression
plot_predicted_vs_true((y_train_true, y_train_pred, train_metrics, train_groups),
(y_test_true, y_test_pred, test_metrics, test_groups),
path, label=label)
title = 'train_residuals_histogram'
plot_residuals_histogram(y_train_true, y_train_pred,
join(path, title+'.png'), train_metrics,
title=title, label=label)
title = 'test_residuals_histogram'
plot_residuals_histogram(y_test_true, y_test_pred,
join(path, title+'.png'), test_metrics,
title=title, label=label)
def make_error_plots(run, path, is_classification, label, model, train_X, test_X, rf_error_method, rf_error_percentile,
is_validation, validation_column_name, validation_X, groups=None):
y_train_true, y_train_pred, y_test_true = \
run['y_train_true'], run['y_train_pred'], run['y_test_true']
y_test_pred, train_metrics, test_metrics = \
run['y_test_pred'], run['train_metrics'], run['test_metrics']
train_groups, test_groups = run['train_groups'], run['test_groups']
if is_validation:
y_validation_pred, y_validation_true, prediction_metrics = \
run['y_validation_pred'+'_'+str(validation_column_name)], \
run['y_validation_true'+'_'+str(validation_column_name)], \
run['prediction_metrics']
if is_classification:
logger.debug('There is no error distribution plotting for classification problems, just passing through...')
else: # is_regression
#title = 'train_normalized_error'
#plot_normalized_error(y_train_true, y_train_pred, join(path, title+'.png'), model, error_method, percentile,
# X=train_X, Xtrain=train_X, Xtest=test_X)
title = 'test_normalized_error'
plot_normalized_error(y_test_true, y_test_pred, join(path, title+'.png'), model, rf_error_method,
rf_error_percentile, X=test_X, Xtrain=train_X, Xtest=test_X)
#title = 'train_cumulative_normalized_error'
#plot_cumulative_normalized_error(y_train_true, y_train_pred, join(path, title+'.png'), model, error_method,
# percentile, X=train_X, Xtrain=train_X, Xtest=test_X)
title = 'test_cumulative_normalized_error'
plot_cumulative_normalized_error(y_test_true, y_test_pred, join(path, title+'.png'), model, rf_error_method,
rf_error_percentile, X=test_X, Xtrain=train_X, Xtest=test_X)
# HERE, add your RMS residual vs. error plot function
if model.__class__.__name__ in ['RandomForestRegressor', 'ExtraTreesRegressor', 'GaussianProcessRegressor',
'GradientBoostingRegressor', 'EnsembleRegressor']:
y_all_data = np.concatenate([y_test_true, y_train_true])
plot_real_vs_predicted_error(y_all_data, path, model, data_test_type='test')
if is_validation:
title = 'validation_cumulative_normalized_error'
plot_cumulative_normalized_error(y_validation_true, y_validation_pred, join(path, title+'.png'), model, rf_error_method,
rf_error_percentile, X=validation_X, Xtrain=train_X, Xtest=test_X)
title = 'validation_normalized_error'
plot_normalized_error(y_validation_true, y_validation_pred, join(path, title + '.png'), model, rf_error_method,
rf_error_percentile, X=validation_X, Xtrain=train_X, Xtest=test_X)
if model.__class__.__name__ in ['RandomForestRegressor', 'ExtraTreesRegressor', 'GaussianProcessRegressor',
'GradientBoostingRegressor', 'EnsembleRegressor']:
y_all_data = np.concatenate([y_test_true, y_train_true])
plot_real_vs_predicted_error(y_all_data, path, model, data_test_type='validation')
@ipynb_maker
def plot_confusion_matrix(y_true, y_pred, savepath, stats, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues):
"""
Method used to generate a confusion matrix for a classification run. Additional information can be found
at: http://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html
Args:
y_true: (numpy array), array containing the true y data values
y_pred: (numpy array), array containing the predicted y data values
savepath: (str), path to save the plotted confusion matrix
stats: (dict), dict of training or testing statistics for a particular run
normalize: (bool), whether or not to normalize data output as truncated float vs. double
title: (str), title of the confusion matrix plot
cmap: (matplotlib colormap), the color map to use for confusion matrix plotting
Returns:
None
"""
# calculate confusion matrix and lables in correct order
cm = confusion_matrix(y_true, y_pred)
#classes = sorted(list(set(y_true).intersection(set(y_pred))))
classes = sorted(list(set(y_true).union(set(y_pred))))
x_align = 0.64
fig, ax = make_fig_ax(x_align=x_align)
#ax.set_title(title)
# create the colorbar, not really needed but everyones got 'em
mappable = ax.imshow(cm, interpolation='nearest', cmap=cmap)
#fig.colorbar(mappable)
# set x and y ticks to labels
tick_marks = range(len(classes))
ax.set_xticks(tick_marks)
ax.set_xticklabels(classes, rotation='horizontal', fontsize=18)
ax.set_yticks(tick_marks)
ax.set_yticklabels(classes, rotation='horizontal', fontsize=18)
# draw number in the boxes
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
ax.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
# plots the stats
plot_stats(fig, stats, x_align=0.60, y_align=0.90)
ax.set_ylabel('True label')
ax.set_xlabel('Predicted label')
fig.savefig(savepath, dpi=DPI, bbox_inches='tight')
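# Illustrative usage sketch (synthetic labels; the savepath is a placeholder).
# The stats dict is rendered beside the matrix by plot_stats.
def _example_confusion_matrix():
    y_true = np.array([0, 0, 1, 1, 1, 0])
    y_pred = np.array([0, 1, 1, 1, 0, 0])
    stats = {'accuracy': 4 / 6}
    plot_confusion_matrix(y_true, y_pred, 'confusion_matrix_example.png', stats)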
@ipynb_maker
def plot_roc_curve(y_true, y_pred, savepath):
"""
Method to calculate and plot the receiver-operator characteristic curve for classification model results
Args:
y_true: (numpy array), array of true y data values
y_pred: (numpy array), array of predicted y data values
savepath: (str), path to save the plotted ROC curve
Returns:
None
"""
#TODO: have work when probability=False in model params. Suggest user set probability=True!!
#classes = sorted(list(set(y_true).union(set(y_pred))))
#n_classes = y_pred.shape[1]
classes = list(np.unique(y_true))
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(len(classes)):
fpr[i], tpr[i], _ = roc_curve(y_true, y_pred[:, i])
roc_auc[i] = auc(fpr[i], tpr[i])
x_align = 0.95
fig, ax = make_fig_ax(aspect_ratio=0.66, x_align=x_align, left=0.15)
colors = ['blue', 'red']
#for i in range(len(classes)):
# ax.plot(fpr[i], tpr[i], color=colors[i], lw=2, label='ROC curve class '+str(i)+' (area = %0.2f)' % roc_auc[i])
ax.plot(fpr[1], tpr[1], color=colors[0], lw=2, label='ROC curve' + ' (area = %0.2f)' % roc_auc[1])
ax.plot([0, 1], [0, 1], color='black', label='Random guess', lw=2, linestyle='--')
ax.set_xticks(np.linspace(0, 1, 5))
ax.set_yticks(np.linspace(0, 1, 5))
_set_tick_labels(ax, maxx=1, minn=0)
ax.set_xlabel('False Positive Rate', fontsize='16')
ax.set_ylabel('True Positive Rate', fontsize='16')
ax.legend(loc="lower right", fontsize=12)
#plot_stats(fig, stats, x_align=0.60, y_align=0.90)
    fig.savefig(savepath, dpi=DPI, bbox_inches='tight')
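# Illustrative usage sketch (synthetic data): the second argument must be the
# class-probability matrix (e.g. the output of predict_proba), not hard labels.
def _example_roc_curve():
    y_true = np.array([0, 0, 1, 1])
    y_proba = np.array([[0.9, 0.1], [0.6, 0.4], [0.35, 0.65], [0.2, 0.8]])
    plot_roc_curve(y_true, y_proba, 'roc_curve_example.png')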
@ipynb_maker
def plot_precision_recall_curve(y_true, y_pred, savepath):
"""
Method to calculate and plot the precision-recall curve for classification model results
Args:
y_true: (numpy array), array of true y data values
y_pred: (numpy array), array of predicted y data values
savepath: (str), path to save the plotted precision-recall curve
Returns:
None
"""
# Note this only works with probability predictions of the classifier labels.
classes = list(np.unique(y_true))
precision = dict()
recall = dict()
#roc_auc = dict()
for i in range(len(classes)):
precision[i], recall[i], _ = precision_recall_curve(y_true, y_pred[:, i])
x_align = 0.95
fig, ax = make_fig_ax(aspect_ratio=0.66, x_align=x_align, left=0.15)
colors = ['blue', 'red']
#for i in range(len(classes)):
# ax.plot(fpr[i], tpr[i], color=colors[i], lw=2, label='ROC curve class '+str(i)+' (area = %0.2f)' % roc_auc[i])
ax.step(recall[1], precision[1], color=colors[0], lw=2, label='Precision-recall curve')
#ax.fill_between(recall[1], precision[1], alpha=0.4, color=colors[0])
ax.set_xticks(np.linspace(0, 1, 5))
ax.set_yticks(np.linspace(0, 1, 5))
_set_tick_labels(ax, maxx=1, minn=0)
ax.set_xlabel('Recall', fontsize='16')
ax.set_ylabel('Precision', fontsize='16')
ax.legend(loc="upper right", fontsize=12)
#plot_stats(fig, stats, x_align=0.60, y_align=0.90)
    fig.savefig(savepath, dpi=DPI, bbox_inches='tight')
return
@ipynb_maker
def plot_residuals_histogram(y_true, y_pred, savepath,
stats, title='residuals histogram', label='residuals'):
"""
Method to calculate and plot the histogram of residuals from regression model
Args:
y_true: (numpy array), array of true y data values
y_pred: (numpy array), array of predicted y data values
savepath: (str), path to save the plotted precision-recall curve
stats: (dict), dict of training or testing statistics for a particular run
title: (str), title of residuals histogram
label: (str), label used for axis labeling
Returns:
None
"""
# make fig and ax, use x_align when placing text so things don't overlap
x_align = 0.64
fig, ax = make_fig_ax(x_align=x_align)
#ax.set_title(title)
# do the actual plotting
residuals = y_true - y_pred
#Output residuals data and stats to spreadsheet
path = os.path.dirname(savepath)
pd.DataFrame(residuals).describe().to_csv(os.path.join(path,'residual_statistics.csv'))
pd.DataFrame(residuals).to_csv(path+'/'+'residuals.csv')
#Get num_bins using smarter method
num_bins = get_histogram_bins(y_df=residuals)
ax.hist(residuals, bins=num_bins, color='b', edgecolor='k')
# normal text stuff
ax.set_xlabel('Value of '+label, fontsize=16)
ax.set_ylabel('Number of occurences', fontsize=16)
# make y axis ints, because it is discrete
#ax.yaxis.set_major_locator(MaxNLocator(integer=True))
plot_stats(fig, stats, x_align=x_align, y_align=0.90)
plot_stats(fig, pd.DataFrame(residuals).describe().to_dict()[0], x_align=x_align, y_align=0.60)
fig.savefig(savepath, dpi=DPI, bbox_inches='tight')
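# Illustrative usage sketch (synthetic data): residuals are computed internally
# as y_true - y_pred and summary statistics are written as CSV next to the
# figure, so the output directory is assumed to already exist.
def _example_residuals_histogram(outdir='example_plots'):
    y_true = np.array([1.0, 2.0, 3.0, 4.0])
    y_pred = np.array([1.1, 1.8, 3.4, 3.9])
    stats = {'mean_absolute_error': float(np.mean(np.abs(y_true - y_pred)))}
    plot_residuals_histogram(y_true, y_pred,
                             join(outdir, 'residuals_histogram.png'),
                             stats, label='target')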
@ipynb_maker
def plot_target_histogram(y_df, savepath, title='target histogram', label='target values'):
"""
Method to plot the histogram of true y values
Args:
y_df: (pandas dataframe), dataframe of true y data values
savepath: (str), path to save the plotted precision-recall curve
title: (str), title of residuals histogram
label: (str), label used for axis labeling
Returns:
None
"""
# make fig and ax, use x_align when placing text so things don't overlap
x_align = 0.70
fig, ax = make_fig_ax(aspect_ratio=0.5, x_align=x_align)
#ax.set_title(title)
#Get num_bins using smarter method
num_bins = get_histogram_bins(y_df=y_df)
# do the actual plotting
try:
ax.hist(y_df, bins=num_bins, color='b', edgecolor='k')#, histtype='stepfilled')
except:
        print('Could not plot target histogram')
return
# normal text stuff
ax.set_xlabel('Value of '+label, fontsize=16)
ax.set_ylabel('Number of occurences', fontsize=16)
# make y axis ints, because it is discrete
#ax.yaxis.set_major_locator(MaxNLocator(integer=True))
plot_stats(fig, dict(y_df.describe()), x_align=x_align, y_align=0.90, fontsize=14)
# Save input data stats to csv
savepath_parse = savepath.split('target_histogram.png')[0]
    y_df.describe().to_csv(savepath_parse + '/input_data_statistics.csv')
fig.savefig(savepath, dpi=DPI, bbox_inches='tight')
@ipynb_maker
def plot_predicted_vs_true(train_quad, test_quad, outdir, label):
"""
Method to create a parity plot (predicted vs. true values)
Args:
train_quad: (tuple), tuple containing 4 numpy arrays: true training y data, predicted training y data,
training metric data, and groups used in training
test_quad: (tuple), tuple containing 4 numpy arrays: true test y data, predicted test y data,
testing metric data, and groups used in testing
outdir: (str), path to save plots to
label: (str), label used for axis labeling
Returns:
None
"""
filenames = list()
y_train_true, y_train_pred, train_metrics, train_groups = train_quad
y_test_true, y_test_pred, test_metrics, test_groups = test_quad
# make diagonal line from absolute min to absolute max of any data point
# using round because Ryan did - but won't that ruin small numbers??? TODO this
#max1 = max(y_train_true.max(), y_train_pred.max(),
# y_test_true.max(), y_test_pred.max())
max1 = max(y_train_true.max(), y_test_true.max())
#min1 = min(y_train_true.min(), y_train_pred.min(),
# y_test_true.min(), y_test_pred.min())
min1 = min(y_train_true.min(), y_test_true.min())
max1 = round(float(max1), rounder(max1-min1))
min1 = round(float(min1), rounder(max1-min1))
for y_true, y_pred, stats, groups, title_addon in \
(train_quad+('train',), test_quad+('test',)):
# make fig and ax, use x_align when placing text so things don't overlap
x_align=0.64
fig, ax = make_fig_ax(x_align=x_align)
# set tick labels
# notice that we use the same max and min for all three. Don't
# calculate those inside the loop, because all the should be on the same scale and axis
_set_tick_labels(ax, max1, min1)
# plot diagonal line
ax.plot([min1, max1], [min1, max1], 'k--', lw=2, zorder=1)
# do the actual plotting
if groups is None:
ax.scatter(y_true, y_pred, color='blue', edgecolors='black', s=100, zorder=2, alpha=0.7)
else:
handles = dict()
unique_groups = np.unique(np.concatenate((train_groups, test_groups), axis=0))
unique_groups_train = np.unique(train_groups)
unique_groups_test = np.unique(test_groups)
#logger.debug(' '*12 + 'unique groups: ' +str(list(unique_groups)))
colors = ['blue', 'red', 'green', 'purple', 'orange', 'black']
markers = ['o', 'v', '^', 's', 'p', 'h', 'D', '*', 'X', '<', '>', 'P']
colorcount = markercount = 0
for groupcount, group in enumerate(unique_groups):
mask = groups == group
#logger.debug(' '*12 + f'{group} group_percent = {np.count_nonzero(mask) / len(groups)}')
handles[group] = ax.scatter(y_true[mask], y_pred[mask], label=group, color=colors[colorcount],
marker=markers[markercount], s=100, alpha=0.7)
colorcount += 1
if colorcount % len(colors) == 0:
markercount += 1
colorcount = 0
if title_addon == 'train':
to_delete = [k for k in handles.keys() if k not in unique_groups_train]
for k in to_delete:
del handles[k]
elif title_addon == 'test':
to_delete = [k for k in handles.keys() if k not in unique_groups_test]
for k in to_delete:
del handles[k]
ax.legend(handles.values(), handles.keys(), loc='lower right', fontsize=12)
# set axis labels
ax.set_xlabel('True '+label, fontsize=16)
ax.set_ylabel('Predicted '+label, fontsize=16)
plot_stats(fig, stats, x_align=x_align, y_align=0.90)
filename = 'predicted_vs_true_'+ title_addon + '.png'
filenames.append(filename)
fig.savefig(join(outdir, filename), dpi=DPI, bbox_inches='tight')
df = pd.DataFrame({'y_pred': y_pred, 'y_true': y_true})
df.to_csv(join(outdir, 'predicted_vs_true_' + title_addon + '.csv'))
return filenames
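# Illustrative usage sketch (synthetic data): each quad is
# (y_true, y_pred, metrics_dict, groups); pass None for groups to skip the
# per-group legend. The outdir is assumed to already exist.
def _example_predicted_vs_true(outdir='example_plots'):
    y_train = np.array([1.0, 2.0, 3.0])
    y_train_hat = np.array([1.2, 1.9, 3.1])
    y_test = np.array([1.5, 2.5])
    y_test_hat = np.array([1.4, 2.7])
    train_quad = (y_train, y_train_hat, {'R2': r2_score(y_train, y_train_hat)}, None)
    test_quad = (y_test, y_test_hat, {'R2': r2_score(y_test, y_test_hat)}, None)
    return plot_predicted_vs_true(train_quad, test_quad, outdir, label='property')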
def plot_scatter(x, y, savepath, groups=None, xlabel='x', label='target data'):
"""
Method to create a general scatter plot
Args:
x: (numpy array), array of x data
y: (numpy array), array of y data
savepath: (str), path to save plots to
groups: (list), list of group labels
xlabel: (str), label used for x-axis labeling
label: (str), label used for y-axis labeling
Returns:
None
"""
# Set image aspect ratio:
fig, ax = make_fig_ax()
# set tick labels
max_tick_x = max(x)
min_tick_x = min(x)
max_tick_y = max(y)
min_tick_y = min(y)
max_tick_x = round(float(max_tick_x), rounder(max_tick_x-min_tick_x))
min_tick_x = round(float(min_tick_x), rounder(max_tick_x-min_tick_x))
max_tick_y = round(float(max_tick_y), rounder(max_tick_y-min_tick_y))
min_tick_y = round(float(min_tick_y), rounder(max_tick_y-min_tick_y))
#divisor_y = get_divisor(max(y), min(y))
#max_tick_y = round_up(max(y), divisor_y)
#min_tick_y = round_down(min(y), divisor_y)
_set_tick_labels_different(ax, max_tick_x, min_tick_x, max_tick_y, min_tick_y)
if groups is None:
ax.scatter(x, y, c='b', edgecolor='darkblue', zorder=2, s=100, alpha=0.7)
else:
colors = ['blue', 'red', 'green', 'purple', 'orange', 'black']
markers = ['o', 'v', '^', 's', 'p', 'h', 'D', '*', 'X', '<', '>', 'P']
colorcount = markercount = 0
for groupcount, group in enumerate(np.unique(groups)):
mask = groups == group
ax.scatter(x[mask], y[mask], label=group, color=colors[colorcount], marker=markers[markercount], s=100, alpha=0.7)
ax.legend(loc='lower right', fontsize=12)
colorcount += 1
if colorcount % len(colors) == 0:
markercount += 1
colorcount = 0
ax.set_xlabel(xlabel, fontsize=16)
ax.set_ylabel('Value of '+label, fontsize=16)
#ax.set_xticklabels(rotation=45)
fig.savefig(savepath, dpi=DPI, bbox_inches='tight')
def plot_keras_history(model_history, savepath, plot_type):
# Set image aspect ratio:
fig, ax = make_fig_ax()
keys = model_history.history.keys()
for k in keys:
if 'loss' not in k and 'val' not in k:
metric = k
accuracy = model_history.history[str(metric)]
loss = model_history.history['loss']
if plot_type == 'accuracy':
ax.plot(accuracy, label='training '+str(metric))
ax.set_ylabel(str(metric)+' (Accuracy)', fontsize=16)
try:
validation_accuracy = model_history.history['val_'+str(metric)]
ax.plot(validation_accuracy, label='validation '+str(metric))
except:
pass
if plot_type == 'loss':
ax.plot(loss, label='training loss')
ax.set_ylabel(str(metric)+' (Loss)', fontsize=16)
try:
validation_loss = model_history.history['val_loss']
ax.plot(validation_loss, label='validation loss')
except:
pass
ax.legend(loc='upper right', fontsize=12)
#_set_tick_labels_different(ax, max_tick_x, min_tick_x, max_tick_y, min_tick_y)
ax.set_xlabel('Epochs', fontsize=16)
fig.savefig(savepath, dpi=DPI, bbox_inches='tight')
return
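# Illustrative usage sketch: `model_history` is the History object returned by
# keras Model.fit(); only its .history dict is read, so a minimal stand-in is
# enough to demonstrate the call.
def _example_keras_history(savepath='keras_accuracy_example.png'):
    class _FakeHistory:
        history = {'accuracy': [0.60, 0.72, 0.81],
                   'val_accuracy': [0.55, 0.66, 0.70],
                   'loss': [0.90, 0.58, 0.41]}
    plot_keras_history(_FakeHistory(), savepath, plot_type='accuracy')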
@ipynb_maker
def plot_best_worst_split(y_true, best_run, worst_run, savepath,
title='Best Worst Overlay', label='target_value'):
"""
Method to create a parity plot (predicted vs. true values) of just the best scoring and worst scoring CV splits
Args:
y_true: (numpy array), array of true y data
best_run: (dict), the best scoring split_result from mastml_driver
worst_run: (dict), the worst scoring split_result from mastml_driver
savepath: (str), path to save plots to
title: (str), title of the best_worst_split plot
label: (str), label used for axis labeling
Returns:
None
"""
# make fig and ax, use x_align when placing text so things don't overlap
x_align = 0.64
fig, ax = make_fig_ax(x_align=x_align)
maxx = max(y_true) # TODO is round the right thing here?
minn = min(y_true)
maxx = round(float(maxx), rounder(maxx-minn))
minn = round(float(minn), rounder(maxx-minn))
ax.plot([minn, maxx], [minn, maxx], 'k--', lw=2, zorder=1)
# set tick labels
_set_tick_labels(ax, maxx, minn)
# do the actual plotting
ax.scatter(best_run['y_test_true'], best_run['y_test_pred'], c='red',
alpha=0.7, label='best test', edgecolor='darkred', zorder=2, s=100)
ax.scatter(worst_run['y_test_true'], worst_run['y_test_pred'], c='blue',
alpha=0.7, label='worst test', edgecolor='darkblue', zorder=3, s=60)
ax.legend(loc='lower right', fontsize=12)
# set axis labels
ax.set_xlabel('True '+label, fontsize=16)
ax.set_ylabel('Predicted '+label, fontsize=16)
#font_dict = {'size' : 10, 'family' : 'sans-serif'}
# Duplicate the stats dicts with an additional label
best_stats = OrderedDict([('Best Run', None)])
best_stats.update(best_run['test_metrics'])
    worst_stats = OrderedDict([('Worst Run', None)])
worst_stats.update(worst_run['test_metrics'])
plot_stats(fig, best_stats, x_align=x_align, y_align=0.90)
plot_stats(fig, worst_stats, x_align=x_align, y_align=0.60)
fig.savefig(savepath + '.png', dpi=DPI, bbox_inches='tight')
df_best = pd.DataFrame({'best run pred': best_run['y_test_pred'], 'best run true': best_run['y_test_true']})
df_worst = pd.DataFrame({'worst run pred': worst_run['y_test_pred'], 'worst run true': worst_run['y_test_true']})
df_best.to_csv(savepath + '_best.csv')
df_worst.to_csv(savepath + '_worst.csv')
@ipynb_maker
def plot_best_worst_per_point(y_true, y_pred_list, savepath, metrics_dict,
avg_stats, title='best worst per point', label='target_value'):
"""
Method to create a parity plot (predicted vs. true values) of the set of best and worst CV scores for each
individual data point.
Args:
y_true: (numpy array), array of true y data
y_pred_list: (list), list of numpy arrays containing predicted y data for each CV split
savepath: (str), path to save plots to
metrics_dict: (dict), dict of scikit-learn metric objects to calculate score of predicted vs. true values
avg_stats: (dict), dict of calculated average metrics over all CV splits
title: (str), title of the best_worst_per_point plot
label: (str), label used for axis labeling
Returns:
None
"""
worsts = []
bests = []
new_y_true = []
for yt, y_pred in zip(y_true, y_pred_list):
        if len(y_pred) == 0 or np.any(np.isnan(y_pred)) or np.isnan(yt):
continue
worsts.append(max(y_pred, key=lambda yp: abs(yp-yt)))
bests.append( min(y_pred, key=lambda yp: abs(yp-yt)))
new_y_true.append(yt)
worst_stats = OrderedDict([('Worst combined:', None)])
best_stats = OrderedDict([('Best combined:', None)])
for name, (_, func) in metrics_dict.items():
worst_stats[name] = func(new_y_true, worsts)
best_stats[name] = func(new_y_true, bests)
# make fig and ax, use x_align when placing text so things don't overlap
x_align = 15.5/24 #mmm yum
fig, ax = make_fig_ax(x_align=x_align)
# gather max and min
#all_vals = [val for val in worsts+bests if val is not None]
max1 = max(y_true)
min1 = min(y_true)
    # draw dashed diagonal parity line
ax.plot([min1, max1], [min1, max1], 'k--', lw=2, zorder=1)
# set axis labels
ax.set_xlabel('True '+label, fontsize=16)
ax.set_ylabel('Predicted '+label, fontsize=16)
# set tick labels
#maxx = max((max(bests), max(worsts), max(new_y_true)))
#minn = min((min(bests), min(worsts), min(new_y_true)))
#maxx, minn = recursive_max_and_min([bests, worsts, new_y_true])
maxx = round(float(max1), rounder(max1-min1))
minn = round(float(min1), rounder(max1-min1))
_set_tick_labels(ax, maxx, minn)
ax.scatter(new_y_true, bests, c='red', alpha=0.7, label='best test',
edgecolor='darkred', zorder=2, s=100)
ax.scatter(new_y_true, worsts, c='blue', alpha=0.7, label='worst test',
edgecolor='darkblue', zorder=3, s=60)
ax.legend(loc='lower right', fontsize=12)
plot_stats(fig, avg_stats, x_align=x_align, y_align=0.51, fontsize=10)
plot_stats(fig, worst_stats, x_align=x_align, y_align=0.73, fontsize=10)
plot_stats(fig, best_stats, x_align=x_align, y_align=0.95, fontsize=10)
fig.savefig(savepath + '.png', dpi=DPI, bbox_inches='tight')
df = pd.DataFrame({'y true': new_y_true,
'best per point': bests,
'worst per point': worsts})
df.to_csv(savepath + '.csv')
@ipynb_maker
def plot_predicted_vs_true_bars(y_true, y_pred_list, avg_stats,
savepath, title='best worst with bars', label='target_value', groups=None):
"""
Method to calculate parity plot (predicted vs. true) of average predictions, averaged over all CV splits, with error
bars on each point corresponding to the standard deviation of the predicted values over all CV splits.
Args:
y_true: (numpy array), array of true y data
y_pred_list: (list), list of numpy arrays containing predicted y data for each CV split
avg_stats: (dict), dict of calculated average metrics over all CV splits
savepath: (str), path to save plots to
title: (str), title of the best_worst_per_point plot
label: (str), label used for axis labeling
Returns:
None
"""
means = [nice_mean(y_pred) for y_pred in y_pred_list]
standard_error_means = [nice_std(y_pred)/np.sqrt(len(y_pred))
for y_pred in y_pred_list]
standard_errors = [nice_std(y_pred) for y_pred in y_pred_list]
# make fig and ax, use x_align when placing text so things don't overlap
x_align = 0.64
fig, ax = make_fig_ax(x_align=x_align)
# gather max and min
max1 = max(np.nanmax(y_true), np.nanmax(means))
min1 = min(np.nanmin(y_true), np.nanmin(means))
    # draw dashed diagonal parity line
ax.plot([min1, max1], [min1, max1], 'k--', lw=2, zorder=1)
# set axis labels
ax.set_xlabel('True '+label, fontsize=16)
ax.set_ylabel('Predicted '+label, fontsize=16)
# set tick labels
#maxx, minn = recursive_max_and_min([means, y_true])
maxx = max(y_true)
minn = min(y_true)
maxx = round(float(maxx), rounder(maxx-minn))
minn = round(float(minn), rounder(maxx-minn))
#print(maxx, minn, rounder(maxx - minn))
_set_tick_labels(ax, maxx, minn)
if groups is None:
ax.errorbar(y_true, means, yerr=standard_errors, fmt='o', markerfacecolor='blue', markeredgecolor='black', markersize=10,
alpha=0.7, capsize=3)
else:
colors = ['blue', 'red', 'green', 'purple', 'orange', 'black']
markers = ['o', 'v', '^', 's', 'p', 'h', 'D', '*', 'X', '<', '>', 'P']
colorcount = markercount = 0
handles = dict()
unique_groups = np.unique(groups)
for groupcount, group in enumerate(unique_groups):
mask = groups == group
# logger.debug(' '*12 + f'{group} group_percent = {np.count_nonzero(mask) / len(groups)}')
handles[group] = ax.errorbar(y_true[mask], np.array(means)[mask], yerr=np.array(standard_errors)[mask],
marker=markers[markercount], markerfacecolor=colors[colorcount],
markeredgecolor=colors[colorcount], ecolor=colors[colorcount],
markersize=10, alpha=0.7, capsize=3, fmt='o')
colorcount += 1
if colorcount % len(colors) == 0:
markercount += 1
colorcount = 0
ax.legend(handles.values(), handles.keys(), loc='lower right', fontsize=10)
plot_stats(fig, avg_stats, x_align=x_align, y_align=0.90)
fig.savefig(savepath + '.png', dpi=DPI, bbox_inches='tight')
df = pd.DataFrame({'y true': y_true,
'average predicted values': means,
'error bar values': standard_errors})
df.to_csv(savepath + '.csv')
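# Illustrative usage sketch (synthetic data): y_pred_list holds, for each true
# value, its predictions across all CV splits; the error bars are the standard
# deviations of those per-point predictions. The avg_stats value is a placeholder.
def _example_predicted_vs_true_bars(savepath='predicted_vs_true_bars_example'):
    y_true = np.array([1.0, 2.0, 3.0])
    y_pred_list = [np.array([1.1, 0.9]), np.array([2.2, 1.8]), np.array([3.3, 2.9])]
    avg_stats = {'root_mean_squared_error': 0.2}  # placeholder value
    plot_predicted_vs_true_bars(y_true, y_pred_list, avg_stats, savepath)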
@ipynb_maker
def plot_metric_vs_group(metric, groups, stats, avg_stats, savepath):
"""
Method to plot the value of a particular calculated metric (e.g. RMSE, R^2, etc) for each data group
Args:
metric: (str), name of a calculation metric
groups: (numpy array), array of group names
stats: (dict), dict of training or testing statistics for a particular run
avg_stats: (dict), dict of calculated average metrics over all CV splits
savepath: (str), path to save plots to
Returns:
None
"""
# make fig and ax, use x_align when placing text so things don't overlap
x_align = 0.64
fig, ax = make_fig_ax(x_align=x_align)
# do the actual plotting
ax.scatter(groups, stats, c='blue', alpha=0.7, edgecolor='darkblue', zorder=2, s=100)
# set axis labels
ax.set_xlabel('Group', fontsize=16)
ax.set_ylabel(metric, fontsize=16)
ax.set_xticklabels(labels=groups, fontsize=14)
plot_stats(fig, avg_stats, x_align=x_align, y_align=0.90)
# Save data stats to csv
savepath_parse = savepath.split(str(metric)+'_vs_group.png')[0]
| pd.DataFrame(groups, stats) | pandas.DataFrame |
import streamlit as st
import numpy as np
import pandas as pd
import altair as alt
import math
OVERVIEW = "Overview"
POPU_DIST = "Population Age Distribution"
VAR_RELATIONSHIP_PER_COUNTRY = "Health & Economy Interaction, per Country"
ONE_VAR_ACROSS_REGION = "Health / Economy over the World"
SINGLE_FACTOR_OVER_TIME = 'Life Expectancies & Other Indicators'
POINT2_PLACEHOLDER = 'Health & Economy Interaction, per Year'
# locations of data
HEALTH_URL = "https://raw.githubusercontent.com/CMU-IDS-2020/a3-05839_a3/master/data/health.csv"
OTHER_DATA_URL = "https://raw.githubusercontent.com/CMU-IDS-2020/a3-05839_a3/master/data/merged_data_country_only_with_id_lat_lon.csv"
MERGED_URL = "https://raw.githubusercontent.com/CMU-IDS-2020/a3-05839_a3/master/data/merged_data_country_only.csv"
WORLD_MAP_URL = "https://raw.githubusercontent.com/vega/vega-datasets/master/data/world-110m.json"
# locations of markdowns
def main():
# Add a selector for the app mode on the sidebar.
st.sidebar.title("Navigation")
vis_topic = st.sidebar.radio("",
(str(OVERVIEW), str(POPU_DIST), str(SINGLE_FACTOR_OVER_TIME) , str(POINT2_PLACEHOLDER), str(VAR_RELATIONSHIP_PER_COUNTRY), str(ONE_VAR_ACROSS_REGION)))
if vis_topic == OVERVIEW:
# Render main readme, placeholder
st.title(OVERVIEW)
st.markdown('''
National economic status and population health are crucial indicators of a country's development.
Our interactive application aims to tell stories about worldwide and historical patterns in both,
using comprehensive indicator data from the World Bank.
With it, you can explore the trend of an individual economic or health indicator, the relationship between an economic indicator and a health indicator, and a country's population age distribution.
The visualizations, each of which comes with detailed instructions, let you explore these trends both temporally and spatially.
We hope they give you insight into the economy and population health of countries over the past few decades,
and into any correlations between economic and health indicators.
''')
elif vis_topic == POPU_DIST:
st.title(POPU_DIST)
run_popu_dist()
elif vis_topic == VAR_RELATIONSHIP_PER_COUNTRY:
st.write(VAR_RELATIONSHIP_PER_COUNTRY)
run_var_relationship_per_country()
elif vis_topic == ONE_VAR_ACROSS_REGION:
st.write(ONE_VAR_ACROSS_REGION)
run_one_var_across_region()
elif vis_topic == SINGLE_FACTOR_OVER_TIME:
st.title(SINGLE_FACTOR_OVER_TIME)
run_trend_over_time()
elif vis_topic == POINT2_PLACEHOLDER:
st.title(POINT2_PLACEHOLDER)
run_relationship_per_year_all_countries()
@st.cache
def load_data(url):
data = pd.read_csv(url, header=0, skipinitialspace=True)
data = data[data['Year'] <= 2017]
countries = data['Country Name'].unique()
return data, countries
@st.cache
def group_by_country(df):
return df.groupby(['Country Name'])
@st.cache
def load_health_data():
df, countries = load_data(HEALTH_URL)
return df, countries
@st.cache
def load_other_data():
df, countries = load_data(OTHER_DATA_URL)
df['id'] = df['id'].astype(str)
df['id'] = df['id'].str.zfill(3)
econ_indicators = df.columns[[3, 6, 8]]
health_indicators = df.columns[[4, 5, 7]]
return df, countries, econ_indicators, health_indicators
@st.cache
def load_merge_data():
df, countries = load_data(MERGED_URL)
return df, countries
@st.cache
def data_group_by_country(df):
grouped = dict(tuple(group_by_country(df)))
return grouped
@st.cache
def keep_only_selected_countries(df, selected_countries):
# takes in a dataframe and list of country names
# this function will return a dataframe with only data from the specified countries
sub_df = df[df['Country Name'].isin(selected_countries)]
return sub_df
@st.cache
def dropna_by_feature(df, features):
# takes in a dataframe and list of features to checkon,
# this function will drop rows which has np.nan in any of the features specifed
return df.dropna(how='any', subset=features)
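# Illustrative sketch (the country and column names are hypothetical): the two
# helpers above are typically chained so charts never receive rows with
# missing values.
def _example_filter_pipeline(df):
    sub = keep_only_selected_countries(df, ['United States', 'China'])
    return dropna_by_feature(sub, ['Life expectancy at birth, total (years)'])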
def run_popu_dist():
@st.cache
def get_total_ymax(country_df, health_df):
# fixing y range for better visualizing the change
maxy = 0.0
for col in country_df.columns:
if '% of' in col and 'Population ages' in col:
curr_max = health_df[col].max()
if curr_max > maxy:
maxy = curr_max
return maxy
@st.cache
def get_only_tens(country_df):
years = list(np.sort(country_df['Year'].unique()))
# get selection interval
interval = math.floor(len(years) / 5)
if interval == 0:
only_ten = country_df
else:
temp = []
idx = len(years) - 1
count = 0
while idx > 0 and count < 5:
temp.append(years[idx])
idx -= interval
count += 1
only_ten = country_df[country_df['Year'].isin(temp)]
return only_ten
@st.cache
def get_only_ten_general(country_df, age_ranges):
only_ten = get_only_tens(country_df)
values2 = []
years2 = []
indexes = []
ages = []
for _, data_row in only_ten.iterrows():
for i in range(len(age_ranges)):
values2.append(data_row['Population ages {} (% of total population)'.format(str(age_ranges[i]))] * data_row['Population, total'])
years2.append(data_row['Year'])
indexes.append(i)
ages.append(age_ranges[i])
overall_data = | pd.DataFrame({'Idx': indexes, 'Population Ages': ages, 'Year': years2, 'Population': values2}) | pandas.DataFrame |
import nose
import os
import numpy as np
import pandas as pd
from pandas import (merge_asof, read_csv,
to_datetime, Timedelta)
from pandas.tools.merge import MergeError
from pandas.util import testing as tm
from pandas.util.testing import assert_frame_equal
class TestAsOfMerge(tm.TestCase):
_multiprocess_can_split_ = True
def read_data(self, name, dedupe=False):
path = os.path.join(tm.get_data_path(), name)
x = read_csv(path)
if dedupe:
x = (x.drop_duplicates(['time', 'ticker'], keep='last')
.reset_index(drop=True)
)
x.time = to_datetime(x.time)
return x
def setUp(self):
self.trades = self.read_data('trades.csv')
self.quotes = self.read_data('quotes.csv', dedupe=True)
self.asof = self.read_data('asof.csv')
self.tolerance = self.read_data('tolerance.csv')
self.allow_exact_matches = self.read_data('allow_exact_matches.csv')
self.allow_exact_matches_and_tolerance = self.read_data(
'allow_exact_matches_and_tolerance.csv')
def test_examples1(self):
""" doc-string examples """
left = pd.DataFrame({'a': [1, 5, 10],
'left_val': ['a', 'b', 'c']})
right = pd.DataFrame({'a': [1, 2, 3, 6, 7],
'right_val': [1, 2, 3, 6, 7]})
pd.merge_asof(left, right, on='a')
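        # For reference, the default (backward) as-of join above matches each
        # left 'a' with the last right 'a' that is <= it, giving:
        #
        #     a left_val  right_val
        #  0  1        a          1
        #  1  5        b          3
        #  2 10        c          7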
def test_examples2(self):
""" doc-string examples """
trades = pd.DataFrame({
'time': pd.to_datetime(['20160525 13:30:00.023',
'20160525 13:30:00.038',
'20160525 13:30:00.048',
'20160525 13:30:00.048',
'20160525 13:30:00.048']),
'ticker': ['MSFT', 'MSFT',
'GOOG', 'GOOG', 'AAPL'],
'price': [51.95, 51.95,
720.77, 720.92, 98.00],
'quantity': [75, 155,
100, 100, 100]},
columns=['time', 'ticker', 'price', 'quantity'])
quotes = pd.DataFrame({
'time': pd.to_datetime(['20160525 13:30:00.023',
'20160525 13:30:00.023',
'20160525 13:30:00.030',
'20160525 13:30:00.041',
'20160525 13:30:00.048',
'20160525 13:30:00.049',
'20160525 13:30:00.072',
'20160525 13:30:00.075']),
'ticker': ['GOOG', 'MSFT', 'MSFT',
'MSFT', 'GOOG', 'AAPL', 'GOOG',
'MSFT'],
'bid': [720.50, 51.95, 51.97, 51.99,
720.50, 97.99, 720.50, 52.01],
'ask': [720.93, 51.96, 51.98, 52.00,
720.93, 98.01, 720.88, 52.03]},
columns=['time', 'ticker', 'bid', 'ask'])
pd.merge_asof(trades, quotes,
on='time',
by='ticker')
pd.merge_asof(trades, quotes,
on='time',
by='ticker',
tolerance=pd.Timedelta('2ms'))
pd.merge_asof(trades, quotes,
on='time',
by='ticker',
tolerance=pd.Timedelta('10ms'),
allow_exact_matches=False)
def test_basic(self):
expected = self.asof
trades = self.trades
quotes = self.quotes
result = merge_asof(trades, quotes,
on='time',
by='ticker')
assert_frame_equal(result, expected)
def test_basic_categorical(self):
expected = self.asof
trades = self.trades.copy()
trades.ticker = trades.ticker.astype('category')
quotes = self.quotes.copy()
quotes.ticker = quotes.ticker.astype('category')
result = merge_asof(trades, quotes,
on='time',
by='ticker')
assert_frame_equal(result, expected)
def test_missing_right_by(self):
expected = self.asof
trades = self.trades
quotes = self.quotes
q = quotes[quotes.ticker != 'MSFT']
result = merge_asof(trades, q,
on='time',
by='ticker')
expected.loc[expected.ticker == 'MSFT', ['bid', 'ask']] = np.nan
assert_frame_equal(result, expected)
def test_basic2(self):
expected = self.read_data('asof2.csv')
trades = self.read_data('trades2.csv')
quotes = self.read_data('quotes2.csv', dedupe=True)
result = merge_asof(trades, quotes,
on='time',
by='ticker')
assert_frame_equal(result, expected)
def test_basic_no_by(self):
f = lambda x: x[x.ticker == 'MSFT'].drop('ticker', axis=1) \
.reset_index(drop=True)
# just use a single ticker
expected = f(self.asof)
trades = f(self.trades)
quotes = f(self.quotes)
result = merge_asof(trades, quotes,
on='time')
assert_frame_equal(result, expected)
def test_valid_join_keys(self):
trades = self.trades
quotes = self.quotes
with self.assertRaises(MergeError):
merge_asof(trades, quotes,
left_on='time',
right_on='bid',
by='ticker')
with self.assertRaises(MergeError):
merge_asof(trades, quotes,
on=['time', 'ticker'],
by='ticker')
with self.assertRaises(MergeError):
merge_asof(trades, quotes,
by='ticker')
def test_with_duplicates(self):
q = pd.concat([self.quotes, self.quotes]).sort_values(
['time', 'ticker']).reset_index(drop=True)
result = merge_asof(self.trades, q,
on='time',
by='ticker')
expected = self.read_data('asof.csv')
assert_frame_equal(result, expected)
result = merge_asof(self.trades, q,
on='time',
by='ticker',
check_duplicates=False)
expected = self.read_data('asof.csv')
expected = pd.concat([expected, expected]).sort_values(
['time', 'ticker']).reset_index(drop=True)
# the results are not ordered in a meaningful way
# nor are the exact matches duplicated, so comparisons
# are pretty tricky here, however the uniques are the same
def aligner(x, ticker):
return (x[x.ticker == ticker]
.sort_values(['time', 'ticker', 'quantity', 'price',
'marketCenter', 'bid', 'ask'])
.drop_duplicates(keep='last')
.reset_index(drop=True)
)
for ticker in expected.ticker.unique():
r = aligner(result, ticker)
e = aligner(expected, ticker)
assert_frame_equal(r, e)
def test_with_duplicates_no_on(self):
df1 = pd.DataFrame({'key': [1, 1, 3],
'left_val': [1, 2, 3]})
df2 = pd.DataFrame({'key': [1, 3, 3],
'right_val': [1, 2, 3]})
result = merge_asof(df1, df2, on='key', check_duplicates=False)
expected = pd.DataFrame({'key': [1, 1, 3, 3],
'left_val': [1, 2, 3, 3],
'right_val': [1, 1, 2, 3]})
assert_frame_equal(result, expected)
df1 = pd.DataFrame({'key': [1, 1, 3],
'left_val': [1, 2, 3]})
df2 = pd.DataFrame({'key': [1, 2, 2],
'right_val': [1, 2, 3]})
result = merge_asof(df1, df2, on='key')
expected = pd.DataFrame({'key': [1, 1, 3],
'left_val': [1, 2, 3],
'right_val': [1, 1, 3]})
assert_frame_equal(result, expected)
def test_valid_allow_exact_matches(self):
trades = self.trades
quotes = self.quotes
with self.assertRaises(MergeError):
merge_asof(trades, quotes,
on='time',
by='ticker',
allow_exact_matches='foo')
def test_valid_tolerance(self):
trades = self.trades
quotes = self.quotes
# dti
merge_asof(trades, quotes,
on='time',
by='ticker',
tolerance=Timedelta('1s'))
# integer
merge_asof(trades.reset_index(), quotes.reset_index(),
on='index',
by='ticker',
tolerance=1)
# incompat
with self.assertRaises(MergeError):
merge_asof(trades, quotes,
on='time',
by='ticker',
tolerance=1)
# invalid
with self.assertRaises(MergeError):
merge_asof(trades.reset_index(), quotes.reset_index(),
on='index',
by='ticker',
tolerance=1.0)
# invalid negative
with self.assertRaises(MergeError):
merge_asof(trades, quotes,
on='time',
by='ticker',
tolerance=-Timedelta('1s'))
with self.assertRaises(MergeError):
merge_asof(trades.reset_index(), quotes.reset_index(),
on='index',
by='ticker',
tolerance=-1)
def test_non_sorted(self):
trades = self.trades.sort_values('time', ascending=False)
quotes = self.quotes.sort_values('time', ascending=False)
# we require that we are already sorted on time & quotes
self.assertFalse(trades.time.is_monotonic)
self.assertFalse(quotes.time.is_monotonic)
with self.assertRaises(ValueError):
merge_asof(trades, quotes,
on='time',
by='ticker')
trades = self.trades.sort_values('time')
self.assertTrue(trades.time.is_monotonic)
self.assertFalse(quotes.time.is_monotonic)
with self.assertRaises(ValueError):
merge_asof(trades, quotes,
on='time',
by='ticker')
quotes = self.quotes.sort_values('time')
self.assertTrue(trades.time.is_monotonic)
self.assertTrue(quotes.time.is_monotonic)
# ok, though has dupes
merge_asof(trades, self.quotes,
on='time',
by='ticker')
def test_tolerance(self):
trades = self.trades
quotes = self.quotes
result = merge_asof(trades, quotes,
on='time',
by='ticker',
tolerance=Timedelta('1day'))
expected = self.tolerance
assert_frame_equal(result, expected)
def test_allow_exact_matches(self):
result = merge_asof(self.trades, self.quotes,
on='time',
by='ticker',
allow_exact_matches=False)
expected = self.allow_exact_matches
assert_frame_equal(result, expected)
def test_allow_exact_matches_and_tolerance(self):
result = merge_asof(self.trades, self.quotes,
on='time',
by='ticker',
tolerance=Timedelta('100ms'),
allow_exact_matches=False)
expected = self.allow_exact_matches_and_tolerance
assert_frame_equal(result, expected)
def test_allow_exact_matches_and_tolerance2(self):
# GH 13695
df1 = pd.DataFrame({
'time': pd.to_datetime(['2016-07-15 13:30:00.030']),
'username': ['bob']})
df2 = pd.DataFrame({
'time': pd.to_datetime(['2016-07-15 13:30:00.000',
'2016-07-15 13:30:00.030']),
'version': [1, 2]})
result = pd.merge_asof(df1, df2, on='time')
expected = pd.DataFrame({
            'time': pd.to_datetime(['2016-07-15 13:30:00.030']),
# standard libraries
import os
# third-party libraries
import pandas as pd
# local imports
from .. import count_data
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
class TestCsvToDf:
"""
Tests converting a csv with various headers into a processible DataFrame
"""
def test_timestamp(self):
"""
Check if a csv w/ a timestamp is properly converted to the desired DataFrame
"""
data = os.path.join(THIS_DIR, 'test_timestamp.csv')
element_id = 'tagID'
timestamp = 'timestamp'
lat = 'lat'
lon = 'lon'
test_df = count_data.csv_to_df(data, element_id=element_id, timestamp=timestamp, lat=lat, lon=lon)
assert pd.util.hash_pandas_object(test_df).sum() == -6761865716520410554
def test_timestamp_ba(self):
"""
Check if a csv w/ a timestamp and grouped counts is properly converted to the desired DataFrame
"""
data = os.path.join(THIS_DIR, 'test_timestamp_ba.csv')
element_id = 'tagID'
timestamp = 'timestamp'
boardings = 'boardings'
alightings = 'alightings'
lat = 'lat'
lon = 'lon'
test_df = count_data.csv_to_df(data, element_id=element_id, timestamp=timestamp,
boardings=boardings, alightings=alightings, lat=lat, lon=lon)
assert pd.util.hash_pandas_object(test_df).sum() == 7008548250528393651
def test_session(self):
"""
Check if a csv w/ session times is properly converted to the desired DataFrame
"""
data = os.path.join(THIS_DIR, 'test_session.csv')
element_id = 'MacPIN'
session_start = 'SessionStart_Epoch'
session_end = 'SessionEnd_Epoch'
lat = 'GPS_LAT'
lon = 'GPS_LONG'
test_df = count_data.csv_to_df(data, element_id=element_id, session_start=session_start, session_end=session_end, lat=lat, lon=lon)
assert pd.util.hash_pandas_object(test_df).sum() == 7098407329788286247
def test_session_ba(self):
"""
Check if a csv w/ session times and grouped counts is properly converted to the desired DataFrame
"""
data = os.path.join(THIS_DIR, 'test_session_ba.csv')
element_id = 'MacPIN'
session_start = 'SessionStart_Epoch'
session_end = 'SessionEnd_Epoch'
boardings = 'boardings'
alightings = 'alightings'
lat = 'GPS_LAT'
lon = 'GPS_LONG'
test_df = count_data.csv_to_df(data, element_id=element_id, session_start=session_start, session_end=session_end,
boardings=boardings, alightings=alightings, lat=lat, lon=lon)
assert pd.util.hash_pandas_object(test_df).sum() == 2589903708124850504
class TestStandardizeDatetime:
"""
Tests ensuring all times are datetime format
"""
def test_no_change_needed(self):
"""
Tests if all timestamps are already datetime and no change is needed
"""
test_times = ['2018-02-22 20:08:00', '2018-02-09 18:05:00', '2018-02-09 18:26:00']
test_df = pd.DataFrame(test_times, columns=['timestamp'])
test_df['timestamp'] = pd.to_datetime(test_df['timestamp'])
processed_df = count_data.standardize_datetime(test_df)
assert processed_df['timestamp'].dtype == 'datetime64[ns]'
def test_timestamp_epoch(self):
"""
Tests if timestamp is an epoch time
"""
test_times = ['1519330080', '1518199500', '1518200760']
test_df = pd.DataFrame(test_times, columns=['timestamp'])
processed_df = count_data.standardize_datetime(test_df)
assert processed_df['timestamp'].dtype == 'datetime64[ns]'
def test_session_epoch(self):
"""
Tests if session times are epoch times
"""
test_times = [['1519330080', '1518199500'], ['1518200760', '1519330080'], ['1518199500', '1518200760']]
test_df = pd.DataFrame(test_times, columns=['session_start', 'session_end'])
processed_df = count_data.standardize_datetime(test_df)
assert processed_df['session_start'].dtype == 'datetime64[ns]'
assert processed_df['session_end'].dtype == 'datetime64[ns]'
class TestStandardizeEpoch:
"""
Tests ensuring all times are unix epoch
"""
def test_no_change_needed(self):
"""
Tests if all timestamps are already epochs and no change is needed
"""
test_times = [1519330080, 1518199500, 1518200760]
        test_df = pd.DataFrame(test_times, columns=['timestamp'])
# -*- coding: utf-8 -*-
"""Add model years to an existing Scenario."""
# Sections of the code:
#
# I. Required python packages are imported
# II. Generic utilities for dataframe manipulation
# III. The main function, add_year()
# IV. Function add_year_set() for adding and modifying the sets
# V. Function add_year_par() for copying and modifying each parameter
# VI. Two utility functions, interpolate_1d() and interpolate_2d(), for
# calculating missing values
# %% I) Importing required packages
import numpy as np
import pandas as pd
# %% II) Utility functions for dataframe manipulation
def intpol(y1, y2, x1, x2, x):
"""Interpolate between (*x1*, *y1*) and (*x2*, *y2*) at *x*.
Parameters
----------
y1, y2 : float or pd.Series
x1, x2, x : int
"""
if x2 == x1 and y2 != y1:
print('>>> Warning <<<: No difference between x1 and x2,'
'returned empty!!!')
return []
elif x2 == x1 and y2 == y1:
return y1
else:
y = y1 + ((y2 - y1) / (x2 - x1)) * (x - x1)
return y
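# Quick worked example of the linear interpolation above (illustrative only,
# not executed): intpol(y1=10, y2=20, x1=2020, x2=2030, x=2025)
# = 10 + ((20 - 10) / (2030 - 2020)) * (2025 - 2020) = 15.0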
def slice_df(df, idx, level, locator, value):
"""Slice a MultiIndex DataFrame and set a value to a specific level.
Parameters
----------
df : pd.DataFrame
idx : list of indices
level: str
locator : list
value : int or str
"""
if locator:
df = df.reset_index().loc[df.reset_index()[level].isin(locator)].copy()
else:
df = df.reset_index().copy()
if value:
df[level] = value
return df.set_index(idx)
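# Illustrative call (values assumed for the example): for a MultiIndex frame
# `df2` indexed by `idx`, slice_df(df2, idx, 'year_vtg', [2010], 2015) keeps
# only the rows whose 'year_vtg' level is 2010 and relabels that level to 2015.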
def mask_df(df, index, count, value):
"""Create a mask for removing extra values from *df*."""
df.loc[index, df.columns > (df.loc[[index]].notnull().cumsum(
axis=1) == count).idxmax(axis=1).values[0]] = value
def unit_uniform(df):
"""Make units in *df* uniform."""
column = [x for x in df.columns if x in ['commodity', 'emission']]
if column:
com_list = set(df[column[0]])
for com in com_list:
df.loc[df[column[0]] == com, 'unit'] = df.loc[
df[column[0]] == com, 'unit'].mode()[0]
else:
df['unit'] = df['unit'].mode()[0]
return df
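# Illustrative behaviour (tiny assumed example): if a parameter has rows for
# commodity "coal" with units ["GWa", "GWa", "EJ"], unit_uniform() rewrites the
# unit of all "coal" rows to the modal value "GWa".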
# %% III) The main function
def add_year(sc_ref, sc_new, years_new, firstyear_new=None, lastyear_new=None,
macro=False, baseyear_macro=None, parameter='all', region='all',
rewrite=True, unit_check=True, extrapol_neg=None,
bound_extend=True):
"""Add years to *sc_ref* to produce *sc_new*.
:meth:`add_year` does the following:
1. calls :meth:`add_year_set` to add and modify required sets.
2. calls :meth:`add_year_par` to add new years and modifications to each
parameter if needed.
Parameters
-----------
sc_ref : ixmp.Scenario
Reference scenario.
sc_new : ixmp.Scenario
New scenario.
    years_new : list of int
New years to be added.
firstyear_new : int, optional
        New first model year for new scenario.
    lastyear_new : int, optional
        New last model year for new scenario.
macro : bool
Add new years to parameters of the MACRO model.
baseyear_macro : int
New base year for the MACRO model.
parameter: list of str or 'all'
        Parameters for adding new years.
    region: list of str or 'all'
        Regions for adding new years.
rewrite: bool
Permit rewriting a parameter in new scenario when adding new years.
    unit_check: bool
Harmonize the units for each commodity, if there is inconsistency
across model years.
extrapol_neg: float
When extrapolation produces negative values, replace with a multiple of
the value for the previous timestep.
bound_extend: bool
Duplicate data from the previous timestep when there is only one data
point for interpolation (e.g., permitting the extension of a bound to
2025, when there is only one value in 2020).
"""
# III.A) Adding sets and required modifications
years_new = sorted([x for x in years_new if str(x)
not in set(sc_ref.set('year'))])
add_year_set(sc_ref, sc_new, years_new, firstyear_new, lastyear_new,
baseyear_macro)
# -------------------------------------------------------------------------
# III.B) Adding parameters and calculating the missing values for the
    # additional years
if parameter in ('all', ['all']):
par_list = sorted(sc_ref.par_list())
elif isinstance(parameter, list):
par_list = parameter
elif isinstance(parameter, str):
par_list = [parameter]
else:
print('Parameters should be defined in a list of strings or as'
' a single string!')
if 'technical_lifetime' in par_list:
par_list.insert(0, par_list.pop(par_list.index('technical_lifetime')))
if region in ('all', ['all']):
reg_list = sc_ref.set('node').tolist()
elif isinstance(region, list):
reg_list = region
elif isinstance(region, str):
reg_list = [region]
else:
print('Regions should be defined in a list of strings or as'
' a single string!')
# List of parameters to be ignored (even not copied to the new
# scenario)
par_ignore = ['duration_period']
par_list = [x for x in par_list if x not in par_ignore]
if not macro:
par_macro = ['demand_MESSAGE', 'price_MESSAGE', 'cost_MESSAGE',
'gdp_calibrate', 'historical_gdp', 'MERtoPPP', 'kgdp',
'kpvs', 'depr', 'drate', 'esub', 'lotol', 'p_ref', 'lakl',
'prfconst', 'grow', 'aeei', 'aeei_factor', 'gdp_rate']
par_list = [x for x in par_list if x not in par_macro]
if not sc_new.set('cat_year', {'type_year': 'firstmodelyear'}).empty:
firstyear_new = sc_new.set('cat_year',
{'type_year': 'firstmodelyear'})['year']
else:
firstyear_new = min([int(x) for x in sc_new.set('year').tolist()])
if not sc_ref.set('cat_year', {'type_year': 'firstmodelyear'}).empty:
firstyear_ref = sc_ref.set('cat_year',
{'type_year': 'firstmodelyear'})['year']
else:
firstyear_ref = firstyear_new
for parname in par_list:
# For historical parameters extrapolation permitted (e.g., from
# 2010 to 2015)
if 'historical' in parname:
extrapol = True
yrs_new = [x for x in years_new if x < int(firstyear_new)]
elif int(firstyear_ref) > int(firstyear_new):
extrapol = True
yrs_new = years_new
else:
extrapol = False
yrs_new = years_new
if 'bound' in parname:
bound_ext = bound_extend
else:
bound_ext = True
year_list = [x for x in sc_ref.idx_sets(parname) if 'year' in x]
if len(year_list) == 2 or parname in ['land_output']:
# The loop over "node" is only for reducing the size of tables
for node in reg_list:
add_year_par(sc_ref, sc_new, yrs_new, parname, [node],
firstyear_new, extrapol, rewrite, unit_check,
extrapol_neg, bound_ext)
else:
add_year_par(sc_ref, sc_new, yrs_new, parname, reg_list,
firstyear_new, extrapol, rewrite, unit_check,
extrapol_neg, bound_ext)
sc_new.set_as_default()
print('> All required parameters were successfully '
'added to the new scenario.')
# %% Submodules needed for running the main function
# IV) Adding new years to sets
def add_year_set(sc_ref, sc_new, years_new, firstyear_new=None,
lastyear_new=None, baseyear_macro=None):
"""Add new years to sets.
    :meth:`add_year_set` adds additional years to an existing scenario by
    building the new scenario from scratch. After modification of the
    year-related sets, all other sets are copied from *sc_ref* to *sc_new*.
See :meth:`add_year` for parameter descriptions.
"""
# IV.A) Treatment of the additional years in the year-related sets
# A.1. Set - year
yrs_old = list(map(int, sc_ref.set('year')))
horizon_new = sorted(yrs_old + years_new)
sc_new.add_set('year', [str(yr) for yr in horizon_new])
# A.2. Set _ type_year
yr_typ = sc_ref.set('type_year').tolist()
sc_new.add_set('type_year', sorted(yr_typ + [str(yr) for yr in years_new]))
# A.3. Set _ cat_year
yr_cat = sc_ref.set('cat_year')
# A.4. Changing the first year if needed
if firstyear_new:
if not yr_cat.loc[yr_cat['type_year'] == 'firstmodelyear'].empty:
yr_cat.loc[yr_cat['type_year'] == 'firstmodelyear',
'year'] = firstyear_new
else:
yr_cat.loc[len(yr_cat.index)] = ['firstmodelyear', firstyear_new]
if lastyear_new:
if not yr_cat.loc[yr_cat['type_year'] == 'lastmodelyear'].empty:
yr_cat.loc[yr_cat['type_year'] == 'lastmodelyear',
'year'] = lastyear_new
else:
yr_cat.loc[len(yr_cat.index)] = ['lastmodelyear', lastyear_new]
# A.5. Changing the base year and initialization year of macro if a new
# year specified
if baseyear_macro:
if not yr_cat.loc[yr_cat['type_year'] == 'baseyear_macro',
'year'].empty:
yr_cat.loc[yr_cat['type_year'] == 'baseyear_macro',
'year'] = baseyear_macro
if not yr_cat.loc[yr_cat['type_year'] == 'initializeyear_macro',
'year'].empty:
yr_cat.loc[yr_cat['type_year'] == 'initializeyear_macro',
'year'] = baseyear_macro
yr_pair = []
for yr in years_new:
yr_pair.append([yr, yr])
yr_pair.append(['cumulative', yr])
yr_cat = yr_cat.append(pd.DataFrame(yr_pair,
columns=['type_year', 'year']),
ignore_index=True
).sort_values('year').reset_index(drop=True)
# A.6. Changing the cumulative years based on the new first model year
if 'firstmodelyear' in set(yr_cat['type_year']):
firstyear_new = int(yr_cat.loc[yr_cat['type_year'] == 'firstmodelyear',
'year'])
yr_cat = yr_cat.drop(yr_cat.loc[(yr_cat['type_year'] == 'cumulative'
) & (yr_cat['year'] < firstyear_new)
].index)
sc_new.add_set('cat_year', yr_cat)
# IV.B) Copying all other sets
set_list = [s for s in sc_ref.set_list() if 'year' not in s]
# Sets with one index set
index_list = [x for x in set_list if not isinstance(sc_ref.set(x),
pd.DataFrame)]
for set_name in index_list:
if set_name not in sc_new.set_list():
sc_new.init_set(set_name, idx_sets=None, idx_names=None)
sc_new.add_set(set_name, sc_ref.set(set_name).tolist())
# The rest of the sets
for set_name in [x for x in set_list if x not in index_list]:
new_set = [x for x in sc_ref.idx_sets(set_name
) if x not in sc_ref.set_list()]
if set_name not in sc_new.set_list() and not new_set:
sc_new.init_set(set_name,
idx_sets=sc_ref.idx_sets(set_name),
idx_names=sc_ref.idx_names(set_name))
sc_new.add_set(set_name, sc_ref.set(set_name))
sc_new.commit('sets added!')
print('> All the sets updated and added to the new scenario.')
# %% V) Adding new years to parameters
def add_year_par(sc_ref, sc_new, yrs_new, parname, reg_list, firstyear_new,
extrapolate=False, rewrite=True, unit_check=True,
extrapol_neg=None, bound_extend=True):
"""Add new years to parameters.
This function adds additional years to a parameter. The value of the
parameter for additional years is calculated mainly by interpolating and
extrapolating data from existing years.
See :meth:`add_year` for parameter descriptions.
"""
# V.A) Initialization and checks
par_list_new = sc_new.par_list()
idx_names = sc_ref.idx_names(parname)
horizon = sorted([int(x) for x in list(set(sc_ref.set('year')))])
node_col = [x for x in idx_names if x in ['node', 'node_loc', 'node_rel']]
year_list = [x for x in idx_names if x in ['year', 'year_vtg', 'year_act',
'year_rel']]
if parname not in par_list_new:
sc_new.check_out()
sc_new.init_par(parname, idx_sets=sc_ref.idx_sets(parname),
idx_names=sc_ref.idx_names(parname))
sc_new.commit('New parameter initiated!')
if node_col:
par_old = sc_ref.par(parname, {node_col[0]: reg_list})
par_new = sc_new.par(parname, {node_col[0]: reg_list})
sort_order = [node_col[0], 'technology',
'commodity', 'mode', 'emission'] + year_list
nodes = par_old[node_col[0]].unique().tolist()
else:
par_old = sc_ref.par(parname)
par_new = sc_new.par(parname)
sort_order = ['technology', 'commodity'] + year_list
nodes = ['N/A']
if not par_new.empty and not rewrite:
print('> Parameter "' + parname + '" already has data in new scenario'
' and left unchanged for node/s: {}.'.format(reg_list))
return
if par_old.empty:
print('> Parameter "' + parname + '" is empty in reference scenario'
' for node/s: {}!'.format(reg_list))
return
    # Sorting the data to make it ready for dataframe manipulation
sort_order = [x for x in sort_order if x in idx_names]
if sort_order:
par_old = par_old.sort_values(sort_order).reset_index(drop=True)
rem_idx = [x for x in par_old.columns if x not in sort_order]
par_old = par_old.reindex(columns=sort_order + rem_idx)
sc_new.check_out()
if not par_new.empty and rewrite:
print('> Parameter "' + parname + '" is being removed from new'
' scenario to be updated for node/s in {}...'.format(nodes))
sc_new.remove_par(parname, par_new)
# A uniform "unit" for values in different years
if 'unit' in par_old.columns and unit_check:
par_old = unit_uniform(par_old)
# ---------------------------------------------------------------------------
# V.B) Adding new years to a parameter based on time-related indexes
# V.B.1) Parameters with no time index
if len(year_list) == 0:
sc_new.add_par(parname, par_old)
sc_new.commit(parname)
        print('> Parameter "' + parname + '" just copied to new scenario '
              'since it has no time-related entries.')
# V.B.2) Parameters with one index related to time
elif len(year_list) == 1:
year_col = year_list[0]
df = par_old.copy()
df_y = interpolate_1d(df, yrs_new, horizon, year_col, 'value',
extrapolate, extrapol_neg, bound_extend)
sc_new.add_par(parname, df_y)
sc_new.commit(' ')
print('> Parameter "{}" copied and new years'
' added for node/s: "{}".'.format(parname, nodes))
# V.B.3) Parameters with two indexes related to time (such as 'input')
elif len(year_list) == 2:
year_col = 'year_act'
year_ref = [x for x in year_list if x != year_col][0]
def f(x, i):
return x[i + 1] - x[i] > x[i] - x[i - 1]
year_diff = [x for x in horizon[1:-1] if f(horizon, horizon.index(x))]
print('> Parameter "{}" is being added for node/s'
' "{}"...'.format(parname, nodes))
# Flagging technologies that have lifetime for adding new timesteps
yr_list = [int(x) for x in set(sc_new.set('year')
) if int(x) > int(firstyear_new)]
min_step = min(np.diff(sorted(yr_list)))
par_tec = sc_new.par('technical_lifetime', {'node_loc': nodes})
# Technologies with lifetime bigger than minimum time interval
par_tec = par_tec.loc[par_tec['value'] > min_step]
df = par_old.copy()
if parname == 'relation_activity':
tec_list = []
else:
tec_list = [t for t in (set(df['technology'])
) if t in list(set(par_tec['technology']))]
df_y = interpolate_2d(df, yrs_new, horizon, year_ref, year_col,
tec_list, par_tec, 'value', extrapolate,
extrapol_neg, year_diff, bound_extend)
sc_new.add_par(parname, df_y)
sc_new.commit(parname)
print('> Parameter "{}" copied and new years added'
' for node/s: "{}".'.format(parname, nodes))
# %% VI) Required functions
def interpolate_1d(df, yrs_new, horizon, year_col, value_col='value',
extrapolate=False, extrapol_neg=None, bound_extend=True):
"""Interpolate data with one year dimension.
    This function receives parameter data as a dataframe, and adds new data
    for the additional years by interpolation and extrapolation.
Parameters
----------
df : pandas.DataFrame
The dataframe of the parameter to which new years to be added.
yrs_new : list of int
New years to be added.
horizon: list of int
The horizon of the reference scenario.
year_col : str
The header of the column to which the new years should be added, e.g.
`'year_act'`.
value_col : str
The header of the column containing values.
extrapolate : bool
Allow extrapolation when a new year is outside the parameter years.
extrapol_neg : bool
Allow negative values obtained by extrapolation.
bound_extend : bool
Allow extrapolation of bounds for new years
"""
horizon_new = sorted(horizon + yrs_new)
idx = [x for x in df.columns if x not in [year_col, value_col]]
if not df.empty:
df2 = df.pivot_table(index=idx, columns=year_col, values=value_col)
# To sort the new years smaller than the first year for
# extrapolation (e.g. 2025 values are calculated first; then
# values of 2015 based on 2020 and 2025)
year_before = sorted([x for x in yrs_new if x < min(df2.columns
)], reverse=True)
if year_before and extrapolate:
for y in year_before:
yrs_new.insert(len(yrs_new), yrs_new.pop(yrs_new.index(y)))
for yr in yrs_new:
if yr > max(horizon):
extrapol = True
else:
extrapol = extrapolate
            # a) If this new year is greater than modeled years, do extrapolation
if yr > max(df2.columns) and extrapol:
if yr == horizon_new[horizon_new.index(max(df2.columns)) + 1]:
year_pre = max([x for x in df2.columns if x < yr])
if len([x for x in df2.columns if x < yr]) >= 2:
year_pp = max([x for x in df2.columns if x < year_pre])
df2[yr] = intpol(df2[year_pre], df2[year_pp],
year_pre, year_pp, yr)
if bound_extend:
df2[yr] = df2[yr].fillna(df2[year_pre])
df2[yr][np.isinf(df2[year_pre])] = df2[year_pre]
if not df2[yr].loc[(df2[yr] < 0) & (df2[year_pre] >= 0)
].empty and extrapol_neg:
df2.loc[(df2[yr] < 0) & (df2[year_pre] >= 0),
yr] = df2.loc[(df2[yr] < 0
) & (df2[year_pre] >= 0),
year_pre] * extrapol_neg
else:
df2[yr] = df2[year_pre]
# b) If the new year is smaller than modeled years, extrapolate
elif yr < min(df2.columns) and extrapol:
year_next = min([x for x in df2.columns if x > yr])
# To make sure the new year is not two steps smaller
cond = (year_next == horizon_new[horizon_new.index(yr) + 1])
if len([x for x in df2.columns if x > yr]) >= 2 and cond:
year_nn = min([x for x in df2.columns if x > year_next])
df2[yr] = intpol(df2[year_next], df2[year_nn],
year_next, year_nn, yr)
df2[yr][np.isinf(df2[year_next])] = df2[year_next]
if not df2[yr].loc[(df2[yr] < 0) & (df2[year_next] >= 0)
].empty and extrapol_neg:
df2.loc[(df2[yr] < 0) & (df2[year_next] >= 0), yr
] = df2.loc[(df2[yr] < 0
) & (df2[year_next] >= 0),
year_next] * extrapol_neg
elif bound_extend and cond:
df2[yr] = df2[year_next]
            # c) Otherwise, do interpolation
elif yr > min(df2.columns) and yr < max(df2.columns):
year_pre = max([x for x in df2.columns if x < yr])
year_next = min([x for x in df2.columns if x > yr])
df2[yr] = intpol(df2[year_pre], df2[year_next],
year_pre, year_next, yr)
# Extrapolate for new years if the value exists for the
# previous year but not for the next years
# TODO: here is the place that should be changed if the
# new year should go to the time step before the existing one
if [x for x in df2.columns if x > year_next]:
year_nn = min([x for x in df2.columns if x > year_next])
df2[yr] = df2[yr].fillna(intpol(df2[year_next],
df2[year_nn], year_next,
year_nn, yr))
if not df2[yr].loc[(df2[yr] < 0) & (df2[year_next] >= 0)
].empty and extrapol_neg:
df2.loc[(df2[yr] < 0) & (df2[year_next] >= 0), yr
] = df2.loc[(df2[yr] < 0
) & (df2[year_next] >= 0),
year_next] * extrapol_neg
if bound_extend:
df2[yr] = df2[yr].fillna(df2[year_pre])
df2[yr][np.isinf(df2[year_pre])] = df2[year_pre]
df2 = pd.melt(df2.reset_index(), id_vars=idx,
value_vars=[x for x in df2.columns if x not in idx],
var_name=year_col, value_name=value_col
).dropna(subset=[value_col]).reset_index(drop=True)
df2 = df2.sort_values(idx).reset_index(drop=True)
else:
print('+++ WARNING: The submitted dataframe is empty, so returned'
' empty results!!! +++')
df2 = df
return df2
# %% VI.B) Interpolating parameters with two dimensions related to time
def interpolate_2d(df, yrs_new, horizon, year_ref, year_col, tec_list, par_tec,
value_col='value', extrapolate=False, extrapol_neg=None,
year_diff=None, bound_extend=True):
"""Interpolate parameters with two dimensions related year.
This function receives a dataframe that has 2 time-related columns (e.g.,
"input" or "relation_activity"), and adds new data for the additonal years
in both time-related columns by interpolation and extrapolation.
Parameters
----------
df : pandas.DataFrame
The dataframe of the parameter to which new years to be added.
yrs_new : list of int
New years to be added.
horizon: list of int
The horizon of the reference scenario.
year_ref : str
The header of the first column to which the new years should be added,
e.g. `'year_vtg'`.
year_col : str
The header of the column to which the new years should be added, e.g.
`'year_act'`.
tec_list : list of str
List of technologies in the parameter ``technical_lifetime``.
par_tec : pandas.DataFrame
Parameter ``technical_lifetime``.
value_col : str
The header of the column containing values.
extrapolate : bool
Allow extrapolation when a new year is outside the parameter years.
extrapol_neg : bool
Allow negative values obtained by extrapolation.
year_diff : list of int
List of model years with different time intervals before and after them
bound_extend : bool
Allow extrapolation of bounds for new years based on one data point
"""
def idx_check(df1, df2):
return df1.loc[df1.index.isin(df2.index)]
    if df.empty:
        print('+++ WARNING: The submitted dataframe is empty, so'
              ' returned empty results!!! +++')
        return df
df_tec = df.loc[df['technology'].isin(tec_list)]
idx = [x for x in df.columns if x not in [year_col, value_col]]
df2 = df.pivot_table(index=idx, columns=year_col, values='value')
df2_tec = df_tec.pivot_table(index=idx, columns=year_col, values='value')
# -------------------------------------------------------------------------
    # First, changing the time interval for the transition period
    # (e.g., year 2010 in the old R11 model transitions from a 5-year to a
    # 10-year interval)
horizon_new = sorted(horizon + [x for x in yrs_new if x not in horizon])
def f(x, i):
return x[i + 1] - x[i] > x[i] - x[i - 1]
yr_diff_new = [x for x in horizon_new[1:-1] if f(horizon_new,
horizon_new.index(x))]
# Generating duration_period_sum matrix for masking
df_dur = pd.DataFrame(index=horizon_new[:-1], columns=horizon_new[1:])
for i in df_dur.index:
for j in [x for x in df_dur.columns if x > i]:
df_dur.loc[i, j] = j - i
# Adding data for new transition year
if yr_diff_new and tec_list and year_diff not in yr_diff_new:
yrs = [x for x in horizon if x <= yr_diff_new[0]]
year_next = min([x for x in df2.columns if x > yr_diff_new[0]])
df_yrs = slice_df(df2_tec, idx, year_ref, yrs, [])
if yr_diff_new[0] in df2.columns:
df_yrs = df_yrs.loc[~pd.isna(df_yrs[yr_diff_new[0]]), :]
df_yrs = df_yrs.append(slice_df(df2_tec, idx, year_ref,
[year_next], []),
ignore_index=False).reset_index()
df_yrs = df_yrs.sort_values(idx).set_index(idx)
for yr in sorted([x for x in list(set(df_yrs.reset_index()[year_ref])
) if x < year_next]):
yr_next = min([x for x in horizon_new if x > yr])
d = slice_df(df_yrs, idx, year_ref, [yr], [])
d_n = slice_df(df_yrs, idx, year_ref, [yr_next], yr)
if d_n[year_next].loc[~pd.isna(d_n[year_next])].empty:
if [x for x in horizon_new if x > yr_next]:
yr_nn = min([x for x in horizon_new if x > yr_next])
else:
yr_nn = yr_next
d_n = slice_df(df_yrs, idx, year_ref, [yr_nn], yr)
d_n = d_n.loc[d_n.index.isin(d.index), :]
d = d.loc[d.index.isin(d_n.index), :]
d[d.isnull() & d_n.notnull()] = d_n
df2.loc[df2.index.isin(d.index), :] = d
cond1 = (df_dur.index <= yr_diff_new[0])
cond2 = (df_dur.columns >= year_next)
subt = yr_diff_new[0] - horizon_new[horizon_new.index(yr_diff_new[0]
) - 1]
df_dur.loc[cond1, cond2] = df_dur.loc[cond1, cond2] - subt
# -------------------------------------------------------------------------
# Second, adding year_act of new years if year_vtg is in existing years
for yr in yrs_new:
if yr > max(horizon):
extrapol = True
else:
extrapol = extrapolate
# a) If this new year is greater than modeled years, do extrapolation
if yr > horizon_new[horizon_new.index(max(df2.columns))] and extrapol:
year_pre = max([x for x in df2.columns if x < yr])
year_pp = max([x for x in df2.columns if x < year_pre])
df2[yr] = intpol(df2[year_pre], df2[year_pp],
year_pre, year_pp, yr)
df2[yr][np.isinf(df2[year_pre].shift(+1))
] = df2[year_pre].shift(+1)
df2[yr] = df2[yr].fillna(df2[year_pre])
j = horizon_new.index(yr)
if yr - horizon_new[j - 1] >= horizon_new[j - 1
] - horizon_new[j - 2]:
df2[yr].loc[(pd.isna(df2[year_pre].shift(+1))
) & (~pd.isna(df2[year_pp].shift(+1)))] = np.nan
cond = (df2[yr] < 0) & (df2[year_pre].shift(+1) >= 0)
if not df2[yr].loc[cond].empty and extrapol_neg:
df2.loc[cond, yr] = df2.loc[cond, year_pre] * extrapol_neg
        # b) Otherwise, do interpolation
elif yr > min(df2.columns) and yr < max(df2.columns):
year_pre = max([x for x in df2.columns if x < yr])
year_next = min([x for x in df2.columns if x > yr])
df2[yr] = intpol(df2[year_pre], df2[year_next],
year_pre, year_next, yr)
df2_t = df2.loc[df2_tec.index, :].copy()
# This part calculates the missing value if only the previous
# timestep has a value (and not the next)
if tec_list:
cond = (pd.isna(df2_t[yr])) & (~pd.isna(df2_t[year_pre]))
df2_t[yr].loc[cond] = intpol(df2_t[year_pre],
df2_t[year_next].shift(-1),
year_pre, year_next, yr)
# Treating technologies with phase-out in model years
if [x for x in df2.columns if x < year_pre]:
year_pp = max([x for x in df2.columns if x < year_pre])
                    cond1 = (pd.isna(df2_t[yr])) & (~pd.isna(df2_t[year_pre]))
# -*- coding:utf-8 -*-
# /usr/bin/env python
"""
Author: <NAME>
date: 2019/11/26 16:33
contact: <EMAIL>
desc: Hebei Province Air Quality Forecast Information Release System
http://172.16.58.3/publish/
Level classification
1. AQI 0-50: level I, "excellent". Air quality is satisfactory with essentially no pollution; all groups can be active as normal.
2. AQI 51-100: level II, "good". Air quality is acceptable, but some pollutants may slightly affect a very small number of unusually sensitive people, who should reduce outdoor activity.
3. AQI 101-150: level III, "lightly polluted". Symptoms of susceptible groups worsen slightly and healthy people may feel irritation; children, the elderly, and patients with heart or respiratory disease should reduce prolonged, high-intensity outdoor exercise.
4. AQI 151-200: level IV, "moderately polluted". Symptoms of susceptible groups worsen further and healthy people's hearts and respiratory systems may be affected; patients should avoid prolonged, high-intensity outdoor exercise and the general population should moderately reduce outdoor activity.
5. AQI 201-300: level V, "heavily polluted". Symptoms of heart and lung disease patients worsen markedly and exercise tolerance drops; healthy people commonly show symptoms. Children, the elderly, and heart or lung patients should stay indoors and stop outdoor exercise; the general population should reduce outdoor activity.
6. AQI above 300: level VI, "severely polluted". Healthy people's exercise tolerance drops with obvious, strong symptoms and some illnesses appear early; children, the elderly, and the sick should stay indoors and avoid exertion, and the general population should avoid outdoor activity.
Published by: Hebei Provincial Environmental Emergency and Heavy Pollution Weather Warning Center; technical support: Institute of Atmospheric Physics, Chinese Academy of Sciences, and 3Clear Technology Co., Ltd.
"""
import datetime
import pandas as pd
import requests
def air_hebei(city="唐山市"):
"""
    Hebei Province Air Quality Forecast Information Release System - air quality forecast for the next 6 days
http://172.16.58.3/publish/
:param city: ['石家庄市', '唐山市', '秦皇岛市', '邯郸市', '邢台市', '保定市', '张家口市', '承德市', '沧州市', '廊坊市', '衡水市', '辛集市', '定州市']
:type city: str
    :return: city = "" returns data for all regions; city = "唐山市" returns data for that city only
:rtype: pandas.DataFrame
"""
url = f"http://172.16.58.3/publishNewServer/api/CityPublishInfo/GetProvinceAndCityPublishData?publishDate={datetime.datetime.today().strftime('%Y-%m-%d')}%2016:00:00"
res = requests.get(url)
json_data = res.json()
city_list = pd.DataFrame.from_dict(json_data["cityPublishDatas"], orient="columns")["CityName"].tolist()
    # Day 1 of the forecast; the remaining days follow the same pattern
future_df_1 = pd.DataFrame.from_dict([item["Date1"] for item in json_data["cityPublishDatas"]], orient="columns")
future_df_2 = pd.DataFrame.from_dict([item["Date2"] for item in json_data["cityPublishDatas"]], orient="columns")
    future_df_3 = pd.DataFrame.from_dict([item["Date3"] for item in json_data["cityPublishDatas"]], orient="columns")
# -*- coding:utf8 -*-
import json
from tencentcloud.common import credential
from tencentcloud.common.profile.client_profile import ClientProfile
from tencentcloud.common.profile.http_profile import HttpProfile
from tencentcloud.common.exception.tencent_cloud_sdk_exception import TencentCloudSDKException
from tencentcloud.ocr.v20181119 import ocr_client, models
from screenshots import get_image_data
import pandas as pd
import datetime
import openpyxl
import re
import base64
def tencentkey():
with open('apikey.json', 'r') as f:
data = json.load(f)
Secret_Id = data['tencentapi']['SecretId']
SECRET_KEY = data['tencentapi']['SecretKey']
cred = credential.Credential( Secret_Id , SECRET_KEY)
return cred
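# Assumed layout of apikey.json (illustrative; inferred from the keys read
# above, not shipped with this code):
# {
#     "tencentapi": {
#         "SecretId": "your-secret-id",
#         "SecretKey": "your-secret-key"
#     }
# }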
def tencentocrbasic1():
cred = tencentkey()
base64data = get_image_data()
httpProfile = HttpProfile()
httpProfile.endpoint = "ocr.tencentcloudapi.com"
clientProfile = ClientProfile()
clientProfile.httpProfile = httpProfile
client = ocr_client.OcrClient(cred, "ap-beijing", clientProfile)
req = models.GeneralBasicOCRRequest()
params = {
"ImageBase64": base64data
}
req.from_json_string(json.dumps(params))
resp = client.GeneralBasicOCR(req)
alpha = ''
for gama in resp.TextDetections:
alpha = alpha + gama.DetectedText
return alpha
def tencentocrbasic0():
cred = tencentkey()
base64data = get_image_data()
httpProfile = HttpProfile()
httpProfile.endpoint = "ocr.tencentcloudapi.com"
clientProfile = ClientProfile()
clientProfile.httpProfile = httpProfile
client = ocr_client.OcrClient(cred, "ap-beijing", clientProfile)
req = models.GeneralBasicOCRRequest()
params = {
"ImageBase64": base64data
}
req.from_json_string(json.dumps(params))
resp = client.GeneralBasicOCR(req)
alpha = ''
for gama in resp.TextDetections:
alpha = alpha + gama.DetectedText+'\n'
return alpha
def tencentocr_script1():
cred = tencentkey()
base64data = get_image_data()
httpProfile = HttpProfile()
httpProfile.endpoint = "ocr.tencentcloudapi.com"
clientProfile = ClientProfile()
clientProfile.httpProfile = httpProfile
client = ocr_client.OcrClient(cred, "ap-beijing", clientProfile)
req = models.GeneralHandwritingOCRRequest()
params = {
"ImageBase64": base64data
}
req.from_json_string(json.dumps(params))
resp = client.GeneralHandwritingOCR(req)
alpha = ''
for gama in resp.TextDetections:
alpha = alpha + gama.DetectedText
return alpha
def tencentocr_script0():
cred = tencentkey()
base64data = get_image_data()
httpProfile = HttpProfile()
httpProfile.endpoint = "ocr.tencentcloudapi.com"
clientProfile = ClientProfile()
clientProfile.httpProfile = httpProfile
client = ocr_client.OcrClient(cred, "ap-beijing", clientProfile)
req = models.GeneralHandwritingOCRRequest()
params = {
"ImageBase64": base64data
}
req.from_json_string(json.dumps(params))
resp = client.GeneralHandwritingOCR(req)
alpha = ''
for gama in resp.TextDetections:
alpha = alpha + gama.DetectedText+'\n'
return alpha
def tencentocr_hp1():
cred = tencentkey()
base64data = get_image_data()
httpProfile = HttpProfile()
httpProfile.endpoint = "ocr.tencentcloudapi.com"
clientProfile = ClientProfile()
clientProfile.httpProfile = httpProfile
client = ocr_client.OcrClient(cred , "ap-beijing", clientProfile)
req = models.GeneralAccurateOCRRequest()
params = {
"ImageBase64": base64data
}
req.from_json_string(json.dumps(params))
resp = client.GeneralAccurateOCR(req)
alpha = ''
for gama in resp.TextDetections:
alpha = alpha + gama.DetectedText
return alpha
def tencentocr_hp0():
cred = tencentkey()
base64data = get_image_data()
httpProfile = HttpProfile()
httpProfile.endpoint = "ocr.tencentcloudapi.com"
clientProfile = ClientProfile()
clientProfile.httpProfile = httpProfile
client = ocr_client.OcrClient(cred , "ap-beijing", clientProfile)
req = models.GeneralAccurateOCRRequest()
params = {
"ImageBase64": base64data
}
req.from_json_string(json.dumps(params))
resp = client.GeneralAccurateOCR(req)
alpha = ''
for gama in resp.TextDetections:
alpha = alpha + gama.DetectedText+'\n'
return alpha
def tencentocr_eng1():
cred = tencentkey()
base64data = get_image_data()
httpProfile = HttpProfile()
httpProfile.endpoint = "ocr.tencentcloudapi.com"
clientProfile = ClientProfile()
clientProfile.httpProfile = httpProfile
client = ocr_client.OcrClient(cred, "ap-beijing", clientProfile)
req = models.EnglishOCRRequest()
params = {
"ImageBase64": base64data
}
req.from_json_string(json.dumps(params))
resp = client.EnglishOCR(req)
alpha = ''
for gama in resp.TextDetections:
alpha = alpha + gama.DetectedText
return alpha
def tencentocr_eng0():
cred = tencentkey()
base64data = get_image_data()
httpProfile = HttpProfile()
httpProfile.endpoint = "ocr.tencentcloudapi.com"
clientProfile = ClientProfile()
clientProfile.httpProfile = httpProfile
client = ocr_client.OcrClient(cred, "ap-beijing", clientProfile)
req = models.EnglishOCRRequest()
params = {
"ImageBase64": base64data
}
req.from_json_string(json.dumps(params))
resp = client.EnglishOCR(req)
alpha = ''
for gama in resp.TextDetections:
alpha = alpha + gama.DetectedText+'\n'
return alpha
def tencent_table():
cred = tencentkey()
base64data = get_image_data()
httpProfile = HttpProfile()
httpProfile.endpoint = "ocr.tencentcloudapi.com"
clientProfile = ClientProfile()
clientProfile.httpProfile = httpProfile
client = ocr_client.OcrClient(cred, "ap-beijing", clientProfile)
req = models.RecognizeTableOCRRequest()
params = {
"ImageBase64": base64data
}
req.from_json_string(json.dumps(params))
resp = client.RecognizeTableOCR(req)
result1 = json.loads(resp.to_json_string())
rowIndex = []
colIndex = []
content = []
for item in result1['TableDetections']:
for item2 in item['Cells']:
rowIndex.append(item2['RowTl'])
colIndex.append(item2['ColTl'])
content.append(item2['Text'])
rowIndex = pd.Series(rowIndex)
    colIndex = pd.Series(colIndex)
# Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional information regarding
# copyright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
"""
Implement DataFrame public API as Pandas does.
Almost all docstrings for public and magic methods should be inherited from Pandas
for better maintainability. So some error codes are ignored in the pydocstyle check:
- D101: missing docstring in class
- D102: missing docstring in public method
- D105: missing docstring in magic method
Manually add documentation for methods which are not presented in pandas.
"""
import pandas
from pandas.core.common import apply_if_callable
from pandas.core.dtypes.common import (
infer_dtype_from_object,
is_dict_like,
is_list_like,
is_numeric_dtype,
)
from pandas.core.indexes.api import ensure_index_from_sequences
from pandas.util._validators import validate_bool_kwarg
from pandas.io.formats.printing import pprint_thing
from pandas._libs.lib import no_default
from pandas._typing import Label
import itertools
import functools
import numpy as np
import sys
from typing import Optional, Sequence, Tuple, Union, Mapping
import warnings
from modin.error_message import ErrorMessage
from modin.utils import _inherit_docstrings, to_pandas, hashable
from modin.config import IsExperimental
from .utils import (
from_pandas,
from_non_pandas,
)
from .iterator import PartitionIterator
from .series import Series
from .base import BasePandasDataset, _ATTRS_NO_LOOKUP
from .groupby import DataFrameGroupBy
from .accessor import CachedAccessor, SparseFrameAccessor
@_inherit_docstrings(pandas.DataFrame, excluded=[pandas.DataFrame.__init__])
class DataFrame(BasePandasDataset):
def __init__(
self,
data=None,
index=None,
columns=None,
dtype=None,
copy=False,
query_compiler=None,
):
"""
Distributed DataFrame object backed by Pandas dataframes.
Parameters
----------
data: NumPy ndarray (structured or homogeneous) or dict:
Dict can contain Series, arrays, constants, or list-like
objects.
index: pandas.Index, list, ObjectID
The row index for this DataFrame.
columns: pandas.Index
The column names for this DataFrame, in pandas Index object.
dtype: Data type to force.
            Only a single dtype is allowed. If None, the dtype is inferred.
copy: bool
Copy data from inputs. Only affects DataFrame / 2d ndarray input.
query_compiler: query_compiler
A query compiler object to manage distributed computation.
"""
if isinstance(data, (DataFrame, Series)):
self._query_compiler = data._query_compiler.copy()
if index is not None and any(i not in data.index for i in index):
raise NotImplementedError(
"Passing non-existant columns or index values to constructor not"
" yet implemented."
)
if isinstance(data, Series):
# We set the column name if it is not in the provided Series
if data.name is None:
self.columns = [0] if columns is None else columns
# If the columns provided are not in the named Series, pandas clears
# the DataFrame and sets columns to the columns provided.
elif columns is not None and data.name not in columns:
self._query_compiler = from_pandas(
DataFrame(columns=columns)
)._query_compiler
if index is not None:
self._query_compiler = data.loc[index]._query_compiler
elif columns is None and index is None:
data._add_sibling(self)
else:
if columns is not None and any(i not in data.columns for i in columns):
raise NotImplementedError(
"Passing non-existant columns or index values to constructor not"
" yet implemented."
)
if index is None:
index = slice(None)
if columns is None:
columns = slice(None)
self._query_compiler = data.loc[index, columns]._query_compiler
# Check type of data and use appropriate constructor
elif query_compiler is None:
distributed_frame = from_non_pandas(data, index, columns, dtype)
if distributed_frame is not None:
self._query_compiler = distributed_frame._query_compiler
return
warnings.warn(
"Distributing {} object. This may take some time.".format(type(data))
)
if is_list_like(data) and not is_dict_like(data):
old_dtype = getattr(data, "dtype", None)
values = [
obj._to_pandas() if isinstance(obj, Series) else obj for obj in data
]
if isinstance(data, np.ndarray):
data = np.array(values, dtype=old_dtype)
else:
try:
data = type(data)(values, dtype=old_dtype)
except TypeError:
data = values
elif is_dict_like(data) and not isinstance(
data, (pandas.Series, Series, pandas.DataFrame, DataFrame)
):
data = {
k: v._to_pandas() if isinstance(v, Series) else v
for k, v in data.items()
}
pandas_df = pandas.DataFrame(
data=data, index=index, columns=columns, dtype=dtype, copy=copy
)
self._query_compiler = from_pandas(pandas_df)._query_compiler
else:
self._query_compiler = query_compiler
def __repr__(self):
from pandas.io.formats import console
num_rows = pandas.get_option("display.max_rows") or 10
num_cols = pandas.get_option("display.max_columns") or 20
if pandas.get_option("display.max_columns") is None and pandas.get_option(
"display.expand_frame_repr"
):
width, _ = console.get_console_size()
width = min(width, len(self.columns))
col_counter = 0
i = 0
while col_counter < width:
col_counter += len(str(self.columns[i])) + 1
i += 1
num_cols = i
i = len(self.columns) - 1
col_counter = 0
while col_counter < width:
col_counter += len(str(self.columns[i])) + 1
i -= 1
num_cols += len(self.columns) - i
result = repr(self._build_repr_df(num_rows, num_cols))
if len(self.index) > num_rows or len(self.columns) > num_cols:
# The split here is so that we don't repr pandas row lengths.
return result.rsplit("\n\n", 1)[0] + "\n\n[{0} rows x {1} columns]".format(
len(self.index), len(self.columns)
)
else:
return result
def _repr_html_(self): # pragma: no cover
num_rows = pandas.get_option("max_rows") or 60
num_cols = pandas.get_option("max_columns") or 20
# We use pandas _repr_html_ to get a string of the HTML representation
# of the dataframe.
result = self._build_repr_df(num_rows, num_cols)._repr_html_()
if len(self.index) > num_rows or len(self.columns) > num_cols:
# We split so that we insert our correct dataframe dimensions.
return result.split("<p>")[
0
] + "<p>{0} rows x {1} columns</p>\n</div>".format(
len(self.index), len(self.columns)
)
else:
return result
def _get_columns(self):
"""
Get the columns for this DataFrame.
Returns
-------
        The union of all column labels across the partitions.
"""
return self._query_compiler.columns
def _set_columns(self, new_columns):
"""
Set the columns for this DataFrame.
Parameters
----------
        new_columns: The new columns to set for this DataFrame.
"""
self._query_compiler.columns = new_columns
columns = property(_get_columns, _set_columns)
@property
def ndim(self):
# DataFrames have an invariant that requires they be 2 dimensions.
return 2
def drop_duplicates(
self, subset=None, keep="first", inplace=False, ignore_index=False
):
return super(DataFrame, self).drop_duplicates(
subset=subset, keep=keep, inplace=inplace
)
@property
def dtypes(self):
return self._query_compiler.dtypes
def duplicated(self, subset=None, keep="first"):
import hashlib
df = self[subset] if subset is not None else self
# if the number of columns we are checking for duplicates is larger than 1, we must
# hash them to generate a single value that can be compared across rows.
if len(df.columns) > 1:
hashed = df.apply(
lambda s: hashlib.new("md5", str(tuple(s)).encode()).hexdigest(), axis=1
).to_frame()
else:
hashed = df
duplicates = hashed.apply(lambda s: s.duplicated(keep=keep)).squeeze(axis=1)
# remove Series name which was assigned automatically by .apply
duplicates.name = None
return duplicates
@property
def empty(self):
return len(self.columns) == 0 or len(self.index) == 0
@property
def axes(self):
return [self.index, self.columns]
@property
def shape(self):
return len(self.index), len(self.columns)
def add_prefix(self, prefix):
return DataFrame(query_compiler=self._query_compiler.add_prefix(prefix))
def add_suffix(self, suffix):
return DataFrame(query_compiler=self._query_compiler.add_suffix(suffix))
def applymap(self, func):
if not callable(func):
raise ValueError("'{0}' object is not callable".format(type(func)))
ErrorMessage.non_verified_udf()
return DataFrame(query_compiler=self._query_compiler.applymap(func))
def apply(self, func, axis=0, raw=False, result_type=None, args=(), **kwds):
axis = self._get_axis_number(axis)
query_compiler = super(DataFrame, self).apply(
func, axis=axis, raw=raw, result_type=result_type, args=args, **kwds
)
if not isinstance(query_compiler, type(self._query_compiler)):
return query_compiler
# This is the simplest way to determine the return type, but there are checks
# in pandas that verify that some results are created. This is a challenge for
# empty DataFrames, but fortunately they only happen when the `func` type is
# a list or a dictionary, which means that the return type won't change from
# type(self), so we catch that error and use `type(self).__name__` for the return
# type.
try:
if axis == 0:
init_kwargs = {"index": self.index}
else:
init_kwargs = {"columns": self.columns}
return_type = type(
getattr(pandas, type(self).__name__)(**init_kwargs).apply(
func, axis=axis, raw=raw, result_type=result_type, args=args, **kwds
)
).__name__
except Exception:
return_type = type(self).__name__
if return_type not in ["DataFrame", "Series"]:
return query_compiler.to_pandas().squeeze()
else:
result = getattr(sys.modules[self.__module__], return_type)(
query_compiler=query_compiler
)
if isinstance(result, Series):
if axis == 0 and result.name == self.index[0] or result.name == 0:
result.name = None
elif axis == 1 and result.name == self.columns[0] or result.name == 0:
result.name = None
return result
def groupby(
self,
by=None,
axis=0,
level=None,
as_index=True,
sort=True,
group_keys=True,
squeeze: bool = no_default,
observed=False,
dropna: bool = True,
):
if squeeze is not no_default:
warnings.warn(
(
"The `squeeze` parameter is deprecated and "
"will be removed in a future version."
),
FutureWarning,
stacklevel=2,
)
else:
squeeze = False
axis = self._get_axis_number(axis)
idx_name = None
# Drop here indicates whether or not to drop the data column before doing the
# groupby. The typical pandas behavior is to drop when the data came from this
# dataframe. When a string, Series directly from this dataframe, or list of
# strings is passed in, the data used for the groupby is dropped before the
# groupby takes place.
drop = False
if (
not isinstance(by, (pandas.Series, Series))
and is_list_like(by)
and len(by) == 1
):
by = by[0]
if callable(by):
by = self.index.map(by)
elif isinstance(by, str):
drop = by in self.columns
idx_name = by
if (
self._query_compiler.has_multiindex(axis=axis)
and by in self.axes[axis].names
or hasattr(self.axes[axis], "name")
and self.axes[axis].name == by
):
# In this case we pass the string value of the name through to the
# partitions. This is more efficient than broadcasting the values.
pass
else:
by = self.__getitem__(by)._query_compiler
elif isinstance(by, Series):
drop = by._parent is self
idx_name = by.name
by = by._query_compiler
elif is_list_like(by):
# fastpath for multi column groupby
if (
not isinstance(by, Series)
and axis == 0
and all(
(
(isinstance(o, str) and (o in self))
or (isinstance(o, Series) and (o._parent is self))
)
for o in by
)
):
# We can just revert Series back to names because the parent is
# this dataframe:
by = [o.name if isinstance(o, Series) else o for o in by]
by = self.__getitem__(by)._query_compiler
drop = True
else:
mismatch = len(by) != len(self.axes[axis])
if mismatch and all(
isinstance(obj, str)
and (
obj in self
or (hasattr(self.index, "names") and obj in self.index.names)
)
for obj in by
):
# In the future, we will need to add logic to handle this, but for now
# we default to pandas in this case.
pass
elif mismatch and any(
isinstance(obj, str) and obj not in self.columns for obj in by
):
names = [o.name if isinstance(o, Series) else o for o in by]
raise KeyError(next(x for x in names if x not in self))
return DataFrameGroupBy(
self,
by,
axis,
level,
as_index,
sort,
group_keys,
squeeze,
idx_name,
observed=observed,
drop=drop,
dropna=dropna,
)
def keys(self):
return self.columns
def transpose(self, copy=False, *args):
return DataFrame(query_compiler=self._query_compiler.transpose(*args))
T = property(transpose)
def add(self, other, axis="columns", level=None, fill_value=None):
return self._binary_op(
"add",
other,
axis=axis,
level=level,
fill_value=fill_value,
broadcast=isinstance(other, Series),
)
def append(self, other, ignore_index=False, verify_integrity=False, sort=False):
if sort is False:
warnings.warn(
"Due to https://github.com/pandas-dev/pandas/issues/35092, "
"Pandas ignores sort=False; Modin correctly does not sort."
)
if isinstance(other, (Series, dict)):
if isinstance(other, dict):
other = Series(other)
if other.name is None and not ignore_index:
raise TypeError(
"Can only append a Series if ignore_index=True"
" or if the Series has a name"
)
if other.name is not None:
# other must have the same index name as self, otherwise
# index name will be reset
name = other.name
# We must transpose here because a Series becomes a new row, and the
# structure of the query compiler is currently columnar
other = other._query_compiler.transpose()
other.index = pandas.Index([name], name=self.index.name)
else:
# See note above about transpose
other = other._query_compiler.transpose()
elif isinstance(other, list):
if not all(isinstance(o, BasePandasDataset) for o in other):
other = DataFrame(pandas.DataFrame(other))._query_compiler
else:
other = [obj._query_compiler for obj in other]
else:
other = other._query_compiler
# If ignore_index is False, by definition the Index will be correct.
# We also do this first to ensure that we don't waste compute/memory.
if verify_integrity and not ignore_index:
appended_index = (
self.index.append(other.index)
if not isinstance(other, list)
else self.index.append([o.index for o in other])
)
is_valid = next((False for idx in appended_index.duplicated() if idx), True)
if not is_valid:
raise ValueError(
"Indexes have overlapping values: {}".format(
appended_index[appended_index.duplicated()]
)
)
query_compiler = self._query_compiler.concat(
0, other, ignore_index=ignore_index, sort=sort
)
return DataFrame(query_compiler=query_compiler)
def assign(self, **kwargs):
df = self.copy()
for k, v in kwargs.items():
if callable(v):
df[k] = v(df)
else:
df[k] = v
return df
def boxplot(
self,
column=None,
by=None,
ax=None,
fontsize=None,
rot=0,
grid=True,
figsize=None,
layout=None,
return_type=None,
backend=None,
**kwargs,
):
return to_pandas(self).boxplot(
column=column,
by=by,
ax=ax,
fontsize=fontsize,
rot=rot,
grid=grid,
figsize=figsize,
layout=layout,
return_type=return_type,
backend=backend,
**kwargs,
)
def combine(self, other, func, fill_value=None, overwrite=True):
return super(DataFrame, self).combine(
other, func, fill_value=fill_value, overwrite=overwrite
)
def compare(
self,
other: "DataFrame",
align_axis: Union[str, int] = 1,
keep_shape: bool = False,
keep_equal: bool = False,
) -> "DataFrame":
return self._default_to_pandas(
pandas.DataFrame.compare,
other=other,
align_axis=align_axis,
keep_shape=keep_shape,
keep_equal=keep_equal,
)
def corr(self, method="pearson", min_periods=1):
return self.__constructor__(
query_compiler=self._query_compiler.corr(
method=method,
min_periods=min_periods,
)
)
def corrwith(self, other, axis=0, drop=False, method="pearson"):
if isinstance(other, DataFrame):
other = other._query_compiler.to_pandas()
return self._default_to_pandas(
pandas.DataFrame.corrwith, other, axis=axis, drop=drop, method=method
)
def cov(self, min_periods=None, ddof: Optional[int] = 1):
numeric_df = self.drop(
columns=[
i for i in self.dtypes.index if not is_numeric_dtype(self.dtypes[i])
]
)
is_notna = True
if all(numeric_df.notna().all()):
if min_periods is not None and min_periods > len(numeric_df):
result = np.empty((numeric_df.shape[1], numeric_df.shape[1]))
result.fill(np.nan)
return numeric_df.__constructor__(result)
else:
cols = numeric_df.columns
idx = cols.copy()
numeric_df = numeric_df.astype(dtype="float64")
denom = 1.0 / (len(numeric_df) - ddof)
means = numeric_df.mean(axis=0)
result = numeric_df - means
result = result.T._query_compiler.conj().dot(result._query_compiler)
else:
result = numeric_df._query_compiler.cov(min_periods=min_periods)
is_notna = False
if is_notna:
result = numeric_df.__constructor__(
query_compiler=result, index=idx, columns=cols
)
result *= denom
else:
result = numeric_df.__constructor__(query_compiler=result)
return result
def dot(self, other):
if isinstance(other, BasePandasDataset):
common = self.columns.union(other.index)
if len(common) > len(self.columns) or len(common) > len(other.index):
raise ValueError("Matrices are not aligned")
qc = other.reindex(index=common)._query_compiler
if isinstance(other, DataFrame):
return self.__constructor__(
query_compiler=self._query_compiler.dot(
qc, squeeze_self=False, squeeze_other=False
)
)
else:
return self._reduce_dimension(
query_compiler=self._query_compiler.dot(
qc, squeeze_self=False, squeeze_other=True
)
)
other = np.asarray(other)
if self.shape[1] != other.shape[0]:
raise ValueError(
"Dot product shape mismatch, {} vs {}".format(self.shape, other.shape)
)
if len(other.shape) > 1:
return self.__constructor__(
query_compiler=self._query_compiler.dot(other, squeeze_self=False)
)
return self._reduce_dimension(
query_compiler=self._query_compiler.dot(other, squeeze_self=False)
)
def eq(self, other, axis="columns", level=None):
return self._binary_op(
"eq", other, axis=axis, level=level, broadcast=isinstance(other, Series)
)
def equals(self, other):
if isinstance(other, pandas.DataFrame):
# Copy into a Modin DataFrame to simplify logic below
other = DataFrame(other)
return (
self.index.equals(other.index)
and self.columns.equals(other.columns)
and self.eq(other).all().all()
)
def explode(self, column: Union[str, Tuple], ignore_index: bool = False):
return self._default_to_pandas(
pandas.DataFrame.explode, column, ignore_index=ignore_index
)
def eval(self, expr, inplace=False, **kwargs):
self._validate_eval_query(expr, **kwargs)
inplace = validate_bool_kwarg(inplace, "inplace")
new_query_compiler = self._query_compiler.eval(expr, **kwargs)
return_type = type(
pandas.DataFrame(columns=self.columns)
.astype(self.dtypes)
.eval(expr, **kwargs)
).__name__
if return_type == type(self).__name__:
return self._create_or_update_from_compiler(new_query_compiler, inplace)
else:
if inplace:
raise ValueError("Cannot operate inplace if there is no assignment")
return getattr(sys.modules[self.__module__], return_type)(
query_compiler=new_query_compiler
)
def floordiv(self, other, axis="columns", level=None, fill_value=None):
return self._binary_op(
"floordiv",
other,
axis=axis,
level=level,
fill_value=fill_value,
broadcast=isinstance(other, Series),
)
@classmethod
def from_dict(
cls, data, orient="columns", dtype=None, columns=None
): # pragma: no cover
ErrorMessage.default_to_pandas("`from_dict`")
return from_pandas(
pandas.DataFrame.from_dict(
data, orient=orient, dtype=dtype, columns=columns
)
)
@classmethod
def from_records(
cls,
data,
index=None,
exclude=None,
columns=None,
coerce_float=False,
nrows=None,
): # pragma: no cover
ErrorMessage.default_to_pandas("`from_records`")
return from_pandas(
pandas.DataFrame.from_records(
data,
index=index,
exclude=exclude,
columns=columns,
coerce_float=coerce_float,
nrows=nrows,
)
)
def ge(self, other, axis="columns", level=None):
return self._binary_op(
"ge", other, axis=axis, level=level, broadcast=isinstance(other, Series)
)
def gt(self, other, axis="columns", level=None):
return self._binary_op(
"gt", other, axis=axis, level=level, broadcast=isinstance(other, Series)
)
def hist(
self,
column=None,
by=None,
grid=True,
xlabelsize=None,
xrot=None,
ylabelsize=None,
yrot=None,
ax=None,
sharex=False,
sharey=False,
figsize=None,
layout=None,
bins=10,
**kwds,
): # pragma: no cover
return self._default_to_pandas(
pandas.DataFrame.hist,
column=column,
by=by,
grid=grid,
xlabelsize=xlabelsize,
xrot=xrot,
ylabelsize=ylabelsize,
yrot=yrot,
ax=ax,
sharex=sharex,
sharey=sharey,
figsize=figsize,
layout=layout,
bins=bins,
**kwds,
)
def info(
self, verbose=None, buf=None, max_cols=None, memory_usage=None, null_counts=None
):
def put_str(src, output_len=None, spaces=2):
src = str(src)
return src.ljust(output_len if output_len else len(src)) + " " * spaces
def format_size(num):
for x in ["bytes", "KB", "MB", "GB", "TB"]:
if num < 1024.0:
return f"{num:3.1f} {x}"
num /= 1024.0
return f"{num:3.1f} PB"
output = []
type_line = str(type(self))
index_line = self.index._summary()
columns = self.columns
columns_len = len(columns)
dtypes = self.dtypes
dtypes_line = f"dtypes: {', '.join(['{}({})'.format(dtype, count) for dtype, count in dtypes.value_counts().items()])}"
if max_cols is None:
max_cols = 100
exceeds_info_cols = columns_len > max_cols
if buf is None:
buf = sys.stdout
if null_counts is None:
null_counts = not exceeds_info_cols
if verbose is None:
verbose = not exceeds_info_cols
if null_counts and verbose:
# We iterate over the items of `non_null_count` below, which is slow on a
# Modin Series, so we convert it to a pandas Series via `_to_pandas()` here
# for faster access.
non_null_count = self.count()._to_pandas()
if memory_usage is None:
memory_usage = True
def get_header(spaces=2):
output = []
head_label = " # "
column_label = "Column"
null_label = "Non-Null Count"
dtype_label = "Dtype"
non_null_label = " non-null"
delimiter = "-"
lengths = {}
lengths["head"] = max(len(head_label), len(pprint_thing(len(columns))))
lengths["column"] = max(
len(column_label), max(len(pprint_thing(col)) for col in columns)
)
lengths["dtype"] = len(dtype_label)
dtype_spaces = (
max(lengths["dtype"], max(len(pprint_thing(dtype)) for dtype in dtypes))
- lengths["dtype"]
)
header = put_str(head_label, lengths["head"]) + put_str(
column_label, lengths["column"]
)
if null_counts:
lengths["null"] = max(
len(null_label),
max(len(pprint_thing(x)) for x in non_null_count)
+ len(non_null_label),
)
header += put_str(null_label, lengths["null"])
header += put_str(dtype_label, lengths["dtype"], spaces=dtype_spaces)
output.append(header)
delimiters = put_str(delimiter * lengths["head"]) + put_str(
delimiter * lengths["column"]
)
if null_counts:
delimiters += put_str(delimiter * lengths["null"])
delimiters += put_str(delimiter * lengths["dtype"], spaces=dtype_spaces)
output.append(delimiters)
return output, lengths
output.extend([type_line, index_line])
def verbose_repr(output):
columns_line = f"Data columns (total {len(columns)} columns):"
header, lengths = get_header()
output.extend([columns_line, *header])
for i, col in enumerate(columns):
i, col, dtype = map(pprint_thing, [i, col, dtypes[col]])
to_append = put_str(" {}".format(i), lengths["head"]) + put_str(
col, lengths["column"]
)
if null_counts:
non_null = pprint_thing(non_null_count[col])
to_append += put_str(
"{} non-null".format(non_null), lengths["null"]
)
to_append += put_str(dtype, lengths["dtype"], spaces=0)
output.append(to_append)
def non_verbose_repr(output):
output.append(columns._summary(name="Columns"))
if verbose:
verbose_repr(output)
else:
non_verbose_repr(output)
output.append(dtypes_line)
if memory_usage:
deep = memory_usage == "deep"
mem_usage_bytes = self.memory_usage(index=True, deep=deep).sum()
mem_line = f"memory usage: {format_size(mem_usage_bytes)}"
output.append(mem_line)
output.append("")
buf.write("\n".join(output))
def insert(self, loc, column, value, allow_duplicates=False):
if isinstance(value, (DataFrame, pandas.DataFrame)):
if len(value.columns) != 1:
raise ValueError("Wrong number of items passed 2, placement implies 1")
value = value.iloc[:, 0]
if isinstance(value, Series):
# TODO: Remove broadcast of Series
value = value._to_pandas()
if not self._query_compiler.lazy_execution and len(self.index) == 0:
try:
value = pandas.Series(value)
except (TypeError, ValueError, IndexError):
raise ValueError(
"Cannot insert into a DataFrame with no defined index "
"and a value that cannot be converted to a "
"Series"
)
new_index = value.index.copy()
new_columns = self.columns.insert(loc, column)
new_query_compiler = DataFrame(
value, index=new_index, columns=new_columns
)._query_compiler
elif len(self.columns) == 0 and loc == 0:
new_query_compiler = DataFrame(
data=value, columns=[column], index=self.index
)._query_compiler
else:
if (
is_list_like(value)
and not isinstance(value, pandas.Series)
and len(value) != len(self.index)
):
raise ValueError("Length of values does not match length of index")
if not allow_duplicates and column in self.columns:
raise ValueError("cannot insert {0}, already exists".format(column))
if loc > len(self.columns):
raise IndexError(
"index {0} is out of bounds for axis 0 with size {1}".format(
loc, len(self.columns)
)
)
if loc < 0:
raise ValueError("unbounded slice")
new_query_compiler = self._query_compiler.insert(loc, column, value)
self._update_inplace(new_query_compiler=new_query_compiler)
def interpolate(
self,
method="linear",
axis=0,
limit=None,
inplace=False,
limit_direction: Optional[str] = None,
limit_area=None,
downcast=None,
**kwargs,
):
return self._default_to_pandas(
pandas.DataFrame.interpolate,
method=method,
axis=axis,
limit=limit,
inplace=inplace,
limit_direction=limit_direction,
limit_area=limit_area,
downcast=downcast,
**kwargs,
)
def iterrows(self):
def iterrow_builder(s):
return s.name, s
partition_iterator = PartitionIterator(self, 0, iterrow_builder)
for v in partition_iterator:
yield v
def items(self):
def items_builder(s):
return s.name, s
partition_iterator = PartitionIterator(self, 1, items_builder)
for v in partition_iterator:
yield v
def iteritems(self):
return self.items()
def itertuples(self, index=True, name="Pandas"):
def itertuples_builder(s):
return next(s._to_pandas().to_frame().T.itertuples(index=index, name=name))
partition_iterator = PartitionIterator(self, 0, itertuples_builder)
for v in partition_iterator:
yield v
def join(self, other, on=None, how="left", lsuffix="", rsuffix="", sort=False):
if isinstance(other, Series):
if other.name is None:
raise ValueError("Other Series must have a name")
other = DataFrame({other.name: other})
if on is not None:
return self.__constructor__(
query_compiler=self._query_compiler.join(
other._query_compiler,
on=on,
how=how,
lsuffix=lsuffix,
rsuffix=rsuffix,
sort=sort,
)
)
if isinstance(other, DataFrame):
# Joining the empty DataFrames with either index or columns is
# fast. It gives us proper error checking for the edge cases that
# would otherwise require a lot more logic.
new_columns = (
pandas.DataFrame(columns=self.columns)
.join(
| pandas.DataFrame(columns=other.columns) | pandas.DataFrame |
# -*- coding:utf-8 -*-
"""Training and inference for tabular datasets using neural nets."""
import datetime
import os
import numpy as np
import time
import pandas as pd
import pickle
import shutil
from joblib import Parallel, delayed
from sklearn.metrics import roc_auc_score
from sklearn.utils.validation import check_array
from sklearn.model_selection import KFold, StratifiedKFold, train_test_split
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.layers import Concatenate, BatchNormalization
from tensorflow.keras.utils import to_categorical
from . import modelset, deepnets
from .config import ModelConfig
from .deepmodel import DeepModel
from .preprocessor import DefaultPreprocessor
from ..utils import dt_logging, consts
from ..utils.tf_version import tf_less_than
logger = dt_logging.get_logger()
class DeepTable:
"""`DeepTables` can be use to solve classification and regression prediction problems on tabular datasets.
Easy to use and provide good performance out of box, no datasets preprocessing is required.
Arguments
---------
config : ModelConfig
Options of ModelConfig
----------------------
name: str, (default='conf-1')
nets: list of str or callable object, (default=['dnn_nets'])
Preset Nets
-----------
- DeepFM -> ['linear','dnn_nets','fm_nets']
- xDeepFM
- DCN
- PNN
- WideDeep
- AutoInt
- AFM
- FGCNN
- FibiNet
Available Building Blocks
-------------------------
- 'dnn_nets'
- 'linear'
- 'cin_nets'
- 'fm_nets'
- 'afm_nets'
- 'opnn_nets'
- 'ipnn_nets'
- 'pnn_nets',
- 'cross_nets'
- 'cross_dnn_nets'
- 'dcn_nets',
- 'autoint_nets'
- 'fg_nets'
- 'fgcnn_cin_nets'
- 'fgcnn_fm_nets'
- 'fgcnn_ipnn_nets'
- 'fgcnn_dnn_nets'
- 'fibi_nets'
- 'fibi_dnn_nets'
Examples
--------
>>>from deeptables.models import deepnets
>>>#preset nets
>>>conf = ModelConfig(nets=deepnets.DeepFM)
>>>#list of names of nets
>>>conf = ModelConfig(nets=['linear','dnn_nets','cin_nets','cross_nets'])
>>>#mixed preset nets and names
>>>conf = ModelConfig(nets=deepnets.WideDeep+['cin_nets'])
>>>#mixed names and custom nets
>>>def custom_net(embeddings, flatten_emb_layer, dense_layer, concat_emb_dense, config, model_desc):
>>> out = layers.Dense(10)(flatten_emb_layer)
>>> return out
>>>conf = ModelConfig(nets=['linear', custom_net])
categorical_columns: list of strings, (default='auto')
- 'auto'
Get the columns of categorical type automatically. By default, columns of object,
bool and category dtype are selected.
If 'auto', the [auto_categorize] option no longer takes effect.
- list of strings
e.g. ['x1','x2','x3','..']
exclude_columns: list of strings, (default=[])
pos_label: str or int, (default=None)
The label of positive class, used only when task is binary.
metrics: list of string or callable object, (default=['accuracy'])
List of metrics to be evaluated by the model during training and testing.
Typically you will use `metrics=['accuracy']` or `metrics=['AUC']`.
Every metric should be a built-in evaluation metric in tf.keras.metrics or a callable object
like `r2(y_true, y_pred):...` .
See also: https://tensorflow.google.cn/versions/r2.0/api_docs/python/tf/keras/metrics
auto_categorize: bool, (default=False)
cat_exponent: float, (default=0.5)
cat_remain_numeric: bool, (default=True)
auto_encode_label: bool, (default=True)
auto_imputation: bool, (default=True)
auto_discrete: bool, (default=False)
apply_gbm_features: bool, (default=False)
gbm_params: dict, (default={})
gbm_feature_type: str, (default=embedding)
- embedding
- dense
fixed_embedding_dim: bool, (default=True)
embeddings_output_dim: int, (default=4)
embeddings_initializer: str or object, (default='uniform')
Initializer for the `embeddings` matrix.
embeddings_regularizer: str or object, (default=None)
Regularizer function applied to the `embeddings` matrix.
dense_dropout: float, (default=0) between 0 and 1
Fraction of the dense input units to drop.
embedding_dropout: float, (default=0.3) between 0 and 1
Fraction of the embedding input units to drop.
stacking_op: str, (default='add')
- add
- concat
output_use_bias: bool, (default=True)
apply_class_weight: bool, (default=False)
optimizer: str or object, (default='auto')
- auto
- str
- object
loss: str or object, (default='auto')
dnn_params: dict, (default={'hidden_units': ((128, 0, False), (64, 0, False)),
'dnn_activation': 'relu'})
autoint_params:dict, (default={'num_attention': 3,'num_heads': 1,
'dropout_rate': 0,'use_residual': True})
fgcnn_params={'fg_filters': (14, 16),
'fg_widths': (7, 7),
'fg_pool_widths': (2, 2),
'fg_new_feat_filters': (2, 2),
},
fibinet_params={
'senet_pooling_op': 'mean',
'senet_reduction_ratio': 3,
'bilinear_type': 'field_interaction',
},
cross_params={
'num_cross_layer': 4,
},
pnn_params={
'outer_product_kernel_type': 'mat',
},
afm_params={
'attention_factor': 4,
'dropout_rate': 0
},
cin_params={
'cross_layer_size': (128, 128),
'activation': 'relu',
'use_residual': False,
'use_bias': False,
'direct': False,
'reduce_D': False,
},
home_dir: str, (default=None)
The home directory for saving model-related files. Each time running `fit(...)`
or `fit_cross_validation(...)`, a subdirectory with a time-stamp will be created
in this directory.
monitor_metric: str, (default=None)
earlystopping_patience: int, (default=1)
gpu_usage_strategy: str, (default='memory_growth')
- memory_growth
- None
distribute_strategy: tensorflow.python.distribute.distribute_lib.Strategy, (default=None)
-
Attributes
----------
task: str
Type of prediction problem. If 'config.task = None' (the default), it will be inferred
based on the values of `y` when calling 'fit(...)' or 'fit_cross_validation(...)'.
-'binary' : binary classification task
-'multiclass' : multiclass classification task
-'regression' : regression task
num_classes: int
The number of classes, used only when task is multiclass.
pos_label: str or int
The label of positive class, used only when task is binary.
output_path: str
Path to directory used to save models. In addition, if a valid 'X_test' is passed into
`fit_cross_validation(...)`, the prediction results of the test set will be saved in
this path as well.
The path is a subdirectory with time-stamp created in the `home directory`. `home directory`
is specified through `config.home_dir`, if `config.home_dir=None` `output_path` will be created
in working directory.
preprocessor: AbstractPreprocessor (default = DefaultPreprocessor)
Preprocessor is used to perform dataset preprocessing, such as categorization, label encoding,
imputation, discretization, etc., before feeding data into the neural nets.
nets: list(str)
List of the network cells used to build the DeepModel
monitor: str
The metric used to monitor the quality of the model for early stopping; if not specified, the
first metric in [config.metrics] will be used. (e.g. log_loss/auc_val/accuracy_val...)
modelset: ModelSet
The models produced by `fit(...)` or `fit_cross_validation(...)`
best_model: Model
A set of models will be produced by `fit_cross_validation(...)`, instead of only one
model by `fit(...)`. The Best Model is the model with best performance on specific metric.
The first metric in [config.metrics] will be used by default.
leaderboard: pandas.DataFrame
List sorted by specific metric with some meta information and scores. The first metric
in [config.metrics] will be used by default.
See also
--------
Examples
--------
>>>X_train = pd.read_csv('https://storage.googleapis.com/tf-datasets/titanic/train.csv')
>>>X_eval = pd.read_csv('https://storage.googleapis.com/tf-datasets/titanic/eval.csv')
>>>y_train = X_train.pop('survived')
>>>y_eval = X_eval.pop('survived')
>>>
>>>config = ModelConfig(nets=deepnets.DeepFM, fixed_embedding_dim=True, embeddings_output_dim=4, auto_discrete=True)
>>>dt = DeepTable(config=config)
>>>
>>>model, history = dt.fit(X_train, y_train, epochs=100)
>>>preds = dt.predict(X_eval)
"""
def __init__(self, config=None, preprocessor=None):
if config is None:
config = ModelConfig()
self.config = config
self.nets = config.nets
self.output_path = self._prepare_output_dir(config.home_dir, self.nets)
self.preprocessor = preprocessor if preprocessor is not None else DefaultPreprocessor(config)
self.__current_model = None
self.__modelset = modelset.ModelSet(metric=self.config.first_metric_name,
best_mode=consts.MODEL_SELECT_MODE_AUTO)
@property
def task(self):
return self.preprocessor.task
@property
def num_classes(self):
return len(self.preprocessor.labels)
@property
def classes_(self):
return self.preprocessor.labels
@property
def pos_label(self):
if self.config.pos_label is not None:
return self.config.pos_label
else:
return self.preprocessor.pos_label
@property
def monitor(self):
monitor = self.config.monitor_metric
if monitor is None:
if self.config.metrics is not None and len(self.config.metrics) > 0:
monitor = 'val_' + self.config.first_metric_name
return monitor
@property
def modelset(self):
return self.__modelset
@property
def best_model(self):
return self.__modelset.best_model().model
@property
def leaderboard(self):
return self.__modelset.leaderboard()
def fit(self, X=None, y=None, batch_size=128, epochs=1, verbose=1, callbacks=None,
validation_split=0.2, validation_data=None, shuffle=True,
class_weight=None, sample_weight=None,
initial_epoch=0, steps_per_epoch=None, validation_steps=None, validation_freq=1,
max_queue_size=10, workers=1, use_multiprocessing=False):
logger.info(f'X.Shape={np.shape(X)}, y.Shape={np.shape(y)}, batch_size={batch_size}, config={self.config}')
logger.info(f'metrics:{self.config.metrics}')
self.__modelset.clear()
X, y = self.preprocessor.fit_transform(X, y)
if validation_data is not None:
validation_data = self.preprocessor.transform(*validation_data)
logger.info(f'Training...')
if class_weight is None and self.config.apply_class_weight and self.task != consts.TASK_REGRESSION:
class_weight = self.get_class_weight(y)
callbacks = self.__inject_callbacks(callbacks)
model = DeepModel(self.task, self.num_classes, self.config,
self.preprocessor.categorical_columns,
self.preprocessor.continuous_columns)
history = model.fit(X, y, batch_size=batch_size, epochs=epochs, verbose=verbose, shuffle=shuffle,
validation_split=validation_split, validation_data=validation_data,
validation_steps=validation_steps, validation_freq=validation_freq,
callbacks=callbacks, class_weight=class_weight, sample_weight=sample_weight,
initial_epoch=initial_epoch, steps_per_epoch=steps_per_epoch,
max_queue_size=max_queue_size, workers=workers, use_multiprocessing=use_multiprocessing)
name = f'{"+".join(self.nets)}'
logger.info(f'Training finished.')
self.__set_model('val', name, model, history.history)
return model, history
def fit_cross_validation(self, X, y, X_eval=None, X_test=None, num_folds=5, stratified=False, iterators=None,
batch_size=None, epochs=1, verbose=1, callbacks=None, n_jobs=1, random_state=9527,
shuffle=True, class_weight=None, sample_weight=None,
initial_epoch=0, steps_per_epoch=None, validation_steps=None, validation_freq=1,
max_queue_size=10, workers=1, use_multiprocessing=False
):
print("Start cross validation")
start = time.time()
logger.info(f'X.Shape={np.shape(X)}, y.Shape={np.shape(y)}, batch_size={batch_size}, config={self.config}')
logger.info(f'metrics:{self.config.metrics}')
self.__modelset.clear()
X, y = self.preprocessor.fit_transform(X, y)
if X_eval is not None:
print(f'transform X_eval')
X_eval = self.preprocessor.transform_X(X_eval)
if X_test is not None:
print(f'transform X_test')
X_test = self.preprocessor.transform_X(X_test)
if iterators is None:
if stratified and self.task != consts.TASK_REGRESSION:
iterators = StratifiedKFold(n_splits=num_folds, shuffle=True, random_state=random_state)
else:
iterators = KFold(n_splits=num_folds, shuffle=True, random_state=random_state)
print(f'Iterators:{iterators}')
test_proba_mean = None
eval_proba_mean = None
if self.task in (consts.TASK_MULTICLASS, consts.TASK_MULTILABEL):
oof_proba = np.zeros((y.shape[0], self.num_classes))
else:
oof_proba = np.zeros((y.shape[0], 1))
y = np.array(y)
if class_weight is None and self.config.apply_class_weight and self.task == consts.TASK_BINARY:
class_weight = self.get_class_weight(y)
callbacks = self.__inject_callbacks(callbacks)
parallel = Parallel(n_jobs=n_jobs, verbose=verbose)
fit_and_score_kwargs = dict(
batch_size=batch_size, epochs=epochs, verbose=verbose,
callbacks=callbacks, class_weight=class_weight, shuffle=shuffle, sample_weight=sample_weight,
validation_steps=validation_steps, validation_freq=validation_freq,
initial_epoch=initial_epoch, steps_per_epoch=steps_per_epoch,
max_queue_size=max_queue_size, workers=workers, use_multiprocessing=use_multiprocessing
)
with parallel:
out = parallel(delayed(_fit_and_score)(
self.task, self.num_classes, self.config,
self.preprocessor.categorical_columns, self.preprocessor.continuous_columns,
n_fold, valid_idx,
X.iloc[train_idx], y[train_idx], X.iloc[valid_idx], y[valid_idx],
X_eval, X_test, f'{self.output_path}{"_".join(self.nets)}-kfold-{n_fold + 1}.h5',
**fit_and_score_kwargs)
for n_fold, (train_idx, valid_idx) in enumerate(iterators.split(X, y)))
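# Accumulate the out-of-fold predictions at their validation indices and average the
# eval/test probabilities across folds.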
for n_fold, idx, history, fold_oof_proba, fold_eval_proba, fold_test_proba in out:
oof_proba[idx] = fold_oof_proba
if X_eval is not None:
if eval_proba_mean is None:
eval_proba_mean = fold_eval_proba / num_folds
else:
eval_proba_mean += fold_eval_proba / num_folds
if X_test is not None:
if test_proba_mean is None:
test_proba_mean = fold_test_proba / num_folds
else:
test_proba_mean += fold_test_proba / num_folds
self.__push_model('val', f'{"+".join(self.nets)}-kfold-{n_fold + 1}',
f'{self.output_path}{"_".join(self.nets)}-kfold-{n_fold + 1}.h5', history)
if oof_proba.shape[-1] == 1:
oof_proba = oof_proba.reshape(-1)
if eval_proba_mean is not None and eval_proba_mean.shape[-1] == 1:
eval_proba_mean = eval_proba_mean.reshape(-1)
if test_proba_mean is not None and test_proba_mean.shape[-1] == 1:
test_proba_mean = test_proba_mean.reshape(-1)
file = f'{self.output_path}{"_".join(self.nets)}-cv-{num_folds}.csv'
pd.DataFrame(test_proba_mean).to_csv(file, index=False)
logger.info(f'fit_cross_validation taken {time.time() - start}s')
return oof_proba, eval_proba_mean, test_proba_mean
def evaluate(self, X_test, y_test, batch_size=256, verbose=0, model_selector=consts.MODEL_SELECTOR_CURRENT, ):
X_t, y_t = self.preprocessor.transform(X_test, y_test)
y_t = np.array(y_t)
model = self.get_model(model_selector)
if not isinstance(model, DeepModel):
raise ValueError(f'Wrong model_selector:{model_selector}')
result = model.evaluate(X_t, y_t, batch_size=batch_size, verbose=verbose)
return result
def predict_proba(self, X, batch_size=128, verbose=0,
model_selector=consts.MODEL_SELECTOR_CURRENT, auto_transform_data=True, ):
start = time.time()
if model_selector == consts.MODEL_SELECTOR_ALL:
models = self.get_model(model_selector)
proba_avg = None
if auto_transform_data:
X = self.preprocessor.transform_X(X)
for model in models:
proba = self.__predict(model, X, batch_size=batch_size, verbose=verbose, auto_transform_data=False)
if proba_avg is None:
proba_avg = np.zeros(proba.shape)
proba_avg += proba
proba_avg /= len(models)
logger.info(f'predict_proba taken {time.time() - start}s')
return proba_avg
else:
proba = self.__predict(self.get_model(model_selector),
X, batch_size=batch_size,
verbose=verbose,
auto_transform_data=auto_transform_data)
logger.info(f'predict_proba taken {time.time() - start}s')
return proba
def predict_proba_all(self, X, batch_size=128, verbose=0, auto_transform_data=True, ):
mis = self.__modelset.get_modelinfos()
proba_all = {}
if auto_transform_data:
X = self.preprocessor.transform_X(X)
for mi in mis:
model = self.get_model(mi.name)
proba = self.__predict(model, X, batch_size=batch_size, verbose=verbose, auto_transform_data=False)
proba_all[mi.name] = proba
return proba_all
def predict(self, X, encode_to_label=True, batch_size=128, verbose=0,
model_selector=consts.MODEL_SELECTOR_CURRENT, auto_transform_data=True):
proba = self.predict_proba(X, batch_size, verbose,
model_selector=model_selector,
auto_transform_data=auto_transform_data)
return self.proba2predict(proba, encode_to_label)
def proba2predict(self, proba, encode_to_label=True):
if self.task == consts.TASK_REGRESSION:
return proba
if proba is None:
raise ValueError('[proba] can not be none.')
if len(proba.shape) == 1:
proba = proba.reshape((-1, 1))
if proba.shape[-1] > 1:
predict = proba.argmax(axis=-1)
else:
predict = (proba > 0.5).astype(consts.DATATYPE_PREDICT_CLASS)
if encode_to_label:
logger.info('Reverse indicators to labels.')
predict = self.preprocessor.inverse_transform_y(predict)
return predict
def apply(self, X, output_layers, concat_outputs=False, batch_size=128, verbose=0,
model_selector=consts.MODEL_SELECTOR_CURRENT, auto_transform_data=True, transformer=None):
start = time.time()
model = self.get_model(model_selector)
if not isinstance(model, DeepModel):
raise ValueError(f'Wrong model_selector:{model_selector}')
if auto_transform_data:
X = self.preprocessor.transform_X(X)
output = model.apply(X, output_layers, concat_outputs, batch_size, verbose, transformer)
logger.info(f'apply taken {time.time() - start}s')
return output
def concat_emb_dense(self, flatten_emb_layer, dense_layer):
x = None
if flatten_emb_layer is not None and dense_layer is not None:
x = Concatenate(name='concat_embedding_dense')([flatten_emb_layer, dense_layer])
elif flatten_emb_layer is not None:
x = flatten_emb_layer
elif dense_layer is not None:
x = dense_layer
else:
raise ValueError('No input layer exists.')
x = BatchNormalization(name='bn_concat_emb_dense')(x)
logger.info(f'Concat embedding and dense layer shape:{x.shape}')
return x
def get_model(self, model_selector=consts.MODEL_SELECTOR_CURRENT, ):
if model_selector == consts.MODEL_SELECTOR_CURRENT:
# get model by name
mi = self.__modelset.get_modelinfo(self.__current_model)
elif model_selector == consts.MODEL_SELECTOR_BEST:
mi = self.__modelset.best_model()
elif model_selector == consts.MODEL_SELECTOR_ALL:
ms = []
for mi in self.__modelset.get_modelinfos():
if isinstance(mi.model, str):
dm = self.load_deepmodel(mi.model)
mi.model = dm
ms.append(mi.model)
return ms
else:
# get model by name
mi = self.__modelset.get_modelinfo(model_selector)
if mi is None:
raise ValueError(f'{model_selector} does not exist.')
if isinstance(mi.model, str):
dm = self.load_deepmodel(mi.model)
mi.model = dm
return mi.model
def get_class_weight(self, y):
print('Calculating class weights.')
if len(y.shape) == 1:
y = to_categorical(y)
y_sum = y.sum(axis=0)
class_weight = {}
total = y.shape[0]
classes = len(y_sum)
print(f"Examples:\nTotal:{total}")
for i in range(classes):
weight = total / y_sum[i] / classes
class_weight[i] = weight
print(f'class {i}:{weight}')
return class_weight
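# Worked example (illustrative, not from the original source): with 1000 samples split
# 900/100 over two classes, the balanced weight is total / count / n_classes, i.e.
# class 0 -> 1000 / 900 / 2 ~= 0.56 and class 1 -> 1000 / 100 / 2 = 5.0, so the rare
# class contributes proportionally more to the loss.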
def _prepare_output_dir(self, home_dir, nets):
if home_dir is None:
home_dir = 'dt_output'
if home_dir[-1] == '/':
home_dir = home_dir[:-1]
running_dir = f'dt_{datetime.datetime.now().strftime("%Y%m%d %H%M%S")}_{"_".join(nets)}'
output_path = os.path.expanduser(f'{home_dir}/{running_dir}/')
if not os.path.exists(output_path):
os.makedirs(output_path)
return output_path
def __predict(self, model, X, batch_size=128, verbose=0, auto_transform_data=True, ):
logger.info("Perform prediction...")
if auto_transform_data:
X = self.preprocessor.transform_X(X)
return model.predict(X, batch_size=batch_size, verbose=verbose)
def __set_model(self, type, name, model, history):
self.__modelset.clear()
self.__push_model(type, name, model, history)
def __push_model(self, type, name, model, history, save_model=True):
modelfile = ''
if save_model and isinstance(model, DeepModel):
modelfile = f'{self.output_path}{name}.h5'
model.save(modelfile)
print(f'Model has been saved to:{modelfile}')
mi = modelset.ModelInfo(type, name, model, {}, history=history, modelfile=modelfile)
self.__modelset.push(mi)
self.__current_model = mi.name
def __inject_callbacks(self, callbacks):
# mcp = None
es = None
if callbacks is not None:
for callback in callbacks:
# if isinstance(callback, ModelCheckpoint):
# mcp = callback
if isinstance(callback, EarlyStopping):
es = callback
else:
callbacks = []
if 'auc' in self.monitor.lower() or 'acc' in self.monitor.lower():
mode = 'max'
else:
mode = 'min'
# if mcp is None:
# mcp = ModelCheckpoint(self.model_filepath,
# monitor=self.monitor,
# verbose=0,
# save_best_only=False,
# save_weights_only=False,
# mode=mode,
# save_freq='epoch',
# )
# callbacks.append(mcp)
# print(f'Injected a callback [ModelCheckpoint].\nfilepath:{mcp.filepath}\nmonitor:{mcp.monitor}')
if es is None:
es = EarlyStopping(monitor=self.monitor if tf_less_than('2.2') else self.monitor.lower(),
restore_best_weights=True,
patience=self.config.earlystopping_patience,
verbose=1,
# min_delta=0.0001,
mode=mode,
baseline=None,
)
callbacks.append(es)
print(f'Injected a callback [EarlyStopping]. monitor:{es.monitor}, patience:{es.patience}, mode:{mode}')
return callbacks
def save(self, filepath, deepmodel_basename=None):
if filepath[-1] != '/':
filepath = filepath + '/'
if not os.path.exists(filepath):
os.makedirs(filepath)
num_model = len(self.__modelset.get_modelinfos())
for mi in self.__modelset.get_modelinfos():
if isinstance(mi.model, str):
dm = self.load_deepmodel(mi.model)
mi.model = dm
if not isinstance(mi.model, DeepModel):
raise ValueError(f'Currently does not support saving non-DeepModel models.')
if num_model == 1 and deepmodel_basename is not None:
mi.name = deepmodel_basename
self.__current_model = deepmodel_basename
modelfile = f'{filepath}{mi.name}.h5'
mi.model.save(modelfile)
mi.model = modelfile
with open(f'{filepath}dt.pkl', 'wb') as output:
pickle.dump(self, output, protocol=2)
@staticmethod
def load(filepath):
if filepath[-1] != '/':
filepath = filepath + '/'
with open(f'{filepath}dt.pkl', 'rb') as input:
dt = pickle.load(input)
dt.restore_modelset(filepath)
return dt
def restore_modelset(self, filepath):
for mi in self.__modelset.get_modelinfos():
if isinstance(mi.model, str):
modelfile = mi.model
modelfile = os.path.split(modelfile)[-1]
dm = self.load_deepmodel(f'{filepath}{modelfile}')
mi.model = dm
def load_deepmodel(self, filepath):
if os.path.exists(filepath):
print(f'Load model from disk:{filepath}.')
dm = DeepModel(self.task, self.num_classes, self.config,
self.preprocessor.categorical_columns, self.preprocessor.continuous_columns, filepath)
return dm
else:
raise ValueError(f'Invalid model filename:{filepath}.')
def _fit_and_score(task, num_classes, config, categorical_columns, continuous_columns,
n_fold, valid_idx, X_train, y_train, X_val, y_val,
X_eval=None, X_test=None, model_file=None,
batch_size=128, epochs=1, verbose=0, callbacks=None,
shuffle=True, class_weight=None, sample_weight=None,
initial_epoch=0, steps_per_epoch=None, validation_steps=None, validation_freq=1,
max_queue_size=10, workers=1, use_multiprocessing=False):
print(f'\nFold:{n_fold + 1}\n')
model = DeepModel(task, num_classes, config, categorical_columns, continuous_columns)
history = model.fit(X_train, y_train, batch_size=batch_size, epochs=epochs, verbose=verbose,
callbacks=callbacks, validation_data=(X_val, y_val),
shuffle=shuffle, class_weight=class_weight, sample_weight=sample_weight,
initial_epoch=initial_epoch, steps_per_epoch=steps_per_epoch,
validation_steps=validation_steps, validation_freq=validation_freq,
max_queue_size=max_queue_size, workers=workers, use_multiprocessing=use_multiprocessing)
print(f'Fold {n_fold + 1} fitting over.')
oof_proba = model.predict(X_val)
eval_proba = None
test_proba = None
if X_eval is not None:
eval_proba = model.predict(X_eval)
if X_test is not None:
test_proba = model.predict(X_test)
if model_file is not None:
file = f'{model_file}.test_proba.csv'
| pd.DataFrame(test_proba) | pandas.DataFrame |
#!/usr/bin/env python3.6
# Copyright 2017 <NAME> <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ----------------------------------------------------------------------------
# Create charts from the benchmark results
# ----------------------------------------------------------------------------
import glob
import matplotlib.pyplot as plt
import matplotlib.ticker as tkr
import numpy as np
import pandas as pd
import seaborn as sns
# This script should be run inside the results directory.
sns.set_style("darkgrid")
# Selected from https://matplotlib.org/users/colormaps.html#qualitative
sns.set_palette(sns.color_palette("tab20", n_colors=11))
# Name for Pool Size Parameter in results
param_pool_size = "Object Pool Size"
# Adjust left for single plot
left_adjust_single = 0.2
# Adjust left for multiple plots
left_adjust_multiple = 0.12
def print_dataframe(df):
with pd.option_context('display.max_rows', None, 'display.max_columns', None):
print(df)
def save_plot(df, title, filename, x="Threads", hue="Benchmark", col=param_pool_size, col_wrap=2, print_data=False,
formatter=tkr.FuncFormatter(lambda y, p: "{:,}".format(y)), left=left_adjust_multiple):
unit = df['Unit'].unique()[0]
print("Creating chart: " + title + ", filename: " + filename + ".")
if print_data:
print_dataframe(df)
fig, ax = plt.subplots()
g = sns.factorplot(x=x, y="Score", hue=hue, col=col, data=df, kind='bar',
size=5, aspect=1, col_wrap=col_wrap, legend=False)
for ax in g.axes.flatten():
ax.yaxis.set_major_formatter(formatter)
g.set_axis_labels(y_var="Score (" + unit + ")")
plt.subplots_adjust(top=0.9, left=left)
g.fig.suptitle(title)
plt.legend(loc='best', title=hue, frameon=True)
plt.savefig(filename)
plt.clf()
plt.close(fig)
# Plot bar charts with error bars
# Some links helped:
# https://stackoverflow.com/a/42033734/1955702
# https://stackoverflow.com/a/30428808/1955702
# https://matplotlib.org/devdocs/gallery/api/barchart.html#sphx-glr-gallery-api-barchart-py
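# Illustrative sketch (not in the original source): with 3 hue values the offsets are
# (np.arange(3) - 1) / 4 = [-0.25, 0.0, 0.25] and the bar width is 0.25, so the three
# bars of each group sit side by side around the shared x tick.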
def barplot_with_errorbars(x, y, yerr, x_values, hue_values, label, **kwargs):
# x_values and benchmarks must be sorted
data = kwargs.pop("data")
x_values_length = len(x_values)
n = np.arange(x_values_length)
offsets = (np.arange(len(hue_values)) - np.arange(len(hue_values)).mean()) / (len(hue_values) + 1.)
width = np.diff(offsets).mean()
# Make sure x axis data is sorted
data = data.sort_values(x)
data_length = len(data)
if data_length < x_values_length:
print('WARN: Not enough data points for %s. Expected %d, Found %d' % (label, x_values_length, data_length))
for i, benchmark in enumerate(hue_values):
if label == benchmark:
plt.bar(n[:data_length] + offsets[i], data[y], width=width, label=label, yerr=data[yerr], capsize=2)
plt.xticks(n, x_values)
def save_plot_with_error_bars(df, title, filename, x="Threads", hue="Benchmark", col=param_pool_size, col_wrap=2,
print_data=False,
formatter=tkr.FuncFormatter(lambda y, p: "{:,}".format(y)), left=left_adjust_multiple):
unit = df['Unit'].unique()[0]
print("Creating chart: " + title + ", filename: " + filename + ".")
if print_data:
print_dataframe(df)
fig, ax = plt.subplots()
x_values = sorted(df[x].unique())
hue_values = sorted(df[hue].unique())
g = sns.FacetGrid(df, hue=hue, col=col, size=5, aspect=1, col_wrap=col_wrap)
g = g.map_dataframe(barplot_with_errorbars, x, "Score", "Score Error (99.9%)", x_values, hue_values)
for ax in g.axes.flatten():
ax.yaxis.set_major_formatter(formatter)
g.set_axis_labels(y_var="Score (" + unit + ")")
plt.subplots_adjust(top=0.9, left=left)
g.fig.suptitle(title)
plt.legend(loc='best', title=hue, frameon=True)
plt.savefig(filename)
plt.clf()
plt.close(fig)
def save_plots(df, title, filename_prefix, x="Threads", hue="Benchmark", col=param_pool_size, col_wrap=2,
print_data=False, formatter=tkr.FuncFormatter(lambda y, p: "{:,}".format(y)), left=left_adjust_multiple):
# Save two plots with and without error bars
# Plotting error bars from dataframe data in factorplot is not directly supported.
# The first plot serves as the reference and should be used to verify the accuracy of the plot with error bars.
save_plot(df, title, filename_prefix + '.png', x=x, hue=hue, col=col, col_wrap=col_wrap, print_data=print_data,
formatter=formatter, left=left)
save_plot_with_error_bars(df, title, filename_prefix + '-with-error-bars.png', x=x, hue=hue, col=col,
col_wrap=col_wrap, print_data=print_data, formatter=formatter, left=left)
def save_lmplot(df, x, title, filename, print_data=False, formatter=tkr.FuncFormatter(lambda y, p: "{:,}".format(y)),
left=left_adjust_single):
unit = df['Unit'].unique()[0]
print("Creating chart: " + title + ", filename: " + filename + ".")
if print_data:
print_dataframe(df)
fig, ax = plt.subplots()
markers_length = len(df["Benchmark"].unique())
g = sns.lmplot(data=df, x=x, y="Score", hue="Benchmark", size=6, legend=False, x_jitter=0.2, y_jitter=0.5,
markers=['o', 'v', '^', '<', '>', '+', 's', 'p', '*', 'x', 'D'][:markers_length])
for ax in g.axes.flatten():
ax.yaxis.set_major_formatter(formatter)
plt.subplots_adjust(top=0.9, left=left)
g.set_axis_labels(y_var="Score (" + unit + ")")
plt.legend(loc='upper left', frameon=True)
g.fig.suptitle(title)
plt.savefig(filename)
plt.clf()
plt.cla()
plt.close(fig)
def replace_benchmark_names(df):
df = df.replace(r'^com\.github\.chrishantha\.microbenchmark\.objectpool\.(.*)Benchmark.useObject$', r'\1',
regex=True)
df = df.replace([r'^com\.github\.chrishantha\.microbenchmark\.objectpool\.(.*)Benchmark.useObject:useObject(.*)$'],
[r'\1\2'], regex=True)
# Profiler Details
df = df.replace([r'^com\.github\.chrishantha\.microbenchmark\.objectpool\.(.*)Benchmark.useObject:(.*)$'],
[r'\1\2'], regex=True)
df = df.replace('com.github.chrishantha.microbenchmark.objectpool.TestObjectBenchmark.expensiveObjectCreate',
'OnDemandExpensiveObject', regex=False)
df = df.replace(
r'^com\.github\.chrishantha\.microbenchmark\.objectpool\.TestObjectBenchmark\.expensiveObjectCreate' +
r':expensiveObjectCreate(.*)$', r'OnDemandExpensiveObject\1', regex=True)
# Profiler Details
df = df.replace(
r'^com\.github\.chrishantha\.microbenchmark\.objectpool\.TestObjectBenchmark\.expensiveObjectCreate' +
r':(.*)$', r'OnDemandExpensiveObject\1', regex=True)
return df
def save_percentile_plot(df, title_percentile, percentile):
df_sample_percentile = df.loc[df['Benchmark'].str.endswith(percentile)]
save_plot(df_sample_percentile, "Sample Time " + title_percentile + "th Percentile Comparison",
"sample-time-" + percentile + "th-percentile.png", formatter=tkr.FormatStrFormatter('%.2e'))
def main():
all_results = glob.glob("results-*-threads.csv")
print("Creating charts using data in following files:")
for file in all_results:
print(file)
print("\nCreating charts...\n")
df = pd.concat(map(pd.read_csv, all_results), ignore_index=True)
df = replace_benchmark_names(df)
df.rename(columns={"Param: poolSize": param_pool_size}, inplace=True)
df.to_csv('all_results.csv')
# df = df[df['Benchmark'].isin(['FastObjectPool', 'StackObjectPool', 'StormpotBlazePool'])]
thrpt_unit = 'ops/ms'
sample_unit = 'ms/op'
alloc_rate_unit = 'MB/sec'
df_thrpt = df.loc[(df['Mode'] == "thrpt") & (df['Unit'] == thrpt_unit)]
thrpt_mask = df_thrpt['Benchmark'].isin(['OnDemandExpensiveObject'])
save_plots(df_thrpt[~thrpt_mask], "Throughput vs Threads Comparison", "thrpt-vs-threads")
save_plots(df_thrpt[~thrpt_mask], "Throughput vs Pool Sizes Comparison", "thrpt-vs-pool-sizes", col="Threads",
x=param_pool_size)
save_lmplot(df_thrpt, "Threads", "Throughput vs Threads", "lmplot-thrpt-vs-threads.png")
save_lmplot(df_thrpt[~pd.isnull(df_thrpt[param_pool_size])], param_pool_size, "Throughput vs Pool Sizes",
"lmplot-thrpt-vs-pool-sizes.png")
for benchmark in df_thrpt[~thrpt_mask]['Benchmark'].unique():
df_benchmark_thrpt = df_thrpt[df_thrpt['Benchmark'] == benchmark]
save_plots(df_benchmark_thrpt, "Throughput vs Threads", "thrpt-" + benchmark, col="Benchmark",
hue=param_pool_size, col_wrap=1, left=left_adjust_single)
df_sample = df.loc[(df['Mode'] == "sample") & (df['Unit'] == sample_unit)]
# Score Error (99.9%) is NaN for percentiles
df_sample_without_percentiles = df_sample[~pd.isnull(df_sample['Score Error (99.9%)'])]
df_sample_pools_without_percentiles = df_sample_without_percentiles[
~pd.isnull(df_sample_without_percentiles[param_pool_size])]
time_formatter = tkr.FuncFormatter(lambda y, p: "{:.2e}".format(y))
sample_mask = df_sample_without_percentiles['Benchmark'].isin(['OnDemandExpensiveObject'])
save_plots(df_sample_without_percentiles[~sample_mask], "Sample Time vs Threads Comparison",
"sample-time-vs-threads", formatter=time_formatter)
save_plots(df_sample_pools_without_percentiles, "Sample Time vs Pool Sizes Comparison",
"sample-time-vs-pool-sizes", col="Threads", x=param_pool_size, formatter=time_formatter)
save_lmplot(df_sample_without_percentiles, "Threads", "Sample Time vs Threads", "lmplot-sample-vs-threads.png",
formatter=time_formatter, left=left_adjust_single)
save_lmplot(df_sample_pools_without_percentiles, param_pool_size, "Sample Time vs Pool Sizes",
"lmplot-sample-vs-pool-sizes.png", formatter=time_formatter, left=left_adjust_single)
for benchmark in df_sample_pools_without_percentiles['Benchmark'].unique():
df_benchmark_sample = df_sample_pools_without_percentiles[
df_sample_pools_without_percentiles['Benchmark'] == benchmark]
save_plots(df_benchmark_sample, "Sample Time vs Threads", "sample-time-" + benchmark, col="Benchmark",
hue=param_pool_size, col_wrap=1, formatter=time_formatter, left=left_adjust_single)
# Filter OnDemandExpensiveObject
df_sample_pools = df_sample[~df_sample['Benchmark'].str.contains('OnDemandExpensiveObject.*')]
save_percentile_plot(df_sample_pools, '50', 'p0.50')
save_percentile_plot(df_sample_pools, '90', 'p0.90')
save_percentile_plot(df_sample_pools, '95', 'p0.95')
save_percentile_plot(df_sample_pools, '99', 'p0.99')
save_percentile_plot(df_sample_pools, '99.9', 'p0.999')
save_percentile_plot(df_sample_pools, '99.99', 'p0.9999')
save_percentile_plot(df_sample_pools, '100', 'p1.00')
df_sample_percentiles = df_sample_pools.copy()
df_sample_percentiles = df_sample_percentiles.loc[pd.isnull(df_sample_percentiles['Score Error (99.9%)'])]
df_sample_percentiles['Pool'] = df_sample_percentiles['Benchmark'].str.extract('(?P<Pool>\w+Pool)', expand=True)
df_sample_pool_percentiles = df_sample_percentiles.loc[~ | pd.isnull(df_sample_percentiles['Pool']) | pandas.isnull |
import pandas as pd
from pathlib import Path
import os
import numpy as np
import datetime
from pickle_plotting import get_file_paths
import logarithmoforecast as lf
import holidays
def pickle_directory(datasets_dir, pickle_dir):
file_paths = os.listdir(datasets_dir)
sdp_series = {}
for path in file_paths:
number = Path(path).stem
print(number)
df = pd.read_csv(datasets_dir / path, header=4, sep=';', usecols=[0, 1, 2, 3, 4, 5], decimal=",")
# df = pd.read_csv(r"/home/joelhaubold/Dokumente/BADaten/FiN-Messdaten-LV_Spannung_Teil2/tmpFile-1492693540182.csv", header=4, sep=';', usecols=[0, 1, 2, 3, 4, 5], decimal=",")
df.drop(columns=['AliasName', 'Unit'])
df = df.set_index('TimeStamp')
df = df.sort_index()
sdp_list = df.ServiceDeliveryPoint.unique()
print(sdp_list)
for sdp in sdp_list:
df_sdp = df.loc[df.ServiceDeliveryPoint == sdp, :] # Slim the pd down here for less memory consumption?
if sdp in sdp_series:
combined_df = sdp_series.get(sdp)
combined_df = pd.concat([combined_df, df_sdp]).sort_index()
sdp_series[sdp] = combined_df
else:
sdp_series[sdp] = df_sdp
for key, value in sdp_series.items():
print(key)
if not os.path.exists(pickle_dir / key):
os.makedirs(pickle_dir / key)
value.index = pd.to_datetime(value.index)
pos1 = value.Description == 'Electric voltage momentary phase 1 (notverified)'
df_phase1 = value.loc[pos1, :]
pos2 = value.Description == 'Electric voltage momentary phase 2 (notverified)'
df_phase2 = value.loc[pos2, :]
pos3 = value.Description == 'Electric voltage momentary phase 3 (notverified)'
df_phase3 = value.loc[pos3, :]
# for phase in ['1', '2', '3']:
# if not os.path.exists('pickles/' + key + '/phase'+phase):
# os.makedirs('pickles/' + key + '/phase'+phase)
df_phase1.to_pickle(pickle_dir / key / "phase1")
df_phase2.to_pickle(pickle_dir / key / "phase2")
df_phase3.to_pickle(pickle_dir / key / "phase3")
# value.to_pickle(r"pickles/"+key+"/3PhasesDF")
def add_help_data(pickle_dir=Path('pickles')):
file_paths = get_file_paths(pickle_dir)
print(file_paths)
for path in file_paths:
print(path)
path = pickle_dir / Path(path)
df_phases = list(map(lambda p: pd.read_pickle(path / ("phase" + p)), ['1', '2', '3']))
print("Opened pickle")
phase_values = pd.DataFrame()
for i, df_p in enumerate(df_phases):
df_p.drop(columns=['Unit', 'AliasName'], inplace=True)
phase = 'p' + str(i + 1)
phase_values[phase] = df_p.Value
for df_p in df_phases:
df_p['row_dif'] = df_p.Value.diff()
print("Created help values")
np.diff(phase_values.values)
phase_values['max_dif'] = phase_values.apply(
lambda row: max(abs(row['p1'] - row['p2']), abs(row['p1'] - row['p3']),
abs(row['p2'] - row['p3'])), axis=1)
print("Calculated help data")
for df_p in df_phases:
df_p['phase_dif'] = phase_values['max_dif']
print("Assigned help data")
for i, df_p in enumerate(df_phases):
print(df_p)
df_p.to_pickle(path / ("h_phase" + str(i + 1)))
def update_trafo(pickle_dir=Path('pickles')):
# pd.options.mode.chained_assignment = None
file_paths = get_file_paths(pickle_dir)
print(file_paths)
for path in file_paths:
print(path)
path = pickle_dir / Path(path)
df_phases = list(map(lambda p: pd.read_pickle(path / ("h_phase" + p)), ['1', '2', '3']))
print("Opened pickle")
df_row_difs = pd.DataFrame()
for p, df_p in enumerate(df_phases):
df_p['row_dif'] = df_p.Value.diff() / df_p.Value.index.to_series().diff().dt.total_seconds()
df_row_difs[str(p)] = df_p['row_dif']
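# Keep only time steps where all three phases change in the same direction; rows where
# the signs differ are zeroed, so the minimum absolute common rate of change below is
# what gets assigned as 'trafo' (presumably the transformer-level change).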
df_row_difs.loc[True ^ (((df_row_difs['0'] >= 0) & (df_row_difs['1'] >= 0) & (df_row_difs['2'] >= 0)) | (
(df_row_difs['0'] < 0) & (df_row_difs['1'] < 0) & (df_row_difs['2'] < 0)))] = 0
df_row_difs = df_row_difs.abs()
for df_p in df_phases:
# df_p['trafo'] = min(df_phases[0]['row_dif'].abs(), df_phases[1]['row_dif'].abs(), df_phases[2]['row_dif'].abs())
df_p['trafo'] = df_row_difs.min(axis=1)
print("Assigned help data")
for i, df_p in enumerate(df_phases):
# print(df_p)
df_p.to_pickle(path / ("h_phase" + str(i + 1)))
def add_seasonal_data(pickle_dir=Path('pickles')):
seasonal_data = pd.DataFrame()
file_paths = get_file_paths(pickle_dir)
print(file_paths)
day = pd.Timedelta('1d')
for path in file_paths:
print(path)
path = pickle_dir / Path(path)
df_phases = list(map(lambda p: pd.read_pickle(path / ("phase" + p))[['Value']], ['1', '2', '3']))
weekday_dfs_phases = [[None for x in range(7)] for y in range(3)]
min_date = min(list(map(lambda df: df.index.min(), df_phases))).date()
max_date = max(list(map(lambda df: df.index.max(), df_phases))).date()
for p, df_p in enumerate(df_phases):
for start_time in pd.date_range(min_date, max_date, freq='d'):
end_time = start_time + day
df_p_day = df_p.loc[start_time:end_time]
df_p_day_med = df_p_day.resample('30s').median().rename(columns={'Value': str(start_time.date())})
df_p_day_med.index = df_p_day_med.index.time
weekday = start_time.date().weekday()
# print(weekday_dfs_phases[p][weekday])
if weekday_dfs_phases[p][weekday] is None:
weekday_df = df_p_day_med
weekday_dfs_phases[p][weekday] = weekday_df
else:
weekday_df = weekday_dfs_phases[p][weekday]
weekday_df = weekday_df.join(df_p_day_med, how='outer')
weekday_dfs_phases[p][weekday] = weekday_df
print("Split DF")
for p, df_weekdays in enumerate(weekday_dfs_phases):
for w, df in enumerate(df_weekdays):
df['med'] = df.median(axis=1)
# print(df)
df_phases_h = list(map(lambda p: pd.read_pickle(path / ("h_phase" + p)), ['1', '2', '3']))
print(df_phases_h)
for p, df_p in enumerate(df_phases_h):
print(p)
df_weekdays = weekday_dfs_phases[p]
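# SeasDif: deviation of each raw measurement from the per-weekday median profile,
# looked up at the timestamp rounded down to its 30-second bucket.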
df_p['SeasDif'] = df_p.apply(lambda row: (row['Value'] - df_weekdays[row.name.weekday()].loc[
(row.name - datetime.timedelta(seconds=row.name.second % 30,
microseconds=row.name.microsecond)).time()]['med']), axis=1)
print(df_p)
df_p.to_pickle(path / ("h_phase" + str(p + 1)))
def add_new_seasonal_data(pickle_dir=Path('pickles')):
file_paths = get_file_paths(pickle_dir)
for path in file_paths:
station_season = pd.read_pickle(pickle_dir / (path + 'season_aggregation'))
print(path)
path = pickle_dir / Path(path)
df_phases = list(map(lambda p: pd.read_pickle(path / ("h_phase" + p)), ['1', '2', '3']))
for p, df_p in enumerate(df_phases):
df_p.drop(labels='SeasDif', inplace=True, errors='ignore')
print(p)
print(df_p)
v1s = []
print(station_season)
print(station_season.sort_index())
for index, row in df_p.iterrows():
print(row['Value'])
print(index)
print(index - datetime.timedelta(seconds=index.second % 30,
microseconds=index.microsecond))
print(station_season.loc[index - datetime.timedelta(seconds=index.second % 30,
microseconds=index.microsecond)])
v1 = row['Value'] - station_season.loc[index - datetime.timedelta(seconds=index.second % 30,
microseconds=index.microsecond)]
print(v1)
v1s.append(v1)
df_p['SeasDif'] = v1s
print(df_p)
df_p.to_pickle(path / ("h_phase" + str(p + 1)))
def add_cross_station_data(pickle_dir=Path('pickles')):
station_avgs = | pd.read_pickle(pickle_directory / "meanStationValues") | pandas.read_pickle |
import pandas as pd
import re
from collections import OrderedDict
#
# This file includes functions used in the training procedure. The functions are simple and self-explanatory.
# Please refer to the README, which describes the sequence of steps.
#
def helper_sentence_to_tokens(snt):
step1 = []
for token in snt.split(' '):
handled = False
if '-' in token:
subkns = token.split('-')
for i in range(0, len(subkns) - 1):
step1.append(subkns[i])
step1.append('-')
step1.append(subkns[len(subkns) - 1])
handled = True
if not handled:
step1.append(token)
step2 = []
for token in step1:
m = re.search("^([0-9:\.,½¼¾⅛⅔⅓$¥€£]+)([/А-яа-яA-Za-z²³2\"\'\.\,]+)$", token)
if m:
num = m.group(1)
suffix = m.group(2)
step2.append(num)
step2.append(suffix)
else:
step2.append(token)
return step2
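# Illustrative example (not part of the original source):
# helper_sentence_to_tokens("2-3 5kg") -> ['2', '-', '3', '5', 'kg']
# hyphenated tokens are split around '-' and number+suffix tokens like "5kg" are
# separated into the numeric part and its unit/suffix.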
def sentence_to_words_and_chars(input_file, output_file):
sentences = open(input_file, "r+", encoding='utf-8').readlines()
processed_sentences = []
for snt in sentences:
new_snt = []
for token in helper_sentence_to_tokens(snt):
if not re.match("\<T[0-9]*\>", token) and not re.match("\</T[0-9]*\>", token) and \
re.match("^[A-Za-z0-9+-г\./]*$", token) or re.match(
"^[A-Za-z#0+-9½¼¾⅛⅔⅓_—\-\,\.\$¥€£\:\%\(\)\\\/]*$",
token) or re.match("^[А-Я]*$", token) or \
(re.match("^[А-Яа-я]*$", token) and (sum(1 for c in token if c.isupper()) > 2)) or \
re.match("^[А-Я]\.[А-Я]\.$", token) or re.match("^[А-Я]\.[А-Я]\.[А-Я]\.$", token):
new_snt = new_snt + list(token)
else:
new_snt = new_snt + [token]
processed_sentences.append(" ".join(new_snt))
res_out = open(output_file, "w+", encoding='utf-8')
res_out.writelines(processed_sentences)
res_out.close()
def disjoin_source_target(data_file, src_file, tgt_file):
data = pd.read_csv(data_file, encoding='utf-8')
in_df_l = [] # pd.DataFrame(index=False, columns=["sentence_id","token_id","before"])
out_df_l = [] # pd.DataFrame(index=False, columns=["id","after"])
for (sid, tid, before, after) in data[['sentence_id', 'token_id', 'before', 'after']].values:
in_df_l.append([sid, tid, before])
out_df_l.append(["%s_%s" % (sid, tid), after])
in_df = pd.DataFrame(data=in_df_l, columns=["sentence_id", "token_id", "before"])
out_df = pd.DataFrame(data=out_df_l, columns=["id", "after"])
in_df.to_csv(src_file, encoding='utf-8', index=False)
out_df.to_csv(tgt_file, encoding='utf-8', index=False)
def source_to_sentences(input_file, output_file):
# Processing 'in' file first - writing it into sentences.
source_data = | pd.read_csv(input_file, encoding='utf-8') | pandas.read_csv |
import pandas as pd
import functools as ft
import numpy as np
import sys
# driver script for applying the refactoring transformation
class Stmt:
def __init__(self, start_line, start_char, end_line, end_char):
self.start_line = start_line
self.start_char = start_char
self.end_line = end_line
self.end_char = end_char
def __str__(self):
return("[" + str(self.start_line) + ", " + str(self.start_char) + "; " + str(self.end_line) + ", " + str(self.end_char) + "]")
def __eq__(self, other):
return( self.start_line == other.start_line and self.end_line == other.end_line and self.start_char == other.start_char and self.end_char == other.end_char)
def __lt__(self, other):
return( self.start_line < other.start_line or (self.start_line == other.start_line and self.start_char < other.start_char))
def __le__(self, other):
return( self < other or self == other)
def __hash__(self):
return id(self)
def subsumes(self, other): # returns true if self subsumes the other_stmt
left_sub = self.start_line < other.start_line or (self.start_line == other.start_line and self.start_char <= other.start_char)
right_sub = self.end_line > other.end_line or (self.end_line == other.end_line and self.end_char >= other.end_char)
return( left_sub and right_sub and not self == other)
class ParseTreeNode:
def __init__(self, child_list, stmt):
self.stmt = stmt
self.child_list = child_list
self.is_leaf = False
self.text = [[]]
self.parent = None
if len(child_list) == 0:
self.is_leaf = True
for c in child_list:
c.parent = self
def __set_self_text(self, file_contents):
if self.is_leaf:
self.text = [get_stmt( self.stmt, file_contents)] # return array of strings representing the lines of the statement
return
# if we're here it means there is at least one child
current_child = self.child_list[0]
# get the text from the beginning of the stmt until the beginning of the first child node
subs = 0 if current_child.stmt.start_line == self.stmt.start_line else 1
self.text = [get_stmt( Stmt(self.stmt.start_line, self.stmt.start_char, current_child.stmt.start_line, current_child.stmt.start_char - subs), file_contents)]
for ind in range(1, len(self.child_list)):
next_child = self.child_list[ind]
# print(Stmt(current_child.stmt.end_line, current_child.stmt.end_char + 1, next_child.stmt.start_line, next_child.stmt.start_char))
self.text += [get_stmt( Stmt(current_child.stmt.end_line, current_child.stmt.end_char + 1, next_child.stmt.start_line, next_child.stmt.start_char - 1), file_contents)]
current_child = next_child
adds = 0 if len(self.child_list) == 1 and self.stmt.start_line == self.stmt.end_line and self.stmt.end_char > self.child_list[0].stmt.end_char else 1
self.text += [get_stmt( Stmt(current_child.stmt.end_line, current_child.stmt.end_char + adds, self.stmt.end_line, self.stmt.end_char), file_contents)]
def set_text(self, file_contents):
self.__set_self_text(file_contents)
for c in self.child_list:
c.set_text(file_contents)
def get_text(self):
to_ret = self.text[0].copy()
for ind in range(1, len(self.text)):
to_ret += self.child_list[ind - 1].get_text()
to_ret += self.text[ind].copy()
return( to_ret)
def print(self):
print_array_newline_sep( self.get_text())
def __hash__(self):
return id(self)
def count_tree(rnode):
count = 1
for c in rnode.child_list:
count += count_tree(c)
return count
# text is an array of arrays, just the first element
# is an array of strings
# we want to get the index of the element with "await" in it
# there should just be one (but if there's more than one we can just do the first one)
def get_index_with_await( text, swapping_last = False):
indices = [(k, i) for k in range(len(text)) for i in range( 0, len(text[k])) if text[k][i].count("await ") > 0]
if len(indices) > 1 or len(indices) == 0:
print("WHAT IS GOING ON: " + str(text))
return( (-1, -1))
if swapping_last:
return( indices[-1]) # if we're reordering forward, get the last await
return( indices[0]) # otherwise, get the first await
def corresp_paren(p):
return {
'(' : ')',
')' : '(',
'[' : ']',
']' : '[',
'{' : '}',
'}' : '{'
}.get(p, p) # just return p itself as a default if it's not in the dict
def build_paren_stack( a_string):
paren_stack = ""
for c in a_string:
if c == "(" or c == "{" or c == "[":
paren_stack += c
elif c == ")" or c == "}" or c == "]":
if len(paren_stack) > 0 and paren_stack[-1] == corresp_paren(c):
paren_stack = paren_stack[ : -1]
else:
paren_stack += c
return( paren_stack)
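# Illustrative examples (not part of the original source):
# build_paren_stack("f(x[0])") -> ""      (balanced)
# build_paren_stack("f(x")     -> "("     (unclosed opener remains on the stack)
# build_paren_stack(")x(")     -> ")("    (mismatched closers are pushed, not popped)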
# we need to match the parens before and after the awaits, in case
# note: no need to look through the text of the child_list, since these are self-contained
# statements and so won't include closing/opening parens in part of the enclosing stmt, which
# is the statement we're parsing
def get_compensating_parens( text, text_ind, ind_to_split_at):
if ind_to_split_at == -1: # then there was an error
return( -1, -1)
start_text = text[text_ind][0 : ind_to_split_at]
end_text = text[text_ind][ ind_to_split_at + 1 : ]
# get the text we're going to split:
split_text = text[text_ind][ind_to_split_at]
front_paren_stack = build_paren_stack( ''.join(start_text) + split_text[0: split_text.index('await')])
end_paren_stack = build_paren_stack( split_text[ split_text.index('await') + len('await') : ] + ''.join(end_text))
if build_paren_stack(front_paren_stack + end_paren_stack) != "":
#raise ValueError("Mismatched parens in: " + text[text_ind][ind_to_split_at])
return( -1, -1)
return( front_paren_stack, end_paren_stack)
# this is like move_stmt, but instead of just shifting the statement, we're actually going
# to split it into an initial promise creation:
# var temp = < portion of the statement after the await >
# this goes where the statement would be moved to
# < portion of the statement before the await > = await temp
# this should just involve changing the text() of the moved node and the placeholder node
# no scoping issues should ensue, since we're moving the whole statement
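# Illustrative sketch of the transformation (hypothetical JS input, not from the original
# source), using temp_var_name = "temp_1" and moving before an earlier statement S:
#   before:                               after:
#     ...statement S...                     var temp_1 = fetchData(url);   <- moved before S
#     var data = await fetchData(url);      ...statement S...
#                                           var data = await temp_1;       <- stays in place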
def move_and_split_await( root_node, root_map, stmt_to_move, stmt_to_move_before, temp_var_name, add_timing = False, filename = ""):
node_to_move = root_map[stmt_to_move]
node_to_move_before = root_map[stmt_to_move_before]
# updates required:
# remove node_to_move from its parent's child list
# then, add it before stmt_to_move_before
# NEW ITEM: update the text (using temp_var_name as specified)
# when we remove node_to_move from the child list:
# -- just replace it with a new, blank node but with the old stmt as the stmt
# -- then, it's not blank any more: the text is the corresponding "everything before the await" = await temp_var_name
# -- and, when we move the node, we're actually replacing the text to be var temp_var_name = "everything after the await"
# -- also, split the child node list
# -- and, replace it in the node_map
old_pos_node = ParseTreeNode( [], stmt_to_move)
# now, compute the index to split at
(text_ind, ind_to_split_at) = get_index_with_await(node_to_move.text)
#pre-compute paren stacks so we can catch all errors at once
(front_paren_stack, end_paren_stack) = get_compensating_parens( node_to_move.text, text_ind, ind_to_split_at)
if ind_to_split_at == -1 or front_paren_stack == -1: # then this is a problem, and we just won't do the reordering
print("There's an issue and we can't automatically swap the following statement: ")
node_to_move.print()
print("With the following statement: ")
node_to_move_before.print()
print("DONE REPORTING PROBLEMS")
# return( root_node, root_map, 0)
return(0)
# split both the text array and the child_list
start_text = node_to_move.text[text_ind][0 : ind_to_split_at]
end_text = node_to_move.text[text_ind][ ind_to_split_at + 1 : ]
start_child_list = node_to_move.child_list[0 : ind_to_split_at]
end_child_list = node_to_move.child_list[ ind_to_split_at :]
# get the text we're going to split:
split_text = node_to_move.text[text_ind][ind_to_split_at]
string_before_await = split_text[0: split_text.index('await')]
string_after_await = split_text[ split_text.index('await') + len('await') : ]
if build_paren_stack(string_after_await) != "":
return( 0)
# now, add the new updates to the strings
# don't forget the parens
string_before_await = string_before_await + " await " + temp_var_name + end_paren_stack
string_after_await = "var " + temp_var_name + " = " + front_paren_stack + string_after_await
# and, set up the nodes
# starting off the same as before
if node_to_move.parent != None:
child_list_to_rem_from = node_to_move.parent.child_list
child_list_to_rem_from[child_list_to_rem_from.index(node_to_move)] = old_pos_node
root_map[stmt_to_move] = old_pos_node
if node_to_move_before.parent != None:
child_list_to_add_to = node_to_move_before.parent.child_list
child_list_to_add_to.insert( child_list_to_add_to.index(node_to_move_before), node_to_move)
node_to_move_before.parent.text.insert( child_list_to_add_to.index(node_to_move_before), [""])
node_to_move.parent = node_to_move_before.parent
# now, update the text and child_lists in node_to_move and old_pos_node:
# node_to_move gets the string after await (i.e. the promise creation we want to move earlier)
# and, gets the end_child_list (for the same reason)
bad_paren_start = ""
bad_parens = False
if add_timing:
timing_pre_text = "var TIMING_" + temp_var_name + " = perf_hooks.performance.now();\n "
timing_post_text = "console.log(\"" + filename + "& " + str(old_pos_node.stmt) + "& " + temp_var_name + "& \" + (perf_hooks.performance.now() - TIMING_" + temp_var_name + "));\n "
new_await_timing_var = "await " + temp_var_name + end_paren_stack + ";\n "
string_before_await = split_text[0: split_text.index('await')]
# can't add timing if the await is in the middle of a statement, for example like multiassignment in kactus's utils.ts
# so in this case just put the timing before the entire statement
if build_paren_stack(string_before_await) != "":
bad_parens = True
if len( str.strip(string_before_await)) > 0:
# only create this variable if we actually need it (just having it hanging out alone at the end of string_before_await is an error, but then it's also an error if created but never used)
string_before_await += " AWAIT_VAR_TIMING_" + temp_var_name
new_await_timing_var = "var AWAIT_VAR_TIMING_" + temp_var_name + " = " + new_await_timing_var
if not bad_parens:
string_before_await = timing_pre_text + new_await_timing_var + timing_post_text + string_before_await
else:
bad_paren_start = timing_pre_text + new_await_timing_var + timing_post_text
node_to_move.text = [[string_after_await] + end_text]
node_to_move.child_list = end_child_list
old_pos_node.text = [merge_into_first_string_of_list( start_text, bad_paren_start) + [string_before_await]]
old_pos_node.child_list = start_child_list
# return( root_node, root_map)
return(1)
# this relies on the earlier swaps already being done
def move_await_later( root_node, root_map, stmt_to_move, stmt_to_move_after, temp_var_name, add_timing = False, filename = ""):
swapping_last = True
node_to_move = root_map[stmt_to_move]
node_to_move_after = root_map[stmt_to_move_after]
# updates required:
# split the statement at the (last) await, using temp_var_name as specified:
# -- the promise creation ("var temp_var_name_LATER = <everything after the await>") stays at the
#    original position, so the promise is still created where the statement used to be
# -- a new node whose text is "<everything before the await> await temp_var_name_LATER" is
#    appended to the end of stmt_to_move_after's child list, so the await itself happens later
# -- a placeholder child node is inserted at the split point to keep text and child_list aligned
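# illustrative example (hypothetical JS, temp_var_name = "TEMP"): a statement like
#   const data = await fetchUser(id);
# keeps the promise creation at its original position:
#   var TEMP_LATER = fetchUser(id);
# and appends the await to the end of stmt_to_move_after's block:
#   const data = await TEMP_LATER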
old_pos_node = ParseTreeNode( [], stmt_to_move)
# now, compute the index to split at
(text_ind, ind_to_split_at) = get_index_with_await(node_to_move.text, swapping_last)
#pre-compute paren stacks so we can catch all errors at once
(front_paren_stack, end_paren_stack) = get_compensating_parens( node_to_move.text, text_ind, ind_to_split_at)
if ind_to_split_at == -1 or front_paren_stack == -1: # then this is a problem, and we just won't do the reordering
print("There's an issue and we can't automatically swap the following statement: ")
node_to_move.print()
print("With the following statement: ")
node_to_move_after.print()
print("DONE REPORTING PROBLEMS")
# return( root_node, root_map, 0)
return(0)
# split both the text array and the child_list
start_text = node_to_move.text[text_ind][0 : ind_to_split_at]
end_text = node_to_move.text[text_ind][ ind_to_split_at + 1 : ]
start_child_list = node_to_move.child_list[0 : ind_to_split_at]
end_child_list = node_to_move.child_list[ ind_to_split_at :]
# get the text we're going to split:
split_text = node_to_move.text[text_ind][ind_to_split_at]
string_before_await = split_text[0: split_text.rindex('await')]
string_after_await = split_text[ split_text.rindex('await') + len('await') : ]
if build_paren_stack(string_after_await) != "":
return( 0)
new_await_var = "var " + temp_var_name + "_LATER = " + front_paren_stack + split_text[ split_text.rindex('await') + len('await') : ]
# if len( str.strip(string_before_await)) > 0:
# string_before_await = string_before_await + " " + temp_var_name + "_LATER"
# if build_paren_stack(string_before_await) != "": # the addition of the varname doesnt change this funcionality
# # don't even split the text any more
# node_to_split.text = [[timing_pre_text]+ node_to_split.text[text_ind] + ["\n" + timing_post_text]]
# else:
node_to_move.text[text_ind] = start_text + [new_await_var] + end_text + [end_paren_stack]
placeholder_node = ParseTreeNode([], None)
placeholder_node.parent = node_to_move
node_to_move.child_list.insert( ind_to_split_at, placeholder_node)
node_to_move.text.insert(ind_to_split_at, [""])
# now, add the await to the node to move after
actual_await_node = ParseTreeNode([], None)
actual_await_node.parent = node_to_move_after
actual_await_node.text = [[string_before_await + "await " + temp_var_name + "_LATER"]]
if add_timing:
timing_pre_text = "var TIMING_" + temp_var_name + "_LATER = perf_hooks.performance.now();\n "
timing_post_text = "console.log(\"" + filename + "& " + str(old_pos_node.stmt) + "& " + temp_var_name + "& \" + (perf_hooks.performance.now() - TIMING_" + temp_var_name + "_LATER));\n "
actual_await_node.text = [[timing_pre_text + timing_post_text + actual_await_node.text[0][0]]]
# make room in the parent node, make sure to add it at the end
node_to_move_after.text += [[""]]
node_to_move_after.child_list += [actual_await_node]
return( 1)
def merge_into_first_string_of_list( string_list, to_merge):
if string_list == []:
return( [to_merge])
string_list[0] = to_merge + string_list[0]
return( string_list)
def time_await( root_node, root_map, stmt_to_time, temp_var_name, filename):
node_to_time = root_map[stmt_to_time]
# all we care about here is the await; we're just updating the text
(text_ind, ind_to_split_at) = get_index_with_await(node_to_time.text)
(front_paren_stack, end_paren_stack) = get_compensating_parens( node_to_time.text, text_ind, ind_to_split_at)
if ind_to_split_at == -1 or front_paren_stack == -1: # then this is a problem, and we just won't do the reordering
print("There's an issue and we can't automatically time the following statement: ")
node_to_time.print()
print("DONE REPORTING PROBLEMS")
return( 0)
# split both the text array and the child_list
start_text = node_to_time.text[text_ind][0 : ind_to_split_at]
end_text = node_to_time.text[text_ind][ ind_to_split_at + 1 : ]
# get the text we're going to split:
split_text = node_to_time.text[text_ind][ind_to_split_at]
# string_after_await = split_text[ split_text.index('await') + len('await') : ]
# now, add the new updates to the strings
# don't forget the parens
# string_after_await = "var " + temp_var_name + " = " + front_paren_stack + string_after_await
# always add timing
timing_pre_text = "var TIMING_" + temp_var_name + " = perf_hooks.performance.now();\n "
timing_post_text = "console.log(\"" + filename + "& " + str(node_to_time.stmt) + "& " + temp_var_name + "& \" + (perf_hooks.performance.now() - TIMING_" + temp_var_name + "));\n "
new_await_timing_var = "await " + front_paren_stack + split_text[ split_text.index('await') + len('await') : ]
string_before_await = split_text[0: split_text.index('await')]
if len( str.strip(string_before_await)) > 0:
string_before_await = string_before_await + " AWAIT_VAR_TIMING_" + temp_var_name
new_await_timing_var = "var AWAIT_VAR_TIMING_" + temp_var_name + " = " + new_await_timing_var
string_timing_await = timing_pre_text + new_await_timing_var
if build_paren_stack(string_before_await) != "": # the addition of the varname doesn't change this functionality
# don't even split the text any more
node_to_time.text[text_ind] = [timing_pre_text] + node_to_time.text[text_ind] + ["\n" + timing_post_text]
else:
node_to_time.text[text_ind] = start_text + [string_timing_await]+ end_text + [end_paren_stack + timing_post_text + string_before_await]
placeholder_node = ParseTreeNode([], None)
placeholder_node.parent = node_to_time
node_to_time.child_list.insert( ind_to_split_at, placeholder_node)
node_to_time.text.insert(ind_to_split_at, [""])
return( 1)
def time_call( root_node, root_map, stmt_to_time, temp_var_name, filename):
node_to_time = root_map[stmt_to_time]
# unlike time_await, we don't need to find or split an await here: we're timing the whole
# statement, so all we do is put timing code (and placeholder child nodes) around it
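# illustrative output (hypothetical temp_var_name "T" and filename "f.ts"):
#   var TIMING_T = perf_hooks.performance.now();
#   <original statement and its children>
#   console.log("f.ts& <stmt position>& T& " + (perf_hooks.performance.now() - TIMING_T));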
timing_pre_text = "var TIMING_" + temp_var_name + " = perf_hooks.performance.now();\n "
timing_post_text = "console.log(\"" + filename + "& " + str(node_to_time.stmt) + "& " + temp_var_name + "& \" + (perf_hooks.performance.now() - TIMING_" + temp_var_name + "));\n "
placeholder_nodes = (ParseTreeNode([], None), ParseTreeNode([], None))
placeholder_nodes[0].parent = node_to_time
placeholder_nodes[1].parent = node_to_time
node_to_time.child_list.insert( 0, placeholder_nodes[0])
node_to_time.child_list += [placeholder_nodes[1]]
node_to_time.text.insert(0, [timing_pre_text])
node_to_time.text += [[timing_post_text]]
return( 1)
def move_stmt( root_node, root_map, stmt_to_move, stmt_to_move_before):
node_to_move = root_map[stmt_to_move]
node_to_move_before = root_map[stmt_to_move_before]
# updates required:
# remove node_to_move from its parent's child list
# then, add it before stmt_to_move_before
# when we remove node_to_move from the child list:
# -- just replace it with a new, blank node but with the old stmt as the stmt
# -- and, replace it in the node_map
child_list_to_rem_from = node_to_move.parent.child_list
placeholder_node = ParseTreeNode( [], stmt_to_move)
child_list_to_rem_from[child_list_to_rem_from.index(node_to_move)] = placeholder_node
root_map[stmt_to_move] = placeholder_node
child_list_to_add_to = node_to_move_before.parent.child_list
child_list_to_add_to.insert( child_list_to_add_to.index(node_to_move_before), node_to_move)
node_to_move_before.parent.text.insert( child_list_to_add_to.index(node_to_move_before), [""])
node_to_move.parent = node_to_move_before.parent
return( root_node, root_map)
def convert__file_spec_stmt_list_to_tree( stmt_list, file_contents):
# can iterate through the dataframe
# probably need some recursive setup here, but this is going to be the wrapper helper function
# first, make a root statement that encompasses the whole file
root_stmt = Stmt(0, 0, len(file_contents) - 1, len(file_contents[-1]))
[root_node, root_map] = create_subsumed( [root_stmt] + stmt_list, 0, dict([]))[1: ]
return( root_node, root_map)
def create_subsumed( stmt_list, cur_ind, stmt_node_map):
if not cur_ind < len(stmt_list):
raise ValueError("Index must be less than the length of the stmt array")
child_list = []
current_stmt = stmt_list[ cur_ind]
while cur_ind < len(stmt_list) - 1 and current_stmt.subsumes( stmt_list[ cur_ind + 1]):
[cur_ind, next_node, stmt_node_map] = create_subsumed( stmt_list, cur_ind + 1, stmt_node_map)
child_list += [ next_node]
# cur_ind += 1
cur_node = ParseTreeNode( child_list, current_stmt)
stmt_node_map[current_stmt] = cur_node
return( cur_ind, cur_node, stmt_node_map)
def convert_string_to_stmt( row):
stmt_string = row.stmt
stmt_string = stmt_string.split(",")
if len(stmt_string) != 4:
raise ValueError("This string should represent a stmt, which has 4 ints for position")
return( Stmt( int(stmt_string[0]) - 1, int(stmt_string[1]) - 1, int(stmt_string[2]) - 1, int(stmt_string[3])))
# convert a row of QL output to a list of statements
# we're subtracting 1 from the line numbers since the queries report starting at line 1
# but we need line 0 for the file
def convert_row_to_stmts( row):
[s_startline, s_startchar, s_endline, s_endchar,
ess_startline, ess_startchar, ess_endline, ess_endchar,
lss_startline, lss_startchar, lss_endline, lss_endchar, filename] = row
s = Stmt( s_startline - 1, s_startchar - 1, s_endline - 1, s_endchar)
ess = Stmt( ess_startline - 1, ess_startchar - 1, ess_endline - 1, ess_endchar)
lss = Stmt( lss_startline - 1, lss_startchar - 1, lss_endline - 1, lss_endchar)
return( [s, ess, lss, filename])
def convert_row_to_stmts_with_calls( row):
[s_startline, s_startchar, s_endline, s_endchar,
ess_startline, ess_startchar, ess_endline, ess_endchar,
lss_startline, lss_startchar, lss_endline, lss_endchar, filename,
cs_startline, cs_startchar, cs_endline, cs_endchar, cs_name, cs_filename] = row
s = Stmt( s_startline - 1, s_startchar - 1, s_endline - 1, s_endchar)
ess = Stmt( ess_startline - 1, ess_startchar - 1, ess_endline - 1, ess_endchar)
lss = Stmt( lss_startline - 1, lss_startchar - 1, lss_endline - 1, lss_endchar)
cs = Stmt( cs_startline - 1, cs_startchar - 1, cs_endline - 1, cs_endchar)
return( [s, ess, lss, filename, cs, cs_name, cs_filename])
def convert_row_to_stmt( row):
[s_startline, s_startchar, s_endline, s_endchar, filename] = row
s = Stmt( s_startline - 1, s_startchar - 1, s_endline - 1, s_endchar)
return( [s, filename])
def keep_first_stmt( row1, s1, s1_file):
if row1.file != s1_file: # if they're not in the same file they can't be overlapping
return True
s2 = row1.to_move
s1_before = (s1.start_line < s2.start_line or (s1.start_line == s2.start_line and s1.start_char < s2.start_char))
s1_after_no_overlap = (s1.start_line > s2.start_line or (s1.start_line == s2.start_line and s1.start_char > s2.start_char))
s1_after_no_overlap = s1_after_no_overlap and (s1.end_line > s2.end_line or (s1.end_line == s2.end_line and s1.end_char > s2.end_char))
return( s1_before or s1_after_no_overlap or s1 == s2) # we'll have removed duplicates at this point, so keeping the s1 == s2 case is safe
# return array of strings representing the lines of the statement
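# e.g. (illustrative) for a stmt spanning lines 3-5 this returns [the tail of line 3 from start_char,
# all of line 4, the head of line 5 up to and including end_char]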
def get_stmt(stmt, file_contents):
ind = stmt.start_line
if ind == stmt.end_line and (len(file_contents[ind]) < stmt.start_char or stmt.end_char == -1):
return []
# special case for the statement only being one character -- seems to happen with "{" after generic classes
if ind == stmt.end_line and stmt.start_char == stmt.end_char and not (ind == 0 and stmt.start_char == 0): # fake root node
return( [ file_contents[ ind][ stmt.start_char : stmt.end_char + 1 ]])
# special case if the stmt is only on one line
if ind == stmt.end_line:
adds = 1 if len(file_contents[ind]) > stmt.end_char else 0
end_char = ";" if (adds == 1 and file_contents[ ind][ stmt.end_char] == ";") else ""
end_char = "," if (adds == 1 and file_contents[ ind][ stmt.end_char] == ",") else end_char
return( [ file_contents[ ind][ stmt.start_char : stmt.end_char ] + end_char])
stmt_cont = []
if not len(file_contents[ind]) < stmt.start_char:
stmt_cont = [ file_contents[ ind][ stmt.start_char :]]
ind = ind + 1
while ind < stmt.end_line:
stmt_cont += [ file_contents[ ind]]
ind = ind + 1
stmt_cont += [ file_contents[ ind][ 0 : stmt.end_char + 1]]
return( stmt_cont)
# print an array (should be strings), with each array entry on a new line
# here used to print out the contents of a file, post split on newline
def print_array_newline_sep( to_print):
print( ft.reduce( lambda a, b: a + "\n" + b, to_print, ""))
# save a copy of a specified file (name would be filename_old) -- the actual save below is
# commented out, so currently we only log that the file is being modified
def save_old_copy( filename, file_contents):
print( "Modifying -- " + filename + " -- but not saving an old copy")
# file = open( filename + "_old", 'w')
# file.write( file_contents)
# file.close()
def reprocess_file_name( all_stmts):
org_root = "/home/ellen/Documents/odasa/projects/kactus/revision-2020-January-06--15-50-46/src"
new_root = "/home/ellen/Documents/ASJProj/TESTING_reordering/kactus"
all_stmts['file'] = all_stmts.apply( change_string_root, args=(org_root, new_root), axis=1)
def just_add_timing( dat, full_stmts, print_to_file = False, num_to_swap = -1, time_these_calls = None):
if time_these_calls is not None:
add_calls_timing( time_these_calls[ ~ time_these_calls.call_file.isin( dat.file)], full_stmts, print_to_file, -1)
df = dat
if num_to_swap != -1:
df = pd.DataFrame.head( dat, n = num_to_swap)
files = df.file.unique()
for f in files:
file = open(f, 'r')
file_contents = file.read()
file.close()
if print_to_file:
# save a copy of the file
save_old_copy(f, file_contents)
file_contents = file_contents.split("\n")
swaps = df[ df.file == f][['to_move', 'swap_before', 'swap_after']] # they'll already be sorted
all_stmts = full_stmts[full_stmts.file == f]
all_stmts.sort_values(['file','stmt'], inplace=True)
# create the parse tree for this whole file
(rnode, rmap) = convert__file_spec_stmt_list_to_tree( all_stmts.stmt.to_list(), file_contents)
rnode.set_text( file_contents)
add_swaps_to_all_stmt( all_stmts, swaps)
perf_hooks_added = [False]
time_one_file( swaps, rnode, rmap, f, do_time, perf_hooks_added)
if time_these_calls is not None:
calls = time_these_calls[ time_these_calls.call_file == f][['call_stmt', 'call_name']]
calls['call_stmt'] = calls.apply( lambda row: all_stmts[all_stmts.stmt == row.call_stmt].stmt.to_list()[0], axis=1)
time_one_file( calls, rnode, rmap, f, do_time_call, perf_hooks_added)
if print_to_file:
file = open( f, 'w')
file.write(ft.reduce( lambda a, b: join_stmts(a, b), rnode.get_text(), "").lstrip())
file.close()
else:
print("PROCESSING----------------------------------------------------")
print(f)
print("FILE CONTENTS BELOW-------------------------------------------")
print_array_newline_sep(rnode.get_text())
def break_stmt( root_node, root_map, stmt_to_break, breaking_text, filename):
node_to_time = root_map[stmt_to_break]
# no awaits or timing involved here: we just insert a console.warn with the breaking_text
# before the statement (plus a placeholder child node to keep text and child_list aligned)
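# e.g. (illustrative) with breaking_text "TEMP_VAR_AUTOGEN_CALLING_3__RANDOM" the inserted line is:
#   console.warn("TEMP_VAR_AUTOGEN_CALLING_3__RANDOM");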
throws_text = "console.warn(\"" + breaking_text + "\");\n "
placeholder_node = ParseTreeNode([], None)
placeholder_node.parent = node_to_time
node_to_time.child_list.insert( 0, placeholder_node)
node_to_time.text.insert(0, [throws_text])
return( 1)
def break_one_file( row, rnode, rmap, filename):
to_break = row.to_move
breaking_text = "TEMP_VAR_AUTOGEN_CALLING_" + str(row.name) + "__RANDOM"
if break_stmt( rnode, rmap, to_break, breaking_text, filename) == 0:
return(0)
return(1)
def break_everything( dat, full_stmts, print_to_file = False, num_to_swap = -1):
df = dat
if num_to_swap != -1:
df = pd.DataFrame.head( dat, n = num_to_swap)
files = df.file.unique()
for f in files:
file = open(f, 'r')
file_contents = file.read()
file.close()
if print_to_file:
# save a copy of the file
save_old_copy(f, file_contents)
file_contents = file_contents.split("\n")
swaps = df[ df.file == f][['to_move', 'swap_before', 'swap_after']] # they'll already be sorted
all_stmts = full_stmts[full_stmts.file == f]
all_stmts.sort_values(['file','stmt'], inplace=True)
# create the parse tree for this whole file
(rnode, rmap) = convert__file_spec_stmt_list_to_tree( all_stmts.stmt.to_list(), file_contents)
rnode.set_text( file_contents)
add_swaps_to_all_stmt( all_stmts, swaps)
swaps.apply( break_one_file, args=(rnode, rmap, f), axis=1)
if print_to_file:
file = open( f, 'w')
file.write(ft.reduce( lambda a, b: a + "\n" + b, rnode.get_text(), "").lstrip())
file.close()
else:
print("PROCESSING----------------------------------------------------")
print(f)
print("FILE CONTENTS BELOW-------------------------------------------")
print_array_newline_sep(rnode.get_text())
def add_calls_timing( dat, full_stmts, print_to_file = False, num_to_swap = -1):
df = dat
if num_to_swap != -1:
df = pd.DataFrame.head( dat, n = num_to_swap)
files = df.call_file.unique()
for f in files:
file = open(f, 'r')
file_contents = file.read()
file.close()
if print_to_file:
# save a copy of the file
save_old_copy(f, file_contents)
file_contents = file_contents.split("\n")
calls = df[ df.call_file == f][['call_stmt', 'call_name']] # they'll already be sorted
all_stmts = full_stmts[full_stmts.file == f]
all_stmts.sort_values(['file','stmt'], inplace=True)
# create the parse tree for this whole file
(rnode, rmap) = convert__file_spec_stmt_list_to_tree( all_stmts.stmt.to_list(), file_contents)
rnode.set_text( file_contents)
calls['call_stmt'] = calls.apply( lambda row: all_stmts[all_stmts.stmt == row.call_stmt].stmt.to_list()[0], axis=1)
time_one_file( calls, rnode, rmap, f, do_time_call, [False]) # always add perf hooks when just timing a file
if print_to_file:
file = open( f, 'w')
file.write(ft.reduce( lambda a, b: a + "\n" + b, rnode.get_text(), "").lstrip())
file.close()
else:
print("PROCESSING----------------------------------------------------")
print(f)
print("FILE CONTENTS BELOW-------------------------------------------")
print_array_newline_sep(rnode.get_text())
def do_swapping( dat, full_stmts, print_to_file = False, num_to_swap = -1, add_timing = False, pre_swap = True, post_swap = False, time_these_calls = None):
# first, do all the timings in files which we don't also do swaps in
if time_these_calls is not None:
add_calls_timing( time_these_calls[ ~ time_these_calls.call_file.isin( dat.file)], full_stmts, print_to_file, -1)
df = dat
if num_to_swap != -1:
df = pd.DataFrame.head( dat, n = num_to_swap)
files = df.file.unique()
for f in files:
file = open(f, 'r')
file_contents = file.read()
file.close()
if print_to_file:
# save a copy of the file
save_old_copy(f, file_contents)
file_contents = file_contents.split("\n")
swaps = df[ df.file == f][['to_move', 'swap_before', 'swap_after']] # they'll already be sorted
# stmt_file_name = f[f.rindex("/") + 1 : -3] + "_stmts.txt" # the last "/" until the end is the root file name, then the -3 gets rid of the .ts or .js
# all_stmts_data = pd.read_csv(stmt_file_name, sep = ',', header=None)
# all_stmts = all_stmts_data.apply(convert_row_to_stmt, axis=1, result_type='expand')
# all_stmts.columns = ['stmt', 'file']
# all_stmts.sort_values(['file', 'stmt'], inplace=True)
# reprocess_file_name( all_stmts)
all_stmts = full_stmts[full_stmts.file == f]
all_stmts.sort_values(['file','stmt'], inplace=True)
# create the parse tree for this whole file
(rnode, rmap) = convert__file_spec_stmt_list_to_tree( all_stmts.stmt.to_list(), file_contents)
rnode.set_text( file_contents)
add_swaps_to_all_stmt( all_stmts, swaps)
perf_hooks_added = [False] # tracking whether or not we need to add perf_hooks to the file (once it's added once, don't add it again) -- it's an array for pass by ref
if pre_swap:
file_sum = pd.DataFrame()
if not post_swap: # only do the self-swaps if we're not post-swapping
file_sum = deal_with_self_preswaps( swaps[swaps.to_move == swaps.swap_before], rnode, rmap, (add_timing and not post_swap), f)
preswap_one_file( swaps[swaps.to_move != swaps.swap_before], rnode, rmap, (add_timing and not post_swap), f, (0 if file_sum.empty else file_sum.sum()), perf_hooks_added)
if pre_swap and post_swap:
swaps.swap_after = preprocess_df_both_reorders( swaps)
if post_swap: # implies not preswap
deal_with_self_postswaps( swaps[swaps.to_move == swaps.swap_after], rnode, rmap, add_timing, f, perf_hooks_added)
lateswap_one_file( swaps[swaps.to_move != swaps.swap_after], rnode, rmap, add_timing, f, perf_hooks_added)
if time_these_calls is not None:
calls = time_these_calls[ time_these_calls.call_file == f][['call_stmt', 'call_name']]
calls['call_stmt'] = calls.apply( lambda row: all_stmts[all_stmts.stmt == row.call_stmt].stmt.to_list()[0], axis=1)
time_one_file( calls, rnode, rmap, f, do_time_call, perf_hooks_added)
if print_to_file:
file = open( f, 'w')
file.write(ft.reduce( lambda a, b: join_stmts(a, b), rnode.get_text(), "").lstrip())
file.close()
else:
print("PROCESSING----------------------------------------------------")
print(f)
print("FILE CONTENTS BELOW-------------------------------------------")
print_array_newline_sep(rnode.get_text())
def do_self_swap( row, rnode, rmap, add_timing = False, filename = ""):
to_move = row.to_move
temp_var_name = "TEMP_VAR_AUTOGEN" + str(row.name) + "__RANDOM"
# these should be the same stmts, since we should only have one DF
# probably make a column in our DF that is "swap_before", and then run through the ones with values
# move_stmt( rnode, rmap, to_move, move_before)
if not add_timing:
if split_single_await( rnode, rmap, to_move, temp_var_name, add_timing, filename) == 0:
return(0)
else:
if time_await( rnode, rmap, to_move, temp_var_name, filename) == 0:
return(0)
return(1)
def deal_with_self_preswaps( self_swaps, rnode, rmap, add_timing = False, filename = ""):
# don't need to do any recursive checking, since this is all going to be self-swaps
# all we need to do is split the await, basically the same work as if we were just adding timing
to_ret = self_swaps.apply( do_self_swap, args=(rnode, rmap, add_timing, filename), axis=1)
if to_ret is not None:
return( to_ret)
return( pd.DataFrame())
# need to split an await statement even if it can't be swapped any earlier, since when we move it
# later the promise creation needs to stay where it was originally
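# illustrative example (hypothetical JS, temp_var_name = "TEMP"): the single statement
#   const res = await send(msg);
# is rewritten in place as
#   var TEMP = await send(msg); const res = TEMP
# so the awaited value gets its own binding that a later move can refer to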
def split_single_await( root_node, root_map, stmt_to_split, temp_var_name, add_timing = False, filename = ""):
node_to_split = root_map[stmt_to_split]
# all we care about here is the await; we're just updating the text
(text_ind, ind_to_split_at) = get_index_with_await(node_to_split.text)
(front_paren_stack, end_paren_stack) = get_compensating_parens( node_to_split.text, text_ind, ind_to_split_at)
if ind_to_split_at == -1 or front_paren_stack == -1: # then this is a problem, and we just won't do the reordering
print("There's an issue and we can't automatically split the following statement: ")
node_to_split.print()
print("DONE REPORTING PROBLEMS")
return( 0)
# split both the text array and the child_list
start_text = node_to_split.text[text_ind][0 : ind_to_split_at]
end_text = node_to_split.text[text_ind][ ind_to_split_at + 1 : ]
# get the text we're going to split:
split_text = node_to_split.text[text_ind][ind_to_split_at]
# adapted from the time_await code, it's basically the same thing but we're not adding timing
# we're just splitting the await
new_await_var = "var " + temp_var_name + " = await " + front_paren_stack + split_text[ split_text.index('await') + len('await') : ]
string_before_await = split_text[0: split_text.index('await')] + " " + temp_var_name
# if build_paren_stack(string_before_await) != "": # the addition of the varname doesnt change this funcionality
# # don't even split the text any more
# node_to_split.text = [[timing_pre_text]+ node_to_split.text[text_ind] + ["\n" + timing_post_text]]
# else:
node_to_split.text = [start_text + [new_await_var]+ end_text + [end_paren_stack + string_before_await]]
placeholder_node = ParseTreeNode([], None)
placeholder_node.parent = node_to_split
node_to_split.child_list.insert( ind_to_split_at, placeholder_node)
return( 1)
def deal_with_self_postswaps( self_swaps, rnode, rmap, add_timing = False, filename = "", perf_hooks_added = [False]):
# don't need to do any recursive checking, since this is all going to be self-swaps
# all we need to do is split the await, basically the same work as if we were just adding timing
if add_timing:
results = self_swaps.apply( do_time, args=(rnode, rmap, filename), axis=1)
if not results.empty and results.sum() > 0 and not perf_hooks_added[0]:
req_perf_node = ParseTreeNode( [], None)
req_perf_node.parent = rnode
req_perf_node.text = [["const perf_hooks = require(\'perf_hooks\'); "]]
ind_to_insert = 0
if len(rnode.text) > 0 and len(rnode.text[0]) > 0 and len(rnode.text[0][0]) > 1 and rnode.text[0][0][0:2] == "#!": # can't insert above a "#!" (shebang) line
ind_to_insert = 1
rnode.child_list.insert(ind_to_insert, req_perf_node)
rnode.text.insert(ind_to_insert, [""])
# update to say we've adding perf_hooks to this file, and therefore don't need to do it again
perf_hooks_added[0] = True
# replace the root of a string with a new specified root
# throw an exception if the string does not start with the original root org_root
def change_string_root( row, org_root, new_root, is_call = False):
org_string = ""
if not is_call:
org_string = row.file
else:
org_string = row.call_file
if org_string.index(org_root) != 0:
raise ValueError("The original path " + org_string + " does not have the original root: " + org_root)
return( new_root + org_string[ len(org_root): ])
def add_swaps_to_all_stmt( all_stmts, swap_df):
# since the rmap is indexed by statement object, and we've created the swap_associations and all_stmts dataframes
# separately, we can't index the rmap with the swap_associations
# so, we need to add a column to all_stmts, with the corresponding association but with the right objects
try:
swap_df['to_move'] = swap_df.apply( lambda row: all_stmts[all_stmts.stmt == row.to_move].stmt.to_list()[0], axis=1) # can index at 0 since there will only be one
swap_df['swap_before'] = swap_df.apply( lambda row: all_stmts[all_stmts.stmt == row.swap_before].stmt.to_list()[0], axis=1)
swap_df['swap_after'] = swap_df.apply( lambda row: all_stmts[all_stmts.stmt == row.swap_after].stmt.to_list()[0], axis=1)
except IndexError:
print(swap_df)
def do_early_swap( row, rnode, rmap, add_timing = False, filename = ""):
to_move = row.to_move
move_before = row.swap_before
temp_var_name = "TEMP_VAR_AUTOGEN" + str(row.name) + "__RANDOM"
# these should be the same stmts, since we should only have one DF
# probably make a column in our DF that is "swap_before", and then run through the ones with values
# move_stmt( rnode, rmap, to_move, move_before)
print(row)
if move_and_split_await( rnode, rmap, to_move, move_before, temp_var_name, add_timing, filename) == 0:
return(0)
return(1)
def do_late_swap( row, rnode, rmap, add_timing = False, filename = ""):
to_move = row.to_move
move_after = row.swap_after
temp_var_name = "TEMP_VAR_AUTOGEN" + str(row.name) + "__RANDOM"
# these should be the same stmts, since we should only have one DF
# probably make a column in our DF that is "swap_before", and then run through the ones with values
# move_stmt( rnode, rmap, to_move, move_before)
if move_await_later( rnode, rmap, to_move, move_after, temp_var_name, add_timing, filename) == 0:
return(0)
return(1)
def do_time( row, rnode, rmap, filename):
to_time = row.to_move
temp_var_name = "TEMP_VAR_AUTOGEN" + str(row.name) + "__RANDOM"
if time_await( rnode, rmap, to_time, temp_var_name, filename) == 0:
return(0)
return(1)
def do_time_call( row, rnode, rmap, filename):
to_time = row.call_stmt
temp_var_name = "TEMP_VAR_AUTOGEN_CALLING_" + str(row.name) + "_" + row.call_name + "__RANDOM"
if time_call( rnode, rmap, to_time, temp_var_name, filename) == 0:
return(0)
return(1)
# function to preprocess a dataframe of swaps for ONE FILE
# if we're doing both forward and backward swapping, there's no guarantee that
# a statement can swap down to something below where another statement is swapping up
# and, since there is no dependency check between these statements, we need to conservatively
# assume that there is a dependency
# solution: set the swap_after to be the min of swap_after and the swap_befores of any stmts
# that start after this statement's swap_after target and whose swap_before target is still after this statement
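# illustrative example (hypothetical positions): if statement A (at position 3) could move down past
# position 10 (A.swap_after = 10), but statement B at position 12 wants to move up to position 6
# (B.swap_before = 6, which is still after A), then A's swap_after is clamped to min(10, 6) = 6,
# since we conservatively assume A and B might depend on each other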
def preprocess_df_both_reorders( swap_df):
# for each statement
return(swap_df.apply( get_late_swap_for_row, args=( swap_df, ), axis=1))
def get_late_swap_for_row( row, df):
to_consider = df[(df.to_move > row.swap_after) & (df.swap_before > row.to_move)]
if to_consider.empty:
return( row.swap_after)
earliest_later_up = to_consider.swap_before.min()
return( min( row.swap_after, earliest_later_up))
def swap_condition( results, tsum):
if results.empty and tsum > 0:
return True
elif results.empty:
return False
elif results.sum() > 0:
return True
return False
def preswap_one_file( swap_df, rnode, rmap, add_timing = False, filename = "", tsum = 0, perf_hooks_added=[False], counter=0):
recursive_swaps = swap_df[swap_df.swap_before.isin(swap_df.to_move)]
if not recursive_swaps.empty:
preswap_one_file( recursive_swaps, rnode, rmap, add_timing, filename, tsum, perf_hooks_added, counter + 1)
swap_df = pd.concat([recursive_swaps, swap_df])
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 30 10:55:38 2020
@author: ykrempp
"""
#deal with rows and columns, save a CSV
import pandas as pd
df = pd.read_csv('manual_vs_auto.csv')
#drop columns
df1 = df.drop('Manual2', axis=1)
print(df.head())
print(df1.head())
df2 = df.drop(['Manual2','Auto_th_2'], axis =1)
print(df2.head())
#adding columns
df['Date'] = '2020-30-01'
print(df.dtypes)
print(df.head())
#adding a real date not a string (object)
df['RealDate'] = pd.to_datetime('2020-01-30')
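#save the cleaned data back to a CSV, as mentioned at the top of this script
#(sketch: the output filename here is an assumption, not from the original)
df.to_csv('manual_vs_auto_new.csv', index=False)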
import pytest
import pandas as pd
from collections import ChainMap
from arize.pandas.logger import Schema
from arize.utils.types import Environments, ModelTypes
from arize.pandas.validation.validator import Validator
from arize.pandas.validation.errors import MissingColumns
def test_missing_prediction_id():
errors = Validator.validate_params(
**ChainMap(
{
"dataframe": pd.DataFrame({"prediction_label": pd.Series(["fraud"])}),
"schema": Schema(
prediction_id_column_name="prediction_id",
prediction_label_column_name="prediction_label",
),
},
kwargs,
),
)
assert len(errors) == 1
assert type(errors[0]) is MissingColumns
def test_missing_timestamp():
errors = Validator.validate_params(
**ChainMap(
{
"schema": Schema(
prediction_id_column_name="prediction_id",
prediction_label_column_name="prediction_label",
timestamp_column_name="prediction_timestamp",
)
},
kwargs,
),
)
assert len(errors) == 1
assert type(errors[0]) is MissingColumns
def test_missing_feature_columns():
errors = Validator.validate_params(
**ChainMap(
{
"schema": Schema(
prediction_id_column_name="prediction_id",
prediction_label_column_name="prediction_label",
feature_column_names=["A"],
)
},
kwargs,
),
)
assert len(errors) == 1
assert type(errors[0]) is MissingColumns
def test_missing_shap_columns():
errors = Validator.validate_params(
**ChainMap(
{
"schema": Schema(
prediction_id_column_name="prediction_id",
prediction_label_column_name="prediction_label",
shap_values_column_names={"A": "aa"},
)
},
kwargs,
),
)
assert len(errors) == 1
assert type(errors[0]) is MissingColumns
def test_missing_prediction_label():
errors = Validator.validate_params(
**ChainMap(
{
"schema": Schema(
prediction_id_column_name="prediction_id",
prediction_label_column_name="B",
)
},
kwargs,
),
)
assert len(errors) == 1
assert type(errors[0]) is MissingColumns
def test_missing_prediction_score():
errors = Validator.validate_params(
**ChainMap(
{
"schema": Schema(
prediction_id_column_name="prediction_id",
prediction_label_column_name="prediction_label",
prediction_score_column_name="C",
)
},
kwargs,
),
)
assert len(errors) == 1
assert type(errors[0]) is MissingColumns
def test_missing_actual_label():
errors = Validator.validate_params(
**ChainMap(
{
"schema": Schema(
prediction_id_column_name="prediction_id",
prediction_label_column_name="prediction_label",
actual_label_column_name="D",
)
},
kwargs,
),
)
assert len(errors) == 1
assert type(errors[0]) is MissingColumns
def test_missing_actual_score():
errors = Validator.validate_params(
**ChainMap(
{
"schema": Schema(
prediction_id_column_name="prediction_id",
prediction_label_column_name="prediction_label",
actual_score_column_name="E",
)
},
kwargs,
),
)
assert len(errors) == 1
assert type(errors[0]) is MissingColumns
def test_missing_multiple():
errors = Validator.validate_params(
**ChainMap(
{
"schema": Schema(
prediction_id_column_name="prediction_id",
timestamp_column_name="prediction_timestamp",
feature_column_names=["A"],
shap_values_column_names={"A": "aa"},
prediction_label_column_name="B",
prediction_score_column_name="C",
actual_label_column_name="D",
actual_score_column_name="E",
)
},
kwargs,
),
)
assert len(errors) == 1
assert type(errors[0]) is MissingColumns
kwargs = {
"model_id": "fraud",
"model_version": "v1.0",
"model_type": ModelTypes.SCORE_CATEGORICAL,
"environment": Environments.PRODUCTION,
"dataframe": pd.DataFrame(
{
"prediction_id": | pd.Series(["0"]) | pandas.Series |
import pandas as pd
from bs4 import BeautifulSoup
import requests
#dependencies
headers = { 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36' }
def divide(l, n):
for i in range(0, len(l), n):
yield l[i:i + n]
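# e.g. list(divide([1, 2, 3, 4, 5], 2)) -> [[1, 2], [3, 4], [5]]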
def listUp(theList, index, whichList):
for i in theList:
whichList.append(i[index])
def removeExtraIndex(indexxx):
for i in indexxx:
i.remove('')
def removeDupIndex(indexx, indexxx):
for i in indexx:
k = list(dict.fromkeys(i))
indexxx.append(k)
def joinIndex(indexxx, newIndex):
for i in range(len(indexxx)):
k = ' '.join(indexxx[i])
newIndex.append(k)
def incomeStmt(ticker):
url = "https://www.marketwatch.com/investing/stock/" + ticker + "/financials"
page = requests.get(url, headers=headers)
soup = BeautifulSoup(page.content, 'html.parser')
priceHtml = soup.find_all("td", class_="overflow__cell")
yearHtml = soup.find_all("th", class_="overflow__heading")
years = [s.get_text() for s in yearHtml]
years = years[1:6]
table = [i.get_text() for i in priceHtml]
table = [s.replace('\n', ' ') for s in table]
table = [s.replace('B', '') for s in table]
table = [s.replace('%', '') for s in table]
table = [s.replace('(', '-') for s in table]
table = [s.replace(')', '') for s in table]
x = list(divide(table, 7))
index = []
year1 = []
year2 = []
year3 = []
year4 = []
year5 = []
listUp(x, 0, index)
listUp(x, 1, year1)
listUp(x, 2, year2)
listUp(x, 3, year3)
listUp(x, 4, year4)
listUp(x, 5, year5)
index = [s.replace('\n', ' ') for s in index]
index = [s.split(' ') for s in index]
indexx= []
removeDupIndex(index, indexx)
removeExtraIndex(indexx)
newIndex = []
joinIndex(indexx, newIndex)
year1 = [s.replace('M', '') for s in year1]
year2 = [s.replace('M', '') for s in year2]
year3 = [s.replace('M', '') for s in year3]
year4 = [s.replace('M', '') for s in year4]
year5 = [s.replace('M', '') for s in year5]
financialStmt = pd.DataFrame()
financialStmt['Category']=newIndex
financialStmt[years[0]]=year1
financialStmt[years[1]]=year2
financialStmt[years[2]]=year3
financialStmt[years[3]]=year4
financialStmt[years[4]]=year5
financialStmt = financialStmt.set_index('Category')
return financialStmt
def balanceSheet(ticker):
url = "https://www.marketwatch.com/investing/stock/" + ticker + "/financials/balance-sheet"
page = requests.get(url, headers=headers)
soup = BeautifulSoup(page.content, 'html.parser')
priceHtml = soup.find_all("td", class_="overflow__cell")
yearHtml = soup.find_all("th", class_="overflow__heading")
years = [s.get_text() for s in yearHtml]
years = years[1:6]
table = [i.get_text() for i in priceHtml]
table = [s.replace('\n', ' ') for s in table]
table = [s.replace('B', '') for s in table]
table = [s.replace('%', '') for s in table]
table = [s.replace('(', '-') for s in table]
table = [s.replace(')', '') for s in table]
x = list(divide(table, 7))
index = []
year1 = []
year2 = []
year3 = []
year4 = []
year5 = []
listUp(x, 0, index)
listUp(x, 1, year1)
listUp(x, 2, year2)
listUp(x, 3, year3)
listUp(x, 4, year4)
listUp(x, 5, year5)
index = [s.replace('\n', ' ') for s in index]
index = [s.split(' ') for s in index]
indexx= []
removeDupIndex(index, indexx)
removeExtraIndex(indexx)
newIndex = []
joinIndex(indexx, newIndex)
year1 = [s.replace('M', '') for s in year1]
year2 = [s.replace('M', '') for s in year2]
year3 = [s.replace('M', '') for s in year3]
year4 = [s.replace('M', '') for s in year4]
year5 = [s.replace('M', '') for s in year5]
financialStmt = pd.DataFrame()
financialStmt['Category']=newIndex
financialStmt[years[0]]=year1
financialStmt[years[1]]=year2
financialStmt[years[2]]=year3
financialStmt[years[3]]=year4
financialStmt[years[4]]=year5
financialStmt = financialStmt.set_index('Category')
pd.set_option("display.max_rows", None, "display.max_columns", None)
return financialStmt
def cashFlow(ticker):
url = "https://www.marketwatch.com/investing/stock/" + ticker + "/financials/cash-flow"
page = requests.get(url, headers=headers)
soup = BeautifulSoup(page.content, 'html.parser')
priceHtml = soup.find_all("td", class_="overflow__cell")
yearHtml = soup.find_all("th", class_="overflow__heading")
years = [s.get_text() for s in yearHtml]
years = years[1:6]
table = [i.get_text() for i in priceHtml]
table = [s.replace('\n', ' ') for s in table]
table = [s.replace('B', '') for s in table]
table = [s.replace('%', '') for s in table]
table = [s.replace('(', '-') for s in table]
table = [s.replace(')', '') for s in table]
x = list(divide(table, 7))
index = []
year1 = []
year2 = []
year3 = []
year4 = []
year5 = []
listUp(x, 0, index)
listUp(x, 1, year1)
listUp(x, 2, year2)
listUp(x, 3, year3)
listUp(x, 4, year4)
listUp(x, 5, year5)
index = [s.replace('\n', ' ') for s in index]
index = [s.split(' ') for s in index]
indexx= []
removeDupIndex(index, indexx)
removeExtraIndex(indexx)
newIndex = []
joinIndex(indexx, newIndex)
year1 = [s.replace('M', '') for s in year1]
year2 = [s.replace('M', '') for s in year2]
year3 = [s.replace('M', '') for s in year3]
year4 = [s.replace('M', '') for s in year4]
year5 = [s.replace('M', '') for s in year5]
financialStmt = pd.DataFrame()
financialStmt['Category']=newIndex
financialStmt[years[0]]=year1
financialStmt[years[1]]=year2
financialStmt[years[2]]=year3
financialStmt[years[3]]=year4
financialStmt[years[4]]=year5
financialStmt = financialStmt.set_index('Category')
pd.set_option("display.max_rows", None, "display.max_columns", None)
return financialStmt
# -*- coding: utf-8 -*-
#
# Copyright 2017-2020 Data61, CSIRO
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import networkx as nx
import numpy as np
import pandas as pd
import pytest
from stellargraph.core.convert import (
ColumnarConverter,
convert_nodes,
convert_edges,
from_networkx,
)
_EMPTY_DF = pd.DataFrame([], index=[1, 2])
def test_columnar_convert_type_default():
converter = ColumnarConverter("some_name", "foo", None, {}, {}, False, {})
ids, columns, type_starts, features = converter.convert(_EMPTY_DF)
np.testing.assert_array_equal(ids, [1, 2])
assert columns == {}
assert type_starts == [("foo", 0)]
assert "foo" in features
def test_columnar_convert_selected_columns():
df = _EMPTY_DF.assign(before="abc", same=10)
converter = ColumnarConverter(
"some_name", "foo", None, {}, {"before": "after", "same": "same"}, False, {}
)
ids, columns, type_starts, features = converter.convert({"x": df, "y": df})
np.testing.assert_array_equal(ids, [1, 2, 1, 2])
assert type_starts == [("x", 0), ("y", 2)]
assert "before" not in columns
np.testing.assert_array_equal(columns["after"], "abc")
np.testing.assert_array_equal(columns["same"], 10)
def test_columnar_convert_selected_columns_missing():
converter = ColumnarConverter(
"some_name", "foo", None, {}, {"before": "after", "same": "same"}, False, {}
)
with pytest.raises(
ValueError, match=r"some_name\['x'\]: expected 'before', 'same' columns, found:"
):
converter.convert({"x": _EMPTY_DF})
def test_columnar_convert_column_default():
converter = ColumnarConverter(
"some_name", "foo", None, {"before": 123}, {"before": "before"}, False, {}
)
ids, columns, type_starts, features = converter.convert(
{"x": _EMPTY_DF, "y": _EMPTY_DF}
)
assert type_starts == [("x", 0), ("y", 2)]
np.testing.assert_array_equal(columns["before"], 123)
def test_columnar_convert_column_default_selected_columns():
# the defaulting happens before the renaming
converter = ColumnarConverter(
"x", "foo", None, {"before": 123}, {"before": "after"}, False, {}
)
ids, columns, type_starts, features = converter.convert(
{"x": _EMPTY_DF, "y": _EMPTY_DF}
)
assert type_starts == [("x", 0), ("y", 2)]
assert "before" not in columns
np.testing.assert_array_equal(columns["after"], 123)
def test_columnar_convert_features():
converter = ColumnarConverter("some_name", "foo", None, {}, {"x": "x"}, True, {})
df = _EMPTY_DF.assign(a=[1, 2], b=[100, 200], x=123)
ids, columns, type_starts, features = converter.convert(df)
assert type_starts == [("foo", 0)]
np.testing.assert_array_equal(columns["x"], 123)
assert np.array_equal(features["foo"], [[1, 100], [2, 200]])
def test_columnar_convert_disallow_features():
converter = ColumnarConverter("some_name", "foo", None, {}, {}, False, {})
df = _EMPTY_DF.assign(a=1)
with pytest.raises(ValueError, match="expected zero feature columns, found 'a'"):
shared, type_starts, features = converter.convert(df)
def test_columnar_convert_invalid_input():
converter = ColumnarConverter("some_name", "foo", None, {}, {}, False, {})
with pytest.raises(TypeError, match="some_name: expected dict, found int"):
converter.convert(1)
with pytest.raises(
TypeError, match=r"some_name\['x'\]: expected pandas DataFrame, found int",
):
converter.convert({"x": 1})
def test_columnar_convert_type_column():
converter = ColumnarConverter(
"some_name",
"foo",
"type_column",
{},
{"type_column": "TC", "data": "D"},
False,
{},
)
df = pd.DataFrame(
{"type_column": ["c", "a", "a", "c", "b"], "data": [1, 2, 3, 4, 5]},
index=[1, 10, 100, 1000, 10000],
)
ids, columns, type_starts, features = converter.convert(df)
assert columns.keys() == {"D"}
np.testing.assert_array_equal(ids, [10, 100, 10000, 1, 1000])
np.testing.assert_array_equal(columns["D"], [2, 3, 5, 1, 4])
assert type_starts == [("a", 0), ("b", 2), ("c", 3)]
assert features == {"a": None, "b": None, "c": None}
# invalid configurations
with pytest.raises(
ValueError, match=r"allow_features: expected no features .* \('type_column'\)"
):
ColumnarConverter(
"some_name", "foo", "type_column", {}, {"type_column": "TC"}, True, {}
)
with pytest.raises(
ValueError,
match=r"selected_columns: expected type column \('type_column'\) .* found only 'TC', 'data'",
):
ColumnarConverter(
"some_name",
"foo",
"type_column",
{},
{"TC": "type_column", "data": "D"},
False,
{},
)
def test_columnar_convert_transform_columns():
columns = {"x": np.complex128(1), "y": np.uint16(2), "z": np.float32(3.0)}
dfs = {
name: pd.DataFrame({"s": [0], "t": [1], "w": [w]}, index=[i])
for i, (name, w) in enumerate(columns.items())
}
converter = ColumnarConverter(
"some_name",
float,
None,
column_defaults={},
selected_columns={"s": "ss", "t": "tt", "w": "ww",},
transform_columns={"w": lambda x: x + 1,},
allow_features=False,
)
ids, columns, type_starts, _ = converter.convert(dfs)
assert columns["ww"][0] == 2
assert columns["ww"][1] == 3
assert columns["ww"][2] == 4
assert type_starts == [("x", 0), ("y", 1), ("z", 2)]
np.testing.assert_array_equal(columns["ss"], 0)
np.testing.assert_array_equal(columns["tt"], 1)
def test_convert_edges_weights():
def run(ws):
dfs = {
name: pd.DataFrame({"s": [0], "t": [1], "w": [w]}, index=[i])
for i, (name, w) in enumerate(ws.items())
}
nodes = convert_nodes(
pd.DataFrame([], index=[0, 1]),
name="other_name",
default_type=np.int8,
dtype=np.int8,
)
convert_edges(
dfs,
name="some_name",
default_type="d",
source_column="s",
target_column="t",
weight_column="w",
nodes=nodes,
type_column=None,
)
# various numbers are valid
run({"x": np.int8(1)})
run({"x": np.complex64(1)})
run({"x": np.complex128(1), "y": np.uint16(2), "z": np.float32(3.0)})
# non-numbers are not
with pytest.raises(
TypeError,
match=r"some_name: expected weight column 'w' to be numeric, found dtype 'object'",
):
run({"x": "ab", "y": 1, "z": np.float32(2)})
def test_convert_edges_type_column():
data = pd.DataFrame(
{
"s": [10, 20, 30, 40, 50],
"t": [20, 30, 40, 50, 60],
"l": ["c", "a", "a", "c", "b"],
}
)
nodes = pd.DataFrame([], index=[10, 20, 30, 40, 50, 60])
nodes = convert_nodes(nodes, name="other_name", default_type=np.int8, dtype=np.int8)
edges = convert_edges(
data,
name="some_name",
default_type="d",
source_column="s",
target_column="t",
weight_column="w",
type_column="l",
nodes=nodes,
)
np.testing.assert_array_equal(edges.sources, [1, 2, 4, 0, 3])
np.testing.assert_array_equal(edges.targets, [2, 3, 5, 1, 4])
np.testing.assert_array_equal(
edges.type_of_iloc(slice(None)), ["a", "a", "b", "c", "c"]
)
def from_networkx_for_testing(g, node_features=None, dtype="float32"):
return from_networkx(
g,
node_type_attr="n",
edge_type_attr="e",
node_type_default="a",
edge_type_default="x",
edge_weight_attr="w",
node_features=node_features,
dtype=dtype,
)
def test_from_networkx_empty():
nodes, edges = from_networkx_for_testing(nx.DiGraph())
assert nodes == {}
assert edges == {}
def assert_dataframe_dict_equal(new, expected):
assert sorted(new.keys()) == sorted(expected.keys())
for k, expected_value in expected.items():
pd.testing.assert_frame_equal(new[k], expected_value)
# default value for edge weights
W = np.float32(1)
def test_from_networkx_graph_only():
raw_edges = [(0, 1), (0, 2), (0, 2), (1, 2), (1, 2)]
expected_nodes = {"a": pd.DataFrame(columns=range(0), index=[0, 1, 2])}
expected_edges = {
"x": pd.DataFrame(raw_edges, columns=["source", "target"]).assign(w=W)
}
g = nx.MultiDiGraph()
g.add_edges_from(raw_edges)
nodes, edges = from_networkx_for_testing(g)
assert_dataframe_dict_equal(nodes, expected_nodes)
assert_dataframe_dict_equal(edges, expected_edges)
def test_from_networkx_ignore_unknown_attrs():
raw_nodes = [(0, {"foo": 123})]
raw_edges = [(0, 0, {"bar": 456})]
expected_nodes = {"a": pd.DataFrame(columns=range(0), index=[0])}
expected_edges = {
"x": pd.DataFrame([(0, 0)], columns=["source", "target"], index=[0]).assign(w=W)
}
g = nx.MultiDiGraph()
g.add_nodes_from(raw_nodes)
g.add_edges_from(raw_edges)
nodes, edges = from_networkx_for_testing(g)
assert_dataframe_dict_equal(nodes, expected_nodes)
assert_dataframe_dict_equal(edges, expected_edges)
def test_from_networkx_heterogeneous_partial():
# check that specifying node and edge types works, even when interleaved with unspecified types
a_nodes = [0, (1, {"n": "a"})]
b_nodes = [(2, {"n": "b"})]
expected_nodes = {
"a": pd.DataFrame(columns=range(0), index=[0, 1]),
"b": pd.DataFrame(columns=range(0), index=[2]),
}
x_edges = [(0, 2, {"e": "x"}), (0, 2), (1, 2), (1, 2)]
xs = len(x_edges)
y_edges = [(0, 1, {"e": "y"})]
ys = len(y_edges)
expected_edges = {
"x": pd.DataFrame(
[t[:2] for t in x_edges], columns=["source", "target"], index=[0, 1, 3, 4],
).assign(w=W),
"y": pd.DataFrame(
[t[:2] for t in y_edges], columns=["source", "target"], index=[2],
).assign(w=W),
}
g = nx.MultiDiGraph()
g.add_nodes_from(a_nodes)
g.add_nodes_from(b_nodes)
g.add_edges_from(x_edges)
g.add_edges_from(y_edges)
nodes, edges = from_networkx_for_testing(g)
assert_dataframe_dict_equal(nodes, expected_nodes)
assert_dataframe_dict_equal(edges, expected_edges)
def test_from_networkx_weights():
expected_nodes = {
"a": pd.DataFrame(columns=range(0), index=[0, 2, 1]),
}
x_edges = [(0, 2, {"w": 2.0}), (0, 2), (1, 2), (1, 2)]
xs = len(x_edges)
y_edges = [(0, 1, {"w": 3.0, "e": "y"})]
ys = len(y_edges)
def df_edge(edge_tuple):
src, tgt = edge_tuple[:2]
try:
attrs = edge_tuple[2]
except IndexError:
attrs = {}
weight = attrs.get("w", 1)
return src, tgt, weight
expected_edges = {
"x": pd.DataFrame(
[df_edge(t) for t in x_edges],
columns=["source", "target", "w"],
index=[0, 1, 3, 4],
),
"y": pd.DataFrame(
[df_edge(t) for t in y_edges], columns=["source", "target", "w"], index=[2]
),
}
g = nx.MultiDiGraph()
g.add_edges_from(x_edges + y_edges)
nodes, edges = from_networkx_for_testing(g)
assert_dataframe_dict_equal(nodes, expected_nodes)
assert_dataframe_dict_equal(edges, expected_edges)
@pytest.mark.parametrize(
"feature_type",
[
"nodes",
"dataframe no types",
"dataframe types",
"iterable no types",
"iterable types",
],
)
@pytest.mark.parametrize("dtype", ["float16", "float32", "float64"])
def test_from_networkx_homogeneous_features(feature_type, dtype):
features = [[0, 1, 2], [3, 4, 5]]
def node(i, **kwargs):
feat = {"f": features[i]} if feature_type == "nodes" else {}
return i, feat
a_nodes = [node(0), node(1)]
if feature_type == "nodes":
node_features = "f"
elif feature_type == "dataframe no types":
node_features = pd.DataFrame(features)
elif feature_type == "dataframe types":
node_features = {"a": pd.DataFrame(features)}
elif feature_type == "iterable no types":
node_features = enumerate(features)
elif feature_type == "iterable types":
node_features = {"a": enumerate(features)}
expected_nodes = {"a": pd.DataFrame(features, dtype=dtype)}
g = nx.MultiDiGraph()
g.add_nodes_from(a_nodes)
nodes, edges = from_networkx_for_testing(g, node_features, dtype)
assert_dataframe_dict_equal(nodes, expected_nodes)
assert edges == {}
@pytest.mark.parametrize(
"feature_type", ["nodes", "dataframe", "iterable no types", "iterable types"]
)
@pytest.mark.parametrize("dtype", ["float16", "float32", "float64"])
def test_from_networkx_heterogeneous_features(feature_type, dtype):
a_features = [[0, 1, 2], [3, 4, 5]]
b_features = [[6, 7, 8, 9]]
# node type c has no features
def node(i, feats, **kwargs):
feat = {"f": feats} if feature_type == "nodes" else {}
return (i, {**kwargs, **feat})
# make sure the default node type is applied correctly
a_nodes = [node(0, a_features[0]), node(2, a_features[1], n="a")]
b_nodes = [node(1, b_features[0], n="b")]
c_nodes = [(3, {"n": "c"})]
if feature_type == "nodes":
node_features = "f"
elif feature_type == "dataframe":
node_features = {
"a": pd.DataFrame(a_features, index=[0, 2]),
"b": pd.DataFrame(b_features, index=[1]),
# c is implied
}
elif feature_type == "iterable no types":
node_features = zip([0, 2, 1, 3], a_features + b_features + [[]])
elif feature_type == "iterable types":
node_features = {
"a": zip([0, 2], a_features),
"b": zip([1], b_features),
"c": [(3, [])],
}
expected_nodes = {
"a": | pd.DataFrame(a_features, index=[0, 2], dtype=dtype) | pandas.DataFrame |
#!/usr/bin/env python
#
# eval_archive_to_wiki.py
# <NAME> <<EMAIL>>
# 2018-09-19
import datetime
import logging
import math
import numbers
import sys
from zipfile import ZipFile
import pandas as pd
logging.basicConfig(
format='%(asctime)s army-ant: [%(name)s] %(levelname)s: %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=logging.INFO)
decimal_places = 4
favorite_metrics = ['GMAP', 'MAP', 'Macro Avg Prec', 'Macro Avg Rec', 'NDCG@10', 'P@10']
def millis_format(millis):
if not isinstance(millis, numbers.Number) or math.isnan(millis):
return "N/A"
out = {}
out['days'] = millis // 86400000
millis -= out['days'] * 86400000
out['hours'] = millis // 3600000
millis -= out['hours'] * 3600000
out['minutes'] = millis // 60000
millis -= out['minutes'] * 60000
out['seconds'] = millis // 1000
millis -= out['seconds'] * 1000
out['millis'] = millis
if out['days'] == 0:
del out['days']
if out['hours'] == 0:
del out['hours']
if out['minutes'] == 0:
del out['minutes']
if out['seconds'] == 0:
del out['seconds']
return '%s%s%s%s%s' % (
'%.2dd ' % out['days'] if 'days' in out else '',
'%.2dh ' % out['hours'] if 'hours' in out else '',
'%.2dm ' % out['minutes'] if 'minutes' in out else '',
'%.2ds ' % out['seconds'] if 'seconds' in out else '',
'%.3dms' % out['millis'])
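# Illustrative examples (not part of the original script):
#   millis_format(3723004) -> '01h 02m 03s 004ms'
#   millis_format(950)     -> '950ms'  (zero-valued fields are omitted)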
if len(sys.argv) < 2:
print("Usage: %s ARCHIVE [FUNCTION_NAME] [PARAM_NAMES ...]" % sys.argv[0])
sys.exit(1)
if len(sys.argv) > 2:
function_name = sys.argv[2]
else:
function_name = 'score'
if len(sys.argv) > 3:
param_names = sys.argv[3:]
else:
param_names = None
with ZipFile(sys.argv[1]) as f_zip:
eval_metrics = [zip_obj for zip_obj in f_zip.filelist if zip_obj.filename.endswith('eval_metrics.csv')]
if len(eval_metrics) < 1:
logging.error("No eval_metrics.csv file found")
sys.exit(2)
if len(eval_metrics) > 1:
logging.warning("Multiple eval_metrics.csv files found, using %s" % eval_metrics[0].filename)
eval_stats = [zip_obj for zip_obj in f_zip.filelist if zip_obj.filename.endswith('eval_stats.csv')]
if len(eval_stats) < 1:
logging.error("No eval_stats.csv file found")
sys.exit(2)
if len(eval_stats) > 1:
logging.warning("Multiple eval_stats.csv files found, using %s" % eval_stats[0].filename)
with f_zip.open(eval_metrics[0].filename, 'r') as f:
df = pd.read_csv(f)
if param_names is None:
param_names = [col for col in df.columns if not col in ['metric', 'value']]
df['Version'] = df[param_names].apply(lambda d: "%s(%s)" % (
function_name,
', '.join(['%s=%s' % (p, x) for p, x in zip(param_names, d)])
) , axis=1)
df = df[['Version', 'metric', 'value']].pivot(index='Version', columns='metric', values='value')
eval_metrics_df = df.reset_index().rename_axis(None, axis=1)[['Version'] + favorite_metrics]
with f_zip.open(eval_stats[0].filename, 'r') as f:
df = pd.read_csv(f)
df['Version'] = df[param_names].apply(lambda d: "%s(%s)" % (
function_name,
', '.join(['%s=%s' % (p, x) for p, x in zip(param_names, d)])
) , axis=1)
df['value'] = df['value'].apply(millis_format)
df = df[['Version', 'stat', 'value']].pivot(index='Version', columns='stat', values='value')
eval_stats_df = df.reset_index().rename_axis(None, axis=1)\
.rename(columns={'avg_query_time': 'Avg./Query', 'total_query_time': 'Total Query Time' })
    df = pd.merge(eval_metrics_df, eval_stats_df, how='outer', on='Version')
"""
MIT License
Copyright (c) 2018 <NAME> Institute of Molecular Physiology
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import os
import numpy as np
import pandas as pd
import pytest
from .. import cter
THIS_DIR = os.path.dirname(os.path.realpath(__file__))
OUTPUT_TEST_FOLDER = 'OUTPUT_TESTS_DUMP'
INPUT_TEST_FOLDER = '../../../test_files'
class TestGetCterV10HeaderNames:
def test_call_functions_should_return_filled_list(self):
data = [
'defocus',
'SphericalAberration',
'Voltage',
'PixelSize',
'b_factor',
'total_ac',
'astigmatism_amplitude',
'DefocusAngle',
'std_defocus',
'std_total_ac',
'std_astigmatism_amplitude',
'std_astigmatism_angle',
'variation_defocus',
'variation_astigmatism_amplitude',
'resolution_limit_defocus',
'resolution_limit_defocus_astig',
'nyquist',
'CtfMaxResolution',
'spare',
'AmplitudeContrast',
'PhaseShift',
'MicrographNameNoDW'
]
assert cter.get_cter_v1_0_header_names() == data
class TestLoadCterV10:
def test_correct_multiline_file_should_return_filled_data_frame(self):
file_name = os.path.join(THIS_DIR, INPUT_TEST_FOLDER, 'cter_v1_0_multiline.txt')
return_frame = cter.load_cter_v1_0(file_name=file_name)
columns = (
'DefocusU',
'DefocusV',
'SphericalAberration',
'Voltage',
'PixelSize',
'b_factor',
'total_ac',
'DefocusAngle',
'std_defocus',
'std_total_ac',
'std_astigmatism_amplitude',
'std_astigmatism_angle',
'variation_defocus',
'variation_astigmatism_amplitude',
'resolution_limit_defocus',
'resolution_limit_defocus_astig',
'nyquist',
'CtfMaxResolution',
'spare',
'AmplitudeContrast',
'PhaseShift',
'MicrographNameNoDW'
)
data = [[
22257.635,
22862.365,
0.01,
300,
1.14,
0,
0.1,
19.435,
0.0010212,
0,
0.0021005,
6.5849,
0.045268,
3.4734,
2.307018,
2.779399,
2.279982,
2.279982,
0,
0.1,
0,
'test_file.mrc'
]] * 2
data_frame = pd.DataFrame(
data,
columns=columns
)
assert data_frame.round(5).equals(return_frame.round(5))
def test_correct_file_should_return_filled_data_frame(self):
file_name = os.path.join(THIS_DIR, INPUT_TEST_FOLDER, 'cter_v1_0.txt')
return_frame = cter.load_cter_v1_0(file_name=file_name)
columns = (
'DefocusU',
'DefocusV',
'SphericalAberration',
'Voltage',
'PixelSize',
'b_factor',
'total_ac',
'DefocusAngle',
'std_defocus',
'std_total_ac',
'std_astigmatism_amplitude',
'std_astigmatism_angle',
'variation_defocus',
'variation_astigmatism_amplitude',
'resolution_limit_defocus',
'resolution_limit_defocus_astig',
'nyquist',
'CtfMaxResolution',
'spare',
'AmplitudeContrast',
'PhaseShift',
'MicrographNameNoDW'
)
data = [
22257.635,
22862.365,
0.01,
300,
1.14,
0,
0.1,
19.435,
0.0010212,
0,
0.0021005,
6.5849,
0.045268,
3.4734,
2.307018,
2.779399,
2.279982,
2.279982,
0,
0.1,
0,
'test_file.mrc'
]
data_frame = pd.DataFrame(
[data],
columns=columns
)
assert data_frame.round(5).equals(return_frame.round(5))
def test_correct_file_low_angle_should_return_filled_data_frame(self):
file_name = os.path.join(THIS_DIR, INPUT_TEST_FOLDER, 'cter_v1_0_low_angle.txt')
return_frame = cter.load_cter_v1_0(file_name=file_name)
columns = (
'DefocusU',
'DefocusV',
'SphericalAberration',
'Voltage',
'PixelSize',
'b_factor',
'total_ac',
'DefocusAngle',
'std_defocus',
'std_total_ac',
'std_astigmatism_amplitude',
'std_astigmatism_angle',
'variation_defocus',
'variation_astigmatism_amplitude',
'resolution_limit_defocus',
'resolution_limit_defocus_astig',
'nyquist',
'CtfMaxResolution',
'spare',
'AmplitudeContrast',
'PhaseShift',
'MicrographNameNoDW'
)
data = [
22257.635,
22862.365,
0.01,
300,
1.14,
0,
0.1,
19.435,
0.0010212,
0,
0.0021005,
6.5849,
0.045268,
3.4734,
2.307018,
2.779399,
2.279982,
2.279982,
0,
0.1,
0,
'test_file.mrc'
]
data_frame = pd.DataFrame(
[data],
columns=columns
)
assert data_frame.round(5).equals(return_frame.round(5))
def test_correct_file_high_angle_should_return_filled_data_frame(self):
file_name = os.path.join(THIS_DIR, INPUT_TEST_FOLDER, 'cter_v1_0_high_angle.txt')
return_frame = cter.load_cter_v1_0(file_name=file_name)
columns = (
'DefocusU',
'DefocusV',
'SphericalAberration',
'Voltage',
'PixelSize',
'b_factor',
'total_ac',
'DefocusAngle',
'std_defocus',
'std_total_ac',
'std_astigmatism_amplitude',
'std_astigmatism_angle',
'variation_defocus',
'variation_astigmatism_amplitude',
'resolution_limit_defocus',
'resolution_limit_defocus_astig',
'nyquist',
'CtfMaxResolution',
'spare',
'AmplitudeContrast',
'PhaseShift',
'MicrographNameNoDW'
)
data = [
22257.635,
22862.365,
0.01,
300,
1.14,
0,
0.1,
19.435,
0.0010212,
0,
0.0021005,
6.5849,
0.045268,
3.4734,
2.307018,
2.779399,
2.279982,
2.279982,
0,
0.1,
0,
'test_file.mrc'
]
data_frame = pd.DataFrame(
[data],
columns=columns
)
assert data_frame.round(5).equals(return_frame.round(5))
class TestDefocusDefocusDiffToDefocuUAndV:
def test_defocus_2_um_zero_astigmatism_should_return_20000_angstrom(self):
def_u, _ = cter.defocus_defocus_diff_to_defocus_u_and_v(2, 0)
assert def_u == 20000
def test_zero_astigmatism_should_return_same_values(self):
def_u, def_v = cter.defocus_defocus_diff_to_defocus_u_and_v(2, 0)
assert def_u == def_v
def test_values_should_return_correct_defocus_u(self):
def_u, _ = cter.defocus_defocus_diff_to_defocus_u_and_v(2.05, 0.1)
assert def_u == 20000
def test_values_should_return_correct_defocus_v(self):
_, def_v = cter.defocus_defocus_diff_to_defocus_u_and_v(2.05, 0.1)
assert def_v == 21000
def test_values_inverse_should_return_correct_defocus_v(self):
_, def_v = cter.defocus_defocus_diff_to_defocus_u_and_v(2.05, -0.1)
assert def_v == 20000
def test_multi_input_should_return_multi_output_defocus_u(self):
def_u, _ = cter.defocus_defocus_diff_to_defocus_u_and_v(
pd.Series([2, 2.05, 2.05]),
pd.Series([0, -0.1, 0.1])
)
assert def_u.equals(pd.Series([20000, 21000, 20000], dtype=float))
def test_multi_input_should_return_multi_output_defocus_v(self):
_, def_v = cter.defocus_defocus_diff_to_defocus_u_and_v(
pd.Series([2, 2.05, 2.05]),
pd.Series([0, 0.1, -0.1]),
)
assert def_v.equals(pd.Series([20000, 21000, 20000], dtype=float))
class TestDefocuUAndVToDefocusDefocusDiff:
def test_defocus_u_2_um_defocus_v_2_um_should_return_20000_angstrom(self):
defocus, _ = cter.defocus_u_and_v_to_defocus_defocus_diff(20000, 20000)
assert defocus == 2
def test_zero_astigmatism_should_return_same_values(self):
_, astigmatism = cter.defocus_u_and_v_to_defocus_defocus_diff(20000, 20000)
assert astigmatism == 0
def test_values_should_return_correct_defocus_u(self):
defocus, astigmatism = cter.defocus_u_and_v_to_defocus_defocus_diff(21000, 20000)
assert defocus == 2.05
def test_values_should_return_correct_defocus_v(self):
defocus, astigmatism = cter.defocus_u_and_v_to_defocus_defocus_diff(21000, 20000)
assert astigmatism == -0.1
def test_values_invert_should_return_correct_defocus_v(self):
defocus, astigmatism = cter.defocus_u_and_v_to_defocus_defocus_diff(20000, 21000)
assert astigmatism == 0.1
def test_multi_input_should_return_multi_output_defocus(self):
defocus, _ = cter.defocus_u_and_v_to_defocus_defocus_diff(
pd.Series([20000, 21000]),
pd.Series([20000, 20000])
)
assert defocus.equals(pd.Series([2, 2.05], dtype=float))
def test_multi_input_should_return_multi_output_astigmatism(self):
_, astigmatism = cter.defocus_u_and_v_to_defocus_defocus_diff(
pd.Series([20000, 21000, 20000]),
pd.Series([20000, 20000, 21000]),
)
assert astigmatism.equals(pd.Series([0., -0.1, 0.1], dtype=float))
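# Illustrative sketch (added for clarity, not part of the cter module): the
# conversions exercised by the two test classes above are consistent with the
# formulas below, where defocus and the astigmatism difference are given in
# micrometres and DefocusU/DefocusV in Angstrom (1 um = 10000 A). The actual
# cter implementation may differ in detail.
def _sketch_defocus_diff_to_u_v(defocus, diff):
    # DefocusU/DefocusV are the mean defocus minus/plus half the difference
    return (defocus - diff / 2) * 10000, (defocus + diff / 2) * 10000
def _sketch_u_v_to_defocus_diff(def_u, def_v):
    # inverse mapping: mean defocus and signed difference (DefocusV - DefocusU)
    return (def_u + def_v) / 2 / 10000, (def_v - def_u) / 10000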
class TestDumpCterV10:
def test_valid_cter_data_should_create_partres_file(self, tmpdir):
output_file: str = tmpdir.mkdir(OUTPUT_TEST_FOLDER).join('test_valid_cter_data_should_create_partres_file.star')
columns = (
'DefocusU',
'DefocusV',
'SphericalAberration',
'Voltage',
'PixelSize',
'b_factor',
'total_ac',
'DefocusAngle',
'std_defocus',
'std_total_ac',
'std_astigmatism_amplitude',
'std_astigmatism_angle',
'variation_defocus',
'variation_astigmatism_amplitude',
'resolution_limit_defocus',
'resolution_limit_defocus_astig',
'nyquist',
'CtfMaxResolution',
'spare',
'AmplitudeContrast',
'PhaseShift',
'MicrographNameNoDW'
)
data = [
22257.635,
22862.365,
0.01,
300,
1.14,
0,
0.1,
19.435,
0.0010212,
0,
0.0021005,
6.5849,
0.045268,
3.4734,
0.43346,
0.35979,
0.4386,
0.4386,
0,
0.1,
0,
'test_file.mrc'
]
data_frame = pd.DataFrame(
[data],
columns=columns
)
cter.dump_cter_v1_0(output_file, data_frame)
assert os.path.exists(output_file)
def test_valid_cter_data_large_angle_should_create_correct_file(self, tmpdir):
output_file: str = tmpdir.mkdir(OUTPUT_TEST_FOLDER).join('test_valid_cter_data_large_angle_should_create_correct_file.star')
columns = (
'DefocusU',
'DefocusV',
'SphericalAberration',
'Voltage',
'PixelSize',
'b_factor',
'total_ac',
'DefocusAngle',
'std_defocus',
'std_total_ac',
'std_astigmatism_amplitude',
'std_astigmatism_angle',
'variation_defocus',
'variation_astigmatism_amplitude',
'resolution_limit_defocus',
'resolution_limit_defocus_astig',
'nyquist',
'CtfMaxResolution',
'spare',
'AmplitudeContrast',
'PhaseShift',
'MicrographNameNoDW'
)
data = [
22257.635,
22862.365,
0.01,
300,
1.14,
0,
0.1,
19.435+720,
0.0010212,
0,
0.0021005,
6.5849,
0.045268,
3.4734,
2.30702,
2.77940,
2.27998,
2.77940,
0,
0.1,
0,
'test_file.mrc'
]
data_frame = pd.DataFrame(
[data],
columns=columns
)
cter.dump_cter_v1_0(output_file, data_frame)
expected_data = [[
2.256,
0.01,
300.0,
1.14,
0.0,
10.0,
0.060473,
25.565 ,
0.0010212,
0.0,
0.0021005,
6.5849,
0.045268,
3.4734,
0.4334596,
0.3597899,
0.4386003,
0.3597899,
0.0,
10.0,
0.0,
'test_file.mrc',
]]
input_data = []
with open(output_file, 'r') as read:
for idx, line in enumerate(read.readlines()):
line = line.strip().split()
input_data.append([])
for entry in line:
try:
data = float(entry)
except ValueError:
data = entry
input_data[idx].append(data)
assert expected_data == input_data
def test_valid_cter_data_should_create_correct_file(self, tmpdir):
output_file: str = tmpdir.mkdir(OUTPUT_TEST_FOLDER).join('test_valid_cter_data_should_create_correct_partres_file.star')
columns = (
'DefocusU',
'DefocusV',
'SphericalAberration',
'Voltage',
'PixelSize',
'b_factor',
'total_ac',
'DefocusAngle',
'std_defocus',
'std_total_ac',
'std_astigmatism_amplitude',
'std_astigmatism_angle',
'variation_defocus',
'variation_astigmatism_amplitude',
'resolution_limit_defocus',
'resolution_limit_defocus_astig',
'nyquist',
'CtfMaxResolution',
'spare',
'AmplitudeContrast',
'PhaseShift',
'MicrographNameNoDW'
)
data = [
22257.635,
22862.365,
0.01,
300,
1.14,
0,
0.1,
19.435,
0.0010212,
0,
0.0021005,
6.5849,
0.045268,
3.4734,
2.30702,
2.77940,
2.27998,
2.77940,
0,
0.1,
0,
'test_file.mrc'
]
data_frame = pd.DataFrame(
[data],
columns=columns
)
cter.dump_cter_v1_0(output_file, data_frame)
expected_data = [[
2.256,
0.01,
300.0,
1.14,
0.0,
10.0,
0.060473,
25.565 ,
0.0010212,
0.0,
0.0021005,
6.5849,
0.045268,
3.4734,
0.4334596,
0.3597899,
0.4386003,
0.3597899,
0.0,
10.0,
0.0,
'test_file.mrc',
]]
input_data = []
with open(output_file, 'r') as read:
for idx, line in enumerate(read.readlines()):
line = line.strip().split()
input_data.append([])
for entry in line:
try:
data = float(entry)
except ValueError:
data = entry
input_data[idx].append(data)
assert expected_data == input_data
class TestAmplitudeContrastToAngle:
def test_zero_should_return_zero(self):
value = pd.Series([0])
return_value = pd.Series([0], dtype=float)
assert cter.amplitude_contrast_to_angle(value).equals(return_value)
def test_100_should_return_ninety(self):
value = pd.Series([100])
return_value = pd.Series([90], dtype=float)
assert cter.amplitude_contrast_to_angle(value).equals(return_value)
def test_minus_100_should_return_ninety(self):
value = pd.Series([-100])
return_value = pd.Series([90], dtype=float)
assert cter.amplitude_contrast_to_angle(value).equals(return_value)
def test_50_should_return_30(self):
value = pd.Series([50])
return_value = pd.Series([30], dtype=float)
data_frame = cter.amplitude_contrast_to_angle(value)
assert return_value.equals(data_frame.round(1))
def test_minus_50_should_return_150(self):
value = pd.Series([-50])
return_value = pd.Series([150], dtype=float)
data_frame = cter.amplitude_contrast_to_angle(value)
assert return_value.equals(data_frame.round(1))
def test_multiline_should_return_correct_values(self):
value = pd.Series([0, 100, -100, 50, -50])
return_value = pd.Series([0, 90, 90, 30, 150], dtype=float)
data_frame = cter.amplitude_contrast_to_angle(value)
assert return_value.equals(data_frame.round(1))
def test_200_should_raise_assertionerror(self):
        value = pd.Series([200])
def featSelect(homepath,cancerpath,normalpath,k=5):
'''
Parameters
----------
`homepath` (str):
Path where you want to save all the generated files
and folders.
    `cancerpath` (str):
        Path where the tumour (cancer) gene expression
        matrices of all cancer types are located.
    `normalpath` (str):
        Path where the normal-tissue gene expression
        matrices of all cancer types are located.
    `k` (int):
        The number of top genes to select per cancer
        type (default: 5). Values of `k` below 5 are raised to 5.
    Return:
    -------
    `names` (list):
        Names of the cancers found in `cancerpath`.
    Outputs:
    --------
        Creates a folder named "std_npy" under `homepath` and saves
        the top `k` genes of each cancer type as a .npy file.
'''
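    # Illustrative call (paths and k below are placeholders, not files from
    # this repository):
    #   names = featSelect('/home/user/project',
    #                      '/home/user/project/cancer',
    #                      '/home/user/project/normal', k=10)
    # This would create /home/user/project/std_npy/ with one .npy file of the
    # top-10 selected genes per cancer type and return the cancer names.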
import numpy as np
import pandas as pd
from sklearn.feature_selection import SelectKBest, chi2 , f_classif,f_regression,mutual_info_classif,mutual_info_regression
from sklearn.svm import SVR
import os
from os import listdir
from os.path import isfile, join
import warnings
if(k<5):
k=5
warnings.filterwarnings("ignore")
cancerfiles = [f for f in listdir(cancerpath) if isfile(join(cancerpath, f))]
normalfiles = [f for f in listdir(normalpath) if isfile(join(normalpath, f))]
names = []
for f in cancerfiles:
s = f.split('.')
names.append(s[0])
list.sort(cancerfiles)
list.sort(normalfiles)
list.sort(names)
# Directory
directory = "std_npy"
# Parent Directory path
parent_dir = homepath
# Path
path = os.path.join(parent_dir, directory)
if not os.path.exists(path):
os.mkdir(path)
print("Feature selection process is running...")
#reading data and doing work
for index in range(len(cancerfiles)):
Cancer = pd.read_csv(cancerpath+'/'+cancerfiles[index], header=None, index_col=None)
        Normal = pd.read_csv(normalpath+'/'+normalfiles[index], header=None, index_col=None)
import streamlit as st
from ..global_data import Constants, load_data, load_pred
import pandas as pd
from pathlib import Path
import datetime
# from sklearn.preprocessing import MinMaxScaler
from covid_forecasting_joint_learning.pipeline import main as Pipeline, sird
from covid_forecasting_joint_learning.data import cols as DataCol
from matplotlib import pyplot as plt
from .eval import app as __app
from matplotlib.figure import Figure
from matplotlib.spines import Spines
from covid_forecasting_joint_learning.data.kabko import KabkoData
from covid_forecasting_joint_learning.pipeline.preprocessing import Group
from covid_forecasting_joint_learning.pipeline.clustering import Cluster
from covid_forecasting_joint_learning.model.general import DEFAULT_FUTURE_EXO_COLS, DEFAULT_PAST_COLS
def _app():
return __app(
title="# Forecast",
log_dir="logs/pred",
model_dir="model/pred",
trial_id=-2,
limit_data=False,
val=2,
early_stopping_kwargs={
"rise_patience": 25,
"still_patience": 25,
"both_patience": 75
},
show_loss=False,
show_epoch=True,
show_tb=False
)
@st.cache(
hash_funcs={
KabkoData: id,
Cluster: id,
Group: id,
type(KabkoData): id,
type(load_data): id
},
allow_output_mutation=True
)
def pred(target, model_dir_3):
data_torch = target.datasets_torch[0][0]
target.model.future_length = data_torch[4].size(1)
target.model.eval()
pred_vars = target.model(*data_torch[:5]).detach().numpy()
data_np = target.datasets[0][0]
indices = data_np[-1]
df_vars = pd.DataFrame(pred_vars[0], columns=DataCol.SIRD_VARS, index=indices)
prev = data_torch[5]
pred_final = target.model.rebuild(pred_vars, prev, target.population, sird.rebuild)
indices = data_np[-1]
df_final = pd.DataFrame(pred_final[0], columns=DataCol.IRD, index=indices)
Path(model_dir_3).mkdir(parents=True, exist_ok=True)
df = pd.concat([df_vars, df_final], axis=1)
df.to_excel(f"{model_dir_3}/pred.xlsx", sheet_name="pred")
return df
@st.cache(
hash_funcs={
KabkoData: id,
Cluster: id,
Group: id,
type(KabkoData): id,
type(load_data): id
},
allow_output_mutation=True
)
def save_combined_pred(preds, model_dir_2):
    df = pd.concat(preds)
import os
import torch, numpy as np, pandas as pd
from torch.nn import functional as F
from torch.utils.data import DataLoader, Dataset
from argparse import ArgumentParser
import h5py
import pytorch_lightning as pl
class S_BERT_Regression(pl.LightningModule):
def __init__(self, hparams):
super(S_BERT_Regression, self).__init__()
# not the best model...
self.hparams = hparams
self.l1 = torch.nn.Linear(768 * 3, 768)
self.l2 = torch.nn.Linear(768, 256)
self.l3 = torch.nn.Linear(256, 1)
self.dropout = torch.nn.Dropout(p=0.2)
def forward(self, x):
f1 = self.dropout(torch.relu(self.l1(x.view(x.size(0), -1))))
f2 = torch.relu(self.l2(f1))
out = self.l3(f2)
return out
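    # Note (added for clarity): x is the concatenation of the edited, unedited
    # and difference sentence embeddings, i.e. shape (batch, 3 * 768); the
    # view() call above only keeps the batch dimension intact.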
def training_step(self, batch, batch_idx):
id, edited, unedited, y = batch
difference = edited - unedited
x = torch.cat((edited, unedited, difference), 1)
y_hat = self.forward(x)
return {'loss': F.mse_loss(y_hat.squeeze(), y)}
def validation_step(self, batch, batch_idx):
id, edited, unedited, y = batch
difference = edited - unedited
x = torch.cat((edited, unedited, difference), 1)
y_hat = self.forward(x)
return {'val_loss': F.mse_loss(y_hat.squeeze(), y)}
def validation_end(self, outputs):
avg_loss = torch.stack([x['val_loss'] for x in outputs]).mean()
return {'avg_val_loss': avg_loss}
def test_step(self, batch, batch_idx):
id, edited, unedited = batch
difference = edited - unedited
x = torch.cat((edited, unedited, difference), 1)
y_hat = self.forward(x)
return {'pred': y_hat, 'id': id}
def test_end(self, outputs):
all_preds = []
all_ids = []
for x in outputs:
all_preds += list(x['pred'])
all_ids += list(x['id'])
all_preds = [float(ap) for ap in all_preds]
all_ids = [int(ai) for ai in all_ids]
        df = pd.DataFrame(data={'id': all_ids, 'pred': all_preds})
import optparse
import time
from matplotlib.backends.backend_pdf import PdfPages
import numpy as np
import matplotlib.pyplot as plt
#import matplotlib as mpl
#from matplotlib.projections import PolarAxes
#from matplotlib.ticker import MultipleLocator,FormatStrFormatter
#import os
import katpoint
from katpoint import deg2rad ,rad2deg
from katsdpscripts import git_info
import pandas
from katsdpscripts.reduction.analyse_point_source_scans import batch_mode_analyse_point_source_scans
#from astropy.time import Time
#from matplotlib.dates import DateFormatter
def angle_wrap(angle, period=2.0 * np.pi):
"""Wrap angle into the interval -*period* / 2 ... *period* / 2."""
return (angle + 0.5 * period) % period - 0.5 * period
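# Example (illustrative): angle_wrap(3.5 * np.pi) returns -0.5 * np.pi (up to
# floating-point error), since the angle is folded back into [-pi, pi).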
# These fields contain strings, while the rest of the fields are assumed to contain floats
string_fields = ['dataset', 'target', 'timestamp_ut', 'data_unit']
# Create a date/time string for current time
now = time.strftime('%Y-%m-%d_%Hh%M')
def read_offsetfile(filename):
# Load data file in one shot as an array of strings
string_fields = ['dataset', 'target', 'timestamp_ut', 'data_unit']
data = np.loadtxt(filename, dtype='string', comments='#', delimiter=', ')
# Interpret first non-comment line as header
fields = data[0].tolist()
# By default, all fields are assumed to contain floats
formats = np.tile(np.float, len(fields))
# The string_fields are assumed to contain strings - use data's string type, as it is of sufficient length
formats[[fields.index(name) for name in string_fields if name in fields]] = data.dtype
# Convert to heterogeneous record array
data = np.rec.fromarrays(data[1:].transpose(), dtype=list(zip(fields, formats)))
# Load antenna description string from first line of file and construct antenna object from it
antenna = katpoint.Antenna(file(filename).readline().strip().partition('=')[2])
# Use the pointing model contained in antenna object as the old model (if not overridden by file)
# If the antenna has no model specified, a default null model will be used
return data,antenna
def metrics(model,az,el,measured_delta_az, measured_delta_el ,std_delta_az ,std_delta_el,time_stamps):
"""Determine new residuals and sky RMS from pointing model."""
model_delta_az, model_delta_el = model.offset(az, el)
residual_az = measured_delta_az - model_delta_az
residual_el = measured_delta_el - model_delta_el
residual_xel = residual_az * np.cos(el)
abs_sky_error = rad2deg(np.sqrt(residual_xel ** 2 + residual_el ** 2))
    offset_az_ts = pandas.Series(rad2deg(residual_xel), pandas.to_datetime(time_stamps, unit='s'))
from __future__ import print_function
import csv
import six
import matplotlib as mpl
import pandas as pd
import numpy as np
from pprint import pprint
from tracerlib import io, core
from tracerlib.io import check_binary
mpl.use('pdf')
import re
import seaborn as sns
from matplotlib import pyplot as plt
from tracerlib import base_dir
from tracerlib import tracer_func
try:
from configparser import ConfigParser, NoOptionError
except ImportError:
from ConfigParser import ConfigParser, NoOptionError
import argparse
import sys
import os
import subprocess
import glob
import shutil
from collections import defaultdict, Counter
from time import sleep
import warnings
import pickle
from prettytable import PrettyTable
from Bio.Seq import Seq
from Bio import SeqIO
import itertools
import pdb
from numpy import percentile, array
from matplotlib.colors import hex2color, rgb2hex
import random
import copy
import colorsys
class TracerTask(object):
base_parser = argparse.ArgumentParser(add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
base_parser.add_argument('--ncores', '-p', metavar="<CORES>",
help='number of processor cores to use', type=int,
default=1)
base_parser.add_argument('--config_file', '-c', metavar="<CONFIG_FILE>",
help='config file to use',
default=None)
base_parser.add_argument('--resource_dir', metavar="<RESOURCE_DIR>",
help='root directory for resources', default=None)
config = None
def run(self):
pass
def get_binary(self, name):
tool_key = name.lower() + '_path'
user_path = None
if self.config.has_option('tool_locations', tool_key):
user_path = self.resolve_relative_path(
self.config.get('tool_locations', tool_key))
return check_binary(name, user_path)
def get_tracer_path(self):
tracer_path = None
if self.config.has_option('tracer_location', 'tracer_path'):
path = self.config.get('tracer_location', 'tracer_path')
if os.path.exists(path):
tracer_path = path
else:
print("Please specify the path to where you originally"
" downloaded TraCeR in the config file.")
return tracer_path
def read_config(self, config_file):
# First look for environmental variable
if not config_file:
config_file = os.environ.get('TRACER_CONF', None)
if config_file is not None:
config_file = os.path.expanduser(config_file)
if not os.path.isfile(config_file):
config_file = None
# Then check the default location
if not config_file:
config_file = os.path.expanduser('~/.tracerrc')
if not os.path.isfile(config_file):
print("Config file not found at ~/.tracerrc."
" Using default tracer.conf in repo...")
tracer_path = self.get_tracer_path()
config_file = os.path.join(tracer_path, 'tracer.conf')
if not os.path.isfile(config_file):
config_file = os.path.join(base_dir, 'tracer.conf')
tracer_func.check_config_file(config_file)
config = ConfigParser()
config.read(config_file)
return config
def resolve_relative_path(self, path):
if not path.startswith("/"):
base_directory = os.path.abspath(os.path.dirname(__file__))
full_path = os.path.normpath(
"/{}/../{}".format(base_directory, path))
else:
full_path = path
return full_path
def print_cell_summary(self, cell, output_file, receptor_name, loci):
out_file = open(output_file, 'w')
out_file.write(
'------------------\n{name}\n------------------\n'.format(
name=cell.name))
# summarise the productive/total recombinants
for l in loci:
out_file.write(
'{receptor}_{locus} recombinants: {summary}\n'.format(
receptor=receptor_name, locus=l,
summary=cell.summarise_productivity(receptor_name, l)))
out_file.write('\n\n')
for l in loci:
out_file.write(
"#{receptor}_{locus}#\n".format(receptor=receptor_name,
locus=l))
rs = cell.recombinants[receptor_name][l]
if rs is None:
out_file.write(
"No {receptor}_{locus} recombinants found\n\n".format(
receptor=receptor_name, locus=l))
else:
for r in rs:
out_file.write(r.get_summary())
out_file.write("\n\n")
# out_file.write('#TCRA#\n')
# if cell.A_recombinants is None:
# out_file.write("No TCRA recombinants found\n\n")
# else:
# for rec in cell.A_recombinants:
# out_file.write(rec.get_summary())
# out_file.write("\n\n")
# out_file.write('#TCRB#\n')
#
# if cell.B_recombinants is None:
# out_file.write("No TCRB recombinants found\n\n")
# else:
# for rec in cell.B_recombinants:
# out_file.write(rec.get_summary())
# out_file.write("\n\n")
out_file.close()
def die_with_empty_cell(self, cell_name, output_dir, species):
print("##No recombinants found##")
cell = core.Cell(cell_name, None, is_empty=True, species=species,
receptor=self.receptor_name, loci=self.loci)
self.print_cell_summary(
cell,
"{output_dir}/unfiltered_{receptor}_seqs/unfiltered_{receptor}s.txt".format(
output_dir=self.output_dir,
receptor=self.receptor_name),
self.receptor_name, self.loci)
# Save cell in a pickle
with open(
"{output_dir}/unfiltered_{receptor}_seqs/{cell_name}.pkl".format(
output_dir=self.output_dir,
cell_name=cell.name,
receptor=self.receptor_name), 'wb') as pf:
pickle.dump(cell, pf, protocol=0)
cell.filter_recombinants()
self.print_cell_summary(
cell,
"{output_dir}/filtered_{receptor}_seqs/filtered_{receptor}s.txt".format(
output_dir=self.output_dir,
receptor=self.receptor_name),
self.receptor_name, self.loci)
with open(
"{output_dir}/filtered_{receptor}_seqs/{cell_name}.pkl".format(
output_dir=self.output_dir,
cell_name=cell.name,
receptor=self.receptor_name), 'wb') as pf:
pickle.dump(cell, pf, protocol=0)
exit(0)
def get_species_root(self, species, root=None, build_mode = False):
if root is None:
tracer_path = self.get_tracer_path()
if tracer_path is not None:
resources_root = os.path.join(tracer_path, 'resources', species)
else:
# Look for resources in base directory if tracer_path is not specified
resources_root = os.path.join(base_dir, 'resources', species)
else:
resources_root = os.path.join(root, species)
if not build_mode:
assert os.path.isdir(resources_root), "Species not found in resources"
return (resources_root)
# def get_available_species(self, root=None):
# if root is None:
# resources_dir = os.path.join(base_dir, 'resources')
# else:
# resources_dir = root
# species_dirs = next(os.walk(resources_dir))[1]
# return (species_dirs)
class Assembler(TracerTask):
def __init__(self, **kwargs):
if not kwargs:
# get list of all available species in resources
parser = argparse.ArgumentParser(
description="Reconstruct TCR sequences from RNAseq reads for a single cell",
parents=[self.base_parser],
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--resume_with_existing_files', '-r',
help='look for existing intermediate files and use those instead of starting from scratch',
action="store_true")
parser.add_argument('--species', '-s',
help='Species to use for reconstruction',
default='Mmus')
parser.add_argument('--receptor_name',
help="Name of receptor to reconstruct",
default='TCR')
parser.add_argument('--loci',
help="Space-separated list of loci to reconstruct for receptor",
default=['A', 'B'], nargs='+')
parser.add_argument('--seq_method', '-m',
help='Method for constructing sequence to assess productivity, \
quantify expression and for output reporting. See README for details.',
choices=['imgt', 'assembly'], default='imgt')
parser.add_argument('--quant_method', '-q',
help='Method for expression quantification. See README for details.',
choices=['kallisto', 'salmon'],
default='kallisto')
parser.add_argument('--small_index',
help='Set this to speed up expression quantification by using a smaller index. See README for details.',
action="store_true")
parser.add_argument('--single_end',
help='set this if your sequencing data are single-end reads',
action="store_true")
parser.add_argument('--fragment_length',
help='Estimated average fragment length in the sequencing library.'
' Used for Kallisto quantification. REQUIRED for single-end data.',
default=False)
parser.add_argument('--fragment_sd',
help='Estimated standard deviation of average fragment length in the sequencing library.'
' Used for Kallisto quantification. REQUIRED for single-end data.',
default=False)
parser.add_argument('--max_junc_len',
help="Maximum permitted length of junction string in recombinant identifier. "
"Used to filter out artefacts. May need to be longer for TCRdelta.",
default=50, type=int)
parser.add_argument('fastq1', metavar="<FASTQ1>",
help='first fastq file')
parser.add_argument('fastq2', metavar="<FASTQ2>",
help='second fastq file', nargs='?')
parser.add_argument('cell_name', metavar="<CELL_NAME>",
help='name of cell for file labels')
parser.add_argument('output_dir', metavar="<OUTPUT_DIR>",
help='directory for output as <output_dir>/<cell_name>')
args = parser.parse_args(sys.argv[2:])
resource_dir = args.resource_dir
self.cell_name = args.cell_name
self.fastq1 = args.fastq1
self.single_end = args.single_end
self.fastq2 = args.fastq2
self.ncores = str(args.ncores)
self.species = args.species
self.seq_method = args.seq_method
self.quant_method = args.quant_method
self.small_index = args.small_index
self.resume_with_existing_files = args.resume_with_existing_files
self.fragment_length = args.fragment_length
self.fragment_sd = args.fragment_sd
self.output_dir = args.output_dir
self.receptor_name = args.receptor_name
self.loci = args.loci
self.max_junc_len = args.max_junc_len
config_file = args.config_file
else:
resource_dir = kwargs.get('resource_dir')
self.cell_name = kwargs.get('cell_name')
self.fastq1 = kwargs.get('fastq1')
self.fastq2 = kwargs.get('fastq2')
self.ncores = kwargs.get('ncores')
self.species = kwargs.get('species')
self.seq_method = kwargs.get('seq_method')
self.quant_method = kwargs.get('quant_method')
self.small_index = kwargs.get('small_index')
self.resume_with_existing_files = kwargs.get(
'resume_with_existing_files')
self.output_dir = kwargs.get('output_dir')
self.single_end = kwargs.get('single_end')
self.fragment_length = kwargs.get('fragment_length')
self.fragment_sd = kwargs.get('fragment_sd')
self.receptor_name = kwargs.get('receptor_name')
self.loci = kwargs.get('loci')
self.max_junc_len = kwargs.get('max_junc_len')
config_file = kwargs.get('config_file')
self.config = self.read_config(config_file)
self.species_root = self.get_species_root(self.species,
root=resource_dir)
# self.locus_names = ["TCRA", "TCRB"]
# Check the fastq config is correct
if not self.single_end:
assert self.fastq2, "Only one fastq file specified. Either set --single_end or provide second fastq."
else:
self.fastq2 = None
if self.fastq2:
print(
"Two fastq files given with --single-end option. Ignoring second file.")
assert self.fragment_length and self.fragment_sd, \
'Must specify estimated average fragment length (--fragment_length)' \
' and standard deviation (--fragment_sd) for use with single-end data'
assert self.fragment_length, \
'Must specify estimated average fragment length (--fragment_length) for use with single-end data'
assert self.fragment_sd, \
'Must specify estimated fragment length standard deviation (--fragment_sd) for use with single-end data'
# Check FASTQ files exist
if not os.path.isfile(self.fastq1):
raise OSError('2', 'FASTQ file not found', self.fastq1)
if not self.single_end and self.fastq2:
if not os.path.isfile(self.fastq2):
raise OSError('2', 'FASTQ file not found', self.fastq2)
def run(self, **kwargs):
# Set-up output directories
root_output_dir = os.path.abspath(self.output_dir)
io.makeOutputDir(root_output_dir)
self.output_dir = root_output_dir + "/" + self.cell_name
io.makeOutputDir(self.output_dir)
data_dirs = ['aligned_reads', 'Trinity_output', 'IgBLAST_output',
'unfiltered_{receptor}_seqs'.format(
receptor=self.receptor_name),
'expression_quantification',
'filtered_{receptor}_seqs'.format(
receptor=self.receptor_name)]
for d in data_dirs:
io.makeOutputDir("{}/{}".format(self.output_dir, d))
# Perform TraCeR's core functions
self.align()
self.de_novo_assemble()
cell = self.ig_blast()
self.quantify(cell)
fasta_filename = "{output_dir}/unfiltered_{receptor}_seqs/{cell_name}_{receptor}seqs.fa".format(
output_dir=self.output_dir,
cell_name=self.cell_name,
receptor=self.receptor_name)
fasta_file = open(fasta_filename, 'w')
fasta_file.write(cell.get_fasta_string())
fasta_file.close()
self.print_cell_summary(
cell,
"{output_dir}/unfiltered_{receptor}_seqs/unfiltered_{receptor}s.txt".format(
output_dir=self.output_dir,
receptor=self.receptor_name),
self.receptor_name, self.loci)
# Save cell in a pickle
with open(
"{output_dir}/unfiltered_{receptor}_seqs/{cell_name}.pkl".format(
output_dir=self.output_dir,
cell_name=cell.name,
receptor=self.receptor_name), 'wb') as pf:
pickle.dump(cell, pf, protocol=0)
print("##Filtering by read count##")
cell.filter_recombinants()
fasta_filename = "{output_dir}/filtered_{receptor}_seqs/{cell_name}_{receptor}seqs.fa".format(
output_dir=self.output_dir,
cell_name=self.cell_name,
receptor=self.receptor_name)
fasta_file = open(fasta_filename, 'w')
fasta_file.write(cell.get_fasta_string())
fasta_file.close()
self.print_cell_summary(
cell,
"{output_dir}/filtered_{receptor}_seqs/filtered_{receptor}s.txt".format(
output_dir=self.output_dir,
receptor=self.receptor_name),
self.receptor_name, self.loci)
with open(
"{output_dir}/filtered_{receptor}_seqs/{cell_name}.pkl".format(
output_dir=self.output_dir,
cell_name=cell.name,
receptor=self.receptor_name), 'wb') as pf:
pickle.dump(cell, pf, protocol=0)
def align(self):
bowtie2 = self.get_binary('bowtie2')
synthetic_genome_path = os.path.join(self.species_root,
'combinatorial_recombinomes')
# Align with bowtie
tracer_func.bowtie2_alignment(
bowtie2, self.ncores, self.receptor_name, self.loci,
self.output_dir, self.cell_name,
synthetic_genome_path, self.fastq1, self.fastq2,
self.resume_with_existing_files, self.single_end)
print()
def de_novo_assemble(self):
try:
trinity = self.get_binary('trinity')
except OSError:
trinity = self.get_binary('Trinity')
# Trinity version
if not self.config.has_option('trinity_options', 'trinity_version'):
try:
subprocess.check_output([trinity, '--version'])
except subprocess.CalledProcessError as err:
if re.search('v2', err.output.decode('utf-8')):
self.config.set('trinity_options', 'trinity_version', '2')
else:
self.config.set('trinity_options', 'trinity_version', '1')
if self.config.has_option('trinity_options', 'trinity_grid_conf'):
trinity_grid_conf = self.resolve_relative_path(
self.config.get('trinity_options', 'trinity_grid_conf'))
else:
trinity_grid_conf = False
# Is Trinity version compatible with --no_normalize_reads argument
no_normalise = False
trinity_version = self.config.get('trinity_options', 'trinity_version')
if trinity_version == '2':
try:
subprocess.check_output([trinity, '--version'])
except subprocess.CalledProcessError as err:
first_line = err.output.decode('utf-8').split("\n")[0]
m = re.search(r'v(\d+\.\d+\.?\d*)', first_line)
if m is not None:
minor_version = int(m.group()[1:].split(".")[1])
if minor_version >= 3:
no_normalise = True
else:
no_normalise = False
# De novo assembly with trinity
trinity_JM = self.config.get('trinity_options', 'max_jellyfish_memory')
trinity_version = self.config.get('trinity_options', 'trinity_version')
successful_files = tracer_func.assemble_with_trinity(
trinity, self.receptor_name, self.loci, self.output_dir,
self.cell_name, self.ncores, trinity_grid_conf,
trinity_JM, trinity_version, self.resume_with_existing_files,
self.single_end, self.species, no_normalise, self.config)
if len(successful_files) == 0:
print("No successful Trinity assemblies")
self.die_with_empty_cell(self.cell_name, self.output_dir,
self.species)
print()
def ig_blast(self):
igblastn = self.get_binary('igblastn')
# Reference data locations
igblast_index_location = os.path.join(self.species_root, 'igblast_dbs')
imgt_seq_location = os.path.join(self.species_root, 'raw_seqs')
igblast_seqtype = self.config.get('IgBlast_options', 'igblast_seqtype')
# IgBlast of assembled contigs - run twice. Once with output format 3 and once with output format 7
for fmt in (str(3),str(7)):
tracer_func.run_IgBlast(igblastn, self.receptor_name, self.loci,
self.output_dir, self.cell_name,
igblast_index_location,
igblast_seqtype, self.species,
self.resume_with_existing_files, fmt)
print()
with warnings.catch_warnings():
warnings.simplefilter("ignore")
# cell = io.parse_IgBLAST(self.receptor_name, self.loci, self.output_dir, self.cell_name, imgt_seq_location,
# self.species, self.seq_method, self.invariant_sequences)
cell = io.parse_IgBLAST(self.receptor_name, self.loci,
self.output_dir, self.cell_name,
imgt_seq_location,
self.species, self.seq_method,
self.max_junc_len)
if cell.is_empty:
self.die_with_empty_cell(self.cell_name, self.output_dir,
self.species)
return cell
def quantify(self, cell):
if not self.config.has_option('base_transcriptomes', self.species):
raise OSError("No transcriptome reference specified for {species}."
" Please specify location in config file."
.format(species=self.species))
else:
base_transcriptome = self.resolve_relative_path(
self.config.get('base_transcriptomes', self.species))
if not os.path.isfile(base_transcriptome):
raise OSError(
'2', 'Transcriptome reference not found', base_transcriptome)
# set up Salmon/Kallisto parameters
if self.quant_method == 'salmon':
salmon = self.get_binary('salmon')
if self.config.has_option('salmon_options', 'libType'):
salmon_libType = self.config.get('salmon_options', 'libType')
else:
print("No library type specified for salmon "
"in the configuration file. Using automatic detection "
"(--libType A).")
salmon_libType = 'A'
if self.config.has_option('salmon_options', 'kmerLen'):
salmon_kmerLen = self.config.get('salmon_options', 'kmerLen')
else:
print("No kmer length specified for salmon "
"in the configuration file. Using default value of 31.")
salmon_kmerLen = 31
else:
kallisto = self.get_binary('kallisto')
# small-index method: filter base_transcriptome -> ref_transcriptome
if self.small_index:
if not self.config.has_option(
"{}{}".format(self.quant_method, '_base_indices'),
self.species):
raise OSError(
"No {method} {species} reference index specified to be used"
" with option --small_index. "
"Please specify location in config file.".format(
method=self.quant_method, species=self.species))
else:
base_index = self.resolve_relative_path(
self.config.get(
"{}{}".format(self.quant_method, '_base_indices'),
self.species))
if not os.path.exists(base_index):
raise OSError('2', 'Reference index not found', base_index)
smind_dir = "{}/{}".format(self.output_dir,
'expression_quantification_from_base_index')
new_ref_transcriptome = "{}/{}".format(smind_dir, 'newref.fa')
io.makeOutputDir(smind_dir)
if self.quant_method == 'salmon':
tracer_func.quantify_with_salmon_from_index(
salmon, cell, smind_dir, self.cell_name, base_index,
self.fastq1, self.fastq2, self.ncores,
self.resume_with_existing_files, self.single_end,
self.fragment_length, self.fragment_sd,
salmon_libType, salmon_kmerLen)
quantfile = "{}/{}".format(smind_dir, 'quant.sf')
tpmcol = 3 # TPM col = 3 in quant.sf
else: # use kallisto
tracer_func.quantify_with_kallisto_from_index(
kallisto, cell, smind_dir, self.cell_name, base_index,
self.fastq1, self.fastq2, self.ncores,
self.resume_with_existing_files, self.single_end,
self.fragment_length, self.fragment_sd)
quantfile = "{}/{}".format(smind_dir, 'abundance.tsv')
tpmcol = 4 # TPM col = 4 in abundance.tsv
tracer_func.extract_newref_from_quant(
base_transcriptome, quantfile, tpmcol, new_ref_transcriptome)
ref_transcriptome = new_ref_transcriptome
else:
# use entire base_transcriptome for index construction as usual
ref_transcriptome = base_transcriptome
# actual quantification of TCR Seq
if self.quant_method == 'salmon':
tracer_func.quantify_with_salmon(
salmon, cell, self.output_dir, self.cell_name, ref_transcriptome,
self.fastq1, self.fastq2, self.ncores,
self.resume_with_existing_files,
self.single_end, self.fragment_length,
self.fragment_sd, salmon_libType, salmon_kmerLen)
print()
counts = tracer_func.load_salmon_counts(
"{}/expression_quantification/quant.sf".format(self.output_dir))
else:
tracer_func.quantify_with_kallisto(
kallisto, cell, self.output_dir, self.cell_name,
ref_transcriptome, self.fastq1, self.fastq2, self.ncores,
self.resume_with_existing_files, self.single_end,
self.fragment_length, self.fragment_sd)
print()
counts = tracer_func.load_kallisto_counts(
"{}/expression_quantification/abundance.tsv".format(self.output_dir))
if self.small_index:
os.remove(new_ref_transcriptome) # remove filtered reference file
for receptor, locus_dict in six.iteritems(cell.recombinants):
for locus, recombinants in six.iteritems(locus_dict):
if recombinants is not None:
for rec in recombinants:
tpm = counts[receptor][locus][rec.contig_name]
rec.TPM = tpm
class Summariser(TracerTask):
def __init__(self, **kwargs):
if not kwargs:
parser = argparse.ArgumentParser(
description="Summarise set of cells with reconstructed TCR sequences",
parents=[self.base_parser])
parser.add_argument('--species', '-s',
help='Species to use for reconstruction',
default='Mmus')
parser.add_argument('--receptor_name',
help="Name of receptor to summarise",
default='TCR')
parser.add_argument('--loci',
help="Space-separated list of loci to summarise for receptor",
default=['A', 'B'], nargs='+')
parser.add_argument('--use_unfiltered', '-u',
help='use unfiltered recombinants',
action="store_true")
parser.add_argument('--keep_invariant', '-i',
help='ignore invariant cells when constructing networks',
action="store_true")
parser.add_argument('--graph_format', '-f',
metavar="<GRAPH_FORMAT>",
help='graphviz output format [pdf]',
default='pdf')
parser.add_argument('--no_networks',
help='skip attempts to draw network graphs',
action="store_true")
parser.add_argument('dir', metavar="<DIR>",
help='directory containing subdirectories for each cell to be summarised')
args = parser.parse_args(sys.argv[2:])
resource_dir = args.resource_dir
self.root_dir = os.path.abspath(args.dir)
self.graph_format = args.graph_format
self.keep_invariant = args.keep_invariant
self.use_unfiltered = args.use_unfiltered
self.draw_graphs = not args.no_networks
self.receptor_name = args.receptor_name
self.loci = args.loci
self.species = args.species
config_file = args.config_file
else:
resource_dir = kwargs.get('resource_dir')
self.use_unfiltered = kwargs.get('use_unfiltered')
self.root_dir = os.path.abspath(kwargs.get('root_dir'))
self.draw_graphs = not (kwargs.get('no_networks'))
self.graph_format = kwargs.get('graph_format')
self.keep_invariant = kwargs.get('keep_invariant')
self.receptor_name = kwargs.get('receptor_name')
self.loci = kwargs.get('loci')
self.species = kwargs.get('species')
config_file = kwargs.get('config_file')
# Read config file
self.config = self.read_config(config_file)
self.species_dir = self.get_species_root(self.species,
root=resource_dir)
invariant_cells = os.path.join(self.species_dir, 'invariant_cells.json')
if os.path.isfile(invariant_cells):
self.invariant_cells = io.parse_invariant_cells(invariant_cells)
else:
self.invariant_cells = None
def run(self):
if self.draw_graphs:
dot = self.get_binary('dot')
neato = self.get_binary('neato')
#dot = self.resolve_relative_path(
# self.config.get('tool_locations', 'dot_path'))
#neato = self.resolve_relative_path(
# self.config.get('tool_locations', 'neato_path'))
#
## check that executables from config file can be used
#not_executable = []
#for name, x in six.iteritems({"dot": dot, "neato": neato}):
# if not io.is_exe(x):
# not_executable.append((name, x))
#if len(not_executable) > 0:
# print()
# print("Could not execute the following required tools."
# " Check your configuration file.")
# for t in not_executable:
# print(t[0], t[1])
# print()
# exit(1)
else:
dot = ""
neato = ""
cells = {}
empty_cells = []
subdirectories = next(os.walk(self.root_dir))[1]
if self.use_unfiltered:
pkl_dir = "unfiltered_{}_seqs".format(self.receptor_name)
outdir = "{}/unfiltered_{}_summary".format(
self.root_dir, self.receptor_name + "".join(self.loci))
# outfile = open("{root_dir}/unfiltered_TCR_summary.txt".format(root_dir=root_dir), 'w')
# length_filename_root = "{}/unfiltered_reconstructed_lengths_TCR".format(root_dir)
else:
pkl_dir = "filtered_{}_seqs".format(self.receptor_name)
outdir = "{}/filtered_{}_summary".format(
self.root_dir, self.receptor_name + "".join(self.loci))
# outfile = open("{root_dir}/filtered_TCR_summary.txt".format(root_dir=root_dir), 'w')
# length_filename_root = "{}/filtered_reconstructed_lengths_TCR".format(root_dir)
io.makeOutputDir(outdir)
outfile = open("{}/{}_summary.txt".format(outdir, self.receptor_name),
'w')
length_filename_root = "{}/reconstructed_lengths_{}".format(outdir,
self.receptor_name)
for d in subdirectories:
cell_pkl = "{root_dir}/{d}/{pkl_dir}/{d}.pkl".format(
pkl_dir=pkl_dir, d=d, root_dir=self.root_dir)
if os.path.isfile(cell_pkl):
with open(cell_pkl, 'rb') as pkl:
cl = pickle.load(pkl)
cells[d] = cl
if cl.is_empty or cl.missing_loci_of_interest(
self.receptor_name, self.loci):
empty_cells.append(d)
cell_recovery_count = dict()
# count cells with productive chains for each locus and for each
# possible pair
for l in self.loci:
cell_recovery_count[l] = 0
possible_pairs = ["".join(x) for x in
itertools.combinations(self.loci, 2)]
for p in possible_pairs:
cell_recovery_count[p] = 0
for cell_name, cell in six.iteritems(cells):
prod_counts = dict()
for l in self.loci:
prod_counts[l] = cell.count_productive_recombinants(
self.receptor_name, l)
if prod_counts[l] > 0:
cell_recovery_count[l] += 1
for p in possible_pairs:
if prod_counts[p[0]] > 0 and prod_counts[p[1]] > 0:
cell_recovery_count[p] += 1
total_cells = len(cells)
for l in self.loci:
count = cell_recovery_count[l]
pc = round((count / float(total_cells)) * 100, 1)
outfile.write(
"{receptor}_{locus} reconstruction:\t{count} / {total} ({pc}%)\n".format(
receptor=self.receptor_name,
locus=l, count=count,
total=total_cells, pc=pc))
outfile.write("\n")
for p in possible_pairs:
count = cell_recovery_count[p]
pc = round((count / float(total_cells)) * 100, 1)
outfile.write(
"{p} productive reconstruction:\t{count} / {total} ({pc}%)\n".format(
p=p,
count=count,
total=total_cells, pc=pc))
outfile.write("\n")
all_counters = defaultdict(Counter)
prod_counters = defaultdict(Counter)
for cell in cells.values():
for l in self.loci:
all_counters[l].update(
{cell.count_total_recombinants(self.receptor_name, l): 1})
prod_counters[l].update({cell.count_productive_recombinants(
self.receptor_name, l): 1})
all_recombinant_counts = []
for locus in all_counters:
all_recombinant_counts = all_recombinant_counts + \
list(all_counters[locus].keys())
max_recombinant_count = max(all_recombinant_counts)
# max_recombinant_count = max(list(counters['all_alpha'].keys()) + list(counters['all_beta'].keys()))
table_header = ['', '0 recombinants',
'1 recombinant', '2 recombinants']
recomb_range = range(0, 3)
if max_recombinant_count > 2:
extra_header = [str(x) + " recombinants" for x in
range(3, max_recombinant_count + 1)]
table_header = table_header + extra_header
recomb_range = range(0, max_recombinant_count + 1)
t = PrettyTable(table_header)
t.padding_width = 1
t.align = "l"
# make all recombinant table
for counter_name in ['all_counters', 'prod_counters']:
counter_type = counter_name.split("_")[0]
counter_set = eval(counter_name)
for l in self.loci:
counter = counter_set[l]
count_array = [counter[x] for x in recomb_range]
total_with_at_least_one = sum(count_array[1:])
if total_with_at_least_one > 0:
percentages = [''] + [" (" + str(round(
(float(x) / total_with_at_least_one) * 100)) + "%)" for
x in
count_array[1:]]
else:
percentages = [''] + [" (N/A%)" for x in count_array[1:]]
row = []
for i in recomb_range:
row.append(str(count_array[i]) + percentages[i])
label = '{} {}'.format(counter_type, l)
t.add_row([label] + row)
outfile.write(t.get_string())
outfile.write("\n")
# If using unfiltered, name cells with more than two recombinants#
if self.use_unfiltered:
outfile.write(
"\n\n#Cells with more than two recombinants for a locus#\n")
found_multi = False
for cell in cells.values():
# if cell.count_total_recombinants('A') > 2 or
# cell.count_total_recombinants('B') > 2:
if cell.has_excess_recombinants(2):
outfile.write("###{}###\n".format(cell.name))
for l in self.loci:
count = cell.count_total_recombinants(
self.receptor_name, l)
outfile.write("{receptor}_{l}:\t{count}\n".format(
receptor=self.receptor_name, l=l, count=count))
outfile.write("\n")
found_multi = True
if not found_multi:
outfile.write("None\n\n")
# Reporting iNKT cells
# iNKT_count = len(NKT_cells)
# if iNKT_count == 1:
# cell_word = 'cell'
# else:
# cell_word = 'cells'
# outfile.write("\n\n#iNKT cells#\nFound {iNKT_count} iNKT {cell_word}\n".format(iNKT_count=iNKT_count,
# cell_word=cell_word))
# if iNKT_count > 0:
# for cell_name, ids in six.iteritems(NKT_cells):
# outfile.write("###{cell_name}###\n".format(cell_name=cell_name))
# outfile.write("TCRA:\t{}\nTCRB\t{}\n\n".format(ids[0], ids[1]))
#
# reporting invariant cells
invariant_cells = []
if self.invariant_cells is not None:
for ivc in self.invariant_cells:
ivc_loci = []
found_ivcs = {}
defining_locus = ivc.defining_locus
if defining_locus in self.loci:
ivc_loci.append(defining_locus)
for cell in cells.values():
found_idents = {}
found_defining_locus, defining_id = ivc.check_for_match(
cell, defining_locus)
if found_defining_locus:
found_idents[ivc.defining_locus] = defining_id
for l in ivc.invariant_recombinants.keys():
if not l == defining_locus:
ivc_loci.append(l)
if l in cell.recombinants[
ivc.receptor_type] and \
cell.recombinants[
ivc.receptor_type][
l] is not None:
found_other_locus, invar_id = ivc.check_for_match(
cell, l)
if found_other_locus:
pass
else:
invar_id = "Invariant recombinant not found for {}_{}. {} found in total ({})".format(
ivc.receptor_type, l, len(
cell.recombinants[
ivc.receptor_type][l]),
cell.getMainRecombinantIdentifiersForLocus(
ivc.receptor_type, l))
else:
invar_id = "No sequences reconstructed for {}_{}".format(
ivc.receptor_type, l)
found_idents[l] = invar_id
found_ivcs[cell.name] = found_idents
invariant_cells.append(cell.name)
if len(found_ivcs) > 0:
outfile.write("\n#{} cells#\n".format(ivc.name))
outfile.write("Expected: {}\n".format(ivc.expected_string))
outfile.write(
"Found {} possible cells.\n\n".format(len(found_ivcs)))
sorted_names = sorted(list(found_ivcs.keys()))
for n in sorted_names:
outfile.write("### {} ###\n".format(n))
ivc_details = found_ivcs[n]
for l in ivc_loci:
outfile.write(
"{}_{}: {}\n".format(ivc.receptor_type, l,
ivc_details[l]))
outfile.write("\n")
# plot lengths of reconstructed sequences
lengths = defaultdict(list)
for cell in cells.values():
for l in self.loci:
lengths[l].extend(
cell.get_trinity_lengths(self.receptor_name, l))
# plot length distributions
quartiles = dict()
for l in self.loci:
q = self.get_quartiles(self.receptor_name, l)
quartiles[l] = q
for l in self.loci:
q = quartiles[l]
lns = lengths[l]
try:
if len(lns) > 1:
plt.figure()
plt.axvline(q[0], linestyle="--", color='k')
plt.axvline(q[1], linestyle="--", color='k')
sns.distplot(lns)
sns.despine()
plt.xlabel(
"{receptor}_{locus} reconstructed length (bp)".format(
receptor=self.receptor_name,
locus=l))
plt.ylabel("Density")
plt.savefig("{}_{}.pdf".format(length_filename_root, l))
if len(lns) > 0:
with open("{}_{}.txt".format(length_filename_root, l),
'w') as f:
for l in sorted(lns):
f.write("{}\n".format(l))
except:
print(self.receptor_name)
print(type(lns))
print(lns)
for cell_name in empty_cells:
del cells[cell_name]
if not self.keep_invariant:
for cell_name in invariant_cells:
del cells[cell_name]
recombinant_data = []
# Write out recombinant details for each cell
with open("{}/recombinants.txt".format(outdir), 'w') as f:
f.write(
"cell_name\tlocus\trecombinant_id\tproductive\treconstructed_length\tCDR3aa\tCDR3nt\n")
sorted_cell_names = sorted(list(cells.keys()))
for cell_name in sorted_cell_names:
cell = cells[cell_name]
cell_data = {"cell_name": cell_name}
for locus in self.loci:
cell_data.update({locus + "_unproductive": None,
locus + "_productive": None})
recombinants = cell.recombinants[self.receptor_name][locus]
if recombinants is not None:
for r in recombinants:
# check for cdr3nt attribute (to make backwards compatible)
if hasattr(r, 'cdr3nt'):
cdr3nt = r.cdr3nt
cdr3 = r.cdr3
# lowercase CDR3 sequences if non-productive
if not r.productive:
cdr3 = cdr3.lower()
cdr3nt = cdr3nt.lower()
else:
cdr3nt = 'N/A'
cdr3 = 'N/A'
f.write(
"{name}\t{locus}\t{ident}\t{productive}\t{length}\t{cdr3}\t{cdr3nt}\n".format(
name=cell_name, locus=locus,
ident=r.identifier,
productive=r.productive,
length=len(r.trinity_seq),
cdr3=cdr3,
cdr3nt=cdr3nt))
if r.productive:
cell_data[locus + "_productive"] = r.identifier
else:
cell_data[
locus + "_unproductive"] = r.identifier
f.write("\n")
recombinant_data.append(cell_data)
f.write("\n\n")
for cell_name in empty_cells:
f.write(
"{cell_name}\tNo seqs found for {receptor}_{loci}\n".format(
cell_name=cell_name,
receptor=self.receptor_name,
loci=self.loci))
recombinant_data = pd.DataFrame(recombinant_data)
# make clonotype networks
network_colours = io.read_colour_file(
os.path.join(self.species_dir, "colours.csv"))
component_groups = tracer_func.draw_network_from_cells(cells, outdir,
self.graph_format,
dot, neato,
self.draw_graphs,
self.receptor_name,
self.loci,
network_colours)
        # Print component groups to the summary
outfile.write(
"\n#Clonotype groups#\n"
"This is a text representation of the groups shown in clonotype_network_with_identifiers.pdf.\n"
"It does not exclude cells that only share beta and not alpha.\n\n")
for g in component_groups:
outfile.write(", ".join(g))
outfile.write("\n\n")
# Build group membership dictionary
group_membership = []
for index, group in enumerate(component_groups):
group_len = len(group)
for cell in group:
group_membership.append({"cell_name": cell,
"clonal_group": index,
"group_size": group_len})
group_membership = | pd.DataFrame(group_membership) | pandas.DataFrame |
# -*- coding: utf-8 -*-
import pandas as pd
import dash
from dash import dcc
from dash import html
import plotly.express as px
from ast import literal_eval
from dash.dependencies import Input, Output
smd = | pd.read_csv("data/data_for_dash.csv") | pandas.read_csv |
# -*- coding: utf-8 -*-
import csv
import os
import platform
import codecs
import re
import sys
from datetime import datetime
import pytest
import numpy as np
from pandas._libs.lib import Timestamp
import pandas as pd
import pandas.util.testing as tm
from pandas import DataFrame, Series, Index, MultiIndex
from pandas import compat
from pandas.compat import (StringIO, BytesIO, PY3,
range, lrange, u)
from pandas.errors import DtypeWarning, EmptyDataError, ParserError
from pandas.io.common import URLError
from pandas.io.parsers import TextFileReader, TextParser
class ParserTests(object):
"""
Want to be able to test either C+Cython or Python+Cython parsers
"""
data1 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
def test_empty_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
# Parsers support only length-1 decimals
msg = 'Only length-1 decimal markers supported'
with tm.assert_raises_regex(ValueError, msg):
self.read_csv(StringIO(data), decimal='')
def test_bad_stream_exception(self):
# Issue 13652:
# This test validates that both python engine
# and C engine will raise UnicodeDecodeError instead of
# c engine raising ParserError and swallowing exception
# that caused read to fail.
handle = open(self.csv_shiftjs, "rb")
codec = codecs.lookup("utf-8")
utf8 = codecs.lookup('utf-8')
# stream must be binary UTF8
stream = codecs.StreamRecoder(
handle, utf8.encode, utf8.decode, codec.streamreader,
codec.streamwriter)
if compat.PY3:
msg = "'utf-8' codec can't decode byte"
else:
msg = "'utf8' codec can't decode byte"
with tm.assert_raises_regex(UnicodeDecodeError, msg):
self.read_csv(stream)
stream.close()
def test_read_csv(self):
if not compat.PY3:
if compat.is_platform_windows():
prefix = u("file:///")
else:
prefix = u("file://")
fname = prefix + compat.text_type(self.csv1)
self.read_csv(fname, index_col=0, parse_dates=True)
def test_1000_sep(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
def test_squeeze(self):
data = """\
a,1
b,2
c,3
"""
idx = Index(['a', 'b', 'c'], name=0)
expected = Series([1, 2, 3], name=1, index=idx)
result = self.read_table(StringIO(data), sep=',', index_col=0,
header=None, squeeze=True)
assert isinstance(result, Series)
tm.assert_series_equal(result, expected)
def test_squeeze_no_view(self):
# see gh-8217
# Series should not be a view
data = """time,data\n0,10\n1,11\n2,12\n4,14\n5,15\n3,13"""
result = self.read_csv(StringIO(data), index_col='time', squeeze=True)
assert not result._is_view
def test_malformed(self):
# see gh-6607
# all
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 4, saw 5'
with tm.assert_raises_regex(Exception, msg):
self.read_table(StringIO(data), sep=',',
header=1, comment='#')
# first chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 6, saw 5'
with tm.assert_raises_regex(Exception, msg):
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1,
skiprows=[2])
it.read(5)
# middle chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 6, saw 5'
with tm.assert_raises_regex(Exception, msg):
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
it.read(3)
# last chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
msg = 'Expected 3 fields in line 6, saw 5'
with tm.assert_raises_regex(Exception, msg):
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
it.read()
# skipfooter is not supported with the C parser yet
if self.engine == 'python':
# skipfooter
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
footer
"""
msg = 'Expected 3 fields in line 4, saw 5'
with tm.assert_raises_regex(Exception, msg):
self.read_table(StringIO(data), sep=',',
header=1, comment='#',
skipfooter=1)
def test_quoting(self):
bad_line_small = """printer\tresult\tvariant_name
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jacob
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jakob
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\t"Furststiftische Hofdruckerei, <Kempten""
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tGaller, Alois
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tHochfurstliche Buchhandlung <Kempten>""" # noqa
pytest.raises(Exception, self.read_table, StringIO(bad_line_small),
sep='\t')
good_line_small = bad_line_small + '"'
df = self.read_table(StringIO(good_line_small), sep='\t')
assert len(df) == 3
def test_unnamed_columns(self):
data = """A,B,C,,
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
expected = np.array([[1, 2, 3, 4, 5],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]], dtype=np.int64)
df = self.read_table(StringIO(data), sep=',')
tm.assert_almost_equal(df.values, expected)
tm.assert_index_equal(df.columns,
Index(['A', 'B', 'C', 'Unnamed: 3',
'Unnamed: 4']))
def test_csv_mixed_type(self):
data = """A,B,C
a,1,2
b,3,4
c,4,5
"""
expected = DataFrame({'A': ['a', 'b', 'c'],
'B': [1, 3, 4],
'C': [2, 4, 5]})
out = self.read_csv(StringIO(data))
tm.assert_frame_equal(out, expected)
def test_read_csv_dataframe(self):
df = self.read_csv(self.csv1, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv1, sep=',', index_col=0,
parse_dates=True)
tm.assert_index_equal(df.columns, pd.Index(['A', 'B', 'C', 'D']))
assert df.index.name == 'index'
assert isinstance(
df.index[0], (datetime, np.datetime64, Timestamp))
assert df.values.dtype == np.float64
tm.assert_frame_equal(df, df2)
def test_read_csv_no_index_name(self):
df = self.read_csv(self.csv2, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv2, sep=',', index_col=0,
parse_dates=True)
tm.assert_index_equal(df.columns,
pd.Index(['A', 'B', 'C', 'D', 'E']))
assert isinstance(df.index[0], (datetime, np.datetime64, Timestamp))
assert df.loc[:, ['A', 'B', 'C', 'D']].values.dtype == np.float64
tm.assert_frame_equal(df, df2)
def test_read_table_unicode(self):
fin = BytesIO(u('\u0141aski, Jan;1').encode('utf-8'))
df1 = self.read_table(fin, sep=";", encoding="utf-8", header=None)
assert isinstance(df1[0].values[0], compat.text_type)
def test_read_table_wrong_num_columns(self):
# too few!
data = """A,B,C,D,E,F
1,2,3,4,5,6
6,7,8,9,10,11,12
11,12,13,14,15,16
"""
pytest.raises(ValueError, self.read_csv, StringIO(data))
def test_read_duplicate_index_explicit(self):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
result = self.read_csv(StringIO(data), index_col=0)
expected = self.read_csv(StringIO(data)).set_index(
'index', verify_integrity=False)
tm.assert_frame_equal(result, expected)
result = self.read_table(StringIO(data), sep=',', index_col=0)
expected = self.read_table(StringIO(data), sep=',', ).set_index(
'index', verify_integrity=False)
tm.assert_frame_equal(result, expected)
def test_read_duplicate_index_implicit(self):
data = """A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
# make sure an error isn't thrown
self.read_csv(StringIO(data))
self.read_table(StringIO(data), sep=',')
def test_parse_bools(self):
data = """A,B
True,1
False,2
True,3
"""
data = self.read_csv(StringIO(data))
assert data['A'].dtype == np.bool_
data = """A,B
YES,1
no,2
yes,3
No,3
Yes,3
"""
data = self.read_csv(StringIO(data),
true_values=['yes', 'Yes', 'YES'],
false_values=['no', 'NO', 'No'])
assert data['A'].dtype == np.bool_
data = """A,B
TRUE,1
FALSE,2
TRUE,3
"""
data = self.read_csv(StringIO(data))
assert data['A'].dtype == np.bool_
data = """A,B
foo,bar
bar,foo"""
result = self.read_csv(StringIO(data), true_values=['foo'],
false_values=['bar'])
expected = DataFrame({'A': [True, False], 'B': [False, True]})
tm.assert_frame_equal(result, expected)
def test_int_conversion(self):
data = """A,B
1.0,1
2.0,2
3.0,3
"""
data = self.read_csv(StringIO(data))
assert data['A'].dtype == np.float64
assert data['B'].dtype == np.int64
def test_read_nrows(self):
expected = self.read_csv(StringIO(self.data1))[:3]
df = self.read_csv(StringIO(self.data1), nrows=3)
tm.assert_frame_equal(df, expected)
# see gh-10476
df = self.read_csv( | StringIO(self.data1) | pandas.compat.StringIO |
#!/usr/bin/python3
import numpy, random, keras, pandas, sklearn
#Import data
data = | pandas.read_csv('fruits.csv') | pandas.read_csv |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
""" Fast geotagging of big data (with a progress bar!)
See README.md or geotag.py --help for more information.
"""
import dataclasses
import typing
import geopandas
import pandas
import shapely.geometry
import rtree
import tqdm
tqdm.tqdm.pandas()
@dataclasses.dataclass
class GeotagInput:
input_file: str
input_column: str
output_column: str
class Geotagger:
""" Class encapsulating fast lookups of unique identifiers from spatial
data using an R-tree index.
Attributes:
id_strs: A dict that maps the integer representation of each unique ID
to its original string representation - rtree only supports
integer keys, so this allows us to get back things like leading
zeroes that become lost in the type conversion.
shapes: A dict that maps the integer representation of each unique ID
to its corresponding shape.
index: The R-tree index.
"""
def __init__(self,
gdf: geopandas.geodataframe.GeoDataFrame,
id_column: str = "GEOID",
verbose: bool = False
) -> None:
""" Initialize Geotagger object.
Args:
gdf: A GeoDataFrame containing polygons to use for geotagging.
id_column: The column of the GeoDataFrame (e.g. field of a
shapefile) to pull unique IDs from (must be convertable to int).
verbose: If True, print what is being done.
"""
self.id_strs = {int(id_): id_ for id_ in gdf[id_column]}
self.shapes = gdf.set_index(id_column)["geometry"].to_dict()
self.index = rtree.index.Index()
iterable = self.shapes.items()
if verbose:
iterable = tqdm.tqdm(
self.shapes.items(),
"Creating rtree index",
unit=" indexed"
)
for id_, shape in iterable:
self.index.insert(int(id_), shape.bounds)
    def lookup(self, x: float, y: float) -> typing.Optional[str]:
""" Look up a coordinate pair's unique ID.
Args:
x: The longitude, as a float.
y: The latitude, as a float.
Returns:
The unique ID, if any.
"""
results = list(self.index.intersection((x, y, x, y)))
# single result: return it
if len(results) == 1:
return self.id_strs[results[0]]
# multiple results: check which polygon contains the point
else:
point = shapely.geometry.Point(x, y)
for id_ in results:
id_str = self.id_strs[id_]
shape = self.shapes[id_str]
if shape.contains(point):
return id_str
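# Usage sketch (illustrative; not part of the original script). The shapefile
# name and "GEOID" field below are hypothetical placeholders:
#
#   gdf = geopandas.read_file("tracts.shp")
#   tagger = Geotagger(gdf, id_column="GEOID", verbose=True)
#   geoid = tagger.lookup(-111.0, 32.2)  # (longitude, latitude)
#   print(geoid)  # the matching unique ID, or None if no polygon contains it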
def parse_geotag_input(geotag_input: str) -> GeotagInput:
""" Parse a geotag operation instructions string.
Instruction strings should have the following format:
input_file$input_column>output_column
Args:
geotag_input: A string containing instructions for a geotag operation.
Returns: A GeotagInput containing the parsed geotag_input.
"""
(input_file, other_fields) = geotag_input.split("$")
(input_column, output_column) = other_fields.split(">")
return GeotagInput(
input_file.strip(), input_column.strip(), output_column.strip()
)
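# Example (hypothetical values): parse_geotag_input("tracts.shp$GEOID>geoid_tract")
# yields GeotagInput(input_file="tracts.shp", input_column="GEOID",
# output_column="geoid_tract"); surrounding whitespace in each part is stripped.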
def dummy_function(*_, **__) -> None:
""" A function that does nothing. """
pass
if __name__ == "__main__":
import argparse
import glob
import copy
parser = argparse.ArgumentParser()
parser.add_argument(
"geotag", nargs="+",
help="A list of geotag operation instructions. These should be in the format \"input_file$input_column>output_column\". This is passed directly into geopandas.read_file(). To directly read compressed shapefile archives, use \"zip://path/to/shapefile.zip\". NOTE: Be careful about bash! Be sure to enclose in single quotes (lack of quotes will register \">\" as an output redirect; double quotes will still register \"$\" as a variable prefix. Globs are also supported, e.g. \"tl_2020*bg.shp$GEOID>geoid_bg\", but all globbed inputs should have the same fields."
)
parser.add_argument(
"-i", "--input", required=True,
help="The path to the input file. This is passed directly into pandas.read_csv()."
)
parser.add_argument(
"-o", "--output", required=True,
help="The path to the output file. This is passed directly into pandas.core.frame.DataFrame.write_csv(). For compression, simply append \".gz\", etc."
)
parser.add_argument(
"-l", "--longitude", required=True, metavar="LONGITUDE_FIELD",
help="The name of the field containing longitude data in the input file."
)
parser.add_argument(
"-L", "--latitude", required=True, metavar="LATITUDE_FIELD",
help="The name of the field containing latitude data in the input file."
)
parser.add_argument(
"-s", "--subset", metavar="SUBSET_COLUMNS",
help="Optional. Mutually exclusive with -r/--rownames-only. A comma-separated list of fields to subset the input file to. This is passed to pandas.read_csv(), so this can be useful for limiting memory usage."
)
parser.add_argument(
"-r", "--rownames-only", action="store_true", default=False,
help="Optional. Mutually exclusive with -s/--subset. Creates a new column containing the data frame rownames and drops all other columns from the input. Rownames will be R-style, starting at 1, rather than pandas-style, starting at 0."
)
parser.add_argument(
"-f", "--force-overwrite", action="store_true", default=False,
help="Optional. Allows overwriting of existing columns in the data frame post-subsetting. Will not allow for overwriting of the longitude or latitude columns."
)
parser.add_argument(
"-v", "--verbose", action="store_true", default=False,
help="Optional. Causes geotag.py to print out everything it is doing (otherwise the script will run without any output)."
)
args = parser.parse_args()
# verbosity settings
display = print
if not args.verbose:
display = dummy_function
# input validation
geotag_inputs = []
for geotag_input in args.geotag:
try:
geotag_inputs.append(parse_geotag_input(geotag_input))
        except Exception:
raise Exception("could not parse geotag instructions {}".format(geotag_input))
original_subset_columns = None
subset_columns = None
if args.subset:
original_subset_columns = args.subset.split(",")
subset_columns = copy.copy(original_subset_columns)
# need to force inclusion of longitude and latitude columns; remove later
if args.longitude not in subset_columns:
display(
"Forcing inclusion of \"{}\" pandas.read_csv (will remove later)"
.format(args.longitude)
)
subset_columns.append(args.longitude)
if args.latitude not in subset_columns:
display(
"Forcing inclusion of \"{}\" in pandas.read_csv (will remove later)"
.format(args.latitude)
)
subset_columns.append(args.latitude)
if args.subset and args.rownames_only:
raise Exception("-s/--subset and -r/--rownames-only are mutually-exclusive")
# check for duplicate output columns
output_columns = [
geotag_input.output_column
for geotag_input in geotag_inputs
]
if len(output_columns) != len(set(output_columns)):
for column in set(output_columns):
# inefficient, but probably not an issue due to small size
output_columns.remove(column)
raise Exception(
"The following output columns are duplicated: {}"
.format(", ".join(output_columns))
)
if args.longitude in output_columns:
raise Exception(
"Longitude column \"{}\" overlaps with an output column"
.format(args.longitude)
)
if args.latitude in output_columns:
raise Exception(
"Latitude column \"{}\" overlaps with an output column"
.format(args.latitude)
)
# read input
if args.subset:
display(
"Reading input file (subsetting to {} as per -s/--subset): {}"
.format(subset_columns, args.input)
)
df = pandas.read_csv(args.input, usecols=subset_columns)
elif args.rownames_only:
required_columns = [args.longitude, args.latitude]
display(
"Reading input file (subsetting to {} as per -r/--rownames-only): {}"
.format(required_columns, args.input)
)
df = pandas.read_csv(args.input, usecols=required_columns)
else:
display("Reading input file: {}".format(args.input))
df = pandas.read_csv(args.input)
# drop null coordinates
n_original_rows = len(df)
df = df.dropna(subset=[args.longitude, args.latitude])
n_dropped_rows = n_original_rows - len(df)
display(
"Dropped {}/{} columns with missing coordinates ({:0.2f}%)".format(
n_dropped_rows, n_original_rows, n_dropped_rows/n_original_rows*100
)
)
# generate rownames
if args.rownames_only:
display("Generating rownames")
df["rowname"] = pandas.Series(df.index).apply(lambda rowname: str(rowname + 1)).astype(str)
# check for overwriting of existing columns
overwritten = set(df.columns).intersection(set(output_columns))
if (len(overwritten) > 0) and (not args.force_overwrite):
raise Exception(
"Refusing to overwrite the following input columns (use -f to force): {}"
.format(list(overwritten))
)
for i, geotag_input in enumerate(geotag_inputs):
display(
"\n({}/{}) Operation: {}${}>{}".format(
i+1, len(geotag_inputs),
geotag_input.input_file,
geotag_input.input_column,
geotag_input.output_column
)
)
if "*" in geotag_input.input_file:
display("Detected glob pattern (\"*\"); reading multiple inputs")
paths = glob.glob(geotag_input.input_file)
parts = []
for i, input_file in enumerate(paths):
display(
"Reading part {}/{}: {}".format(i + 1, len(paths), input_file)
)
parts.append(geopandas.read_file(input_file))
display("Merging {} GeoDataFrames".format(len(parts)))
gdf = geopandas.GeoDataFrame( | pandas.concat(parts) | pandas.concat |
import numpy as np
import pandas as pd
from numpy import nan
from pvlib import modelchain, pvsystem
from pvlib.modelchain import ModelChain
from pvlib.pvsystem import PVSystem
from pvlib.tracking import SingleAxisTracker
from pvlib.location import Location
from pandas.util.testing import assert_series_equal, assert_frame_equal
import pytest
from test_pvsystem import sam_data
from conftest import requires_scipy
@pytest.fixture
def system(sam_data):
modules = sam_data['sandiamod']
module_parameters = modules['Canadian_Solar_CS5P_220M___2009_'].copy()
inverters = sam_data['cecinverter']
inverter = inverters['ABB__MICRO_0_25_I_OUTD_US_208_208V__CEC_2014_'].copy()
system = PVSystem(module_parameters=module_parameters,
inverter_parameters=inverter)
return system
@pytest.fixture
def cec_dc_snl_ac_system(sam_data):
modules = sam_data['cecmod']
module_parameters = modules['Canadian_Solar_CS5P_220M'].copy()
module_parameters['b'] = 0.05
module_parameters['EgRef'] = 1.121
module_parameters['dEgdT'] = -0.0002677
inverters = sam_data['cecinverter']
inverter = inverters['ABB__MICRO_0_25_I_OUTD_US_208_208V__CEC_2014_'].copy()
system = PVSystem(module_parameters=module_parameters,
inverter_parameters=inverter)
return system
@pytest.fixture
def pvwatts_dc_snl_ac_system(sam_data):
module_parameters = {'pdc0': 220, 'gamma_pdc': -0.003}
inverters = sam_data['cecinverter']
inverter = inverters['ABB__MICRO_0_25_I_OUTD_US_208_208V__CEC_2014_'].copy()
system = PVSystem(module_parameters=module_parameters,
inverter_parameters=inverter)
return system
@pytest.fixture
def pvwatts_dc_pvwatts_ac_system(sam_data):
module_parameters = {'pdc0': 220, 'gamma_pdc': -0.003}
inverter_parameters = {'eta_inv_nom': 0.95}
system = PVSystem(module_parameters=module_parameters,
inverter_parameters=inverter_parameters)
return system
@pytest.fixture()
def location():
return Location(32.2, -111, altitude=700)
def test_ModelChain_creation(system, location):
mc = ModelChain(system, location)
def test_orientation_strategy(system, location):
strategies = {}
@pytest.mark.parametrize('strategy,expected', [
(None, (0, 180)), ('None', (0, 180)), ('flat', (0, 180)),
('south_at_latitude_tilt', (32.2, 180))
])
def test_orientation_strategy(strategy, expected, system, location):
mc = ModelChain(system, location, orientation_strategy=strategy)
# the || accounts for the coercion of 'None' to None
assert (mc.orientation_strategy == strategy or
mc.orientation_strategy is None)
assert system.surface_tilt == expected[0]
assert system.surface_azimuth == expected[1]
@requires_scipy
def test_run_model(system, location):
mc = ModelChain(system, location)
times = pd.date_range('20160101 1200-0700', periods=2, freq='6H')
ac = mc.run_model(times).ac
expected = pd.Series(np.array([ 1.82033564e+02, -2.00000000e-02]),
index=times)
assert_series_equal(ac, expected, check_less_precise=2)
def test_run_model_with_irradiance(system, location):
mc = ModelChain(system, location)
times = pd.date_range('20160101 1200-0700', periods=2, freq='6H')
irradiance = pd.DataFrame({'dni':900, 'ghi':600, 'dhi':150},
index=times)
ac = mc.run_model(times, irradiance=irradiance).ac
expected = pd.Series(np.array([ 1.90054749e+02, -2.00000000e-02]),
index=times)
assert_series_equal(ac, expected)
def test_run_model_perez(system, location):
mc = ModelChain(system, location, transposition_model='perez')
times = pd.date_range('20160101 1200-0700', periods=2, freq='6H')
irradiance = pd.DataFrame({'dni':900, 'ghi':600, 'dhi':150},
index=times)
ac = mc.run_model(times, irradiance=irradiance).ac
expected = pd.Series(np.array([ 190.194545796, -2.00000000e-02]),
index=times)
assert_series_equal(ac, expected)
def test_run_model_gueymard_perez(system, location):
mc = ModelChain(system, location, airmass_model='gueymard1993',
transposition_model='perez')
times = pd.date_range('20160101 1200-0700', periods=2, freq='6H')
irradiance = pd.DataFrame({'dni':900, 'ghi':600, 'dhi':150},
index=times)
ac = mc.run_model(times, irradiance=irradiance).ac
expected = pd.Series(np.array([ 190.194760203, -2.00000000e-02]),
index=times)
assert_series_equal(ac, expected)
@requires_scipy
def test_run_model_with_weather(system, location):
mc = ModelChain(system, location)
times = pd.date_range('20160101 1200-0700', periods=2, freq='6H')
weather = pd.DataFrame({'wind_speed':5, 'temp_air':10}, index=times)
ac = mc.run_model(times, weather=weather).ac
expected = pd.Series(np.array([ 1.99952400e+02, -2.00000000e-02]),
index=times)
assert_series_equal(ac, expected, check_less_precise=2)
@requires_scipy
def test_run_model_tracker(system, location):
system = SingleAxisTracker(module_parameters=system.module_parameters,
inverter_parameters=system.inverter_parameters)
mc = ModelChain(system, location)
times = pd.date_range('20160101 1200-0700', periods=2, freq='6H')
ac = mc.run_model(times).ac
expected = pd.Series(np.array([ 121.421719, -2.00000000e-02]),
index=times)
assert_series_equal(ac, expected, check_less_precise=2)
expected = pd.DataFrame(np.
array([[ 54.82513187, 90. , 11.0039221 , 11.0039221 ],
[ nan, 0. , 0. , nan]]),
columns=['aoi', 'surface_azimuth', 'surface_tilt', 'tracker_theta'],
index=times)
assert_frame_equal(mc.tracking, expected, check_less_precise=2)
def poadc(mc):
mc.dc = mc.total_irrad['poa_global'] * 0.2
mc.dc.name = None # assert_series_equal will fail without this
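# poadc is an example of a user-supplied dc_model callable: ModelChain hands
# itself to the function, which assigns mc.dc directly (here simply 20% of the
# plane-of-array irradiance); the parametrized test below runs it alongside the
# built-in string-named models.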
@requires_scipy
@pytest.mark.parametrize('dc_model,expected', [
('sapm', [180.13735116, -2.00000000e-02]),
('singlediode', [179.7178188, -2.00000000e-02]),
('pvwatts', [188.400994862, 0]),
(poadc, [187.361841505, 0]) # user supplied function
])
def test_dc_models(system, cec_dc_snl_ac_system, pvwatts_dc_pvwatts_ac_system,
location, dc_model, expected):
dc_systems = {'sapm': system, 'singlediode': cec_dc_snl_ac_system,
'pvwatts': pvwatts_dc_pvwatts_ac_system,
poadc: pvwatts_dc_pvwatts_ac_system}
system = dc_systems[dc_model]
mc = ModelChain(system, location, dc_model=dc_model,
aoi_model='no_loss', spectral_model='no_loss')
times = pd.date_range('20160101 1200-0700', periods=2, freq='6H')
ac = mc.run_model(times).ac
expected = pd.Series(np.array(expected), index=times)
assert_series_equal(ac, expected, check_less_precise=2)
def acdc(mc):
mc.ac = mc.dc
@requires_scipy
@pytest.mark.parametrize('ac_model,expected', [
('snlinverter', [180.13735116, -2.00000000e-02]),
pytest.mark.xfail(raises=NotImplementedError)
(('adrinverter', [179.7178188, -2.00000000e-02])),
('pvwatts', [188.400994862, 0]),
(acdc, [198.11956073, 0]) # user supplied function
])
def test_ac_models(system, cec_dc_snl_ac_system, pvwatts_dc_pvwatts_ac_system,
location, ac_model, expected):
ac_systems = {'snlinverter': system, 'adrinverter': cec_dc_snl_ac_system,
'pvwatts': pvwatts_dc_pvwatts_ac_system,
acdc: pvwatts_dc_pvwatts_ac_system}
system = ac_systems[ac_model]
mc = ModelChain(system, location, ac_model=ac_model,
aoi_model='no_loss', spectral_model='no_loss')
times = pd.date_range('20160101 1200-0700', periods=2, freq='6H')
ac = mc.run_model(times).ac
expected = pd.Series(np.array(expected), index=times)
assert_series_equal(ac, expected, check_less_precise=2)
def constant_aoi_loss(mc):
mc.aoi_modifier = 0.9
@requires_scipy
@pytest.mark.parametrize('aoi_model,expected', [
('sapm', [181.297862126, -2.00000000e-02]),
('ashrae', [179.371460714, -2.00000000e-02]),
('physical', [179.98844351, -2.00000000e-02]),
('no_loss', [180.13735116, -2.00000000e-02]),
(constant_aoi_loss, [163.800168358, -2e-2])
])
def test_aoi_models(system, location, aoi_model, expected):
mc = ModelChain(system, location, dc_model='sapm',
aoi_model=aoi_model, spectral_model='no_loss')
times = pd.date_range('20160101 1200-0700', periods=2, freq='6H')
ac = mc.run_model(times).ac
expected = pd.Series(np.array(expected), index=times)
assert_series_equal(ac, expected, check_less_precise=2)
def constant_spectral_loss(mc):
mc.spectral_modifier = 0.9
@requires_scipy
@pytest.mark.parametrize('spectral_model,expected', [
('sapm', [180.865917827, -2.00000000e-02]),
pytest.mark.xfail(raises=NotImplementedError)
(('first_solar', [179.371460714, -2.00000000e-02])),
('no_loss', [180.13735116, -2.00000000e-02]),
(constant_spectral_loss, [161.732659674, -2e-2])
])
def test_spectral_models(system, location, spectral_model, expected):
mc = ModelChain(system, location, dc_model='sapm',
aoi_model='no_loss', spectral_model=spectral_model)
times = | pd.date_range('20160101 1200-0700', periods=2, freq='6H') | pandas.date_range |
'''
Created on April 15, 2012
Last update on July 18, 2015
@author: <NAME>
@author: <NAME>
@author: <NAME>
'''
import pandas as pd
class Columns(object):
OPEN='Open'
HIGH='High'
LOW='Low'
CLOSE='Close'
VOLUME='Volume'
# def get(df, col):
# return(df[col])
# df['Close'] => get(df, COL.CLOSE)
# price=COL.CLOSE
indicators=["MA", "EMA", "MOM", "ROC", "ATR", "BBANDS", "PPSR", "STOK", "STO",
"TRIX", "ADX", "MACD", "MassI", "Vortex", "KST", "RSI", "TSI", "ACCDIST",
"Chaikin", "MFI", "OBV", "FORCE", "EOM", "CCI", "COPP", "KELCH", "ULTOSC",
"DONCH", "STDDEV"]
class Settings(object):
join=True
col=Columns()
SETTINGS=Settings()
def out(settings, df, result):
if not settings.join:
return result
else:
df=df.join(result)
return df
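# Example of the join convention implemented by out() (illustrative; "ohlc.csv"
# is a hypothetical file with Open/High/Low/Close/Volume columns):
#
#   df = pd.read_csv("ohlc.csv")
#   SETTINGS.join = True    # default: the indicator is appended to df
#   df = MA(df, 10)         # df gains an 'MA_10' column
#   SETTINGS.join = False   # return only the indicator itself
#   ma10 = MA(df, 10)       # a pd.Series named 'MA_10'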
def MA(df, n, price='Close'):
"""
Moving Average
"""
name='MA_{n}'.format(n=n)
result = pd.Series(pd.rolling_mean(df[price], n), name=name)
return out(SETTINGS, df, result)
def EMA(df, n, price='Close'):
"""
Exponential Moving Average
"""
result=pd.Series(pd.ewma(df[price], span=n, min_periods=n - 1), name='EMA_' + str(n))
return out(SETTINGS, df, result)
def MOM(df, n, price='Close'):
"""
Momentum
"""
result=pd.Series(df[price].diff(n), name='Momentum_' + str(n))
return out(SETTINGS, df, result)
def ROC(df, n, price='Close'):
"""
Rate of Change
"""
M = df[price].diff(n - 1)
N = df[price].shift(n - 1)
result = pd.Series(M / N, name='ROC_' + str(n))
return out(SETTINGS, df, result)
def ATR(df, n):
"""
Average True Range
"""
i = 0
TR_l = [0]
while i < len(df) - 1: # df.index[-1]:
# for i, idx in enumerate(df.index)
# TR=max(df.get_value(i + 1, 'High'), df.get_value(i, 'Close')) - min(df.get_value(i + 1, 'Low'), df.get_value(i, 'Close'))
        TR = max(df['High'].iloc[i + 1], df['Close'].iloc[i]) - min(df['Low'].iloc[i + 1], df['Close'].iloc[i])
TR_l.append(TR)
i = i + 1
TR_s = pd.Series(TR_l)
result = pd.Series(pd.ewma(TR_s, span=n, min_periods=n), name='ATR_' + str(n))
return out(SETTINGS, df, result)
def BBANDS(df, n, price='Close'):
"""
Bollinger Bands
"""
MA = pd.Series(pd.rolling_mean(df[price], n))
MSD = pd.Series(pd.rolling_std(df[price], n))
b1 = 4 * MSD / MA
B1 = pd.Series(b1, name='BollingerB_' + str(n))
b2 = (df[price] - MA + 2 * MSD) / (4 * MSD)
B2 = pd.Series(b2, name='Bollinger%b_' + str(n))
result = pd.DataFrame([B1, B2]).transpose()
return out(SETTINGS, df, result)
def PPSR(df):
"""
Pivot Points, Supports and Resistances
"""
PP = pd.Series((df['High'] + df['Low'] + df['Close']) / 3)
R1 = pd.Series(2 * PP - df['Low'])
S1 = pd.Series(2 * PP - df['High'])
R2 = pd.Series(PP + df['High'] - df['Low'])
S2 = pd.Series(PP - df['High'] + df['Low'])
R3 = pd.Series(df['High'] + 2 * (PP - df['Low']))
S3 = pd.Series(df['Low'] - 2 * (df['High'] - PP))
result = pd.DataFrame([PP, R1, S1, R2, S2, R3, S3]).transpose()
return out(SETTINGS, df, result)
def STOK(df):
"""
Stochastic oscillator %K
"""
result = pd.Series((df['Close'] - df['Low']) / (df['High'] - df['Low']), name='SO%k')
return out(SETTINGS, df, result)
def STO(df, n):
"""
Stochastic oscillator %D
"""
SOk = pd.Series((df['Close'] - df['Low']) / (df['High'] - df['Low']), name='SO%k')
result = pd.Series(pd.ewma(SOk, span=n, min_periods=n - 1), name='SO%d_' + str(n))
return out(SETTINGS, df, result)
def SMA(df, timeperiod, key='Close'):
result = pd.Series(pd.rolling_mean(df[key], timeperiod, min_periods=timeperiod), name='SMA_' + str(timeperiod))
return out(SETTINGS, df, result)
def TRIX(df, n):
"""
Trix
"""
EX1 = pd.ewma(df['Close'], span=n, min_periods=n - 1)
EX2 = pd.ewma(EX1, span=n, min_periods=n - 1)
EX3 = pd.ewma(EX2, span=n, min_periods=n - 1)
i = 0
ROC_l = [0]
while i + 1 <= len(df) - 1: # df.index[-1]:
ROC = (EX3[i + 1] - EX3[i]) / EX3[i]
ROC_l.append(ROC)
i = i + 1
result = pd.Series(ROC_l, name='Trix_' + str(n))
return out(SETTINGS, df, result)
def ADX(df, n, n_ADX):
"""
Average Directional Movement Index
"""
i = 0
UpI = []
DoI = []
while i + 1 <= len(df) - 1: # df.index[-1]:
UpMove = df.get_value(i + 1, 'High') - df.get_value(i, 'High')
DoMove = df.get_value(i, 'Low') - df.get_value(i + 1, 'Low')
if UpMove > DoMove and UpMove > 0:
UpD = UpMove
else:
UpD = 0
UpI.append(UpD)
if DoMove > UpMove and DoMove > 0:
DoD = DoMove
else:
DoD = 0
DoI.append(DoD)
i = i + 1
i = 0
TR_l = [0]
while i < len(df) - 1: # df.index[-1]:
TR = max(df.get_value(i + 1, 'High'), df.get_value(i, 'Close')) - min(df.get_value(i + 1, 'Low'), df.get_value(i, 'Close'))
TR_l.append(TR)
i = i + 1
TR_s = pd.Series(TR_l)
ATR = pd.Series(pd.ewma(TR_s, span=n, min_periods=n))
UpI = pd.Series(UpI)
DoI = pd.Series(DoI)
PosDI = pd.Series(pd.ewma(UpI, span=n, min_periods=n - 1) / ATR,name='PosDI')
NegDI = pd.Series(pd.ewma(DoI, span=n, min_periods=n - 1) / ATR,name='NegDI')
result = pd.Series(pd.ewma(abs(PosDI - NegDI) / (PosDI + NegDI), span=n_ADX, min_periods=n_ADX - 1), name='ADX_' + str(n) + '_' + str(n_ADX))
    result = pd.concat([df, PosDI, NegDI, result], join='outer', axis=1, ignore_index=True)
    result.columns = ["High", "Low", "Close", "PosDI", "NegDI", "ADX"]
return result
def MACD(df, n_fast, n_slow, price='Close'):
"""
MACD, MACD Signal and MACD difference
"""
EMAfast = pd.Series(pd.ewma(df[price], span=n_fast, min_periods=n_slow - 1))
EMAslow = pd.Series(pd.ewma(df[price], span=n_slow, min_periods=n_slow - 1))
MACD = pd.Series(EMAfast - EMAslow, name='MACD_%d_%d' % (n_fast, n_slow))
MACDsign = pd.Series(pd.ewma(MACD, span=9, min_periods=8), name='MACDsign_%d_%d' % (n_fast, n_slow))
MACDdiff = pd.Series(MACD - MACDsign, name='MACDdiff_%d_%d' % (n_fast, n_slow))
result = pd.DataFrame([MACD, MACDsign, MACDdiff]).transpose()
return out(SETTINGS, df, result)
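# Example (illustrative): with SETTINGS.join left True, MACD(df, 12, 26) appends
# three columns: 'MACD_12_26' (fast EMA minus slow EMA), 'MACDsign_12_26'
# (its 9-period EMA signal line) and 'MACDdiff_12_26' (MACD minus signal).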
def MassI(df):
"""
Mass Index
"""
Range = df['High'] - df['Low']
EX1 = pd.ewma(Range, span=9, min_periods=8)
EX2 = pd.ewma(EX1, span=9, min_periods=8)
Mass = EX1 / EX2
result = pd.Series(pd.rolling_sum(Mass, 25), name='Mass Index')
return out(SETTINGS, df, result)
def Vortex(df, n):
"""
Vortex Indicator
"""
i = 0
TR = [0]
while i < len(df) - 1: # df.index[-1]:
Range = max(df.get_value(i + 1, 'High'), df.get_value(i, 'Close')) - min(df.get_value(i + 1, 'Low'), df.get_value(i, 'Close'))
TR.append(Range)
i = i + 1
i = 0
VM = [0]
while i < len(df) - 1: # df.index[-1]:
Range = abs(df.get_value(i + 1, 'High') - df.get_value(i, 'Low')) - abs(df.get_value(i + 1, 'Low') - df.get_value(i, 'High'))
VM.append(Range)
i = i + 1
result = pd.Series(pd.rolling_sum(pd.Series(VM), n) / pd.rolling_sum(pd.Series(TR), n), name='Vortex_' + str(n))
return out(SETTINGS, df, result)
def KST(df, r1, r2, r3, r4, n1, n2, n3, n4):
"""
KST Oscillator
"""
M = df['Close'].diff(r1 - 1)
N = df['Close'].shift(r1 - 1)
ROC1 = M / N
M = df['Close'].diff(r2 - 1)
N = df['Close'].shift(r2 - 1)
ROC2 = M / N
M = df['Close'].diff(r3 - 1)
N = df['Close'].shift(r3 - 1)
ROC3 = M / N
M = df['Close'].diff(r4 - 1)
N = df['Close'].shift(r4 - 1)
ROC4 = M / N
result = pd.Series(pd.rolling_sum(ROC1, n1) + pd.rolling_sum(ROC2, n2) * 2 + pd.rolling_sum(ROC3, n3) * 3 + pd.rolling_sum(ROC4, n4) * 4, name='KST_' + str(r1) + '_' + str(r2) + '_' + str(r3) + '_' + str(r4) + '_' + str(n1) + '_' + str(n2) + '_' + str(n3) + '_' + str(n4))
return out(SETTINGS, df, result)
def RSI(df, n):
"""
Relative Strength Index
"""
i = 0
UpI = [0]
DoI = [0]
while i + 1 <= len(df) - 1: # df.index[-1]
UpMove = df.iloc[i + 1]['High'] - df.iloc[i]['High']
DoMove = df.iloc[i]['Low'] - df.iloc[i + 1]['Low']
if UpMove > DoMove and UpMove > 0:
UpD = UpMove
else:
UpD = 0
UpI.append(UpD)
if DoMove > UpMove and DoMove > 0:
DoD = DoMove
else:
DoD = 0
DoI.append(DoD)
i = i + 1
UpI = pd.Series(UpI)
DoI = pd.Series(DoI)
PosDI = pd.Series(pd.ewma(UpI, span=n, min_periods=n - 1))
NegDI = pd.Series(pd.ewma(DoI, span=n, min_periods=n - 1))
result = pd.Series(PosDI / (PosDI + NegDI), name='RSI_' + str(n))
return out(SETTINGS, df, result)
def TSI(df, r, s):
"""
True Strength Index
"""
M = pd.Series(df['Close'].diff(1))
aM = abs(M)
EMA1 = pd.Series(pd.ewma(M, span=r, min_periods=r - 1))
aEMA1 = pd.Series(pd.ewma(aM, span=r, min_periods=r - 1))
EMA2 = pd.Series(pd.ewma(EMA1, span=s, min_periods=s - 1))
aEMA2 = pd.Series(pd.ewma(aEMA1, span=s, min_periods=s - 1))
result = pd.Series(EMA2 / aEMA2, name='TSI_' + str(r) + '_' + str(s))
return out(SETTINGS, df, result)
def ACCDIST(df, n):
"""
Accumulation/Distribution
"""
ad = (2 * df['Close'] - df['High'] - df['Low']) / (df['High'] - df['Low']) * df['Volume']
M = ad.diff(n - 1)
N = ad.shift(n - 1)
ROC = M / N
result = pd.Series(ROC, name='Acc/Dist_ROC_' + str(n))
return out(SETTINGS, df, result)
def Chaikin(df):
"""
Chaikin Oscillator
"""
ad = (2 * df['Close'] - df['High'] - df['Low']) / (df['High'] - df['Low']) * df['Volume']
result = pd.Series(pd.ewma(ad, span=3, min_periods=2) - pd.ewma(ad, span=10, min_periods=9), name='Chaikin')
return out(SETTINGS, df, result)
def MFI(df, n):
"""
Money Flow Index and Ratio
"""
PP = (df['High'] + df['Low'] + df['Close']) / 3
i = 0
PosMF = [0]
while i < len(df) - 1: # df.index[-1]:
if PP[i + 1] > PP[i]:
PosMF.append(PP[i + 1] * df.get_value(i + 1, 'Volume'))
else:
PosMF.append(0)
i=i + 1
PosMF = pd.Series(PosMF)
TotMF = PP * df['Volume']
MFR = pd.Series(PosMF / TotMF)
result = pd.Series(pd.rolling_mean(MFR, n), name='MFI_' + str(n))
return out(SETTINGS, df, result)
def OBV(df, n):
"""
On-balance Volume
"""
i = 0
OBV = [0]
while i < len(df) - 1: # df.index[-1]:
if df.get_value(i + 1, 'Close') - df.get_value(i, 'Close') > 0:
OBV.append(df.get_value(i + 1, 'Volume'))
if df.get_value(i + 1, 'Close') - df.get_value(i, 'Close') == 0:
OBV.append(0)
if df.get_value(i + 1, 'Close') - df.get_value(i, 'Close') < 0:
OBV.append(-df.get_value(i + 1, 'Volume'))
i = i + 1
OBV = pd.Series(OBV)
result = pd.Series(pd.rolling_mean(OBV, n), name='OBV_' + str(n))
return out(SETTINGS, df, result)
def FORCE(df, n):
"""
Force Index
"""
result = pd.Series(df['Close'].diff(n) * df['Volume'].diff(n), name='Force_' + str(n))
return out(SETTINGS, df, result)
def EOM(df, n):
"""
Ease of Movement
"""
EoM = (df['High'].diff(1) + df['Low'].diff(1)) * (df['High'] - df['Low']) / (2 * df['Volume'])
result = pd.Series(pd.rolling_mean(EoM, n), name='EoM_' + str(n))
return out(SETTINGS, df, result)
def CCI(df, n):
"""
Commodity Channel Index
"""
PP = (df['High'] + df['Low'] + df['Close']) / 3
result = pd.Series((PP - | pd.rolling_mean(PP, n) | pandas.rolling_mean |
import pytest
from pandas._libs.tslibs.frequencies import INVALID_FREQ_ERR_MSG, _period_code_map
from pandas.errors import OutOfBoundsDatetime
from pandas import Period, Timestamp, offsets
class TestFreqConversion:
"""Test frequency conversion of date objects"""
@pytest.mark.parametrize("freq", ["A", "Q", "M", "W", "B", "D"])
def test_asfreq_near_zero(self, freq):
# GH#19643, GH#19650
per = Period("0001-01-01", freq=freq)
tup1 = (per.year, per.hour, per.day)
prev = per - 1
assert prev.ordinal == per.ordinal - 1
tup2 = (prev.year, prev.month, prev.day)
assert tup2 < tup1
def test_asfreq_near_zero_weekly(self):
# GH#19834
per1 = Period("0001-01-01", "D") + 6
per2 = Period("0001-01-01", "D") - 6
week1 = per1.asfreq("W")
week2 = per2.asfreq("W")
assert week1 != week2
assert week1.asfreq("D", "E") >= per1
assert week2.asfreq("D", "S") <= per2
def test_to_timestamp_out_of_bounds(self):
# GH#19643, used to incorrectly give Timestamp in 1754
per = Period("0001-01-01", freq="B")
msg = "Out of bounds nanosecond timestamp"
with pytest.raises(OutOfBoundsDatetime, match=msg):
per.to_timestamp()
def test_asfreq_corner(self):
val = Period(freq="A", year=2007)
result1 = val.asfreq("5t")
result2 = val.asfreq("t")
expected = Period("2007-12-31 23:59", freq="t")
assert result1.ordinal == expected.ordinal
assert result1.freqstr == "5T"
assert result2.ordinal == expected.ordinal
assert result2.freqstr == "T"
def test_conv_annual(self):
# frequency conversion tests: from Annual Frequency
ival_A = Period(freq="A", year=2007)
ival_AJAN = Period(freq="A-JAN", year=2007)
ival_AJUN = Period(freq="A-JUN", year=2007)
ival_ANOV = Period(freq="A-NOV", year=2007)
ival_A_to_Q_start = Period(freq="Q", year=2007, quarter=1)
ival_A_to_Q_end = Period(freq="Q", year=2007, quarter=4)
ival_A_to_M_start = Period(freq="M", year=2007, month=1)
ival_A_to_M_end = Period(freq="M", year=2007, month=12)
ival_A_to_W_start = Period(freq="W", year=2007, month=1, day=1)
ival_A_to_W_end = Period(freq="W", year=2007, month=12, day=31)
ival_A_to_B_start = Period(freq="B", year=2007, month=1, day=1)
ival_A_to_B_end = Period(freq="B", year=2007, month=12, day=31)
ival_A_to_D_start = Period(freq="D", year=2007, month=1, day=1)
ival_A_to_D_end = Period(freq="D", year=2007, month=12, day=31)
ival_A_to_H_start = Period(freq="H", year=2007, month=1, day=1, hour=0)
ival_A_to_H_end = Period(freq="H", year=2007, month=12, day=31, hour=23)
ival_A_to_T_start = Period(
freq="Min", year=2007, month=1, day=1, hour=0, minute=0
)
ival_A_to_T_end = Period(
freq="Min", year=2007, month=12, day=31, hour=23, minute=59
)
ival_A_to_S_start = Period(
freq="S", year=2007, month=1, day=1, hour=0, minute=0, second=0
)
ival_A_to_S_end = Period(
freq="S", year=2007, month=12, day=31, hour=23, minute=59, second=59
)
ival_AJAN_to_D_end = Period(freq="D", year=2007, month=1, day=31)
ival_AJAN_to_D_start = Period(freq="D", year=2006, month=2, day=1)
ival_AJUN_to_D_end = Period(freq="D", year=2007, month=6, day=30)
ival_AJUN_to_D_start = Period(freq="D", year=2006, month=7, day=1)
ival_ANOV_to_D_end = Period(freq="D", year=2007, month=11, day=30)
ival_ANOV_to_D_start = Period(freq="D", year=2006, month=12, day=1)
assert ival_A.asfreq("Q", "S") == ival_A_to_Q_start
assert ival_A.asfreq("Q", "e") == ival_A_to_Q_end
assert ival_A.asfreq("M", "s") == ival_A_to_M_start
assert ival_A.asfreq("M", "E") == ival_A_to_M_end
assert ival_A.asfreq("W", "S") == ival_A_to_W_start
assert ival_A.asfreq("W", "E") == ival_A_to_W_end
assert ival_A.asfreq("B", "S") == ival_A_to_B_start
assert ival_A.asfreq("B", "E") == ival_A_to_B_end
assert ival_A.asfreq("D", "S") == ival_A_to_D_start
assert ival_A.asfreq("D", "E") == ival_A_to_D_end
assert ival_A.asfreq("H", "S") == ival_A_to_H_start
assert ival_A.asfreq("H", "E") == ival_A_to_H_end
assert ival_A.asfreq("min", "S") == ival_A_to_T_start
assert ival_A.asfreq("min", "E") == ival_A_to_T_end
assert ival_A.asfreq("T", "S") == ival_A_to_T_start
assert ival_A.asfreq("T", "E") == ival_A_to_T_end
assert ival_A.asfreq("S", "S") == ival_A_to_S_start
assert ival_A.asfreq("S", "E") == ival_A_to_S_end
assert ival_AJAN.asfreq("D", "S") == ival_AJAN_to_D_start
assert ival_AJAN.asfreq("D", "E") == ival_AJAN_to_D_end
assert ival_AJUN.asfreq("D", "S") == ival_AJUN_to_D_start
assert ival_AJUN.asfreq("D", "E") == ival_AJUN_to_D_end
assert ival_ANOV.asfreq("D", "S") == ival_ANOV_to_D_start
assert ival_ANOV.asfreq("D", "E") == ival_ANOV_to_D_end
assert ival_A.asfreq("A") == ival_A
def test_conv_quarterly(self):
# frequency conversion tests: from Quarterly Frequency
ival_Q = Period(freq="Q", year=2007, quarter=1)
ival_Q_end_of_year = Period(freq="Q", year=2007, quarter=4)
ival_QEJAN = Period(freq="Q-JAN", year=2007, quarter=1)
ival_QEJUN = Period(freq="Q-JUN", year=2007, quarter=1)
ival_Q_to_A = Period(freq="A", year=2007)
ival_Q_to_M_start = Period(freq="M", year=2007, month=1)
ival_Q_to_M_end = Period(freq="M", year=2007, month=3)
ival_Q_to_W_start = Period(freq="W", year=2007, month=1, day=1)
ival_Q_to_W_end = Period(freq="W", year=2007, month=3, day=31)
ival_Q_to_B_start = Period(freq="B", year=2007, month=1, day=1)
ival_Q_to_B_end = Period(freq="B", year=2007, month=3, day=30)
ival_Q_to_D_start = Period(freq="D", year=2007, month=1, day=1)
ival_Q_to_D_end = Period(freq="D", year=2007, month=3, day=31)
ival_Q_to_H_start = Period(freq="H", year=2007, month=1, day=1, hour=0)
ival_Q_to_H_end = Period(freq="H", year=2007, month=3, day=31, hour=23)
ival_Q_to_T_start = Period(
freq="Min", year=2007, month=1, day=1, hour=0, minute=0
)
ival_Q_to_T_end = Period(
freq="Min", year=2007, month=3, day=31, hour=23, minute=59
)
ival_Q_to_S_start = Period(
freq="S", year=2007, month=1, day=1, hour=0, minute=0, second=0
)
ival_Q_to_S_end = Period(
freq="S", year=2007, month=3, day=31, hour=23, minute=59, second=59
)
ival_QEJAN_to_D_start = Period(freq="D", year=2006, month=2, day=1)
ival_QEJAN_to_D_end = Period(freq="D", year=2006, month=4, day=30)
ival_QEJUN_to_D_start = Period(freq="D", year=2006, month=7, day=1)
ival_QEJUN_to_D_end = Period(freq="D", year=2006, month=9, day=30)
assert ival_Q.asfreq("A") == ival_Q_to_A
assert ival_Q_end_of_year.asfreq("A") == ival_Q_to_A
assert ival_Q.asfreq("M", "S") == ival_Q_to_M_start
assert ival_Q.asfreq("M", "E") == ival_Q_to_M_end
assert ival_Q.asfreq("W", "S") == ival_Q_to_W_start
assert ival_Q.asfreq("W", "E") == ival_Q_to_W_end
assert ival_Q.asfreq("B", "S") == ival_Q_to_B_start
assert ival_Q.asfreq("B", "E") == ival_Q_to_B_end
assert ival_Q.asfreq("D", "S") == ival_Q_to_D_start
assert ival_Q.asfreq("D", "E") == ival_Q_to_D_end
assert ival_Q.asfreq("H", "S") == ival_Q_to_H_start
assert ival_Q.asfreq("H", "E") == ival_Q_to_H_end
assert ival_Q.asfreq("Min", "S") == ival_Q_to_T_start
assert ival_Q.asfreq("Min", "E") == ival_Q_to_T_end
assert ival_Q.asfreq("S", "S") == ival_Q_to_S_start
assert ival_Q.asfreq("S", "E") == ival_Q_to_S_end
assert ival_QEJAN.asfreq("D", "S") == ival_QEJAN_to_D_start
assert ival_QEJAN.asfreq("D", "E") == ival_QEJAN_to_D_end
assert ival_QEJUN.asfreq("D", "S") == ival_QEJUN_to_D_start
assert ival_QEJUN.asfreq("D", "E") == ival_QEJUN_to_D_end
assert ival_Q.asfreq("Q") == ival_Q
def test_conv_monthly(self):
# frequency conversion tests: from Monthly Frequency
ival_M = Period(freq="M", year=2007, month=1)
ival_M_end_of_year = Period(freq="M", year=2007, month=12)
ival_M_end_of_quarter = Period(freq="M", year=2007, month=3)
ival_M_to_A = Period(freq="A", year=2007)
ival_M_to_Q = Period(freq="Q", year=2007, quarter=1)
ival_M_to_W_start = Period(freq="W", year=2007, month=1, day=1)
ival_M_to_W_end = Period(freq="W", year=2007, month=1, day=31)
ival_M_to_B_start = Period(freq="B", year=2007, month=1, day=1)
ival_M_to_B_end = Period(freq="B", year=2007, month=1, day=31)
ival_M_to_D_start = Period(freq="D", year=2007, month=1, day=1)
ival_M_to_D_end = Period(freq="D", year=2007, month=1, day=31)
ival_M_to_H_start = Period(freq="H", year=2007, month=1, day=1, hour=0)
ival_M_to_H_end = Period(freq="H", year=2007, month=1, day=31, hour=23)
ival_M_to_T_start = Period(
freq="Min", year=2007, month=1, day=1, hour=0, minute=0
)
ival_M_to_T_end = Period(
freq="Min", year=2007, month=1, day=31, hour=23, minute=59
)
ival_M_to_S_start = Period(
freq="S", year=2007, month=1, day=1, hour=0, minute=0, second=0
)
ival_M_to_S_end = Period(
freq="S", year=2007, month=1, day=31, hour=23, minute=59, second=59
)
assert ival_M.asfreq("A") == ival_M_to_A
assert ival_M_end_of_year.asfreq("A") == ival_M_to_A
assert ival_M.asfreq("Q") == ival_M_to_Q
assert ival_M_end_of_quarter.asfreq("Q") == ival_M_to_Q
assert ival_M.asfreq("W", "S") == ival_M_to_W_start
assert ival_M.asfreq("W", "E") == ival_M_to_W_end
assert ival_M.asfreq("B", "S") == ival_M_to_B_start
assert ival_M.asfreq("B", "E") == ival_M_to_B_end
assert ival_M.asfreq("D", "S") == ival_M_to_D_start
assert ival_M.asfreq("D", "E") == ival_M_to_D_end
assert ival_M.asfreq("H", "S") == ival_M_to_H_start
assert ival_M.asfreq("H", "E") == ival_M_to_H_end
assert ival_M.asfreq("Min", "S") == ival_M_to_T_start
assert ival_M.asfreq("Min", "E") == ival_M_to_T_end
assert ival_M.asfreq("S", "S") == ival_M_to_S_start
assert ival_M.asfreq("S", "E") == ival_M_to_S_end
assert ival_M.asfreq("M") == ival_M
def test_conv_weekly(self):
# frequency conversion tests: from Weekly Frequency
ival_W = Period(freq="W", year=2007, month=1, day=1)
ival_WSUN = Period(freq="W", year=2007, month=1, day=7)
ival_WSAT = Period(freq="W-SAT", year=2007, month=1, day=6)
ival_WFRI = Period(freq="W-FRI", year=2007, month=1, day=5)
ival_WTHU = Period(freq="W-THU", year=2007, month=1, day=4)
ival_WWED = Period(freq="W-WED", year=2007, month=1, day=3)
ival_WTUE = Period(freq="W-TUE", year=2007, month=1, day=2)
ival_WMON = Period(freq="W-MON", year=2007, month=1, day=1)
ival_WSUN_to_D_start = Period(freq="D", year=2007, month=1, day=1)
ival_WSUN_to_D_end = Period(freq="D", year=2007, month=1, day=7)
ival_WSAT_to_D_start = Period(freq="D", year=2006, month=12, day=31)
ival_WSAT_to_D_end = Period(freq="D", year=2007, month=1, day=6)
ival_WFRI_to_D_start = Period(freq="D", year=2006, month=12, day=30)
ival_WFRI_to_D_end = Period(freq="D", year=2007, month=1, day=5)
ival_WTHU_to_D_start = Period(freq="D", year=2006, month=12, day=29)
ival_WTHU_to_D_end = Period(freq="D", year=2007, month=1, day=4)
ival_WWED_to_D_start = Period(freq="D", year=2006, month=12, day=28)
ival_WWED_to_D_end = Period(freq="D", year=2007, month=1, day=3)
ival_WTUE_to_D_start = Period(freq="D", year=2006, month=12, day=27)
ival_WTUE_to_D_end = Period(freq="D", year=2007, month=1, day=2)
ival_WMON_to_D_start = Period(freq="D", year=2006, month=12, day=26)
ival_WMON_to_D_end = Period(freq="D", year=2007, month=1, day=1)
ival_W_end_of_year = Period(freq="W", year=2007, month=12, day=31)
ival_W_end_of_quarter = Period(freq="W", year=2007, month=3, day=31)
ival_W_end_of_month = Period(freq="W", year=2007, month=1, day=31)
ival_W_to_A = Period(freq="A", year=2007)
ival_W_to_Q = Period(freq="Q", year=2007, quarter=1)
ival_W_to_M = Period(freq="M", year=2007, month=1)
if Period(freq="D", year=2007, month=12, day=31).weekday == 6:
ival_W_to_A_end_of_year = Period(freq="A", year=2007)
else:
ival_W_to_A_end_of_year = Period(freq="A", year=2008)
if Period(freq="D", year=2007, month=3, day=31).weekday == 6:
ival_W_to_Q_end_of_quarter = Period(freq="Q", year=2007, quarter=1)
else:
ival_W_to_Q_end_of_quarter = Period(freq="Q", year=2007, quarter=2)
if Period(freq="D", year=2007, month=1, day=31).weekday == 6:
ival_W_to_M_end_of_month = Period(freq="M", year=2007, month=1)
else:
ival_W_to_M_end_of_month = Period(freq="M", year=2007, month=2)
ival_W_to_B_start = Period(freq="B", year=2007, month=1, day=1)
ival_W_to_B_end = Period(freq="B", year=2007, month=1, day=5)
ival_W_to_D_start = Period(freq="D", year=2007, month=1, day=1)
ival_W_to_D_end = Period(freq="D", year=2007, month=1, day=7)
ival_W_to_H_start = Period(freq="H", year=2007, month=1, day=1, hour=0)
ival_W_to_H_end = Period(freq="H", year=2007, month=1, day=7, hour=23)
ival_W_to_T_start = Period(
freq="Min", year=2007, month=1, day=1, hour=0, minute=0
)
ival_W_to_T_end = Period(
freq="Min", year=2007, month=1, day=7, hour=23, minute=59
)
ival_W_to_S_start = Period(
freq="S", year=2007, month=1, day=1, hour=0, minute=0, second=0
)
ival_W_to_S_end = Period(
freq="S", year=2007, month=1, day=7, hour=23, minute=59, second=59
)
assert ival_W.asfreq("A") == ival_W_to_A
assert ival_W_end_of_year.asfreq("A") == ival_W_to_A_end_of_year
assert ival_W.asfreq("Q") == ival_W_to_Q
assert ival_W_end_of_quarter.asfreq("Q") == ival_W_to_Q_end_of_quarter
assert ival_W.asfreq("M") == ival_W_to_M
assert ival_W_end_of_month.asfreq("M") == ival_W_to_M_end_of_month
assert ival_W.asfreq("B", "S") == ival_W_to_B_start
assert ival_W.asfreq("B", "E") == ival_W_to_B_end
assert ival_W.asfreq("D", "S") == ival_W_to_D_start
assert ival_W.asfreq("D", "E") == ival_W_to_D_end
assert ival_WSUN.asfreq("D", "S") == ival_WSUN_to_D_start
assert ival_WSUN.asfreq("D", "E") == ival_WSUN_to_D_end
assert ival_WSAT.asfreq("D", "S") == ival_WSAT_to_D_start
assert ival_WSAT.asfreq("D", "E") == ival_WSAT_to_D_end
assert ival_WFRI.asfreq("D", "S") == ival_WFRI_to_D_start
assert ival_WFRI.asfreq("D", "E") == ival_WFRI_to_D_end
assert ival_WTHU.asfreq("D", "S") == ival_WTHU_to_D_start
assert ival_WTHU.asfreq("D", "E") == ival_WTHU_to_D_end
assert ival_WWED.asfreq("D", "S") == ival_WWED_to_D_start
assert ival_WWED.asfreq("D", "E") == ival_WWED_to_D_end
assert ival_WTUE.asfreq("D", "S") == ival_WTUE_to_D_start
assert ival_WTUE.asfreq("D", "E") == ival_WTUE_to_D_end
assert ival_WMON.asfreq("D", "S") == ival_WMON_to_D_start
assert ival_WMON.asfreq("D", "E") == ival_WMON_to_D_end
assert ival_W.asfreq("H", "S") == ival_W_to_H_start
assert ival_W.asfreq("H", "E") == ival_W_to_H_end
assert ival_W.asfreq("Min", "S") == ival_W_to_T_start
assert ival_W.asfreq("Min", "E") == ival_W_to_T_end
assert ival_W.asfreq("S", "S") == ival_W_to_S_start
assert ival_W.asfreq("S", "E") == ival_W_to_S_end
assert ival_W.asfreq("W") == ival_W
msg = INVALID_FREQ_ERR_MSG
with pytest.raises(ValueError, match=msg):
ival_W.asfreq("WK")
def test_conv_weekly_legacy(self):
# frequency conversion tests: from Weekly Frequency
msg = INVALID_FREQ_ERR_MSG
with pytest.raises(ValueError, match=msg):
Period(freq="WK", year=2007, month=1, day=1)
with pytest.raises(ValueError, match=msg):
Period(freq="WK-SAT", year=2007, month=1, day=6)
with pytest.raises(ValueError, match=msg):
Period(freq="WK-FRI", year=2007, month=1, day=5)
with pytest.raises(ValueError, match=msg):
Period(freq="WK-THU", year=2007, month=1, day=4)
with pytest.raises(ValueError, match=msg):
Period(freq="WK-WED", year=2007, month=1, day=3)
with pytest.raises(ValueError, match=msg):
Period(freq="WK-TUE", year=2007, month=1, day=2)
with pytest.raises(ValueError, match=msg):
Period(freq="WK-MON", year=2007, month=1, day=1)
def test_conv_business(self):
# frequency conversion tests: from Business Frequency"
ival_B = Period(freq="B", year=2007, month=1, day=1)
ival_B_end_of_year = Period(freq="B", year=2007, month=12, day=31)
ival_B_end_of_quarter = Period(freq="B", year=2007, month=3, day=30)
ival_B_end_of_month = Period(freq="B", year=2007, month=1, day=31)
ival_B_end_of_week = Period(freq="B", year=2007, month=1, day=5)
ival_B_to_A = Period(freq="A", year=2007)
ival_B_to_Q = Period(freq="Q", year=2007, quarter=1)
ival_B_to_M = Period(freq="M", year=2007, month=1)
ival_B_to_W = Period(freq="W", year=2007, month=1, day=7)
ival_B_to_D = Period(freq="D", year=2007, month=1, day=1)
ival_B_to_H_start = Period(freq="H", year=2007, month=1, day=1, hour=0)
ival_B_to_H_end = | Period(freq="H", year=2007, month=1, day=1, hour=23) | pandas.Period |
""" Compare the convergence rate for the synthesis/analysis 1d TV-l1 problem.
"""
# Authors: <NAME> <<EMAIL>>
# Authors: <NAME> <<EMAIL>>
# License: BSD (3-clause)
import os
import time
import pathlib
import numpy as np
import pandas as pd
from datetime import datetime
import matplotlib.pyplot as plt
from joblib import Memory, Parallel, delayed
from carpet.datasets import synthetic_1d_dataset
from carpet.metrics import compute_prox_tv_errors
from carpet.loss_gradient import analysis_primal_obj, tv_reg
from carpet import ListaTV, LpgdTautString, CoupledIstaLASSO # noqa: F401
from carpet.iterative_solver import IstaAnalysis, IstaSynthesis
OUTPUT_DIR = pathlib.Path('outputs_plots')
SCRIPT_NAME, _ = os.path.splitext(os.path.basename(__file__))
memory = Memory('__cache_dir__', verbose=0)
def logspace_layers(n_layers=10, max_depth=50):
""" Return n_layers, from 1 to max_depth of different number of layers to
define networks """
all_n_layers = np.logspace(0, np.log10(max_depth), n_layers).astype(int)
return list(np.unique(all_n_layers))
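# Example (illustrative): logspace_layers(n_layers=5, max_depth=40) returns a
# sorted list of unique integer depths spread roughly log-uniformly between 1
# and max_depth; the integer cast can create duplicates, so the list may hold
# fewer than n_layers entries.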
###########################################################################
# Main experiment runner
@memory.cache(ignore=['verbose', 'device'])
def run_one(x_train, x_test, A, D, L, lmbd, network, all_n_layers, key,
extra_args, meta={}, device=None, verbose=1):
params = None
log = []
print(f"[main script] running {key}")
print("-" * 80)
extra_args_per_layer = extra_args.copy()
extra_args_per_layer['learn_prox'] = 'per-layer'
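    # record_loss evaluates a fitted network on the train and test sets: it maps
    # x to the analysis variable u, logs the analysis primal objective and TV
    # regularization, and, for ListaTV networks, also logs the inner prox-TV
    # errors reported by compute_prox_tv_errors.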
def record_loss(n_layers, algo, k=key, ea=extra_args):
u_train = algo.transform_to_u(x_train, lmbd)
u_test = algo.transform_to_u(x_test, lmbd)
if isinstance(algo, ListaTV):
prox_tv_loss_train = compute_prox_tv_errors(algo, x_train, lmbd)
prox_tv_loss_test = compute_prox_tv_errors(algo, x_test, lmbd)
else:
prox_tv_loss_train = prox_tv_loss_test = None
log.append(dict(
key=k, **meta, lmbd=lmbd, extra_args=ea,
n_layers=n_layers,
train_loss=analysis_primal_obj(u_train, A, D, x_train, lmbd),
test_loss=analysis_primal_obj(u_test, A, D, x_test, lmbd),
train_reg=tv_reg(u_train, D),
test_reg=tv_reg(u_test, D),
prox_tv_loss_train=prox_tv_loss_train,
prox_tv_loss_test=prox_tv_loss_test
))
algo = network(A=A, n_layers=0)
record_loss(n_layers=0, algo=algo)
for i, n_layers in enumerate(all_n_layers):
# declare network
algo = network(A=A, n_layers=n_layers, initial_parameters=params,
**extra_args, device=device, verbose=verbose)
t0_ = time.time()
algo.fit(x_train, lmbd)
delta_ = time.time() - t0_
# save parameters
params = algo.export_parameters()
# get train and test error
record_loss(n_layers=n_layers, algo=algo)
if verbose > 0:
train_loss = log[-1]['train_loss']
test_loss = log[-1]['test_loss']
print(f"\r[{algo.name}|layers#{n_layers:3d}] model fitted "
f"{delta_:4.1f}s train-loss={train_loss:.4e} "
f"test-loss={test_loss:.4e}")
if network == ListaTV:
# declare network
algo = network(A=A, n_layers=n_layers, initial_parameters=params,
**extra_args_per_layer, device=device,
verbose=verbose)
t0_ = time.time()
algo.fit(x_train, lmbd)
delta_ = time.time() - t0_
# get train and test error
record_loss(n_layers=n_layers, algo=algo,
k=key.replace('none', 'per-layer'),
ea=extra_args_per_layer)
if verbose > 0:
train_loss = log[-1]['train_loss']
test_loss = log[-1]['test_loss']
print(f"\r[{algo.name}|layers#{n_layers:3d}] model fitted "
f"{delta_:4.1f}s train-loss={train_loss:.4e} "
f"test-loss={test_loss:.4e}")
return log
def run_experiment(max_iter, max_iter_ref=1000, lmbd=.1, seed=None,
net_solver_type='recursive', n_jobs=1, device=None):
# Define variables
n_samples_train = 1000
n_samples_testing = 1000
n_samples = n_samples_train + n_samples_testing
n_atoms = 8
n_dim = 5
s = 0.2
snr = 0.0
# Layers that are sampled
all_n_layers = logspace_layers(n_layers=10, max_depth=40)
timestamp = datetime.now()
print(__doc__)
print('*' * 80)
print(f"Script started on: {timestamp.strftime('%Y/%m/%d %Hh%M')}")
if seed is None:
seed = np.random.randint(0, 1000)
print(f'Seed used = {seed}')
# Store meta data of the problem
meta_pb = dict(n_atoms=n_atoms, n_dim=n_dim, s=s, snr=snr, seed=seed,
n_samples_train=n_samples_train,
n_samples_testing=n_samples_testing)
# Generate data
x, _, z, L, D, A = synthetic_1d_dataset(
n_atoms=n_atoms, n_dim=n_dim, n=n_samples, s=s, snr=snr, seed=seed
)
x_train = x[n_samples_testing:, :]
x_test = x[:n_samples_testing, :]
learning_parameters = dict(
net_solver_type=net_solver_type, max_iter=max_iter
)
methods = {
'lista_synthesis': {
'label': 'Synthesis LISTA',
'network': CoupledIstaLASSO,
'extra_args': dict(**learning_parameters),
'style': dict(color='tab:orange', marker='*', linestyle='-')
},
'lpgd_taut': {
'label': 'Analysis LPGD - taut-string',
'network': LpgdTautString,
'extra_args': dict(**learning_parameters),
'style': dict(color='tab:red', marker='*', linestyle='-.')
},
'ista_synthesis': {
'label': 'Synthesis ISTA',
'network': IstaSynthesis,
'extra_args': dict(momentum=None),
'style': dict(color='tab:orange', marker='s', linestyle='--')
},
'fista_synthesis': {
'label': 'Synthesis FISTA',
'network': IstaSynthesis,
'extra_args': dict(momentum='fista'),
'style': dict(color='tab:orange', marker='*', linestyle='--')
},
'ista_analysis': {
'label': 'Analysis ISTA',
'network': IstaAnalysis,
'extra_args': dict(momentum=None),
'style': dict(color='tab:red', marker='s', linestyle='--')
},
'fista_analysis': {
'label': 'Analysis FISTA',
'network': IstaAnalysis,
'extra_args': dict(momentum='fista'),
'style': dict(color='tab:red', marker='*', linestyle='--')
},
# reference cost, use all_n_layers to override the computations
'reference': {
'label': 'Analysis FISTA',
'network': IstaAnalysis,
'extra_args': dict(momentum='fista'),
'style': dict(color='tab:red', marker='*', linestyle='--'),
'all_n_layers': [max_iter_ref]
}
}
# for i, learn_prox in enumerate(['none', 'global', 'per-layer']):
for i, learn_prox in enumerate(['none']):
# for n_inner_layers, marker in [(10, '*'), (50, 's'), (100, 'h'),
# (300, 'o'), (500, '>')]:
for n_inner_layers, marker in [(50, 's'), (20, '*')]:
methods[f'lpgd_lista_{learn_prox}_{n_inner_layers}'] = {
'label': f'LPGD - LISTA[{learn_prox}-{n_inner_layers}]',
'network': ListaTV,
'extra_args': dict(n_inner_layers=n_inner_layers,
learn_prox=learn_prox,
**learning_parameters),
'style': dict(color=f'C{i}', marker=marker, linestyle='-')
}
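    # For instance, the loop above registers keys such as 'lpgd_lista_none_50';
    # for ListaTV, run_one() additionally records a per-layer variant under
    # 'lpgd_lista_per-layer_50' by re-fitting with learn_prox='per-layer'.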
# launch all experiments
print("=" * 80)
t0 = time.time()
results = Parallel(n_jobs=n_jobs)(
delayed(run_one)(x_train, x_test, A, D, L, lmbd=lmbd, key=k,
network=m['network'], extra_args=m['extra_args'],
all_n_layers=m.get('all_n_layers', all_n_layers),
device=device, meta=meta_pb)
for k, m in methods.items()
)
# concatenate all results as a big list. Also update style and label
# here to avoid recomputing the results when changing the style only.
log = []
for records in results:
for rec in records:
k = rec['key']
m = methods.get(k, None)
if m is None:
from copy import deepcopy
m = deepcopy(methods[k.replace('per-layer', 'none')])
m['style']['color'] = 'C1'
m['label'] = m['label'].replace('none', 'per-layer')
rec.update(style=m['style'], label=m['label'])
log.append(rec)
# Save the computations in a pickle file
    df = pd.DataFrame(log)
# Copyright (c) 2020 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data pre-processing
- extract data from output.xml files generated by Jenkins jobs and store in
pandas' Series,
- provide access to the data,
- filter the data using tags.
"""
import re
import copy
import resource
import logging
from collections import OrderedDict
from os import remove, walk, listdir
from os.path import isfile, isdir, join
from datetime import datetime as dt
from datetime import timedelta
from json import loads
from json.decoder import JSONDecodeError
import hdrh.histogram
import hdrh.codec
import prettytable
import pandas as pd
from robot.api import ExecutionResult, ResultVisitor
from robot import errors
from resources.libraries.python import jumpavg
from input_data_files import download_and_unzip_data_file
from pal_errors import PresentationError
# Separator used in file names
SEPARATOR = u"__"
class ExecutionChecker(ResultVisitor):
"""Class to traverse through the test suite structure.
The functionality implemented in this class generates a json structure:
Performance tests:
{
"metadata": {
"generated": "Timestamp",
"version": "SUT version",
"job": "Jenkins job name",
"build": "Information about the build"
},
"suites": {
"Suite long name 1": {
"name": Suite name,
"doc": "Suite 1 documentation",
"parent": "Suite 1 parent",
"level": "Level of the suite in the suite hierarchy"
}
"Suite long name N": {
"name": Suite name,
"doc": "Suite N documentation",
"parent": "Suite 2 parent",
"level": "Level of the suite in the suite hierarchy"
}
}
"tests": {
# NDRPDR tests:
"ID": {
"name": "Test name",
"parent": "Name of the parent of the test",
"doc": "Test documentation",
"msg": "Test message",
"conf-history": "DUT1 and DUT2 VAT History",
"show-run": "Show Run",
"tags": ["tag 1", "tag 2", "tag n"],
"type": "NDRPDR",
"status": "PASS" | "FAIL",
"throughput": {
"NDR": {
"LOWER": float,
"UPPER": float
},
"PDR": {
"LOWER": float,
"UPPER": float
}
},
"latency": {
"NDR": {
"direction1": {
"min": float,
"avg": float,
"max": float,
"hdrh": str
},
"direction2": {
"min": float,
"avg": float,
"max": float,
"hdrh": str
}
},
"PDR": {
"direction1": {
"min": float,
"avg": float,
"max": float,
"hdrh": str
},
"direction2": {
"min": float,
"avg": float,
"max": float,
"hdrh": str
}
}
}
}
# TCP tests:
"ID": {
"name": "Test name",
"parent": "Name of the parent of the test",
"doc": "Test documentation",
"msg": "Test message",
"tags": ["tag 1", "tag 2", "tag n"],
"type": "TCP",
"status": "PASS" | "FAIL",
"result": int
}
# MRR, BMRR tests:
"ID": {
"name": "Test name",
"parent": "Name of the parent of the test",
"doc": "Test documentation",
"msg": "Test message",
"tags": ["tag 1", "tag 2", "tag n"],
"type": "MRR" | "BMRR",
"status": "PASS" | "FAIL",
"result": {
"receive-rate": float,
# Average of a list, computed using AvgStdevStats.
# In CSIT-1180, replace with List[float].
}
}
"ID" {
# next test
}
}
}
Functional tests:
{
"metadata": { # Optional
"version": "VPP version",
"job": "Jenkins job name",
"build": "Information about the build"
},
"suites": {
"Suite name 1": {
"doc": "Suite 1 documentation",
"parent": "Suite 1 parent",
"level": "Level of the suite in the suite hierarchy"
}
"Suite name N": {
"doc": "Suite N documentation",
"parent": "Suite 2 parent",
"level": "Level of the suite in the suite hierarchy"
}
}
"tests": {
"ID": {
"name": "Test name",
"parent": "Name of the parent of the test",
"doc": "Test documentation"
"msg": "Test message"
"tags": ["tag 1", "tag 2", "tag n"],
"conf-history": "DUT1 and DUT2 VAT History"
"show-run": "Show Run"
"status": "PASS" | "FAIL"
},
"ID" {
# next test
}
}
}
.. note:: ID is the lowercase full path to the test.
"""
REGEX_PLR_RATE = re.compile(
r'PLRsearch lower bound::?\s(\d+.\d+).*\n'
r'PLRsearch upper bound::?\s(\d+.\d+)'
)
REGEX_NDRPDR_RATE = re.compile(
r'NDR_LOWER:\s(\d+.\d+).*\n.*\n'
r'NDR_UPPER:\s(\d+.\d+).*\n'
r'PDR_LOWER:\s(\d+.\d+).*\n.*\n'
r'PDR_UPPER:\s(\d+.\d+)'
)
REGEX_NDRPDR_GBPS = re.compile(
r'NDR_LOWER:.*,\s(\d+.\d+).*\n.*\n'
r'NDR_UPPER:.*,\s(\d+.\d+).*\n'
r'PDR_LOWER:.*,\s(\d+.\d+).*\n.*\n'
r'PDR_UPPER:.*,\s(\d+.\d+)'
)
REGEX_PERF_MSG_INFO = re.compile(
r'NDR_LOWER:\s(\d+.\d+)\s.*\s(\d+.\d+)\s.*\n.*\n.*\n'
r'PDR_LOWER:\s(\d+.\d+)\s.*\s(\d+.\d+)\s.*\n.*\n.*\n'
r'Latency at 90% PDR:.*\[\'(.*)\', \'(.*)\'\].*\n'
r'Latency at 50% PDR:.*\[\'(.*)\', \'(.*)\'\].*\n'
r'Latency at 10% PDR:.*\[\'(.*)\', \'(.*)\'\].*\n'
)
REGEX_MRR_MSG_INFO = re.compile(r'.*\[(.*)\]')
# TODO: Remove when not needed
REGEX_NDRPDR_LAT_BASE = re.compile(
r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
r'LATENCY.*\[\'(.*)\', \'(.*)\'\]'
)
REGEX_NDRPDR_LAT = re.compile(
r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n'
r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
r'Latency.*\[\'(.*)\', \'(.*)\'\]'
)
# TODO: Remove when not needed
REGEX_NDRPDR_LAT_LONG = re.compile(
r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n.*\n'
r'LATENCY.*\[\'(.*)\', \'(.*)\'\]\s\n.*\n'
r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
r'Latency.*\[\'(.*)\', \'(.*)\'\]\s\n'
r'Latency.*\[\'(.*)\', \'(.*)\'\]'
)
REGEX_VERSION_VPP = re.compile(
r"(return STDOUT Version:\s*|"
r"VPP Version:\s*|VPP version:\s*)(.*)"
)
REGEX_VERSION_DPDK = re.compile(
r"(DPDK version:\s*|DPDK Version:\s*)(.*)"
)
REGEX_TCP = re.compile(
r'Total\s(rps|cps|throughput):\s(\d*).*$'
)
REGEX_MRR = re.compile(
r'MaxReceivedRate_Results\s\[pkts/(\d*)sec\]:\s'
r'tx\s(\d*),\srx\s(\d*)'
)
REGEX_BMRR = re.compile(
r'Maximum Receive Rate trial results'
r' in packets per second: \[(.*)\]'
)
REGEX_RECONF_LOSS = re.compile(
r'Packets lost due to reconfig: (\d*)'
)
REGEX_RECONF_TIME = re.compile(
r'Implied time lost: (\d*.[\de-]*)'
)
REGEX_TC_TAG = re.compile(r'\d+[tT]\d+[cC]')
REGEX_TC_NAME_OLD = re.compile(r'-\d+[tT]\d+[cC]-')
REGEX_TC_NAME_NEW = re.compile(r'-\d+[cC]-')
REGEX_TC_NUMBER = re.compile(r'tc\d{2}-')
REGEX_TC_PAPI_CLI = re.compile(r'.*\((\d+.\d+.\d+.\d+.) - (.*)\)')
def __init__(self, metadata, mapping, ignore):
"""Initialisation.
:param metadata: Key-value pairs to be included in "metadata" part of
JSON structure.
:param mapping: Mapping of the old names of test cases to the new
(actual) one.
:param ignore: List of TCs to be ignored.
:type metadata: dict
:type mapping: dict
:type ignore: list
"""
# Type of message to parse out from the test messages
self._msg_type = None
# VPP version
self._version = None
# Timestamp
self._timestamp = None
# Testbed. The testbed is identified by TG node IP address.
self._testbed = None
# Mapping of TCs long names
self._mapping = mapping
# Ignore list
self._ignore = ignore
# Number of PAPI History messages found:
# 0 - no message
# 1 - PAPI History of DUT1
# 2 - PAPI History of DUT2
self._conf_history_lookup_nr = 0
self._sh_run_counter = 0
# Test ID of currently processed test- the lowercase full path to the
# test
self._test_id = None
# The main data structure
self._data = {
u"metadata": OrderedDict(),
u"suites": OrderedDict(),
u"tests": OrderedDict()
}
# Save the provided metadata
for key, val in metadata.items():
self._data[u"metadata"][key] = val
# Dictionary defining the methods used to parse different types of
# messages
self.parse_msg = {
u"timestamp": self._get_timestamp,
u"vpp-version": self._get_vpp_version,
u"dpdk-version": self._get_dpdk_version,
# TODO: Remove when not needed:
u"teardown-vat-history": self._get_vat_history,
u"teardown-papi-history": self._get_papi_history,
u"test-show-runtime": self._get_show_run,
u"testbed": self._get_testbed
}
@property
def data(self):
"""Getter - Data parsed from the XML file.
:returns: Data parsed from the XML file.
:rtype: dict
"""
return self._data
def _get_data_from_mrr_test_msg(self, msg):
"""Get info from message of MRR performance tests.
:param msg: Message to be processed.
:type msg: str
:returns: Processed message or original message if a problem occurs.
:rtype: str
"""
groups = re.search(self.REGEX_MRR_MSG_INFO, msg)
if not groups or groups.lastindex != 1:
return u"Test Failed."
try:
data = groups.group(1).split(u", ")
except (AttributeError, IndexError, ValueError, KeyError):
return u"Test Failed."
out_str = u"["
try:
for item in data:
out_str += f"{(float(item) / 1e6):.2f}, "
return out_str[:-2] + u"]"
except (AttributeError, IndexError, ValueError, KeyError):
return u"Test Failed."
def _get_data_from_perf_test_msg(self, msg):
"""Get info from message of NDRPDR performance tests.
:param msg: Message to be processed.
:type msg: str
:returns: Processed message or original message if a problem occurs.
:rtype: str
"""
groups = re.search(self.REGEX_PERF_MSG_INFO, msg)
if not groups or groups.lastindex != 10:
return u"Test Failed."
try:
data = {
u"ndr_low": float(groups.group(1)),
u"ndr_low_b": float(groups.group(2)),
u"pdr_low": float(groups.group(3)),
u"pdr_low_b": float(groups.group(4)),
u"pdr_lat_90_1": groups.group(5),
u"pdr_lat_90_2": groups.group(6),
u"pdr_lat_50_1": groups.group(7),
u"pdr_lat_50_2": groups.group(8),
u"pdr_lat_10_1": groups.group(9),
u"pdr_lat_10_2": groups.group(10),
}
except (AttributeError, IndexError, ValueError, KeyError):
return u"Test Failed."
def _process_lat(in_str_1, in_str_2):
"""Extract min, avg, max values from latency string.
:param in_str_1: Latency string for one direction produced by robot
framework.
:param in_str_2: Latency string for second direction produced by
robot framework.
:type in_str_1: str
:type in_str_2: str
:returns: Processed latency string or None if a problem occurs.
:rtype: tuple
"""
in_list_1 = in_str_1.split('/', 3)
in_list_2 = in_str_2.split('/', 3)
if len(in_list_1) != 4 and len(in_list_2) != 4:
return None
in_list_1[3] += u"=" * (len(in_list_1[3]) % 4)
try:
hdr_lat_1 = hdrh.histogram.HdrHistogram.decode(in_list_1[3])
except hdrh.codec.HdrLengthException:
return None
in_list_2[3] += u"=" * (len(in_list_2[3]) % 4)
try:
hdr_lat_2 = hdrh.histogram.HdrHistogram.decode(in_list_2[3])
except hdrh.codec.HdrLengthException:
return None
if hdr_lat_1 and hdr_lat_2:
hdr_lat = (
hdr_lat_1.get_value_at_percentile(50.0),
hdr_lat_1.get_value_at_percentile(90.0),
hdr_lat_1.get_value_at_percentile(99.0),
hdr_lat_2.get_value_at_percentile(50.0),
hdr_lat_2.get_value_at_percentile(90.0),
hdr_lat_2.get_value_at_percentile(99.0)
)
if all(hdr_lat):
return hdr_lat
return None
try:
out_msg = (
f"1. {(data[u'ndr_low'] / 1e6):5.2f} "
f"{data[u'ndr_low_b']:5.2f}"
f"\n2. {(data[u'pdr_low'] / 1e6):5.2f} "
f"{data[u'pdr_low_b']:5.2f}"
)
latency = (
_process_lat(data[u'pdr_lat_10_1'], data[u'pdr_lat_10_2']),
_process_lat(data[u'pdr_lat_50_1'], data[u'pdr_lat_50_2']),
_process_lat(data[u'pdr_lat_90_1'], data[u'pdr_lat_90_2'])
)
if all(latency):
max_len = len(str(max((max(item) for item in latency))))
max_len = 4 if max_len < 4 else max_len
for idx, lat in enumerate(latency):
if not idx:
out_msg += u"\n"
out_msg += (
f"\n{idx + 3}. "
f"{lat[0]:{max_len}d} "
f"{lat[1]:{max_len}d} "
f"{lat[2]:{max_len}d} "
f"{lat[3]:{max_len}d} "
f"{lat[4]:{max_len}d} "
f"{lat[5]:{max_len}d} "
)
return out_msg
except (AttributeError, IndexError, ValueError, KeyError):
return u"Test Failed."
def _get_testbed(self, msg):
"""Called when extraction of testbed IP is required.
The testbed is identified by TG node IP address.
:param msg: Message to process.
:type msg: Message
:returns: Nothing.
"""
if msg.message.count(u"Setup of TG node") or \
msg.message.count(u"Setup of node TG host"):
reg_tg_ip = re.compile(
r'.*TG .* (\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3}).*')
try:
self._testbed = str(re.search(reg_tg_ip, msg.message).group(1))
except (KeyError, ValueError, IndexError, AttributeError):
pass
finally:
self._data[u"metadata"][u"testbed"] = self._testbed
self._msg_type = None
def _get_vpp_version(self, msg):
"""Called when extraction of VPP version is required.
:param msg: Message to process.
:type msg: Message
:returns: Nothing.
"""
if msg.message.count(u"return STDOUT Version:") or \
msg.message.count(u"VPP Version:") or \
msg.message.count(u"VPP version:"):
self._version = str(re.search(self.REGEX_VERSION_VPP, msg.message).
group(2))
self._data[u"metadata"][u"version"] = self._version
self._msg_type = None
def _get_dpdk_version(self, msg):
"""Called when extraction of DPDK version is required.
:param msg: Message to process.
:type msg: Message
:returns: Nothing.
"""
if msg.message.count(u"DPDK Version:"):
try:
self._version = str(re.search(
self.REGEX_VERSION_DPDK, msg.message).group(2))
self._data[u"metadata"][u"version"] = self._version
except IndexError:
pass
finally:
self._msg_type = None
def _get_timestamp(self, msg):
"""Called when extraction of timestamp is required.
:param msg: Message to process.
:type msg: Message
:returns: Nothing.
"""
self._timestamp = msg.timestamp[:14]
self._data[u"metadata"][u"generated"] = self._timestamp
self._msg_type = None
def _get_vat_history(self, msg):
"""Called when extraction of VAT command history is required.
TODO: Remove when not needed.
:param msg: Message to process.
:type msg: Message
:returns: Nothing.
"""
if msg.message.count(u"VAT command history:"):
self._conf_history_lookup_nr += 1
if self._conf_history_lookup_nr == 1:
self._data[u"tests"][self._test_id][u"conf-history"] = str()
else:
self._msg_type = None
text = re.sub(r"\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3} "
r"VAT command history:", u"",
msg.message, count=1).replace(u'\n', u' |br| ').\
replace(u'"', u"'")
self._data[u"tests"][self._test_id][u"conf-history"] += (
f" |br| **DUT{str(self._conf_history_lookup_nr)}:** {text}"
)
def _get_papi_history(self, msg):
"""Called when extraction of PAPI command history is required.
:param msg: Message to process.
:type msg: Message
:returns: Nothing.
"""
if msg.message.count(u"PAPI command history:"):
self._conf_history_lookup_nr += 1
if self._conf_history_lookup_nr == 1:
self._data[u"tests"][self._test_id][u"conf-history"] = str()
else:
self._msg_type = None
text = re.sub(r"\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3} "
r"PAPI command history:", u"",
msg.message, count=1).replace(u'\n', u' |br| ').\
replace(u'"', u"'")
self._data[u"tests"][self._test_id][u"conf-history"] += (
f" |br| **DUT{str(self._conf_history_lookup_nr)}:** {text}"
)
def _get_show_run(self, msg):
"""Called when extraction of VPP operational data (output of CLI command
Show Runtime) is required.
:param msg: Message to process.
:type msg: Message
:returns: Nothing.
"""
if not msg.message.count(u"stats runtime"):
return
# Temporary solution
if self._sh_run_counter > 1:
return
if u"show-run" not in self._data[u"tests"][self._test_id].keys():
self._data[u"tests"][self._test_id][u"show-run"] = dict()
groups = re.search(self.REGEX_TC_PAPI_CLI, msg.message)
if not groups:
return
try:
host = groups.group(1)
except (AttributeError, IndexError):
host = u""
try:
sock = groups.group(2)
except (AttributeError, IndexError):
sock = u""
runtime = loads(str(msg.message).replace(u' ', u'').replace(u'\n', u'').
replace(u"'", u'"').replace(u'b"', u'"').
replace(u'u"', u'"').split(u":", 1)[1])
try:
threads_nr = len(runtime[0][u"clocks"])
except (IndexError, KeyError):
return
dut = u"DUT{nr}".format(
nr=len(self._data[u'tests'][self._test_id][u'show-run'].keys()) + 1)
oper = {
u"host": host,
u"socket": sock,
u"threads": OrderedDict({idx: list() for idx in range(threads_nr)})
}
for item in runtime:
for idx in range(threads_nr):
if item[u"vectors"][idx] > 0:
clocks = item[u"clocks"][idx] / item[u"vectors"][idx]
elif item[u"calls"][idx] > 0:
clocks = item[u"clocks"][idx] / item[u"calls"][idx]
elif item[u"suspends"][idx] > 0:
clocks = item[u"clocks"][idx] / item[u"suspends"][idx]
else:
clocks = 0.0
if item[u"calls"][idx] > 0:
vectors_call = item[u"vectors"][idx] / item[u"calls"][idx]
else:
vectors_call = 0.0
if int(item[u"calls"][idx]) + int(item[u"vectors"][idx]) + \
int(item[u"suspends"][idx]):
oper[u"threads"][idx].append([
item[u"name"],
item[u"calls"][idx],
item[u"vectors"][idx],
item[u"suspends"][idx],
clocks,
vectors_call
])
self._data[u'tests'][self._test_id][u'show-run'][dut] = copy.copy(oper)
def _get_ndrpdr_throughput(self, msg):
"""Get NDR_LOWER, NDR_UPPER, PDR_LOWER and PDR_UPPER from the test
message.
:param msg: The test message to be parsed.
:type msg: str
:returns: Parsed data as a dict and the status (PASS/FAIL).
:rtype: tuple(dict, str)
"""
throughput = {
u"NDR": {u"LOWER": -1.0, u"UPPER": -1.0},
u"PDR": {u"LOWER": -1.0, u"UPPER": -1.0}
}
status = u"FAIL"
groups = re.search(self.REGEX_NDRPDR_RATE, msg)
if groups is not None:
try:
throughput[u"NDR"][u"LOWER"] = float(groups.group(1))
throughput[u"NDR"][u"UPPER"] = float(groups.group(2))
throughput[u"PDR"][u"LOWER"] = float(groups.group(3))
throughput[u"PDR"][u"UPPER"] = float(groups.group(4))
status = u"PASS"
except (IndexError, ValueError):
pass
return throughput, status
def _get_ndrpdr_throughput_gbps(self, msg):
"""Get NDR_LOWER, NDR_UPPER, PDR_LOWER and PDR_UPPER in Gbps from the
test message.
:param msg: The test message to be parsed.
:type msg: str
:returns: Parsed data as a dict and the status (PASS/FAIL).
:rtype: tuple(dict, str)
"""
gbps = {
u"NDR": {u"LOWER": -1.0, u"UPPER": -1.0},
u"PDR": {u"LOWER": -1.0, u"UPPER": -1.0}
}
status = u"FAIL"
groups = re.search(self.REGEX_NDRPDR_GBPS, msg)
if groups is not None:
try:
gbps[u"NDR"][u"LOWER"] = float(groups.group(1))
gbps[u"NDR"][u"UPPER"] = float(groups.group(2))
gbps[u"PDR"][u"LOWER"] = float(groups.group(3))
gbps[u"PDR"][u"UPPER"] = float(groups.group(4))
status = u"PASS"
except (IndexError, ValueError):
pass
return gbps, status
def _get_plr_throughput(self, msg):
"""Get PLRsearch lower bound and PLRsearch upper bound from the test
message.
:param msg: The test message to be parsed.
:type msg: str
:returns: Parsed data as a dict and the status (PASS/FAIL).
:rtype: tuple(dict, str)
"""
throughput = {
u"LOWER": -1.0,
u"UPPER": -1.0
}
status = u"FAIL"
groups = re.search(self.REGEX_PLR_RATE, msg)
if groups is not None:
try:
throughput[u"LOWER"] = float(groups.group(1))
throughput[u"UPPER"] = float(groups.group(2))
status = u"PASS"
except (IndexError, ValueError):
pass
return throughput, status
def _get_ndrpdr_latency(self, msg):
"""Get LATENCY from the test message.
:param msg: The test message to be parsed.
:type msg: str
:returns: Parsed data as a dict and the status (PASS/FAIL).
:rtype: tuple(dict, str)
"""
latency_default = {
u"min": -1.0,
u"avg": -1.0,
u"max": -1.0,
u"hdrh": u""
}
latency = {
u"NDR": {
u"direction1": copy.copy(latency_default),
u"direction2": copy.copy(latency_default)
},
u"PDR": {
u"direction1": copy.copy(latency_default),
u"direction2": copy.copy(latency_default)
},
u"LAT0": {
u"direction1": copy.copy(latency_default),
u"direction2": copy.copy(latency_default)
},
u"PDR10": {
u"direction1": copy.copy(latency_default),
u"direction2": copy.copy(latency_default)
},
u"PDR50": {
u"direction1": copy.copy(latency_default),
u"direction2": copy.copy(latency_default)
},
u"PDR90": {
u"direction1": copy.copy(latency_default),
u"direction2": copy.copy(latency_default)
},
}
# TODO: Rewrite when long and base are not needed
groups = re.search(self.REGEX_NDRPDR_LAT_LONG, msg)
if groups is None:
groups = re.search(self.REGEX_NDRPDR_LAT, msg)
if groups is None:
groups = re.search(self.REGEX_NDRPDR_LAT_BASE, msg)
if groups is None:
return latency, u"FAIL"
def process_latency(in_str):
"""Return object with parsed latency values.
TODO: Define class for the return type.
:param in_str: Input string, min/avg/max/hdrh format.
:type in_str: str
            :returns: Dict with the corresponding keys; all values are floats,
                except hdrh, which is kept as a string.
            :rtype: dict
:throws IndexError: If in_str does not have enough substrings.
:throws ValueError: If a substring does not convert to float.
"""
in_list = in_str.split('/', 3)
rval = {
u"min": float(in_list[0]),
u"avg": float(in_list[1]),
u"max": float(in_list[2]),
u"hdrh": u""
}
if len(in_list) == 4:
rval[u"hdrh"] = str(in_list[3])
return rval
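        # Illustrative example (the hdrh payload is an assumed placeholder):
        #   process_latency(u"10/15/25/HISTFAAA...")
        #   -> {u"min": 10.0, u"avg": 15.0, u"max": 25.0, u"hdrh": u"HISTFAAA..."}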
try:
latency[u"NDR"][u"direction1"] = process_latency(groups.group(1))
latency[u"NDR"][u"direction2"] = process_latency(groups.group(2))
latency[u"PDR"][u"direction1"] = process_latency(groups.group(3))
latency[u"PDR"][u"direction2"] = process_latency(groups.group(4))
if groups.lastindex == 4:
return latency, u"PASS"
except (IndexError, ValueError):
pass
try:
latency[u"PDR90"][u"direction1"] = process_latency(groups.group(5))
latency[u"PDR90"][u"direction2"] = process_latency(groups.group(6))
latency[u"PDR50"][u"direction1"] = process_latency(groups.group(7))
latency[u"PDR50"][u"direction2"] = process_latency(groups.group(8))
latency[u"PDR10"][u"direction1"] = process_latency(groups.group(9))
latency[u"PDR10"][u"direction2"] = process_latency(groups.group(10))
latency[u"LAT0"][u"direction1"] = process_latency(groups.group(11))
latency[u"LAT0"][u"direction2"] = process_latency(groups.group(12))
if groups.lastindex == 12:
return latency, u"PASS"
except (IndexError, ValueError):
pass
# TODO: Remove when not needed
latency[u"NDR10"] = {
u"direction1": copy.copy(latency_default),
u"direction2": copy.copy(latency_default)
}
latency[u"NDR50"] = {
u"direction1": copy.copy(latency_default),
u"direction2": copy.copy(latency_default)
}
latency[u"NDR90"] = {
u"direction1": copy.copy(latency_default),
u"direction2": copy.copy(latency_default)
}
try:
latency[u"LAT0"][u"direction1"] = process_latency(groups.group(5))
latency[u"LAT0"][u"direction2"] = process_latency(groups.group(6))
latency[u"NDR10"][u"direction1"] = process_latency(groups.group(7))
latency[u"NDR10"][u"direction2"] = process_latency(groups.group(8))
latency[u"NDR50"][u"direction1"] = process_latency(groups.group(9))
latency[u"NDR50"][u"direction2"] = process_latency(groups.group(10))
latency[u"NDR90"][u"direction1"] = process_latency(groups.group(11))
latency[u"NDR90"][u"direction2"] = process_latency(groups.group(12))
latency[u"PDR10"][u"direction1"] = process_latency(groups.group(13))
latency[u"PDR10"][u"direction2"] = process_latency(groups.group(14))
latency[u"PDR50"][u"direction1"] = process_latency(groups.group(15))
latency[u"PDR50"][u"direction2"] = process_latency(groups.group(16))
latency[u"PDR90"][u"direction1"] = process_latency(groups.group(17))
latency[u"PDR90"][u"direction2"] = process_latency(groups.group(18))
return latency, u"PASS"
except (IndexError, ValueError):
pass
return latency, u"FAIL"
@staticmethod
def _get_hoststack_data(msg, tags):
"""Get data from the hoststack test message.
:param msg: The test message to be parsed.
:param tags: Test tags.
:type msg: str
:type tags: list
:returns: Parsed data as a JSON dict and the status (PASS/FAIL).
:rtype: tuple(dict, str)
"""
result = dict()
status = u"FAIL"
msg = msg.replace(u"'", u'"').replace(u" ", u"")
if u"LDPRELOAD" in tags:
try:
result = loads(msg)
status = u"PASS"
except JSONDecodeError:
pass
elif u"VPPECHO" in tags:
try:
msg_lst = msg.replace(u"}{", u"} {").split(u" ")
result = dict(
client=loads(msg_lst[0]),
server=loads(msg_lst[1])
)
status = u"PASS"
except (JSONDecodeError, IndexError):
pass
return result, status
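    # Illustrative VPPECHO case (field names are assumed): a message such as
    #   "{'rx_bytes': 100} {'tx_bytes': 200}"
    # is normalised to two JSON objects and returned as
    #   (dict(client={"rx_bytes": 100}, server={"tx_bytes": 200}), u"PASS")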
def visit_suite(self, suite):
"""Implements traversing through the suite and its direct children.
:param suite: Suite to process.
:type suite: Suite
:returns: Nothing.
"""
if self.start_suite(suite) is not False:
suite.suites.visit(self)
suite.tests.visit(self)
self.end_suite(suite)
def start_suite(self, suite):
"""Called when suite starts.
:param suite: Suite to process.
:type suite: Suite
:returns: Nothing.
"""
try:
parent_name = suite.parent.name
except AttributeError:
return
doc_str = suite.doc.\
replace(u'"', u"'").\
replace(u'\n', u' ').\
replace(u'\r', u'').\
replace(u'*[', u' |br| *[').\
replace(u"*", u"**").\
replace(u' |br| *[', u'*[', 1)
self._data[u"suites"][suite.longname.lower().
replace(u'"', u"'").
replace(u" ", u"_")] = {
u"name": suite.name.lower(),
u"doc": doc_str,
u"parent": parent_name,
u"level": len(suite.longname.split(u"."))
}
suite.keywords.visit(self)
def end_suite(self, suite):
"""Called when suite ends.
:param suite: Suite to process.
:type suite: Suite
:returns: Nothing.
"""
def visit_test(self, test):
"""Implements traversing through the test.
:param test: Test to process.
:type test: Test
:returns: Nothing.
"""
if self.start_test(test) is not False:
test.keywords.visit(self)
self.end_test(test)
def start_test(self, test):
"""Called when test starts.
:param test: Test to process.
:type test: Test
:returns: Nothing.
"""
self._sh_run_counter = 0
longname_orig = test.longname.lower()
# Check the ignore list
if longname_orig in self._ignore:
return
tags = [str(tag) for tag in test.tags]
test_result = dict()
# Change the TC long name and name if defined in the mapping table
longname = self._mapping.get(longname_orig, None)
if longname is not None:
name = longname.split(u'.')[-1]
logging.debug(
f"{self._data[u'metadata']}\n{longname_orig}\n{longname}\n"
f"{name}"
)
else:
longname = longname_orig
name = test.name.lower()
# Remove TC number from the TC long name (backward compatibility):
self._test_id = re.sub(
self.REGEX_TC_NUMBER, u"", longname.replace(u"snat", u"nat")
)
# Remove TC number from the TC name (not needed):
test_result[u"name"] = re.sub(
self.REGEX_TC_NUMBER, "", name.replace(u"snat", u"nat")
)
test_result[u"parent"] = test.parent.name.lower().\
replace(u"snat", u"nat")
test_result[u"tags"] = tags
test_result["doc"] = test.doc.\
replace(u'"', u"'").\
replace(u'\n', u' ').\
replace(u'\r', u'').\
replace(u'[', u' |br| [').\
replace(u' |br| [', u'[', 1)
test_result[u"type"] = u"FUNC"
test_result[u"status"] = test.status
if test.status == u"PASS":
if u"NDRPDR" in tags:
test_result[u"msg"] = self._get_data_from_perf_test_msg(
test.message).replace(u'\n', u' |br| ').\
replace(u'\r', u'').replace(u'"', u"'")
elif u"MRR" in tags or u"FRMOBL" in tags or u"BMRR" in tags:
test_result[u"msg"] = self._get_data_from_mrr_test_msg(
test.message).replace(u'\n', u' |br| ').\
replace(u'\r', u'').replace(u'"', u"'")
else:
test_result[u"msg"] = test.message.replace(u'\n', u' |br| ').\
replace(u'\r', u'').replace(u'"', u"'")
else:
test_result[u"msg"] = u"Test Failed."
if u"PERFTEST" in tags:
# Replace info about cores (e.g. -1c-) with the info about threads
# and cores (e.g. -1t1c-) in the long test case names and in the
# test case names if necessary.
groups = re.search(self.REGEX_TC_NAME_OLD, self._test_id)
if not groups:
tag_count = 0
tag_tc = str()
for tag in test_result[u"tags"]:
groups = re.search(self.REGEX_TC_TAG, tag)
if groups:
tag_count += 1
tag_tc = tag
if tag_count == 1:
self._test_id = re.sub(
self.REGEX_TC_NAME_NEW, f"-{tag_tc.lower()}-",
self._test_id, count=1
)
test_result[u"name"] = re.sub(
self.REGEX_TC_NAME_NEW, f"-{tag_tc.lower()}-",
test_result["name"], count=1
)
else:
test_result[u"status"] = u"FAIL"
self._data[u"tests"][self._test_id] = test_result
logging.debug(
f"The test {self._test_id} has no or more than one "
f"multi-threading tags.\n"
f"Tags: {test_result[u'tags']}"
)
return
if test.status == u"PASS":
if u"DEVICETEST" in tags:
test_result[u"type"] = u"DEVICETEST"
elif u"NDRPDR" in tags:
test_result[u"type"] = u"NDRPDR"
test_result[u"throughput"], test_result[u"status"] = \
self._get_ndrpdr_throughput(test.message)
test_result[u"gbps"], test_result[u"status"] = \
self._get_ndrpdr_throughput_gbps(test.message)
test_result[u"latency"], test_result[u"status"] = \
self._get_ndrpdr_latency(test.message)
elif u"SOAK" in tags:
test_result[u"type"] = u"SOAK"
test_result[u"throughput"], test_result[u"status"] = \
self._get_plr_throughput(test.message)
elif u"HOSTSTACK" in tags:
test_result[u"type"] = u"HOSTSTACK"
test_result[u"result"], test_result[u"status"] = \
self._get_hoststack_data(test.message, tags)
elif u"TCP" in tags:
test_result[u"type"] = u"TCP"
groups = re.search(self.REGEX_TCP, test.message)
test_result[u"result"] = int(groups.group(2))
elif u"MRR" in tags or u"FRMOBL" in tags or u"BMRR" in tags:
if u"MRR" in tags:
test_result[u"type"] = u"MRR"
else:
test_result[u"type"] = u"BMRR"
test_result[u"result"] = dict()
groups = re.search(self.REGEX_BMRR, test.message)
if groups is not None:
items_str = groups.group(1)
items_float = [
float(item.strip()) for item in items_str.split(",")
]
# Use whole list in CSIT-1180.
stats = jumpavg.AvgStdevStats.for_runs(items_float)
test_result[u"result"][u"receive-rate"] = stats.avg
test_result[u"result"][u"receive-stdev"] = stats.stdev
else:
groups = re.search(self.REGEX_MRR, test.message)
test_result[u"result"][u"receive-rate"] = \
float(groups.group(3)) / float(groups.group(1))
elif u"RECONF" in tags:
test_result[u"type"] = u"RECONF"
test_result[u"result"] = None
try:
grps_loss = re.search(self.REGEX_RECONF_LOSS, test.message)
grps_time = re.search(self.REGEX_RECONF_TIME, test.message)
test_result[u"result"] = {
u"loss": int(grps_loss.group(1)),
u"time": float(grps_time.group(1))
}
except (AttributeError, IndexError, ValueError, TypeError):
test_result[u"status"] = u"FAIL"
else:
test_result[u"status"] = u"FAIL"
self._data[u"tests"][self._test_id] = test_result
return
self._data[u"tests"][self._test_id] = test_result
def end_test(self, test):
"""Called when test ends.
:param test: Test to process.
:type test: Test
:returns: Nothing.
"""
def visit_keyword(self, keyword):
"""Implements traversing through the keyword and its child keywords.
:param keyword: Keyword to process.
:type keyword: Keyword
:returns: Nothing.
"""
if self.start_keyword(keyword) is not False:
self.end_keyword(keyword)
def start_keyword(self, keyword):
"""Called when keyword starts. Default implementation does nothing.
:param keyword: Keyword to process.
:type keyword: Keyword
:returns: Nothing.
"""
try:
if keyword.type == u"setup":
self.visit_setup_kw(keyword)
elif keyword.type == u"teardown":
self.visit_teardown_kw(keyword)
else:
self.visit_test_kw(keyword)
except AttributeError:
pass
def end_keyword(self, keyword):
"""Called when keyword ends. Default implementation does nothing.
:param keyword: Keyword to process.
:type keyword: Keyword
:returns: Nothing.
"""
def visit_test_kw(self, test_kw):
"""Implements traversing through the test keyword and its child
keywords.
:param test_kw: Keyword to process.
:type test_kw: Keyword
:returns: Nothing.
"""
for keyword in test_kw.keywords:
if self.start_test_kw(keyword) is not False:
self.visit_test_kw(keyword)
self.end_test_kw(keyword)
def start_test_kw(self, test_kw):
"""Called when test keyword starts. Default implementation does
nothing.
:param test_kw: Keyword to process.
:type test_kw: Keyword
:returns: Nothing.
"""
if test_kw.name.count(u"Show Runtime On All Duts") or \
test_kw.name.count(u"Show Runtime Counters On All Duts") or \
test_kw.name.count(u"Vpp Show Runtime On All Duts"):
self._msg_type = u"test-show-runtime"
self._sh_run_counter += 1
else:
return
test_kw.messages.visit(self)
def end_test_kw(self, test_kw):
"""Called when keyword ends. Default implementation does nothing.
:param test_kw: Keyword to process.
:type test_kw: Keyword
:returns: Nothing.
"""
def visit_setup_kw(self, setup_kw):
"""Implements traversing through the teardown keyword and its child
keywords.
:param setup_kw: Keyword to process.
:type setup_kw: Keyword
:returns: Nothing.
"""
for keyword in setup_kw.keywords:
if self.start_setup_kw(keyword) is not False:
self.visit_setup_kw(keyword)
self.end_setup_kw(keyword)
def start_setup_kw(self, setup_kw):
"""Called when teardown keyword starts. Default implementation does
nothing.
:param setup_kw: Keyword to process.
:type setup_kw: Keyword
:returns: Nothing.
"""
if setup_kw.name.count(u"Show Vpp Version On All Duts") \
and not self._version:
self._msg_type = u"vpp-version"
elif setup_kw.name.count(u"Install Dpdk Framework On All Duts") and \
not self._version:
self._msg_type = u"dpdk-version"
elif setup_kw.name.count(u"Set Global Variable") \
and not self._timestamp:
self._msg_type = u"timestamp"
elif setup_kw.name.count(u"Setup Framework") and not self._testbed:
self._msg_type = u"testbed"
else:
return
setup_kw.messages.visit(self)
def end_setup_kw(self, setup_kw):
"""Called when keyword ends. Default implementation does nothing.
:param setup_kw: Keyword to process.
:type setup_kw: Keyword
:returns: Nothing.
"""
def visit_teardown_kw(self, teardown_kw):
"""Implements traversing through the teardown keyword and its child
keywords.
:param teardown_kw: Keyword to process.
:type teardown_kw: Keyword
:returns: Nothing.
"""
for keyword in teardown_kw.keywords:
if self.start_teardown_kw(keyword) is not False:
self.visit_teardown_kw(keyword)
self.end_teardown_kw(keyword)
def start_teardown_kw(self, teardown_kw):
"""Called when teardown keyword starts
:param teardown_kw: Keyword to process.
:type teardown_kw: Keyword
:returns: Nothing.
"""
if teardown_kw.name.count(u"Show Vat History On All Duts"):
# TODO: Remove when not needed:
self._conf_history_lookup_nr = 0
self._msg_type = u"teardown-vat-history"
teardown_kw.messages.visit(self)
elif teardown_kw.name.count(u"Show Papi History On All Duts"):
self._conf_history_lookup_nr = 0
self._msg_type = u"teardown-papi-history"
teardown_kw.messages.visit(self)
def end_teardown_kw(self, teardown_kw):
"""Called when keyword ends. Default implementation does nothing.
:param teardown_kw: Keyword to process.
:type teardown_kw: Keyword
:returns: Nothing.
"""
def visit_message(self, msg):
"""Implements visiting the message.
:param msg: Message to process.
:type msg: Message
:returns: Nothing.
"""
if self.start_message(msg) is not False:
self.end_message(msg)
def start_message(self, msg):
"""Called when message starts. Get required information from messages:
- VPP version.
:param msg: Message to process.
:type msg: Message
:returns: Nothing.
"""
if self._msg_type:
self.parse_msg[self._msg_type](msg)
def end_message(self, msg):
"""Called when message ends. Default implementation does nothing.
:param msg: Message to process.
:type msg: Message
:returns: Nothing.
"""
class InputData:
"""Input data
The data is extracted from output.xml files generated by Jenkins jobs and
stored in pandas' DataFrames.
The data structure:
- job name
- build number
- metadata
(as described in ExecutionChecker documentation)
- suites
(as described in ExecutionChecker documentation)
- tests
(as described in ExecutionChecker documentation)
"""
def __init__(self, spec):
"""Initialization.
:param spec: Specification.
:type spec: Specification
"""
# Specification:
self._cfg = spec
# Data store:
self._input_data = pd.Series()
@property
def data(self):
"""Getter - Input data.
:returns: Input data
:rtype: pandas.Series
"""
return self._input_data
def metadata(self, job, build):
"""Getter - metadata
:param job: Job which metadata we want.
:param build: Build which metadata we want.
:type job: str
:type build: str
:returns: Metadata
:rtype: pandas.Series
"""
return self.data[job][build][u"metadata"]
def suites(self, job, build):
"""Getter - suites
:param job: Job which suites we want.
:param build: Build which suites we want.
:type job: str
:type build: str
:returns: Suites.
:rtype: pandas.Series
"""
return self.data[job][str(build)][u"suites"]
def tests(self, job, build):
"""Getter - tests
:param job: Job which tests we want.
:param build: Build which tests we want.
:type job: str
:type build: str
:returns: Tests.
:rtype: pandas.Series
"""
return self.data[job][build][u"tests"]
def _parse_tests(self, job, build):
"""Process data from robot output.xml file and return JSON structured
data.
:param job: The name of job which build output data will be processed.
:param build: The build which output data will be processed.
:type job: str
:type build: dict
:returns: JSON data structure.
:rtype: dict
"""
metadata = {
u"job": job,
u"build": build
}
with open(build[u"file-name"], u'r') as data_file:
try:
result = ExecutionResult(data_file)
except errors.DataError as err:
logging.error(
f"Error occurred while parsing output.xml: {repr(err)}"
)
return None
checker = ExecutionChecker(metadata, self._cfg.mapping,
self._cfg.ignore)
result.visit(checker)
return checker.data
def _download_and_parse_build(self, job, build, repeat, pid=10000):
"""Download and parse the input data file.
:param pid: PID of the process executing this method.
:param job: Name of the Jenkins job which generated the processed input
file.
:param build: Information about the Jenkins build which generated the
processed input file.
:param repeat: Repeat the download specified number of times if not
successful.
:type pid: int
:type job: str
:type build: dict
:type repeat: int
"""
logging.info(f" Processing the job/build: {job}: {build[u'build']}")
state = u"failed"
success = False
data = None
do_repeat = repeat
while do_repeat:
success = download_and_unzip_data_file(self._cfg, job, build, pid)
if success:
break
do_repeat -= 1
if not success:
logging.error(
f"It is not possible to download the input data file from the "
f"job {job}, build {build[u'build']}, or it is damaged. "
f"Skipped."
)
if success:
logging.info(f" Processing data from build {build[u'build']}")
data = self._parse_tests(job, build)
if data is None:
logging.error(
f"Input data file from the job {job}, build "
f"{build[u'build']} is damaged. Skipped."
)
else:
state = u"processed"
try:
remove(build[u"file-name"])
except OSError as err:
logging.error(
f"Cannot remove the file {build[u'file-name']}: {repr(err)}"
)
# If the time-period is defined in the specification file, remove all
# files which are outside the time period.
is_last = False
timeperiod = self._cfg.input.get(u"time-period", None)
if timeperiod and data:
now = dt.utcnow()
timeperiod = timedelta(int(timeperiod))
metadata = data.get(u"metadata", None)
if metadata:
generated = metadata.get(u"generated", None)
if generated:
generated = dt.strptime(generated, u"%Y%m%d %H:%M")
if (now - generated) > timeperiod:
# Remove the data and the file:
state = u"removed"
data = None
is_last = True
logging.info(
f" The build {job}/{build[u'build']} is "
f"outdated, will be removed."
)
logging.info(u" Done.")
return {
u"data": data,
u"state": state,
u"job": job,
u"build": build,
u"last": is_last
}
def download_and_parse_data(self, repeat=1):
"""Download the input data files, parse input data from input files and
store in pandas' Series.
:param repeat: Repeat the download specified number of times if not
successful.
:type repeat: int
"""
logging.info(u"Downloading and parsing input files ...")
for job, builds in self._cfg.builds.items():
for build in builds:
result = self._download_and_parse_build(job, build, repeat)
if result[u"last"]:
break
build_nr = result[u"build"][u"build"]
if result[u"data"]:
data = result[u"data"]
build_data = pd.Series({
u"metadata": pd.Series(
list(data[u"metadata"].values()),
index=list(data[u"metadata"].keys())
),
u"suites": pd.Series(
list(data[u"suites"].values()),
index=list(data[u"suites"].keys())
),
u"tests": pd.Series(
list(data[u"tests"].values()),
index=list(data[u"tests"].keys())
)
})
if self._input_data.get(job, None) is None:
self._input_data[job] = pd.Series()
self._input_data[job][str(build_nr)] = build_data
self._cfg.set_input_file_name(
job, build_nr, result[u"build"][u"file-name"])
self._cfg.set_input_state(job, build_nr, result[u"state"])
mem_alloc = \
resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1000
logging.info(f"Memory allocation: {mem_alloc:.0f}MB")
logging.info(u"Done.")
def process_local_file(self, local_file, job=u"local", build_nr=1,
replace=True):
"""Process local XML file given as a command-line parameter.
:param local_file: The file to process.
:param job: Job name.
:param build_nr: Build number.
:param replace: If True, the information about jobs and builds is
replaced by the new one, otherwise the new jobs and builds are
added.
:type local_file: str
:type job: str
:type build_nr: int
:type replace: bool
:raises: PresentationError if an error occurs.
"""
if not isfile(local_file):
raise PresentationError(f"The file {local_file} does not exist.")
try:
build_nr = int(local_file.split(u"/")[-1].split(u".")[0])
except (IndexError, ValueError):
pass
build = {
u"build": build_nr,
u"status": u"failed",
u"file-name": local_file
}
if replace:
self._cfg.builds = dict()
self._cfg.add_build(job, build)
logging.info(f"Processing {job}: {build_nr:2d}: {local_file}")
data = self._parse_tests(job, build)
if data is None:
raise PresentationError(
f"Error occurred while parsing the file {local_file}"
)
build_data = pd.Series({
u"metadata": pd.Series(
list(data[u"metadata"].values()),
index=list(data[u"metadata"].keys())
),
u"suites": pd.Series(
list(data[u"suites"].values()),
index=list(data[u"suites"].keys())
),
u"tests": pd.Series(
list(data[u"tests"].values()),
index=list(data[u"tests"].keys())
)
})
if self._input_data.get(job, None) is None:
self._input_data[job] = pd.Series()
self._input_data[job][str(build_nr)] = build_data
self._cfg.set_input_state(job, build_nr, u"processed")
def process_local_directory(self, local_dir, replace=True):
"""Process local directory with XML file(s). The directory is processed
as a 'job' and the XML files in it as builds.
If the given directory contains only sub-directories, these
        sub-directories are processed as jobs and the corresponding XML files
        as builds of their job.
:param local_dir: Local directory to process.
:param replace: If True, the information about jobs and builds is
replaced by the new one, otherwise the new jobs and builds are
added.
:type local_dir: str
:type replace: bool
"""
if not isdir(local_dir):
raise PresentationError(
f"The directory {local_dir} does not exist."
)
# Check if the given directory includes only files, or only directories
_, dirnames, filenames = next(walk(local_dir))
if filenames and not dirnames:
filenames.sort()
# local_builds:
# key: dir (job) name, value: list of file names (builds)
local_builds = {
local_dir: [join(local_dir, name) for name in filenames]
}
elif dirnames and not filenames:
dirnames.sort()
# local_builds:
# key: dir (job) name, value: list of file names (builds)
local_builds = dict()
for dirname in dirnames:
builds = [
join(local_dir, dirname, name)
for name in listdir(join(local_dir, dirname))
if isfile(join(local_dir, dirname, name))
]
if builds:
local_builds[dirname] = sorted(builds)
elif not filenames and not dirnames:
raise PresentationError(f"The directory {local_dir} is empty.")
else:
raise PresentationError(
f"The directory {local_dir} can include only files or only "
f"directories, not both.\nThe directory {local_dir} includes "
f"file(s):\n{filenames}\nand directories:\n{dirnames}"
)
if replace:
self._cfg.builds = dict()
for job, files in local_builds.items():
for idx, local_file in enumerate(files):
self.process_local_file(local_file, job, idx + 1, replace=False)
@staticmethod
def _end_of_tag(tag_filter, start=0, closer=u"'"):
"""Return the index of character in the string which is the end of tag.
:param tag_filter: The string where the end of tag is being searched.
        :param start: The index where the searching is started.
:param closer: The character which is the tag closer.
:type tag_filter: str
:type start: int
:type closer: str
:returns: The index of the tag closer.
:rtype: int
"""
try:
idx_opener = tag_filter.index(closer, start)
return tag_filter.index(closer, idx_opener + 1)
except ValueError:
return None
@staticmethod
def _condition(tag_filter):
"""Create a conditional statement from the given tag filter.
:param tag_filter: Filter based on tags from the element specification.
:type tag_filter: str
:returns: Conditional statement which can be evaluated.
:rtype: str
"""
index = 0
while True:
index = InputData._end_of_tag(tag_filter, index)
if index is None:
return tag_filter
index += 1
tag_filter = tag_filter[:index] + u" in tags" + tag_filter[index:]
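    # Illustrative example of the rewriting performed above (tag names assumed):
    #   InputData._condition(u"'NDRPDR' and '1T1C'")
    #   -> u"'NDRPDR' in tags and '1T1C' in tags"
    # i.e. a string that can later be evaluated against a test's list of tags.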
def filter_data(self, element, params=None, data=None, data_set=u"tests",
continue_on_error=False):
"""Filter required data from the given jobs and builds.
The output data structure is:
- job 1
- build 1
- test (or suite) 1 ID:
- param 1
- param 2
...
- param n
...
- test (or suite) n ID:
...
...
- build n
...
- job n
:param element: Element which will use the filtered data.
:param params: Parameters which will be included in the output. If None,
all parameters are included.
:param data: If not None, this data is used instead of data specified
in the element.
:param data_set: The set of data to be filtered: tests, suites,
metadata.
        :param continue_on_error: Continue if there is an error while reading
            the data. The item will be empty then.
:type element: pandas.Series
:type params: list
:type data: dict
:type data_set: str
:type continue_on_error: bool
:returns: Filtered data.
        :rtype: pandas.Series
"""
try:
if data_set == "suites":
cond = u"True"
elif element[u"filter"] in (u"all", u"template"):
cond = u"True"
else:
cond = InputData._condition(element[u"filter"])
logging.debug(f" Filter: {cond}")
except KeyError:
logging.error(u" No filter defined.")
return None
if params is None:
params = element.get(u"parameters", None)
if params:
params.append(u"type")
data_to_filter = data if data else element[u"data"]
data = pd.Series()
try:
for job, builds in data_to_filter.items():
                data[job] = pd.Series()
import h5py
import numpy as np
import pandas as pd
import os
from multiprocessing import cpu_count, Pool
from alcokit.util import fs_dict, is_audio_file
from alcokit.hdf.api import Database
from alcokit.score import Score
import logging
logger = logging.getLogger()
logger.setLevel(logging.INFO)
# TODO : add handling of sparse matrices ?
def default_extract_func(abs_path):
from alcokit.fft import FFT
fft = abs(FFT.stft(abs_path))
score = Score.from_recurrence_matrix(fft)
return dict(fft=({}, fft.T), score=({}, score))
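# A custom extract_func only needs to return a mapping of the form
# {feature_name: (attrs_dict, data)}, where data is either a np.ndarray (stored
# as an HDF5 dataset) or a pandas DataFrame (stored through to_hdf), as consumed
# by file_to_db() below. Minimal sketch with purely illustrative names:
#
#   def my_extract_func(abs_path):
#       feat = np.random.randn(128, 64)   # stand-in for real audio features
#       return dict(my_feature=({"hop_length": 512}, feat))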
def sizeof_fmt(num, suffix='b'):
"""
straight from https://stackoverflow.com/questions/1094841/reusable-library-to-get-human-readable-version-of-file-size
"""
for unit in ['', 'k', 'M', 'G', 'T', 'P', 'E', 'Z']:
if abs(num) < 1024.0:
return "%3.1f%s%s" % (num, unit, suffix)
num /= 1024.0
return "%.1f%s%s" % (num, 'Yi', suffix)
def _empty_info(features_names):
tuples = [("directory", ""), ("name", ""),
*[t for feat in features_names for t in [(feat, "dtype"), (feat, "shape"), (feat, "size")]
if feat != "score"]
]
idx = pd.MultiIndex.from_tuples(tuples)
return pd.DataFrame([], columns=idx)
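# For example, _empty_info(["fft", "score"]) returns an empty DataFrame whose
# MultiIndex columns are ("directory", ""), ("name", ""), ("fft", "dtype"),
# ("fft", "shape") and ("fft", "size"); the "score" feature is deliberately
# excluded, presumably because Score objects are written through to_hdf rather
# than as plain HDF5 datasets.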
def split_path(path):
parts = path.split("/")
prefix, file_name = "/".join(parts[:-1]), parts[-1]
return prefix, file_name
def file_to_db(abs_path, extract_func=default_extract_func, mode="w"):
"""
if mode == "r+" this will either:
- raise an Exception if the feature already exists
- concatenate data along the "feature_axis", assuming that each feature correspond to the same file
or file collections.
If you want to concatenate dbs along the "file_axis" consider using `concatenate_dbs(..)`
@param abs_path:
@param extract_func:
@param mode:
@return:
"""
logger.info("making db for %s" % abs_path)
tmp_db = ".".join(abs_path.split(".")[:-1] + ["h5"])
rv = extract_func(abs_path)
info = _empty_info(rv.keys())
info.loc[0, [("directory", ""), ("name", "")]] = split_path(abs_path)
with h5py.File(tmp_db, mode) as f:
for name, (attrs, data) in rv.items():
if issubclass(type(data), np.ndarray):
ds = f.create_dataset(name=name, shape=data.shape, data=data)
ds.attrs.update(attrs)
info.loc[0, name] = ds.dtype, ds.shape, sizeof_fmt(data.nbytes)
elif issubclass(type(data), pd.DataFrame):
pd.DataFrame(data).to_hdf(tmp_db, name, "r+")
f.flush()
if "info" in f.keys():
prior = pd.read_hdf(tmp_db, "info", "r")
            info = pd.concat((prior, info.iloc[:, 2:]), axis=1)
"""Validation methods for contributing to RDT."""
import inspect
import subprocess
import traceback
from pathlib import Path
import coverage
import numpy as np
import pandas as pd
import pytest
from tabulate import tabulate
from rdt.performance import evaluate_transformer_performance
from rdt.performance.datasets import get_dataset_generators_by_type
from rdt.transformers import get_transformer_class, get_transformers_by_type
from tests.code_style import (
get_test_location, validate_test_location, validate_test_names, validate_transformer_addon,
validate_transformer_importable_from_parent_module, validate_transformer_module,
validate_transformer_subclass)
from tests.integration.test_transformers import validate_transformer
from tests.performance import validate_performance
from tests.quality.test_quality import (
TEST_THRESHOLD, get_regression_scores, get_results_table, get_test_cases)
# Mapping of validation method to (check name, check description).
CHECK_DETAILS = {
'_validate_dataset_generators': (
'Dataset Generators',
'At least one Dataset Generator exists for the Transformer sdtype.',
),
'_validate_transformed_data': (
'Output Sdtypes',
'The Transformer can transform data and produce output(s) of the indicated sdtype(s).',
),
'_validate_reverse_transformed_data': (
'Reverse Transform',
(
'The Transformer can reverse transform the data it produces, going back to the '
'original sdtype.'
),
),
'_validate_composition': (
'Composition is Identity',
(
'Transforming data and reversing it recovers the original data, if composition is '
'identity is specified.'
),
),
'_validate_hypertransformer_transformed_data': (
'Hypertransformer can transform',
'The HyperTransformer is able to use the Transformer and produce float values.',
),
'_validate_hypertransformer_reverse_transformed_data': (
'Hypertransformer can reverse transform',
(
'The HyperTransformer is able to reverse the data that it has previously transformed '
'and restore the original sdtype.'
),
),
}
# Allowed paths for file modifications
VALID_PATHS = [
'rdt/transformers/',
'rdt/transformers/addons/',
'tests/unit/transformers/',
'tests/unit/transformers/addons/',
'tests/integration/transformers/',
'tests/datasets/'
]
def validate_transformer_integration(transformer):
"""Validate the integration tests of a transformer.
This function runs the automated integration test functions on the Transformer.
It will print to console a summary of the integration tests, along with which
checks have passed or failed.
Args:
transformer (string or rdt.transformers.BaseTransformer):
The transformer to validate.
Returns:
bool:
Whether or not the transformer passes all integration checks.
"""
if isinstance(transformer, str):
transformer = get_transformer_class(transformer)
print(f'Validating Integration Tests for transformer {transformer.__name__}\n')
steps = []
validation_error = None
error_trace = None
try:
validate_transformer(transformer, steps=steps)
except Exception as error:
error_trace = ''.join(traceback.TracebackException.from_exception(error).format())
for check in CHECK_DETAILS:
if check in error_trace:
validation_error = str(error)
if validation_error is None and error_trace is None:
print('SUCCESS: The integration tests were successful.\n')
elif validation_error:
print('ERROR: One or more integration tests were NOT successful.\n')
elif error_trace:
print('ERROR: Transformer errored out with the following error:\n')
print(error_trace)
result_summaries = []
seen_checks = set()
failed_step = None if validation_error is None else steps[-1]
for step in steps:
check, details = CHECK_DETAILS[step]
if check in seen_checks:
continue
seen_checks.add(check)
if failed_step and step == failed_step:
result_summaries.append([check, 'No', validation_error])
else:
result_summaries.append([check, 'Yes', details])
summary = pd.DataFrame(result_summaries, columns=['Check', 'Correct', 'Details'])
print(tabulate(summary, headers='keys', showindex=False))
return validation_error is None and error_trace is None
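# Illustrative usage (MyTransformer stands for any BaseTransformer subclass;
# the dotted path is an assumed example, not a real module):
#
#   validate_transformer_integration(MyTransformer)
#   validate_transformer_integration('rdt.transformers.my_module.MyTransformer')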
def _validate_third_party_code_style(command, tag, success_message,
error_message, transformer_path):
run_command = command.split(' ')
run_command.append(transformer_path)
output_capture = subprocess.run(run_command, capture_output=True).stdout.decode()
if output_capture:
return {
'Check': tag,
'Correct': 'No',
'Details': error_message,
'output_capture': output_capture,
}
return {
'Check': tag,
'Correct': 'Yes',
'Details': success_message,
}
def _custom_validation(function, tag, success_message, error_message, transformer):
try:
function(transformer)
return {
'Check': tag,
'Correct': 'Yes',
'Details': success_message,
}
except AssertionError as error:
return {
'Check': tag,
'Correct': 'No',
'Details': error_message,
'output_capture': error
}
def _validate_third_party_checks(transformer_path):
results = [
_validate_third_party_code_style(
'flake8',
'flake8',
'Code follows PEP8 standards.',
'Code must follow PEP8 standards.',
transformer_path
),
_validate_third_party_code_style(
'isort -c',
'isort',
'Imports are properly sorted.',
'Imports are not properly sorted.',
transformer_path
),
_validate_third_party_code_style(
'pylint --rcfile=setup.cfg ',
'pylint',
'Code is properly formatted and structured.',
'Code is not properly formatted and structured.',
transformer_path
),
_validate_third_party_code_style(
'pydocstyle',
'pydocstyle',
'The docstrings are properly written.',
'The docstrings are not properly written.',
transformer_path
)
]
return results
def _validate_custom_checks(transformer):
results = [
_custom_validation(
validate_transformer_subclass,
'Transformer is subclass',
'The transformer is subclass of ``BaseTransformer``.',
'The transformer must be a subclass of ``BaseTransformer``.',
transformer
),
_custom_validation(
validate_transformer_module,
'Valid module',
'The transformer is placed inside a valid module.',
'The transformer is not placed inside a valid module.',
transformer
),
_custom_validation(
validate_test_location,
'Valid test module',
'The transformer tests are placed inside the valid module.',
'The transformer tests are not placed inside the valid module.',
transformer
),
_custom_validation(
validate_test_names,
'Valid test function names',
'The transformer tests are named correctly.',
'The transformer tests are not named properly.',
transformer
),
_custom_validation(
validate_transformer_addon,
'Valid transformer addon',
'The addon is configured properly.',
'The addon is not configured properly.',
transformer
),
_custom_validation(
validate_transformer_importable_from_parent_module,
'Importable from module',
'The transformer can be imported from the parent module.',
'The transformer can not be imported from the parent module.',
transformer
)
]
return results
def validate_transformer_code_style(transformer):
"""Validate all third party code style checkers as well as custom code analysis.
This function validates whether or not a ``rdt.transformers.BaseTransformer`` subclass
is following the standard code style checks (``flake8``, ``isort``, ``pylint``, ...) and
additionally custom made code style validations for ``RDT``.
Args:
transformer (string or rdt.transformers.BaseTransformer):
The transformer to validate.
Returns:
bool:
Whether or not the transformer passes all code style checks.
"""
if not inspect.isclass(transformer):
transformer = get_transformer_class(transformer)
transformer_path = inspect.getfile(transformer)
print(f'Validating source file {transformer_path}')
    results = _validate_third_party_checks(transformer_path)
results.extend(_validate_custom_checks(transformer))
errors = [
(result.get('Check'), result.pop('output_capture'))
for result in results
if 'output_capture' in result
]
valid = not bool(errors)
if valid:
print('\nSUCCESS: The code style is correct.\n')
else:
        print('\nERROR: The code style is NOT correct.\n')
table = pd.DataFrame(results)
print(tabulate(table, headers='keys', showindex=False))
for check, error in errors:
print(f"\nThe check '{check}' produced the following error/s:")
print(error)
return not bool(errors)
def validate_transformer_unit_tests(transformer):
"""Validate the unit tests of a transformer.
This function finds the module where the unit tests of the transformer
have been implemented and runs them using ``pytest``, capturing the code
coverage of the tests (how many lines of the source code are executed
during the tests).
Args:
transformer (string or rdt.transformers.BaseTransformer):
The transformer to validate.
Returns:
float:
A ``float`` value representing the test coverage where 1.0 is 100%.
"""
if not inspect.isclass(transformer):
transformer = get_transformer_class(transformer)
source_location = inspect.getfile(transformer)
test_location = get_test_location(transformer)
module_name = getattr(transformer, '__module__', None)
print(f'Validating source file {source_location}\n')
pytest_run = f'-v --disable-warnings --no-header {test_location}'
pytest_run = pytest_run.split(' ')
cov = coverage.Coverage(source=[module_name])
cov.start()
pytest_output = pytest.main(pytest_run)
cov.stop()
if pytest_output is pytest.ExitCode.OK:
print('\nSUCCESS: The unit tests passed.')
else:
print('\nERROR: The unit tests failed.')
score = cov.report(show_missing=True)
rounded_score = round(score / 100, 3)
if rounded_score < 1.0:
print(f'\nERROR: The unit tests only cover {round(score, 3)}% of your code.')
else:
print(f'\nSUCCESS: The unit tests cover {round(score, 3)}% of your code.')
cov.html_report()
print('\nFull coverage report here:\n')
coverage_name = module_name.replace('.', '_')
export_dir = Path('htmlcov') / f'{coverage_name}_py.html'
print(export_dir.absolute().as_uri())
return rounded_score
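# Illustrative usage sketch, not part of the original module. The transformer path is
# hypothetical -- substitute any ``rdt.transformers.BaseTransformer`` subclass.
def _example_unit_test_validation():
    coverage_score = validate_transformer_unit_tests(
        'rdt.transformers.BooleanTransformer'  # hypothetical transformer name
    )
    # validate_pull_request expects full coverage, i.e. a score of 1.0
    return coverage_score == 1.0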
def validate_transformer_quality(transformer):
"""Validate quality tests for a transformer.
This function creates a DataFrame containing the results
from running the quality tests for this transformer against
all the datasets with columns of its input sdtype. It does the
following steps:
1. A DataFrame containing the regression scores obtained from running the
transformers of the input sdtype against the datasets in the test cases is
created. Each row in the DataFrame has the transformer name, dataset name,
column name and score. The scores are computed as follows:
- For every transformer of the sdtype, transform all the
columns of that sdtype.
- For every numerical column in the dataset, the transformed
columns are used as features to train a regression model.
- The score is the coefficient of determination obtained from
that model trying to predict the target column.
2. Once the scores are gathered, a results table is created. Each row has
a transformer name, dataset name, average score for the dataset,
a score comparing the transformer's average score for the dataset to
the average of the average score for the dataset across all transformers of
the same sdtype, and whether or not the score passed the test threshold.
Returns:
DataFrame containing the following columns for each dataset the transformer
is validated against: ``Dataset``, ``Score``, ``Compared To Average``, ``Acceptable``.
"""
if isinstance(transformer, str):
transformer = get_transformer_class(transformer)
print(f'Validating Quality Tests for transformer {transformer.__name__}\n')
input_sdtype = transformer.get_input_sdtype()
test_cases = get_test_cases({input_sdtype})
regression_scores = get_regression_scores(test_cases, get_transformers_by_type())
results = get_results_table(regression_scores)
transformer_results = results[results['transformer_name'] == transformer.__name__]
transformer_results = transformer_results.drop('transformer_name', axis=1)
transformer_results['Acceptable'] = False
passing_relative_scores = transformer_results['score_relative_to_average'] > TEST_THRESHOLD
acceptable_indices = passing_relative_scores | (transformer_results['score'] > TEST_THRESHOLD)
transformer_results.loc[acceptable_indices, 'Acceptable'] = True
new_names = {
'dataset_name': 'Dataset',
'score': 'Score',
'score_relative_to_average': 'Compared To Average'
}
transformer_results = transformer_results.rename(columns=new_names)
if transformer_results['Acceptable'].all():
print('SUCCESS: The quality tests were successful.\n')
else:
        print('ERROR: The quality tests were NOT successful.\n')
return transformer_results.reset_index(drop=True)
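# Illustrative sketch, not part of the original module: how the acceptance rule above
# behaves on a toy results table. The numbers and the ``threshold`` default are made up;
# the real check uses TEST_THRESHOLD defined elsewhere in this module.
def _example_quality_acceptance(threshold=0.2):
    toy = pd.DataFrame({
        'Dataset': ['demo_a', 'demo_b'],
        'Score': [0.35, 0.05],
        'Compared To Average': [0.10, 0.30],
    })
    # a row is acceptable if either its absolute score or its score relative to the
    # average of comparable transformers clears the threshold
    toy['Acceptable'] = (toy['Score'] > threshold) | (toy['Compared To Average'] > threshold)
    return toy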
def validate_transformer_performance(transformer):
"""Validate the performance of a transformer.
Run the specified Transformer on all the Dataset Generators of the indicated sdtype
and produce a report about its performance and how it compares to the other
Transformers of the same sdtype.
Args:
transformer (string or rdt.transformers.BaseTransformer):
The transformer to validate.
Returns:
pandas.DataFrame:
Performance results of the transformer.
"""
if isinstance(transformer, str):
transformer = get_transformer_class(transformer)
print(f'Validating Performance for transformer {transformer.__name__}\n')
sdtype = transformer.get_input_sdtype()
transformers = get_transformers_by_type().get(sdtype, [])
dataset_generators = get_dataset_generators_by_type().get(sdtype, [])
total_results = pd.DataFrame()
for current_transformer in transformers:
for dataset_generator in dataset_generators:
performance = evaluate_transformer_performance(current_transformer, dataset_generator)
valid = validate_performance(performance, dataset_generator)
results = pd.DataFrame({
'Value': performance.to_numpy(),
'Valid': valid,
'transformer': current_transformer.__name__,
'dataset': dataset_generator.__name__,
})
results['Evaluation Metric'] = performance.index
total_results = total_results.append(results)
if total_results['Valid'].all():
print('SUCCESS: The Performance Tests were successful.')
else:
print('ERROR: One or more Performance Tests were NOT successful.')
other_results = total_results[total_results.transformer != transformer.__name__]
average = other_results.groupby('Evaluation Metric')['Value'].mean()
total_results = total_results[total_results.transformer == transformer.__name__]
final_results = total_results.groupby('Evaluation Metric').agg({
'Value': 'mean',
'Valid': 'any'
})
final_results = final_results.rename(columns={'Valid': 'Acceptable'})
final_results['Units'] = np.where(
final_results.index.str.contains('Time'),
's / row',
'B / row',
)
final_results['Acceptable'] = np.where(final_results['Acceptable'], 'Yes', 'No')
final_results['Compared to Average'] = final_results['Value'].div(average).replace(
np.inf, np.nan)
return final_results.reset_index()
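# Illustrative sketch, not part of the original module: how 'Compared to Average' is
# derived above -- the transformer's mean value per metric divided by the mean of the
# other transformers for the same metric. All numbers are made up.
def _example_compared_to_average():
    own_mean = pd.Series({'Transform Time': 2.0e-05, 'Transform Memory': 350.0})
    others_mean = pd.Series({'Transform Time': 4.0e-05, 'Transform Memory': 700.0})
    return own_mean.div(others_mean).replace(np.inf, np.nan)  # 0.5 for both metrics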
def check_clean_repository():
"""Check whether or not the repository has only expected changes.
This function checks if there are unexpected changes on the current branch
against the master branch. If there are modifications outside ``rdt/transformers``,
``tests/unit``, ``tests/integration`` or ``tests/datasets`` this will return a ``False``.
Returns:
bool:
``True`` if the changes are applied only to the expected subfolders, ``False``
if any other file has been modified outside of that range.
"""
run_command = 'git diff --name-only master'.split(' ')
output_capture = subprocess.run(run_command, capture_output=True).stdout.decode()
output_capture = output_capture.splitlines()
    validated_paths = []
    for capture in output_capture:
        file_path = Path(capture)
        # remember how many entries we had before checking this file
        count = len(validated_paths)
        for valid_path in VALID_PATHS:
            if any([
                file_path.match(valid_path),
                file_path.parent.match(valid_path),
                file_path.parent.parent.match(valid_path)
            ]):
                validated_paths.append(True)
                break
        if len(validated_paths) == count:
            print(f'\nUnexpected changes to: {file_path}')
            validated_paths.append(False)
return all(validated_paths)
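# Illustrative sketch, not part of the original module: how a changed file is matched
# against VALID_PATHS in check_clean_repository. The file names are hypothetical.
def _example_path_check():
    def _is_valid(file_path):
        return any(
            file_path.match(valid_path)
            or file_path.parent.match(valid_path)
            or file_path.parent.parent.match(valid_path)
            for valid_path in VALID_PATHS
        )
    allowed = _is_valid(Path('rdt/transformers/categorical.py'))  # True
    rejected = _is_valid(Path('setup.py'))  # False
    return allowed, rejected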
def _build_validation_dict(tag, result, success_details, error_details):
return {
'Check': tag,
'Correct': 'Yes' if result else 'No',
'Details': success_details if result else error_details,
}
def validate_pull_request(transformer):
"""Validate whether a pull request can be made for a ``Transformer``.
Runs all the validations for a ``Transformer`` and also checks if there
are unexpected modifications to the repository other than the ``transformers``,
``tests`` and ``tests/datasets``.
Args:
transformer (string or rdt.transformers.BaseTransformer):
The transformer to validate.
Returns:
bool:
Boolean indicating whether or not a pull request can be made.
"""
if not inspect.isclass(transformer):
transformer = get_transformer_class(transformer)
code_style = validate_transformer_code_style(transformer)
unit_tests = validate_transformer_unit_tests(transformer)
integration_tests = validate_transformer_integration(transformer)
performance_tests = validate_transformer_performance(transformer)
quality_tests = validate_transformer_quality(transformer)
clean_repository = check_clean_repository()
unit_bool = unit_tests == 1.0
performance_bool = 'No' not in performance_tests['Acceptable'].unique()
quality_bool = quality_tests['Acceptable'].all()
results = [
_build_validation_dict(
'Code Style',
code_style,
'Code Style is acceptable.',
'Code Style is unacceptable!'
),
_build_validation_dict(
'Unit Tests',
unit_bool,
'The unit tests are correct and run successfully.',
'The unit tests did not run successfully or the coverage is not a 100%.'
),
_build_validation_dict(
'Integration tests',
integration_tests,
'The integration tests run successfully.',
'The integration tests did not run successfully!',
),
_build_validation_dict(
'Performance Tests',
performance_bool,
'The performance of the transformer is acceptable.',
'The performance of the transformer is unacceptable!'
),
_build_validation_dict(
'Quality tests',
quality_bool,
'The output data quality is acceptable.',
'The output data quality is unacceptable.',
),
_build_validation_dict(
'Clean Repository',
clean_repository,
'There are no unexpected changes in the repository.',
'There are unexpected changes in the repository!'
),
]
    results = pd.DataFrame(results)
# coding: utf-8
# Author: <NAME>
import os
import sys
import traceback
from datetime import datetime
import pandas as pd
import numpy as np
import woe_tools as woe
usage = '''
################################### Summarize #######################################
This toolkit is used for data preprocessing and provides the following components:
1.Cap
2.Floor
3.MissingImpute
4.Woe
5.Normalize
6.Scale
7.Tactic
-------------------------------------------------------------------------------------
Usage:
import pandas as pd
import numpy as np
import preprocess as pp
df_train = pd.read_csv('train_data.csv')
df_test = pd.read_csv('test_data.csv')
df_config = pd.read_csv('edd_config.csv')
# Call a single component:
operation = pp.MissingImpute(df_config)
df_reference = operation.fit(df_train)
df_train = operation.apply(df_train)
df_test = operation.apply(df_test)
# Design the whole preprocessing pipeline:
process = pp.Tactic(df_config, process_list=[pp.Cap, pp.Floor, pp.MissingImpute, pp.Woe], target='target')
process.summary()
df_reference = process.fit(df_train)
df_train = process.apply(df_train)
df_test = process.apply(df_test)
process.save_reference('./edd_reference.csv')
# Alternatively, read in a previously generated reference table and apply it to the data directly
df_reference = pd.read_csv('edd_reference.csv')
process = pp.Tactic(df_reference, process_list=[pp.Cap, pp.Floor, pp.MissingImpute, pp.Woe], target='target')
df_train = process.apply(df_train)
df_test = process.apply(df_test)
---------------------------------------------------------------------------------------
Notes:
1. Do not apply the WOE step while the data still contains missing values;
2. When the pipeline includes Woe, a target must be specified, otherwise an error is raised;
3. For a new dataset, run the preprocessing steps one at a time on the first pass so that the output of each step can be checked.
#######################################################################################
'''
def __map_feature_type(t, time_as_num=False):
"""
convert the dataFrame type to feature type (Numerical or Categorical)
"""
if t in (int, np.int64, np.int32, np.int16, bool, float, np.float32, np.float64, np.float128):
return 'numerical'
elif t in (str,):
return 'categorical'
elif t in (pd.tslib.Timestamp, ):
return 'numerical' if time_as_num else 'timestamp'
def __extract_feature_type(df, known_columns={}):
"""
extract columns type of a dataframe and map it
"""
col_list = []
for var in df.columns:
if var in known_columns:
col_list.append((var, known_columns[var]))
continue
var_type = __map_feature_type(df[var].dtype.type)
if var_type is not None:
col_list.append((var, var_type))
continue
type_set = set(df[var][~df[var].isnull()].apply(lambda x: type(x)))
if len(type_set) == 1:
var_type = __map_feature_type(type_set.pop())
if var_type is not None:
col_list.append((var, var_type))
continue
raise ValueError('Unknown type of column "{0}" as {1}'.format(var, type_set))
return col_list
def create_edd_config(df_master, known_columns={}, save_path=None):
"""
    Generate the config file for data preprocessing.
Parameters
----------
df_master:
DataFrame
known_columns: dict, default {}
        Known column types, e.g. {'age': 'numerical', 'sex': 'categorical'}
save_path: str, default None
Returns
-------
df_config: DataFrame
        The preprocessing configuration table.
"""
column_type = __extract_feature_type(df_master, known_columns=known_columns)
df_config = pd.DataFrame(column_type, columns=['Var_Name', 'Var_Type'])
    df_config['Ind_Model'] = 1  # whether the variable enters the model
    df_config['Ind_Cap'] = 0  # whether to apply capping
    df_config['Cap_Value'] = None
    df_config['Ind_Floor'] = 0  # whether to apply flooring
    df_config['Floor_Value'] = None
    df_config['Missing_Impute'] = -1  # missing-value fill: numerical defaults to -1, categorical to 'missing'
    df_config.loc[df_config['Var_Type'] == 'categorical', 'Missing_Impute'] = 'missing'
    df_config['Ind_WOE'] = 0  # whether to apply the WOE transform: off for numerical, on for categorical by default
    df_config.loc[df_config['Var_Type'] == 'categorical', 'Ind_WOE'] = 1
    df_config['WOE_Bin'] = None
    df_config['Ind_Norm'] = 0  # whether to normalize
    df_config['Ind_Scale'] = 0  # whether to apply min-max scaling
for var in df_config['Var_Name'][df_config['Var_Type'] == 'numerical'].tolist():
if df_master[var].max() > (5 * df_master[var].quantile(0.99)):
df_config.loc[df_config['Var_Name'] == var, 'Ind_Cap'] = 1
df_config.to_csv(save_path, index=False, encoding='utf-8')
return df_config
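# Illustrative usage sketch (assumes a small toy frame; the column names and the output
# file name are hypothetical). Note that create_edd_config also writes the config to disk.
def _example_create_edd_config():
    df_toy = pd.DataFrame({
        'age': [21, 35, 42, 58],
        'sex': ['F', 'M', 'F', 'M'],
    })
    # column types can be forced through known_columns instead of being inferred
    return create_edd_config(df_toy, known_columns={'sex': 'categorical'}, save_path='edd_config_toy.csv')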
class Cap(object):
"""
    Descriptions
    ------------
    Apply capping to variables. Main points:
    1. Only numerical variables are processed.
    2. Capping defaults to p99 (an explicitly specified Cap_Value takes priority).
    3. Missing values are left untouched.
    Attributes
    ----------
    config: DataFrame
        config table
    reference: DataFrame
        reference table
    apply_list: list
        List of variables to process.
    Method
    ------
    fit: Compute the cap value of each variable.
    apply: Cap the variables according to the reference table.
"""
def __init__(self, df_config, apply_list=None):
"""
Parameters
----------
        df_config: DataFrame
            The preprocessing config table (required).
        apply_list: list, default None
            Variables to process; defaults to the variables with Ind_Cap=1 in the config.
"""
self.config = df_config
self.reference = df_config.copy()
if apply_list is None:
self.apply_list = list(df_config['Var_Name'][(df_config['Ind_Model'] == 1) & (df_config['Ind_Cap'] == 1)])
else:
self.apply_list = apply_list
def fit(self, df_master):
"""
        Compute the cap value of each variable.
Parameters
----------
df_master: DataFrame
Returns
-------
reference: DataFrame
reference table
"""
for var in self.apply_list:
df_config_var = self.config[self.config['Var_Name'] == var]
if df_config_var['Cap_Value'].isnull().iloc[0] == True:
                cap_value = df_master[var][~df_master[var].isnull()].quantile(0.99)  # ignore missing values
else:
cap_value = float(df_config_var['Cap_Value'].iloc[0])
self.reference.loc[self.reference['Var_Name'] == var, 'Cap_Value'] = cap_value
return self.reference
def apply(self, df_master):
"""
        Cap the variables according to the reference table.
Parameters
----------
df_master: DataFrame
"""
for var in self.apply_list:
cap_value = float(self.reference['Cap_Value'][self.reference['Var_Name'] == var].iloc[0])
if pd.isnull(cap_value):
raise ValueError('Not found cap value of "{0}"'.format(var))
df_master[var] = np.where(df_master[var] > cap_value, cap_value, df_master[var])
return df_master
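# Illustrative usage sketch, not part of the original module: capping a single toy
# column. The config row mirrors what create_edd_config would emit.
def _example_cap_usage():
    df_train = pd.DataFrame({'income': [1.0, 2.0, 3.0, 1000.0]})
    df_config = pd.DataFrame({
        'Var_Name': ['income'],
        'Var_Type': ['numerical'],
        'Ind_Model': [1],
        'Ind_Cap': [1],
        'Cap_Value': [None],
    })
    capper = Cap(df_config)
    df_reference = capper.fit(df_train)  # learns p99 of 'income'
    return capper.apply(df_train.copy()), df_reference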
class Floor(object):
"""
    Descriptions
    ------------
    Apply flooring to variables. Main points:
    1. Only numerical variables are processed.
    2. Only values below 0 are floored; the default floor is min(5 * p1, 0)
       (an explicitly specified Floor_Value takes priority).
    3. Missing values are left untouched.
    Attributes
    ----------
    config: DataFrame
        config table
    reference: DataFrame
        reference table
    apply_list: list
        List of variables to process.
    Method
    ------
    fit: Compute the floor value of each variable.
    apply: Floor the variables according to the reference table.
"""
def __init__(self, df_config, apply_list=None):
"""
Parameters
----------
        df_config: DataFrame
            The preprocessing config table.
        apply_list: list, default None
            Variables to process; defaults to the variables with Ind_Floor=1 in the config.
"""
self.config = df_config
self.reference = df_config.copy()
if apply_list is None:
self.apply_list = list(df_config['Var_Name'][(df_config['Ind_Model'] == 1) & (df_config['Ind_Floor'] == 1)])
else:
self.apply_list = apply_list
def fit(self, df_master):
"""
        Compute the floor value of each variable.
Parameters
----------
df_master: DataFrame
Returns
-------
reference: DataFrame
reference table
"""
for var in self.apply_list:
df_config_var = self.config[self.config['Var_Name'] == var]
if df_config_var['Floor_Value'].isnull().iloc[0] == True:
floor_value = min(5 * df_master[var][~df_master[var].isnull()].quantile(0.01), 0)
else:
floor_value = float(df_config_var['Floor_Value'].iloc[0])
self.reference.loc[self.reference['Var_Name'] == var, 'Floor_Value'] = floor_value
return self.reference
def apply(self, df_master):
"""
        Floor the variables according to the reference table.
Parameters
----------
df_master: DataFrame
"""
for var in self.apply_list:
floor_value = float(self.reference['Floor_Value'][self.reference['Var_Name'] == var].iloc[0])
if pd.isnull(floor_value):
raise ValueError('Not found floor value of "{0}"'.format(var))
df_master[var] = np.where(df_master[var] < floor_value, floor_value, df_master[var])
return df_master
class MissingImpute(object):
"""
    Descriptions
    ------------
    Impute missing values. Main points:
    1. Numerical variables support mean / median / a specified fill value.
    2. Categorical variables support mode / a specified fill value.
    3. A warning is printed when a variable has missing values but no fill value is specified.
    Attributes
    ----------
    config: DataFrame
        config table
    reference: DataFrame
        reference table
    apply_list: list
        List of variables to process.
    Method
    ------
    fit: Compute the fill value of each variable.
    apply: Fill missing values according to the reference table.
"""
def __init__(self, df_config, apply_list=None):
"""
Parameters
----------
        df_config: DataFrame
            The preprocessing config table.
        apply_list: list, default None
            Variables to process; defaults to the variables whose Missing_Impute is not null in the config.
"""
self.config = df_config
self.reference = df_config.copy()
if apply_list is None:
self.apply_list = list(df_config['Var_Name'][(df_config['Ind_Model'] == 1) & (df_config['Missing_Impute'].isnull() == False)])
else:
self.apply_list = apply_list
def fit(self, df_master):
"""
        Compute the fill value of each variable.
Parameters
----------
df_master: DataFrame
Returns
-------
reference: DataFrame
reference table
"""
        missing_cnt = df_master.isnull().sum()  # count missing values per variable
        missing_vars = list(missing_cnt[missing_cnt > 0].index)  # variables that contain missing values
for var in list(self.config['Var_Name'][self.config['Ind_Model'] == 1]):
df_config_var = self.config[self.config['Var_Name'] == var]
            # determine the fill value for numerical variables
if df_config_var['Var_Type'].iloc[0] == 'numerical':
if df_config_var['Missing_Impute'].iloc[0] == 'mean':
impute_value = df_master[var].mean()
elif df_config_var['Missing_Impute'].iloc[0] == 'median':
impute_value = df_master[var].median()
elif df_config_var['Missing_Impute'].isnull().iloc[0] == False:
impute_value = float(df_config_var['Missing_Impute'].iloc[0])
else:
impute_value = None
            # determine the fill value for categorical variables
elif df_config_var['Var_Type'].iloc[0] == 'categorical':
if df_config_var['Missing_Impute'].iloc[0] == 'mode':
impute_value = df_master[var].mode().iloc[0]
elif df_config_var['Missing_Impute'].isnull().iloc[0] == False:
impute_value = df_config_var['Missing_Impute'].iloc[0]
else:
impute_value = None
            # raise an error for unknown variable types
else:
raise TypeError('Wrong type for:{0}'.format(var))
            # update the reference table
self.reference.loc[self.reference['Var_Name'] == var, 'Missing_Impute'] = impute_value
            # check for variables that have missing values but no fill value
if var in list(self.config['Var_Name'][self.config['Ind_Model'] == 1]) and var in missing_vars:
if impute_value is None:
print('"{0}" exist missing value but no impute!'.format(var))
return self.reference
def apply(self, df_master):
"""
        Fill missing values according to the reference table.
Parameters
----------
df_master: DataFrame
"""
missing_cnt = df_master.isnull().sum()
missing_vars = list(missing_cnt[missing_cnt > 0].index)
for var in self.apply_list:
if var not in missing_vars:
continue
if self.reference['Var_Type'][self.reference['Var_Name'] == var].iloc[0] == 'numerical':
impute_value = float(self.reference['Missing_Impute'][self.reference['Var_Name'] == var].iloc[0])
else:
impute_value = self.reference['Missing_Impute'][self.reference['Var_Name'] == var].iloc[0]
if pd.isnull(impute_value):
raise ValueError('Not found impute value of "{0}"'.format(var))
df_master[var] = df_master[var].fillna(impute_value)
return df_master
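# Illustrative usage sketch, not part of the original module: mean-imputing a numerical
# column and filling a categorical one with a literal value.
def _example_missing_impute_usage():
    df_train = pd.DataFrame({
        'age': [20.0, np.nan, 40.0],
        'city': ['bj', None, 'sh'],
    })
    df_config = pd.DataFrame({
        'Var_Name': ['age', 'city'],
        'Var_Type': ['numerical', 'categorical'],
        'Ind_Model': [1, 1],
        'Missing_Impute': ['mean', 'missing'],
    })
    imputer = MissingImpute(df_config)
    df_reference = imputer.fit(df_train)  # resolves 'mean' -> 30.0 for 'age'
    return imputer.apply(df_train.copy()), df_reference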
class Woe(object):
"""
    Descriptions
    ------------
    Apply the WOE transform. Main points:
    1. Numerical and categorical variables are handled separately.
    2. Automatic binning is used by default; user-specified bins take priority.
    3. save_path defaults to './woe_result' and can also be overridden when calling fit.
    Attributes
    ----------
    config: DataFrame
        config table
    target: str
        Name of the target variable.
    reference: DataFrame
        reference table
    apply_list: list
        List of variables to process.
    Method
    ------
    fit: Compute the WOE and IV values of each variable.
    apply: Replace variables with their WOE values according to the reference table
        and drop the original (pre-WOE) columns.
"""
def __init__(self, df_config, target, woe_ref=None, apply_list=None):
"""
Parameters
----------
        df_config: DataFrame
            The preprocessing config table.
        target: str
            Name of the target variable.
        woe_ref: DataFrame, default None
            woe reference table
        apply_list: list, default None
            Variables to process; defaults to the variables with Ind_WOE=1 in the config.
"""
self.config = df_config
self.reference = woe_ref
self.target = target
if apply_list is None:
self.apply_list = list(df_config['Var_Name'][(df_config['Ind_Model'] == 1) & (df_config['Ind_WOE'] == 1)])
else:
self.apply_list = apply_list
def fit(self, df_master, batch_save=0, to_plot=True, save_path=os.getcwd()+'/woe_result'):
"""
        Compute the WOE and IV values of each variable.
Parameters
----------
df_master: DataFrame
        batch_save: int
            Save the reference table every ``batch_save`` variables; 0 (the default)
            disables intermediate saves.
        to_plot: bool, default True
            Whether to draw the WOE plots.
        save_path: str
            Output path for the WOE plots and the reference table, defaults to './woe_result'.
Returns
-------
reference: DataFrame
woe reference table
"""
df_woe_config = self.config[[var in self.apply_list for var in self.config['Var_Name']]]
df_ref_num = pd.DataFrame()
        df_ref_cag = pd.DataFrame()
"""
Provide a generic structure to support window functions,
similar to how we have a Groupby object.
"""
from collections import defaultdict
from datetime import timedelta
from textwrap import dedent
from typing import List, Optional, Set
import warnings
import numpy as np
import pandas._libs.window as libwindow
from pandas.compat._optional import import_optional_dependency
from pandas.compat.numpy import function as nv
from pandas.util._decorators import Appender, Substitution, cache_readonly
from pandas.core.dtypes.common import (
ensure_float64,
is_bool,
is_float_dtype,
is_integer,
is_integer_dtype,
is_list_like,
is_scalar,
is_timedelta64_dtype,
needs_i8_conversion,
)
from pandas.core.dtypes.generic import (
ABCDataFrame,
ABCDateOffset,
ABCDatetimeIndex,
ABCPeriodIndex,
ABCSeries,
ABCTimedeltaIndex,
)
from pandas._typing import Axis, FrameOrSeries
from pandas.core.base import DataError, PandasObject, SelectionMixin
import pandas.core.common as com
from pandas.core.generic import _shared_docs
from pandas.core.groupby.base import GroupByMixin
_shared_docs = dict(**_shared_docs)
_doc_template = """
Returns
-------
Series or DataFrame
Return type is determined by the caller.
See Also
--------
Series.%(name)s : Series %(name)s.
DataFrame.%(name)s : DataFrame %(name)s.
"""
class _Window(PandasObject, SelectionMixin):
_attributes = [
"window",
"min_periods",
"center",
"win_type",
"axis",
"on",
"closed",
] # type: List[str]
exclusions = set() # type: Set[str]
def __init__(
self,
obj,
window=None,
min_periods: Optional[int] = None,
center: Optional[bool] = False,
win_type: Optional[str] = None,
axis: Axis = 0,
on: Optional[str] = None,
closed: Optional[str] = None,
**kwargs
):
self.__dict__.update(kwargs)
self.obj = obj
self.on = on
self.closed = closed
self.window = window
self.min_periods = min_periods
self.center = center
self.win_type = win_type
self.win_freq = None
self.axis = obj._get_axis_number(axis) if axis is not None else None
self.validate()
@property
def _constructor(self):
return Window
@property
def is_datetimelike(self) -> Optional[bool]:
return None
@property
def _on(self):
return None
@property
def is_freq_type(self) -> bool:
return self.win_type == "freq"
def validate(self):
if self.center is not None and not is_bool(self.center):
raise ValueError("center must be a boolean")
if self.min_periods is not None and not is_integer(self.min_periods):
raise ValueError("min_periods must be an integer")
if self.closed is not None and self.closed not in [
"right",
"both",
"left",
"neither",
]:
raise ValueError("closed must be 'right', 'left', 'both' or " "'neither'")
def _create_blocks(self):
"""
Split data into blocks & return conformed data.
"""
obj = self._selected_obj
# filter out the on from the object
if self.on is not None:
if obj.ndim == 2:
obj = obj.reindex(columns=obj.columns.difference([self.on]), copy=False)
blocks = obj._to_dict_of_blocks(copy=False).values()
return blocks, obj
def _gotitem(self, key, ndim, subset=None):
"""
Sub-classes to define. Return a sliced object.
Parameters
----------
key : str / list of selections
ndim : 1,2
requested ndim of result
subset : object, default None
subset to act on
"""
# create a new object to prevent aliasing
if subset is None:
subset = self.obj
self = self._shallow_copy(subset)
self._reset_cache()
if subset.ndim == 2:
if is_scalar(key) and key in subset or is_list_like(key):
self._selection = key
return self
def __getattr__(self, attr):
if attr in self._internal_names_set:
return object.__getattribute__(self, attr)
if attr in self.obj:
return self[attr]
raise AttributeError(
"%r object has no attribute %r" % (type(self).__name__, attr)
)
def _dir_additions(self):
return self.obj._dir_additions()
def _get_window(self, other=None):
return self.window
@property
def _window_type(self) -> str:
return self.__class__.__name__
def __repr__(self) -> str:
"""
Provide a nice str repr of our rolling object.
"""
attrs = (
"{k}={v}".format(k=k, v=getattr(self, k))
for k in self._attributes
if getattr(self, k, None) is not None
)
return "{klass} [{attrs}]".format(
klass=self._window_type, attrs=",".join(attrs)
)
def __iter__(self):
url = "https://github.com/pandas-dev/pandas/issues/11704"
raise NotImplementedError("See issue #11704 {url}".format(url=url))
def _get_index(self) -> Optional[np.ndarray]:
"""
Return index as an ndarray.
Returns
-------
None or ndarray
"""
if self.is_freq_type:
return self._on.asi8
return None
def _prep_values(self, values: Optional[np.ndarray] = None) -> np.ndarray:
"""Convert input to numpy arrays for Cython routines"""
if values is None:
values = getattr(self._selected_obj, "values", self._selected_obj)
# GH #12373 : rolling functions error on float32 data
# make sure the data is coerced to float64
if is_float_dtype(values.dtype):
values = ensure_float64(values)
elif is_integer_dtype(values.dtype):
values = ensure_float64(values)
elif needs_i8_conversion(values.dtype):
raise NotImplementedError(
"ops for {action} for this "
"dtype {dtype} are not "
"implemented".format(action=self._window_type, dtype=values.dtype)
)
else:
try:
values = ensure_float64(values)
except (ValueError, TypeError):
raise TypeError(
"cannot handle this type -> {0}" "".format(values.dtype)
)
# Always convert inf to nan
values[np.isinf(values)] = np.NaN
return values
def _wrap_result(self, result, block=None, obj=None) -> FrameOrSeries:
"""
Wrap a single result.
"""
if obj is None:
obj = self._selected_obj
index = obj.index
if isinstance(result, np.ndarray):
# coerce if necessary
if block is not None:
if is_timedelta64_dtype(block.values.dtype):
from pandas import to_timedelta
result = to_timedelta(result.ravel(), unit="ns").values.reshape(
result.shape
)
if result.ndim == 1:
from pandas import Series
return Series(result, index, name=obj.name)
return type(obj)(result, index=index, columns=block.columns)
return result
def _wrap_results(self, results, blocks, obj, exclude=None) -> FrameOrSeries:
"""
Wrap the results.
Parameters
----------
results : list of ndarrays
blocks : list of blocks
obj : conformed data (may be resampled)
exclude: list of columns to exclude, default to None
"""
from pandas import Series, concat
from pandas.core.index import ensure_index
final = []
for result, block in zip(results, blocks):
result = self._wrap_result(result, block=block, obj=obj)
if result.ndim == 1:
return result
final.append(result)
# if we have an 'on' column
# we want to put it back into the results
# in the same location
columns = self._selected_obj.columns
if self.on is not None and not self._on.equals(obj.index):
name = self._on.name
final.append(Series(self._on, index=obj.index, name=name))
if self._selection is not None:
selection = ensure_index(self._selection)
# need to reorder to include original location of
# the on column (if its not already there)
if name not in selection:
columns = self.obj.columns
indexer = columns.get_indexer(selection.tolist() + [name])
columns = columns.take(sorted(indexer))
# exclude nuisance columns so that they are not reindexed
if exclude is not None and exclude:
columns = [c for c in columns if c not in exclude]
if not columns:
raise DataError("No numeric types to aggregate")
if not len(final):
return obj.astype("float64")
return concat(final, axis=1).reindex(columns=columns, copy=False)
def _center_window(self, result, window) -> np.ndarray:
"""
Center the result in the window.
"""
if self.axis > result.ndim - 1:
raise ValueError(
"Requested axis is larger then no. of argument " "dimensions"
)
offset = _offset(window, True)
if offset > 0:
if isinstance(result, (ABCSeries, ABCDataFrame)):
result = result.slice_shift(-offset, axis=self.axis)
else:
lead_indexer = [slice(None)] * result.ndim
lead_indexer[self.axis] = slice(offset, None)
result = np.copy(result[tuple(lead_indexer)])
return result
def aggregate(self, func, *args, **kwargs):
result, how = self._aggregate(func, *args, **kwargs)
if result is None:
return self.apply(func, raw=False, args=args, kwargs=kwargs)
return result
agg = aggregate
_shared_docs["sum"] = dedent(
"""
Calculate %(name)s sum of given DataFrame or Series.
Parameters
----------
*args, **kwargs
For compatibility with other %(name)s methods. Has no effect
on the computed value.
Returns
-------
Series or DataFrame
Same type as the input, with the same index, containing the
%(name)s sum.
See Also
--------
Series.sum : Reducing sum for Series.
DataFrame.sum : Reducing sum for DataFrame.
Examples
--------
>>> s = pd.Series([1, 2, 3, 4, 5])
>>> s
0 1
1 2
2 3
3 4
4 5
dtype: int64
>>> s.rolling(3).sum()
0 NaN
1 NaN
2 6.0
3 9.0
4 12.0
dtype: float64
>>> s.expanding(3).sum()
0 NaN
1 NaN
2 6.0
3 10.0
4 15.0
dtype: float64
>>> s.rolling(3, center=True).sum()
0 NaN
1 6.0
2 9.0
3 12.0
4 NaN
dtype: float64
For DataFrame, each %(name)s sum is computed column-wise.
>>> df = pd.DataFrame({"A": s, "B": s ** 2})
>>> df
A B
0 1 1
1 2 4
2 3 9
3 4 16
4 5 25
>>> df.rolling(3).sum()
A B
0 NaN NaN
1 NaN NaN
2 6.0 14.0
3 9.0 29.0
4 12.0 50.0
"""
)
_shared_docs["mean"] = dedent(
"""
Calculate the %(name)s mean of the values.
Parameters
----------
*args
Under Review.
**kwargs
Under Review.
Returns
-------
Series or DataFrame
Returned object type is determined by the caller of the %(name)s
calculation.
See Also
--------
Series.%(name)s : Calling object with Series data.
DataFrame.%(name)s : Calling object with DataFrames.
Series.mean : Equivalent method for Series.
DataFrame.mean : Equivalent method for DataFrame.
Examples
--------
The below examples will show rolling mean calculations with window sizes of
two and three, respectively.
>>> s = pd.Series([1, 2, 3, 4])
>>> s.rolling(2).mean()
0 NaN
1 1.5
2 2.5
3 3.5
dtype: float64
>>> s.rolling(3).mean()
0 NaN
1 NaN
2 2.0
3 3.0
dtype: float64
"""
)
class Window(_Window):
"""
Provide rolling window calculations.
.. versionadded:: 0.18.0
Parameters
----------
window : int, or offset
Size of the moving window. This is the number of observations used for
calculating the statistic. Each window will be a fixed size.
If its an offset then this will be the time period of each window. Each
window will be a variable sized based on the observations included in
the time-period. This is only valid for datetimelike indexes. This is
new in 0.19.0
min_periods : int, default None
Minimum number of observations in window required to have a value
(otherwise result is NA). For a window that is specified by an offset,
`min_periods` will default to 1. Otherwise, `min_periods` will default
to the size of the window.
center : bool, default False
Set the labels at the center of the window.
win_type : str, default None
Provide a window type. If ``None``, all points are evenly weighted.
See the notes below for further information.
on : str, optional
For a DataFrame, a datetime-like column on which to calculate the rolling
window, rather than the DataFrame's index. Provided integer column is
ignored and excluded from result since an integer index is not used to
calculate the rolling window.
axis : int or str, default 0
closed : str, default None
Make the interval closed on the 'right', 'left', 'both' or
'neither' endpoints.
For offset-based windows, it defaults to 'right'.
For fixed windows, defaults to 'both'. Remaining cases not implemented
for fixed windows.
.. versionadded:: 0.20.0
Returns
-------
a Window or Rolling sub-classed for the particular operation
See Also
--------
expanding : Provides expanding transformations.
ewm : Provides exponential weighted functions.
Notes
-----
By default, the result is set to the right edge of the window. This can be
changed to the center of the window by setting ``center=True``.
To learn more about the offsets & frequency strings, please see `this link
<http://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.
The recognized win_types are:
* ``boxcar``
* ``triang``
* ``blackman``
* ``hamming``
* ``bartlett``
* ``parzen``
* ``bohman``
* ``blackmanharris``
* ``nuttall``
* ``barthann``
* ``kaiser`` (needs beta)
* ``gaussian`` (needs std)
* ``general_gaussian`` (needs power, width)
* ``slepian`` (needs width)
* ``exponential`` (needs tau), center is set to None.
If ``win_type=None`` all points are evenly weighted. To learn more about
different window types see `scipy.signal window functions
<https://docs.scipy.org/doc/scipy/reference/signal.html#window-functions>`__.
Examples
--------
>>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]})
>>> df
B
0 0.0
1 1.0
2 2.0
3 NaN
4 4.0
Rolling sum with a window length of 2, using the 'triang'
window type.
>>> df.rolling(2, win_type='triang').sum()
B
0 NaN
1 0.5
2 1.5
3 NaN
4 NaN
Rolling sum with a window length of 2, min_periods defaults
to the window length.
>>> df.rolling(2).sum()
B
0 NaN
1 1.0
2 3.0
3 NaN
4 NaN
Same as above, but explicitly set the min_periods
>>> df.rolling(2, min_periods=1).sum()
B
0 0.0
1 1.0
2 3.0
3 2.0
4 4.0
A ragged (meaning not-a-regular frequency), time-indexed DataFrame
>>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]},
... index = [pd.Timestamp('20130101 09:00:00'),
... pd.Timestamp('20130101 09:00:02'),
... pd.Timestamp('20130101 09:00:03'),
... pd.Timestamp('20130101 09:00:05'),
... pd.Timestamp('20130101 09:00:06')])
>>> df
B
2013-01-01 09:00:00 0.0
2013-01-01 09:00:02 1.0
2013-01-01 09:00:03 2.0
2013-01-01 09:00:05 NaN
2013-01-01 09:00:06 4.0
Contrasting to an integer rolling window, this will roll a variable
length window corresponding to the time period.
The default for min_periods is 1.
>>> df.rolling('2s').sum()
B
2013-01-01 09:00:00 0.0
2013-01-01 09:00:02 1.0
2013-01-01 09:00:03 3.0
2013-01-01 09:00:05 NaN
2013-01-01 09:00:06 4.0
"""
def validate(self):
super().validate()
window = self.window
if isinstance(window, (list, tuple, np.ndarray)):
pass
elif is_integer(window):
if window <= 0:
raise ValueError("window must be > 0 ")
import_optional_dependency(
"scipy", extra="Scipy is required to generate window weight."
)
import scipy.signal as sig
if not isinstance(self.win_type, str):
raise ValueError("Invalid win_type {0}".format(self.win_type))
if getattr(sig, self.win_type, None) is None:
raise ValueError("Invalid win_type {0}".format(self.win_type))
else:
raise ValueError("Invalid window {0}".format(window))
def _prep_window(self, **kwargs):
"""
Provide validation for our window type, return the window
we have already been validated.
"""
window = self._get_window()
if isinstance(window, (list, tuple, np.ndarray)):
return com.asarray_tuplesafe(window).astype(float)
elif is_integer(window):
import scipy.signal as sig
# the below may pop from kwargs
def _validate_win_type(win_type, kwargs):
arg_map = {
"kaiser": ["beta"],
"gaussian": ["std"],
"general_gaussian": ["power", "width"],
"slepian": ["width"],
"exponential": ["tau"],
}
if win_type in arg_map:
win_args = _pop_args(win_type, arg_map[win_type], kwargs)
if win_type == "exponential":
# exponential window requires the first arg (center)
# to be set to None (necessary for symmetric window)
win_args.insert(0, None)
return tuple([win_type] + win_args)
return win_type
def _pop_args(win_type, arg_names, kwargs):
msg = "%s window requires %%s" % win_type
all_args = []
for n in arg_names:
if n not in kwargs:
raise ValueError(msg % n)
all_args.append(kwargs.pop(n))
return all_args
win_type = _validate_win_type(self.win_type, kwargs)
# GH #15662. `False` makes symmetric window, rather than periodic.
return sig.get_window(win_type, window, False).astype(float)
def _apply_window(self, mean=True, **kwargs):
"""
Applies a moving window of type ``window_type`` on the data.
Parameters
----------
mean : bool, default True
If True computes weighted mean, else weighted sum
Returns
-------
y : same type as input argument
"""
window = self._prep_window(**kwargs)
center = self.center
blocks, obj = self._create_blocks()
block_list = list(blocks)
results = []
exclude = []
for i, b in enumerate(blocks):
try:
values = self._prep_values(b.values)
except (TypeError, NotImplementedError):
if isinstance(obj, ABCDataFrame):
exclude.extend(b.columns)
del block_list[i]
continue
else:
raise DataError("No numeric types to aggregate")
if values.size == 0:
results.append(values.copy())
continue
offset = _offset(window, center)
additional_nans = np.array([np.NaN] * offset)
def f(arg, *args, **kwargs):
minp = _use_window(self.min_periods, len(window))
return libwindow.roll_window(
np.concatenate((arg, additional_nans)) if center else arg,
window,
minp,
avg=mean,
)
result = np.apply_along_axis(f, self.axis, values)
if center:
result = self._center_window(result, window)
results.append(result)
return self._wrap_results(results, block_list, obj, exclude)
_agg_see_also_doc = dedent(
"""
See Also
--------
pandas.DataFrame.rolling.aggregate
pandas.DataFrame.aggregate
"""
)
_agg_examples_doc = dedent(
"""
Examples
--------
>>> df = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'])
>>> df
A B C
0 -2.385977 -0.102758 0.438822
1 -1.004295 0.905829 -0.954544
2 0.735167 -0.165272 -1.619346
3 -0.702657 -1.340923 -0.706334
4 -0.246845 0.211596 -0.901819
5 2.463718 3.157577 -1.380906
6 -1.142255 2.340594 -0.039875
7 1.396598 -1.647453 1.677227
8 -0.543425 1.761277 -0.220481
9 -0.640505 0.289374 -1.550670
>>> df.rolling(3, win_type='boxcar').agg('mean')
A B C
0 NaN NaN NaN
1 NaN NaN NaN
2 -0.885035 0.212600 -0.711689
3 -0.323928 -0.200122 -1.093408
4 -0.071445 -0.431533 -1.075833
5 0.504739 0.676083 -0.996353
6 0.358206 1.903256 -0.774200
7 0.906020 1.283573 0.085482
8 -0.096361 0.818139 0.472290
9 0.070889 0.134399 -0.031308
"""
)
@Substitution(
see_also=_agg_see_also_doc,
examples=_agg_examples_doc,
versionadded="",
klass="Series/DataFrame",
axis="",
)
@Appender(_shared_docs["aggregate"])
def aggregate(self, arg, *args, **kwargs):
result, how = self._aggregate(arg, *args, **kwargs)
if result is None:
# these must apply directly
result = arg(self)
return result
agg = aggregate
@Substitution(name="window")
@Appender(_shared_docs["sum"])
def sum(self, *args, **kwargs):
nv.validate_window_func("sum", args, kwargs)
return self._apply_window(mean=False, **kwargs)
@Substitution(name="window")
@Appender(_shared_docs["mean"])
def mean(self, *args, **kwargs):
nv.validate_window_func("mean", args, kwargs)
return self._apply_window(mean=True, **kwargs)
class _GroupByMixin(GroupByMixin):
"""
Provide the groupby facilities.
"""
def __init__(self, obj, *args, **kwargs):
parent = kwargs.pop("parent", None) # noqa
groupby = kwargs.pop("groupby", None)
if groupby is None:
groupby, obj = obj, obj.obj
self._groupby = groupby
self._groupby.mutated = True
self._groupby.grouper.mutated = True
super().__init__(obj, *args, **kwargs)
count = GroupByMixin._dispatch("count")
corr = GroupByMixin._dispatch("corr", other=None, pairwise=None)
cov = GroupByMixin._dispatch("cov", other=None, pairwise=None)
def _apply(
self, func, name=None, window=None, center=None, check_minp=None, **kwargs
):
"""
Dispatch to apply; we are stripping all of the _apply kwargs and
performing the original function call on the grouped object.
"""
def f(x, name=name, *args):
x = self._shallow_copy(x)
if isinstance(name, str):
return getattr(x, name)(*args, **kwargs)
return x.apply(name, *args, **kwargs)
return self._groupby.apply(f)
class _Rolling(_Window):
@property
def _constructor(self):
return Rolling
def _apply(
self, func, name=None, window=None, center=None, check_minp=None, **kwargs
):
"""
Rolling statistical measure using supplied function.
Designed to be used with passed-in Cython array-based functions.
Parameters
----------
func : str/callable to apply
name : str, optional
name of this function
window : int/array, default to _get_window()
center : bool, default to self.center
check_minp : function, default to _use_window
Returns
-------
y : type of input
"""
if center is None:
center = self.center
if window is None:
window = self._get_window()
if check_minp is None:
check_minp = _use_window
blocks, obj = self._create_blocks()
block_list = list(blocks)
index_as_array = self._get_index()
results = []
exclude = []
for i, b in enumerate(blocks):
try:
values = self._prep_values(b.values)
except (TypeError, NotImplementedError):
if isinstance(obj, ABCDataFrame):
exclude.extend(b.columns)
del block_list[i]
continue
else:
raise DataError("No numeric types to aggregate")
if values.size == 0:
results.append(values.copy())
continue
# if we have a string function name, wrap it
if isinstance(func, str):
cfunc = getattr(libwindow, func, None)
if cfunc is None:
raise ValueError(
"we do not support this function "
"in libwindow.{func}".format(func=func)
)
def func(arg, window, min_periods=None, closed=None):
minp = check_minp(min_periods, window)
# ensure we are only rolling on floats
arg = ensure_float64(arg)
return cfunc(arg, window, minp, index_as_array, closed, **kwargs)
# calculation function
if center:
offset = _offset(window, center)
additional_nans = np.array([np.NaN] * offset)
def calc(x):
return func(
np.concatenate((x, additional_nans)),
window,
min_periods=self.min_periods,
closed=self.closed,
)
else:
def calc(x):
return func(
x, window, min_periods=self.min_periods, closed=self.closed
)
with np.errstate(all="ignore"):
if values.ndim > 1:
result = np.apply_along_axis(calc, self.axis, values)
else:
result = calc(values)
if center:
result = self._center_window(result, window)
results.append(result)
return self._wrap_results(results, block_list, obj, exclude)
class _Rolling_and_Expanding(_Rolling):
_shared_docs["count"] = dedent(
r"""
The %(name)s count of any non-NaN observations inside the window.
Returns
-------
Series or DataFrame
Returned object type is determined by the caller of the %(name)s
calculation.
See Also
--------
Series.%(name)s : Calling object with Series data.
DataFrame.%(name)s : Calling object with DataFrames.
DataFrame.count : Count of the full DataFrame.
Examples
--------
>>> s = pd.Series([2, 3, np.nan, 10])
>>> s.rolling(2).count()
0 1.0
1 2.0
2 1.0
3 1.0
dtype: float64
>>> s.rolling(3).count()
0 1.0
1 2.0
2 2.0
3 2.0
dtype: float64
>>> s.rolling(4).count()
0 1.0
1 2.0
2 2.0
3 3.0
dtype: float64
"""
)
def count(self):
blocks, obj = self._create_blocks()
# Validate the index
self._get_index()
window = self._get_window()
window = min(window, len(obj)) if not self.center else window
results = []
for b in blocks:
result = b.notna().astype(int)
result = self._constructor(
result,
window=window,
min_periods=0,
center=self.center,
axis=self.axis,
closed=self.closed,
).sum()
results.append(result)
return self._wrap_results(results, blocks, obj)
_shared_docs["apply"] = dedent(
r"""
The %(name)s function's apply function.
Parameters
----------
func : function
Must produce a single value from an ndarray input if ``raw=True``
or a single value from a Series if ``raw=False``.
raw : bool, default None
* ``False`` : passes each row or column as a Series to the
function.
* ``True`` or ``None`` : the passed function will receive ndarray
objects instead.
If you are just applying a NumPy reduction function this will
achieve much better performance.
The `raw` parameter is required and will show a FutureWarning if
not passed. In the future `raw` will default to False.
.. versionadded:: 0.23.0
*args, **kwargs
Arguments and keyword arguments to be passed into func.
Returns
-------
Series or DataFrame
Return type is determined by the caller.
See Also
--------
Series.%(name)s : Series %(name)s.
DataFrame.%(name)s : DataFrame %(name)s.
"""
)
def apply(self, func, raw=None, args=(), kwargs={}):
from pandas import Series
kwargs.pop("_level", None)
window = self._get_window()
offset = _offset(window, self.center)
index_as_array = self._get_index()
# TODO: default is for backward compat
# change to False in the future
if raw is None:
warnings.warn(
"Currently, 'apply' passes the values as ndarrays to the "
"applied function. In the future, this will change to passing "
"it as Series objects. You need to specify 'raw=True' to keep "
"the current behaviour, and you can pass 'raw=False' to "
"silence this warning",
FutureWarning,
stacklevel=3,
)
raw = True
def f(arg, window, min_periods, closed):
minp = _use_window(min_periods, window)
if not raw:
arg = Series(arg, index=self.obj.index)
return libwindow.roll_generic(
arg,
window,
minp,
index_as_array,
closed,
offset,
func,
raw,
args,
kwargs,
)
return self._apply(f, func, args=args, kwargs=kwargs, center=False, raw=raw)
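    # Illustrative sketch, not part of the original pandas source: what the ``raw``
    # flag above changes for the user-supplied function. The helper name is
    # hypothetical; only the public rolling/apply API is used.
    @staticmethod
    def _raw_flag_sketch():
        import numpy as np
        import pandas as pd
        s = pd.Series([1.0, 2.0, 3.0, 4.0])
        # raw=True hands each window to ``func`` as a plain ndarray (the fast path)
        gets_ndarray = s.rolling(2).apply(lambda x: isinstance(x, np.ndarray), raw=True)
        # raw=False hands each window over as a Series (index-aware, but slower)
        gets_series = s.rolling(2).apply(lambda x: isinstance(x, pd.Series), raw=False)
        return gets_ndarray, gets_series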
def sum(self, *args, **kwargs):
nv.validate_window_func("sum", args, kwargs)
return self._apply("roll_sum", "sum", **kwargs)
_shared_docs["max"] = dedent(
"""
Calculate the %(name)s maximum.
Parameters
----------
*args, **kwargs
Arguments and keyword arguments to be passed into func.
"""
)
def max(self, *args, **kwargs):
nv.validate_window_func("max", args, kwargs)
return self._apply("roll_max", "max", **kwargs)
_shared_docs["min"] = dedent(
"""
Calculate the %(name)s minimum.
Parameters
----------
**kwargs
Under Review.
Returns
-------
Series or DataFrame
Returned object type is determined by the caller of the %(name)s
calculation.
See Also
--------
Series.%(name)s : Calling object with a Series.
DataFrame.%(name)s : Calling object with a DataFrame.
Series.min : Similar method for Series.
DataFrame.min : Similar method for DataFrame.
Examples
--------
Performing a rolling minimum with a window size of 3.
>>> s = pd.Series([4, 3, 5, 2, 6])
>>> s.rolling(3).min()
0 NaN
1 NaN
2 3.0
3 2.0
4 2.0
dtype: float64
"""
)
def min(self, *args, **kwargs):
nv.validate_window_func("min", args, kwargs)
return self._apply("roll_min", "min", **kwargs)
def mean(self, *args, **kwargs):
nv.validate_window_func("mean", args, kwargs)
return self._apply("roll_mean", "mean", **kwargs)
_shared_docs["median"] = dedent(
"""
Calculate the %(name)s median.
Parameters
----------
**kwargs
For compatibility with other %(name)s methods. Has no effect
on the computed median.
Returns
-------
Series or DataFrame
Returned type is the same as the original object.
See Also
--------
Series.%(name)s : Calling object with Series data.
DataFrame.%(name)s : Calling object with DataFrames.
Series.median : Equivalent method for Series.
DataFrame.median : Equivalent method for DataFrame.
Examples
--------
Compute the rolling median of a series with a window size of 3.
>>> s = pd.Series([0, 1, 2, 3, 4])
>>> s.rolling(3).median()
0 NaN
1 NaN
2 1.0
3 2.0
4 3.0
dtype: float64
"""
)
def median(self, **kwargs):
return self._apply("roll_median_c", "median", **kwargs)
_shared_docs["std"] = dedent(
"""
Calculate %(name)s standard deviation.
Normalized by N-1 by default. This can be changed using the `ddof`
argument.
Parameters
----------
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.
*args, **kwargs
For NumPy compatibility. No additional arguments are used.
Returns
-------
Series or DataFrame
Returns the same object type as the caller of the %(name)s calculation.
See Also
--------
Series.%(name)s : Calling object with Series data.
DataFrame.%(name)s : Calling object with DataFrames.
Series.std : Equivalent method for Series.
DataFrame.std : Equivalent method for DataFrame.
numpy.std : Equivalent method for Numpy array.
Notes
-----
The default `ddof` of 1 used in Series.std is different than the default
`ddof` of 0 in numpy.std.
A minimum of one period is required for the rolling calculation.
Examples
--------
>>> s = pd.Series([5, 5, 6, 7, 5, 5, 5])
>>> s.rolling(3).std()
0 NaN
1 NaN
2 0.577350
3 1.000000
4 1.000000
5 1.154701
6 0.000000
dtype: float64
>>> s.expanding(3).std()
0 NaN
1 NaN
2 0.577350
3 0.957427
4 0.894427
5 0.836660
6 0.786796
dtype: float64
"""
)
def std(self, ddof=1, *args, **kwargs):
nv.validate_window_func("std", args, kwargs)
window = self._get_window()
index_as_array = self._get_index()
def f(arg, *args, **kwargs):
minp = _require_min_periods(1)(self.min_periods, window)
return _zsqrt(
libwindow.roll_var(arg, window, minp, index_as_array, self.closed, ddof)
)
return self._apply(
f, "std", check_minp=_require_min_periods(1), ddof=ddof, **kwargs
)
_shared_docs["var"] = dedent(
"""
Calculate unbiased %(name)s variance.
Normalized by N-1 by default. This can be changed using the `ddof`
argument.
Parameters
----------
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.
*args, **kwargs
For NumPy compatibility. No additional arguments are used.
Returns
-------
Series or DataFrame
Returns the same object type as the caller of the %(name)s calculation.
See Also
--------
Series.%(name)s : Calling object with Series data.
DataFrame.%(name)s : Calling object with DataFrames.
Series.var : Equivalent method for Series.
DataFrame.var : Equivalent method for DataFrame.
numpy.var : Equivalent method for Numpy array.
Notes
-----
The default `ddof` of 1 used in :meth:`Series.var` is different than the
default `ddof` of 0 in :func:`numpy.var`.
A minimum of 1 period is required for the rolling calculation.
Examples
--------
>>> s = pd.Series([5, 5, 6, 7, 5, 5, 5])
>>> s.rolling(3).var()
0 NaN
1 NaN
2 0.333333
3 1.000000
4 1.000000
5 1.333333
6 0.000000
dtype: float64
>>> s.expanding(3).var()
0 NaN
1 NaN
2 0.333333
3 0.916667
4 0.800000
5 0.700000
6 0.619048
dtype: float64
"""
)
def var(self, ddof=1, *args, **kwargs):
nv.validate_window_func("var", args, kwargs)
return self._apply(
"roll_var", "var", check_minp=_require_min_periods(1), ddof=ddof, **kwargs
)
_shared_docs[
"skew"
] = """
Unbiased %(name)s skewness.
Parameters
----------
**kwargs
Keyword arguments to be passed into func.
"""
def skew(self, **kwargs):
return self._apply(
"roll_skew", "skew", check_minp=_require_min_periods(3), **kwargs
)
_shared_docs["kurt"] = dedent(
"""
Calculate unbiased %(name)s kurtosis.
This function uses Fisher's definition of kurtosis without bias.
Parameters
----------
**kwargs
Under Review.
Returns
-------
Series or DataFrame
Returned object type is determined by the caller of the %(name)s
calculation.
See Also
--------
Series.%(name)s : Calling object with Series data.
DataFrame.%(name)s : Calling object with DataFrames.
Series.kurt : Equivalent method for Series.
DataFrame.kurt : Equivalent method for DataFrame.
scipy.stats.skew : Third moment of a probability density.
scipy.stats.kurtosis : Reference SciPy method.
Notes
-----
A minimum of 4 periods is required for the %(name)s calculation.
"""
)
def kurt(self, **kwargs):
return self._apply(
"roll_kurt", "kurt", check_minp=_require_min_periods(4), **kwargs
)
_shared_docs["quantile"] = dedent(
"""
Calculate the %(name)s quantile.
Parameters
----------
quantile : float
Quantile to compute. 0 <= quantile <= 1.
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
.. versionadded:: 0.23.0
This optional parameter specifies the interpolation method to use,
when the desired quantile lies between two data points `i` and `j`:
* linear: `i + (j - i) * fraction`, where `fraction` is the
fractional part of the index surrounded by `i` and `j`.
* lower: `i`.
* higher: `j`.
* nearest: `i` or `j` whichever is nearest.
* midpoint: (`i` + `j`) / 2.
**kwargs:
For compatibility with other %(name)s methods. Has no effect on
the result.
Returns
-------
Series or DataFrame
Returned object type is determined by the caller of the %(name)s
calculation.
See Also
--------
Series.quantile : Computes value at the given quantile over all data
in Series.
DataFrame.quantile : Computes values at the given quantile over
requested axis in DataFrame.
Examples
--------
>>> s = pd.Series([1, 2, 3, 4])
>>> s.rolling(2).quantile(.4, interpolation='lower')
0 NaN
1 1.0
2 2.0
3 3.0
dtype: float64
>>> s.rolling(2).quantile(.4, interpolation='midpoint')
0 NaN
1 1.5
2 2.5
3 3.5
dtype: float64
"""
)
def quantile(self, quantile, interpolation="linear", **kwargs):
window = self._get_window()
index_as_array = self._get_index()
def f(arg, *args, **kwargs):
minp = _use_window(self.min_periods, window)
if quantile == 1.0:
return libwindow.roll_max(
arg, window, minp, index_as_array, self.closed
)
elif quantile == 0.0:
return libwindow.roll_min(
arg, window, minp, index_as_array, self.closed
)
else:
return libwindow.roll_quantile(
arg,
window,
minp,
index_as_array,
self.closed,
quantile,
interpolation,
)
return self._apply(f, "quantile", quantile=quantile, **kwargs)
_shared_docs[
"cov"
] = """
Calculate the %(name)s sample covariance.
Parameters
----------
other : Series, DataFrame, or ndarray, optional
If not supplied then will default to self and produce pairwise
output.
pairwise : bool, default None
If False then only matching columns between self and other will be
used and the output will be a DataFrame.
If True then all pairwise combinations will be calculated and the
output will be a MultiIndexed DataFrame in the case of DataFrame
inputs. In the case of missing elements, only complete pairwise
observations will be used.
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.
**kwargs
Keyword arguments to be passed into func.
"""
def cov(self, other=None, pairwise=None, ddof=1, **kwargs):
if other is None:
other = self._selected_obj
# only default unset
pairwise = True if pairwise is None else pairwise
other = self._shallow_copy(other)
# GH 16058: offset window
if self.is_freq_type:
window = self.win_freq
else:
window = self._get_window(other)
def _get_cov(X, Y):
# GH #12373 : rolling functions error on float32 data
# to avoid potential overflow, cast the data to float64
X = X.astype("float64")
Y = Y.astype("float64")
mean = lambda x: x.rolling(
window, self.min_periods, center=self.center
).mean(**kwargs)
count = (X + Y).rolling(window=window, center=self.center).count(**kwargs)
bias_adj = count / (count - ddof)
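# Rolling covariance via the identity Cov(X, Y) = E[XY] - E[X] * E[Y],
# rescaled by N / (N - ddof) to undo the bias of the plain mean estimates.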
return (mean(X * Y) - mean(X) * mean(Y)) * bias_adj
return _flex_binary_moment(
self._selected_obj, other._selected_obj, _get_cov, pairwise=bool(pairwise)
)
_shared_docs["corr"] = dedent(
"""
Calculate %(name)s correlation.
Parameters
----------
other : Series, DataFrame, or ndarray, optional
If not supplied then will default to self.
pairwise : bool, default None
Calculate pairwise combinations of columns within a
DataFrame. If `other` is not specified, defaults to `True`,
otherwise defaults to `False`.
Not relevant for :class:`~pandas.Series`.
**kwargs
Unused.
Returns
-------
Series or DataFrame
Returned object type is determined by the caller of the
%(name)s calculation.
See Also
--------
Series.%(name)s : Calling object with Series data.
DataFrame.%(name)s : Calling object with DataFrames.
Series.corr : Equivalent method for Series.
DataFrame.corr : Equivalent method for DataFrame.
%(name)s.cov : Similar method to calculate covariance.
numpy.corrcoef : NumPy Pearson's correlation calculation.
Notes
-----
This function uses Pearson's definition of correlation
(https://en.wikipedia.org/wiki/Pearson_correlation_coefficient).
When `other` is not specified, the output will be self correlation (e.g.
all 1's), except for :class:`~pandas.DataFrame` inputs with `pairwise`
set to `True`.
Function will return ``NaN`` for correlations of equal valued sequences;
this is the result of a 0/0 division error.
When `pairwise` is set to `False`, only matching columns between `self` and
`other` will be used.
When `pairwise` is set to `True`, the output will be a MultiIndex DataFrame
with the original index on the first level, and the `other` DataFrame
columns on the second level.
In the case of missing elements, only complete pairwise observations
will be used.
Examples
--------
The below example shows a rolling calculation with a window size of
four matching the equivalent function call using :meth:`numpy.corrcoef`.
>>> v1 = [3, 3, 3, 5, 8]
>>> v2 = [3, 4, 4, 4, 8]
>>> fmt = "{0:.6f}" # limit the printed precision to 6 digits
>>> # numpy returns a 2X2 array, the correlation coefficient
>>> # is the number at entry [0][1]
>>> print(fmt.format(np.corrcoef(v1[:-1], v2[:-1])[0][1]))
0.333333
>>> print(fmt.format(np.corrcoef(v1[1:], v2[1:])[0][1]))
0.916949
>>> s1 = pd.Series(v1)
>>> s2 = pd.Series(v2)
>>> s1.rolling(4).corr(s2)
0 NaN
1 NaN
2 NaN
3 0.333333
4 0.916949
dtype: float64
The below example shows a similar rolling calculation on a
DataFrame using the pairwise option.
>>> matrix = np.array([[51., 35.], [49., 30.], [47., 32.],\
[46., 31.], [50., 36.]])
>>> print(np.corrcoef(matrix[:-1,0], matrix[:-1,1]).round(7))
[[1. 0.6263001]
[0.6263001 1. ]]
>>> print(np.corrcoef(matrix[1:,0], matrix[1:,1]).round(7))
[[1. 0.5553681]
[0.5553681 1. ]]
>>> df = pd.DataFrame(matrix, columns=['X','Y'])
>>> df
X Y
0 51.0 35.0
1 49.0 30.0
2 47.0 32.0
3 46.0 31.0
4 50.0 36.0
>>> df.rolling(4).corr(pairwise=True)
X Y
0 X NaN NaN
Y NaN NaN
1 X NaN NaN
Y NaN NaN
2 X NaN NaN
Y NaN NaN
3 X 1.000000 0.626300
Y 0.626300 1.000000
4 X 1.000000 0.555368
Y 0.555368 1.000000
"""
)
def corr(self, other=None, pairwise=None, **kwargs):
if other is None:
other = self._selected_obj
# only default unset
pairwise = True if pairwise is None else pairwise
other = self._shallow_copy(other)
window = self._get_window(other)
def _get_corr(a, b):
a = a.rolling(
window=window, min_periods=self.min_periods, center=self.center
)
b = b.rolling(
window=window, min_periods=self.min_periods, center=self.center
)
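# Pearson correlation: rolling covariance divided by the product of the
# rolling standard deviations, evaluated window by window.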
return a.cov(b, **kwargs) / (a.std(**kwargs) * b.std(**kwargs))
return _flex_binary_moment(
self._selected_obj, other._selected_obj, _get_corr, pairwise=bool(pairwise)
)
class Rolling(_Rolling_and_Expanding):
@cache_readonly
def is_datetimelike(self):
return isinstance(
self._on, (ABCDatetimeIndex, ABCTimedeltaIndex, ABCPeriodIndex)
)
@cache_readonly
def _on(self):
if self.on is None:
return self.obj.index
elif isinstance(self.obj, ABCDataFrame) and self.on in self.obj.columns:
from pandas import Index
return Index(self.obj[self.on])
else:
raise ValueError(
"invalid on specified as {0}, "
"must be a column (if DataFrame) "
"or None".format(self.on)
)
def validate(self):
super().validate()
# we allow rolling on a datetimelike index
if (self.obj.empty or self.is_datetimelike) and isinstance(
self.window, (str, ABCDateOffset, timedelta)
):
self._validate_monotonic()
freq = self._validate_freq()
# we don't allow center
if self.center:
raise NotImplementedError(
"center is not implemented "
"for datetimelike and offset "
"based windows"
)
# this will raise ValueError on non-fixed freqs
self.win_freq = self.window
self.window = freq.nanos
self.win_type = "freq"
# min_periods must be an integer
if self.min_periods is None:
self.min_periods = 1
elif not is_integer(self.window):
raise ValueError("window must be an integer")
elif self.window < 0:
raise ValueError("window must be non-negative")
if not self.is_datetimelike and self.closed is not None:
raise ValueError(
"closed only implemented for datetimelike " "and offset based windows"
)
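# A minimal sketch (not part of the original module) of what validate()
# above supports: an offset-string window on a datetimelike index is
# converted to nanoseconds, and min_periods then defaults to 1.
#
#     s = pd.Series(range(5), index=pd.date_range("2019-01-01", periods=5))
#     s.rolling("2D").sum()   # offset-based (variable-width) window
#     s.rolling(2).sum()      # plain integer window for comparison
#
# The data and dates here are illustrative only.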
def _validate_monotonic(self):
"""
Validate on is_monotonic.
"""
if not self._on.is_monotonic:
formatted = self.on or "index"
raise ValueError("{0} must be " "monotonic".format(formatted))
def _validate_freq(self):
"""
Validate & return window frequency.
"""
from pandas.tseries.frequencies import to_offset
try:
return to_offset(self.window)
except (TypeError, ValueError):
raise ValueError(
"passed window {0} is not "
"compatible with a datetimelike "
"index".format(self.window)
)
_agg_see_also_doc = dedent(
"""
See Also
--------
Series.rolling
DataFrame.rolling
"""
)
_agg_examples_doc = dedent(
"""
Examples
--------
>>> df = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'])
>>> df
A B C
0 -2.385977 -0.102758 0.438822
1 -1.004295 0.905829 -0.954544
2 0.735167 -0.165272 -1.619346
3 -0.702657 -1.340923 -0.706334
4 -0.246845 0.211596 -0.901819
5 2.463718 3.157577 -1.380906
6 -1.142255 2.340594 -0.039875
7 1.396598 -1.647453 1.677227
8 -0.543425 1.761277 -0.220481
9 -0.640505 0.289374 -1.550670
>>> df.rolling(3).sum()
A B C
0 NaN NaN NaN
1 NaN NaN NaN
2 -2.655105 0.637799 -2.135068
3 -0.971785 -0.600366 -3.280224
4 -0.214334 -1.294599 -3.227500
5 1.514216 2.028250 -2.989060
6 1.074618 5.709767 -2.322600
7 2.718061 3.850718 0.256446
8 -0.289082 2.454418 1.416871
9 0.212668 0.403198 -0.093924
>>> df.rolling(3).agg({'A':'sum', 'B':'min'})
A B
0 NaN NaN
1 NaN NaN
2 -2.655105 -0.165272
3 -0.971785 -1.340923
4 -0.214334 -1.340923
5 1.514216 -1.340923
6 1.074618 0.211596
7 2.718061 -1.647453
8 -0.289082 -1.647453
9 0.212668 -1.647453
"""
)
@Substitution(
see_also=_agg_see_also_doc,
examples=_agg_examples_doc,
versionadded="",
klass="Series/Dataframe",
axis="",
)
@Appender(_shared_docs["aggregate"])
def aggregate(self, arg, *args, **kwargs):
return super().aggregate(arg, *args, **kwargs)
agg = aggregate
@Substitution(name="rolling")
@Appender(_shared_docs["count"])
def count(self):
# different impl for freq counting
if self.is_freq_type:
return self._apply("roll_count", "count")
return super().count()
@Substitution(name="rolling")
@Appender(_shared_docs["apply"])
def apply(self, func, raw=None, args=(), kwargs={}):
return super().apply(func, raw=raw, args=args, kwargs=kwargs)
@Substitution(name="rolling")
@Appender(_shared_docs["sum"])
def sum(self, *args, **kwargs):
nv.validate_rolling_func("sum", args, kwargs)
return super().sum(*args, **kwargs)
@Substitution(name="rolling")
@Appender(_doc_template)
@Appender(_shared_docs["max"])
def max(self, *args, **kwargs):
nv.validate_rolling_func("max", args, kwargs)
return super().max(*args, **kwargs)
@Substitution(name="rolling")
@Appender(_shared_docs["min"])
def min(self, *args, **kwargs):
nv.validate_rolling_func("min", args, kwargs)
return super().min(*args, **kwargs)
@Substitution(name="rolling")
@Appender(_shared_docs["mean"])
def mean(self, *args, **kwargs):
nv.validate_rolling_func("mean", args, kwargs)
return super().mean(*args, **kwargs)
@Substitution(name="rolling")
@Appender(_shared_docs["median"])
def median(self, **kwargs):
return super().median(**kwargs)
@Substitution(name="rolling")
@Appender(_shared_docs["std"])
def std(self, ddof=1, *args, **kwargs):
nv.validate_rolling_func("std", args, kwargs)
return super().std(ddof=ddof, **kwargs)
@Substitution(name="rolling")
@Appender(_shared_docs["var"])
def var(self, ddof=1, *args, **kwargs):
nv.validate_rolling_func("var", args, kwargs)
return super().var(ddof=ddof, **kwargs)
@Substitution(name="rolling")
@Appender(_doc_template)
@ | Appender(_shared_docs["skew"]) | pandas.util._decorators.Appender |
import os
import gc
import datetime
import numpy as np
import pandas as pd
import torch
import matplotlib as mpl
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
import torchvision.transforms.functional as TF
from pandas_datareader import data
from alpha_vantage.timeseries import TimeSeries
from PIL import Image
gc.enable()
def key():
API_key = pd.read_csv('API_key')
return (API_key['API Key'][np.random.randint(0,len(API_key),1)[0]])
def __ImageToTensor(fig):
fig.savefig('./fig.png')
tensor = TF.to_tensor(Image.open('./fig.png').convert('RGB')).unsqueeze_(0)[0]
os.remove('./fig.png')
return tensor
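# Minimal usage sketch (assumed, not from the original script): convert a
# matplotlib figure into a (3, H, W) float tensor with values in [0, 1].
#
#     fig, ax = plt.subplots()
#     ax.plot([1, 2, 3])
#     img = __ImageToTensor(fig)
#     plt.close(fig)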
'''
def getData(ticker, interval='1min'):
time=TimeSeries(key=key(),output_format='pandas')
data=time.get_intraday(symbol=ticker,interval=interval,outputsize='full')
return data[0]
'''
def getData(ticker, interval='1min'):
dataset = pd.read_csv('FUTURES MINUTE.txt', header = None)
dataset.columns = ['Date','time',"1. open","2. high",'3. low','4. close','5. volume']
dataset['date'] = dataset['Date'] +" "+ dataset['time']
dataset.drop('Date', axis=1, inplace=True)
dataset.drop('time', axis=1, inplace=True)
dataset['date'] = dataset['date'].apply(lambda x: | pd.to_datetime(x, errors='ignore') | pandas.to_datetime |
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 14 11:46:12 2021
This script provides a function that automates hypothesis testing of
dataframe columns by a selected grouping variable. It performs normality
testing, then a two-sample t-test or Mann-Whitney U-test.
Change log:
2022-05-15: Added Levene's test to check whether the groups have equal
variances, which matters for the assumptions of the t-test.
Added a two-sample contingency test function to perform the Chi-Square test
for contingency tables.
@author: MIKLOS
"""
import datetime
import os
import pandas as pd
import numpy as np
from scipy.stats import ttest_ind
from scipy.stats import levene
from scipy.stats import mannwhitneyu
from scipy.stats import shapiro
from scipy.stats import chi2_contingency
def get_date_tag():
"""Get date tag to track outputed files"""
date_tag = datetime.datetime.now()
return (
date_tag.strftime("%Y")
+ "_"
+ date_tag.strftime("%m")
+ "_"
+ date_tag.strftime("%d")
)
def color_boolean(val):
"""Condition of True values"""
color = ""
if val is True:
color = "green"
elif val is False:
color = "red"
else:
color = None
return "background-color: %s" % color
def two_sample_t_test(group1, group2, d, alpha):
"""Gets two-sample t-test"""
t_value, p_value = ttest_ind(group1, group2)
d["t-test p value"].append(p_value)
if p_value > alpha:
d["Sample means are the same"].append(True)
else:
d["Sample means are the same"].append(False)
d["Mann-Whitney U test p value"].append(None)
d["Sample distributions are the same"].append(None)
return d
def two_sample_wilcoxon_test(group1, group2, d, alpha):
"""Gets two sample Mann-Whitney U test"""
statistic, p_value = mannwhitneyu(group1, group2, alternative="two-sided")
d["Mann-Whitney U test p value"].append(p_value)
if p_value > alpha:
d["Sample distributions are the same"].append(True)
else:
d["Sample distributions are the same"].append(False)
d["t-test p value"].append(None)
d["Sample means are the same"].append(None)
return d
def two_sample_hypothesis_testing(df, features, group, alpha=0.05):
"""Helper function to run the two-sample hypothesis tests feature by
feature. It also checks the assumptions of the two sample t-test and runs
the mann-whitney u test if the assumptions are violated.
"""
d = {
"Feature": [],
"Group 1": [],
"Group 2": [],
"Group 1 mean": [],
"Group 2 mean": [],
"Group 1 std": [],
"Group 2 std": [],
"Shapiro test p value group 1": [],
"Shapiro test p value group 2": [],
"Group 1 is_normal": [],
"Group 2 is_normal": [],
"t-test p value": [],
"Sample means are the same": [],
"Mann-Whitney U test p value": [],
"Sample distributions are the same": [],
}
for feature in features:
d["Feature"].append(feature)
group1_name = list(set(list(df[group])))[0]
group2_name = list(set(list(df[group])))[1]
d["Group 1"].append(group1_name)
d["Group 2"].append(group2_name)
group1 = df[df[group] == list(set(list(df[group])))[0]][feature].dropna()
group2 = df[df[group] == list(set(list(df[group])))[1]][feature].dropna()
levene_stat, levene_p = levene(group1, group2)
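# Levene's test for equal variances; levene_p > alpha (variances not
# significantly different) is required later before using the t-test.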
d["Group 1 mean"].append(group1.mean())
d["Group 2 mean"].append(group2.mean())
d["Group 1 std"].append(group1.std())
d["Group 2 std"].append(group2.std())
# normality test
p_group1 = shapiro(group1)[1]
p_group2 = shapiro(group2)[1]
d["Shapiro test p value group 1"].append(p_group1)
d["Shapiro test p value group 2"].append(p_group2)
if p_group1 > alpha:
d["Group 1 is_normal"].append(True)
else:
d["Group 1 is_normal"].append(False)
if p_group2 > alpha:
d["Group 2 is_normal"].append(True)
else:
d["Group 2 is_normal"].append(False)
if (
p_group1 > alpha
and p_group2 > alpha
and len(group1) > 30
and len(group2) > 30
and levene_p > alpha
):
d = two_sample_t_test(group1, group2, d, alpha)
else:
d = two_sample_wilcoxon_test(group1, group2, d, alpha)
out_df = pd.DataFrame(d)
out_df.style.apply(color_boolean)
hypot_folder = "./hypothesis_testing/"
if not os.path.exists(hypot_folder):
os.makedirs(hypot_folder)
date_tag = get_date_tag()
filename = "hypothesis_testing_by_" + group + "_" + date_tag + ".xlsx"
out_df.style.applymap(
color_boolean,
subset=["Sample means are the same", "Sample distributions are the same"],
).to_excel(hypot_folder + filename, index=False, freeze_panes=(1, 0))
print("\n------------")
print("Hypothesis testing done.\n")
return out_df
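# Minimal usage sketch (hypothetical column names, mirroring main() below):
#
#     df = pd.DataFrame({"sex": ["male", "female"] * 5,
#                        "height": np.random.normal(170, 10, 10)})
#     results = two_sample_hypothesis_testing(df, ["height"], "sex")
#
# Note that the function also writes a styled Excel report to
# ./hypothesis_testing/.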
def two_sample_contingency_test(df, features, group, alpha=0.05):
"""Chi-Square Test for contingency table"""
d = {
"Feature": [],
"Group 1": [],
"Group 2": [],
"chi value": [],
"p-value": [],
"H0: no relation between the variables": [],
"H1: significant relationship between the variables": [],
}
for feature in features:
d["Feature"].append(feature)
group1_name = list(set(list(df[group])))[0]
group2_name = list(set(list(df[group])))[1]
d["Group 1"].append(group1_name)
d["Group 2"].append(group2_name)
contingency = pd.crosstab(df[group], df[feature])
chi, p_value, dof, expected = chi2_contingency(contingency)
d["chi value"].append(chi)
d["p-value"].append(p_value)
if p_value > alpha:
d["H0: no relation between the variables"].append(True)
d["H1: significant relationship between the variables"].append(False)
else:
d["H0: no relation between the variables"].append(False)
d["H1: significant relationship between the variables"].append(True)
out_df = pd.DataFrame(d)
out_df.style.apply(color_boolean)
hypot_folder = "./hypothesis_testing/"
if not os.path.exists(hypot_folder):
os.makedirs(hypot_folder)
date_tag = get_date_tag()
filename = "contingency_chisquare_by_" + group + "_" + date_tag + ".xlsx"
out_df.style.applymap(
color_boolean,
subset=[
"H0: no relation between the variables",
"H1: significant relationship between the variables",
],
).to_excel(hypot_folder + filename, index=False, freeze_panes=(1, 0))
print("\n------------")
print("Contingency chisquare testing done.\n")
return out_df
def main():
"""Main function"""
print("It's okay.")
data = {
"age": [35, 43, 32, 19, 67, 89, 45, 65, 54, 65],
"sex": [
"male",
"female",
"female",
"male",
"female",
"female",
"male",
"female",
"female",
"male",
],
"height": [180, 170, 170, 195, 166, 167, 168, 167, 170, 190],
"weight": [80, 61, 59, 85, 55, 55, 81, 65, 60, 88],
"ac": [94, 80, 82, 84, 88, 77, 62, 64, 87, 100],
}
df = | pd.DataFrame(data) | pandas.DataFrame |
import pickle
import numpy as np
import pandas as pd
from AlchemicalAssistant.FEPBOSSReader import bossPdbAtom2Element,ucomb,tor_cent
from AlchemicalAssistant.Vector_algebra import pairing_func,AtomNum2Symb,AtomNum2Mass
from AlchemicalAssistant.MolReaders import ang_id,tor_id
from AlchemicalAssistant.TINKER_Rel_FEP import xyz_prep,tinker_prm
def pdb_prep(atoms, coos, resid='A2B',pdbname='COMBO'):
opdb = open(pdbname+'_NAMD.pdb', 'w+')
opdb.write('REMARK LIGPARGEN GENERATED PDB FILE\n')
num = 0
for (i, j) in zip(atoms, coos):
num += 1
opdb.write('%-6s%5d %4s %3s %4d %8.3f%8.3f%8.3f\n' %
('ATOM', num, i, resid, 1, j[0], j[1], j[2]))
opdb.write('END\n')
opdb.close()
return None
###
def MapMolecules(map_dict,num):
if num in map_dict.keys(): return map_dict[num]
else: return num+1
def TranslateICs(amol,bmol,map_dict,zdf):
amol['BONDS'][['cl1','cl2']] = amol['BONDS'][['cl1','cl2']].apply(lambda x: x+3)
bmol['BONDS'][['cl1','cl2']] = bmol['BONDS'][['cl1','cl2']].applymap(lambda x: MapMolecules(map_dict,x+3))
all_bonds = | pd.concat([amol['BONDS'],bmol['BONDS']],axis=0) | pandas.concat |
#!/usr/bin/env python
# coding: utf-8
# # US Beveridge Curve Data
#
# Construct monthly unemploment rate and vacancy rate series for the US from April 1929 through the most recently available date. The methodology is based on the approach described in Petrosky-Nadeau and Zhang (2013): https://papers.ssrn.com/sol3/papers.cfm?abstract_id=2241695
#
# 1. This Notebook is compatible with Python 2 and 3.
#
# 2. **To use this notebook to download the entire dataset, you need the X-13ARIMA-SEATS binary**. If you don't have the binary, set variable `x_13` to `False`. Data that require seasonal adjustment will be loaded from the `txt` directory of the parent directory to this program.
#
# Binaries for Windows and Linux/Unix are available from https://www.census.gov/srd/www/x13as/. To compile X-13 for Mac OS X, see the instructions here: https://github.com/christophsax/seasonal/wiki/Compiling-X-13ARIMA-SEATS-from-Source-for-OS-X.
# In[1]:
import statsmodels as sm
import fredpy as fp
import matplotlib.pyplot as plt
plt.style.use('classic')
import numpy as np
import pandas as pd
import os,urllib
import warnings
warnings.filterwarnings('ignore')
get_ipython().run_line_magic('matplotlib', 'inline')
# You must change XPATH if you are running this script from anywhere other than the directory containing x13as.
XPATH = os.getcwd()
# Load fredpy api key
fp.api_key = fp.load_api_key('fred_api_key.txt')
# Whether x13 binary is available
x_13 = False
# ## Unemployment Rate
#
# Construct an unemployment series from April 1929 through the most recent date available by concatenating four U.S. unemployment rate series; all of which are available from FRED (https://fred.stlouisfed.org/). Specifically:
#
# 1. Seasonally adjusted unemployment rate for the United States from April 1929 through February 1940. FRED series ID: M0892AUSM156SNBR. NBER Indicator: m08292a.
# 2. Seasonally adjusted unemployment rate for the United States from March 1940 through December 1946. FRED series ID: M0892BUSM156SNBR. NBER Indicator: m08292b.
# 3. Seasonally adjusted unemployment rate for the United States from January 1947 through December 1947. FRED series ID: M0892CUSM156NNBR. NBER Indicator: m08292c. Note: The source data are not seasonally adjusted and contain observations through December 1966. Seasonally adjust the entire series through December 1966 using the U.S. Census Bureau's X-13-ARIMA seasonal adjustment program. Then discard values after December 1947. *Only downloaded if `x_13 == True.`*
# 4. Seasonally adjusted unemployment rate for the United States from January 1948 through the most recent date available. FRED series ID: UNRATE.
# In[2]:
# Historical US unemployment rate from the NBER Macrohistory Database: 1929-04-01 to 1940-02-01;
# Seasonally adjusted
# Download from FRED and save as a Pandas series
unemp_1 = fp.series('M0892AUSM156SNBR')
unemp_1 = unemp_1.window(['04-01-1929','02-01-1940']).data
# In[3]:
# Historical US unemployment rate from the NBER Macrohistory Database: 1940-03-01 to 1946-12-01;
# Seasonally adjusted
# Download from FRED and save as a Pandas series
unemp_2 = fp.series('M0892BUSM156SNBR')
unemp_2 = unemp_2.window(['03-01-1940','12-01-1946']).data
# In[4]:
# Historical US unemployment rate from the NBER Macrohistory Database: 1947-01-01 to 1966-12-01;
# Raw series is *not* seasonally adjusted
if x_13:
# Download from FRED
unemp_3 = fp.series('M0892CUSM156NNBR')
unemp_3 = unemp_3.window(['01-01-1947','12-01-1966']).data
# Run x13_arima_analysis to obtain SA unemployment data.
x13results = sm.tsa.x13.x13_arima_analysis(endog = unemp_3,x12path=XPATH, outlier=False,print_stdout=True)
unemp_3 = pd.Series(x13results.seasadj.values,index=unemp_3.index)
unemp_3 = unemp_3[(unemp_3.index>=pd.to_datetime('01-01-1947')) & (unemp_3.index<=pd.to_datetime('12-01-1947'))]
# Export the series to txt
unemp_3.to_csv('../txt/unemployment_1947.txt',sep='\t')
else:
# Import data
unemp_3 = pd.read_csv('../txt/unemployment_1947.txt',sep='\t',index_col=0,parse_dates=True)['0']
# In[5]:
# US civilian unemployment rate from the BLS: 1948-01-01 to most recent;
# Seasonally adjusted
unemp_4 = fp.series('UNRATE')
unemp_4 = unemp_4.window(['01-01-1948','01-01-2200']).data
# In[6]:
# Concatenate the four series
unemployment_rate_series = unemp_1.append(unemp_2).sort_index()
unemployment_rate_series = unemployment_rate_series.append(unemp_3).sort_index()
unemployment_rate_series = unemployment_rate_series.append(unemp_4).sort_index()
# plot the series and save the figure
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.plot(unemployment_rate_series,'-',lw=4,alpha = 0.65)
ax.set_ylabel('Percent')
ax.grid()
fig.tight_layout()
plt.savefig('../png/fig_data_unrate.png',bbox_inches='tight',dpi=120)
#
# ## Vacancies (Job openings)
#
# Construct a series of vacancies for the United States going back to April 1929 by scaling and concatenating three series:
# 1. Help-wanted advertising in newspapers index for United States from April 1929 to January 1960. FRED series ID: M0882AUSM349NNBR. NBER Indicator: m08082a. Note: The source data are not seasonally adjusted and contain observations through August 1960. Seasonally adjust the entire series through August 1960 using the United States Census Bureau's X-13-ARIMA seasonal adjustment program. Then discard values after January 1960. *Only downloaded if `x_13 == True.`*
# 2. Composite help-wanted index from January 1960 through January 2001 constructed using the method described in and Barnichon (2010). The data are from Barnichon's website https://sites.google.com/site/regisbarnichon/data. Scale this series so that its value in January 1960 equals the value of the NBER's help-wanted index for the same date.
# 3. Job openings, total nonfarm for the United States from January 2001 to the most recent date available. FRED series ID: JTSJOL. Scale this series so that its value in January 2001 equals the value of the scaled help-wanted index from Barnichon for the same date.
# In[7]:
if x_13:
# Met life help-wanted index: 1919-01-01 to 1960-08-01;
# Not seasonally adjusted
vac_1 = fp.series('M0882AUSM349NNBR').data
# temp_series = pd.Series(vac_1.data,index=pd.to_datetime(vac_1.dates))
# Run x13_arima_analysis to obtain SA vacancy rate data.
x13results = sm.tsa.x13.x13_arima_analysis(endog = vac_1,x12path=XPATH, outlier=False,print_stdout=True)
vac_1 = pd.Series(x13results.seasadj.values,index=vac_1.index)
vac_1 = vac_1[(vac_1.index>=pd.to_datetime('04-01-1929')) ]
# Export the series to txt
vac_1.to_csv('../txt/vacancies_1929-1960.txt',sep='\t')
else:
vac_1 = | pd.read_csv('../txt/vacancies_1929-1960.txt',sep='\t',index_col=0,parse_dates=True) | pandas.read_csv |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
pd.set_option('max_columns', None)
pd.set_option('max_rows', None)
sdss = pd.read_csv('/home/vitorbootz/research/flux_measurements/sdss_flux_lines.csv')
sample = pd.read_csv('/home/vitorbootz/research/flux_measurements/sample_flux_lines.csv')
main = pd.read_csv('/home/vitorbootz/research/aux_files/galaxy_list.csv')
sdss.sort_values(['lcgID'], inplace=True)
sdss.index = range(len(sdss))
sdss_sample = | pd.DataFrame([]) | pandas.DataFrame |
"""SQL io tests
The SQL tests are broken down in different classes:
- `PandasSQLTest`: base class with common methods for all test classes
- Tests for the public API (only tests with sqlite3)
- `_TestSQLApi` base class
- `TestSQLApi`: test the public API with sqlalchemy engine
- `TestSQLiteFallbackApi`: test the public API with a sqlite DBAPI
connection
- Tests for the different SQL flavors (flavor specific type conversions)
- Tests for the sqlalchemy mode: `_TestSQLAlchemy` is the base class with
common methods, `_TestSQLAlchemyConn` tests the API with a SQLAlchemy
Connection object. The different tested flavors (sqlite3, MySQL,
PostgreSQL) derive from the base class
- Tests for the fallback mode (`TestSQLiteFallback`)
"""
import csv
from datetime import date, datetime, time
from io import StringIO
import sqlite3
import warnings
import numpy as np
import pytest
from pandas.core.dtypes.common import is_datetime64_dtype, is_datetime64tz_dtype
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
Series,
Timestamp,
concat,
date_range,
isna,
to_datetime,
to_timedelta,
)
import pandas._testing as tm
import pandas.io.sql as sql
from pandas.io.sql import read_sql_query, read_sql_table
try:
import sqlalchemy
import sqlalchemy.schema
import sqlalchemy.sql.sqltypes as sqltypes
from sqlalchemy.ext import declarative
from sqlalchemy.orm import session as sa_session
SQLALCHEMY_INSTALLED = True
except ImportError:
SQLALCHEMY_INSTALLED = False
SQL_STRINGS = {
"create_iris": {
"sqlite": """CREATE TABLE iris (
"SepalLength" REAL,
"SepalWidth" REAL,
"PetalLength" REAL,
"PetalWidth" REAL,
"Name" TEXT
)""",
"mysql": """CREATE TABLE iris (
`SepalLength` DOUBLE,
`SepalWidth` DOUBLE,
`PetalLength` DOUBLE,
`PetalWidth` DOUBLE,
`Name` VARCHAR(200)
)""",
"postgresql": """CREATE TABLE iris (
"SepalLength" DOUBLE PRECISION,
"SepalWidth" DOUBLE PRECISION,
"PetalLength" DOUBLE PRECISION,
"PetalWidth" DOUBLE PRECISION,
"Name" VARCHAR(200)
)""",
},
"insert_iris": {
"sqlite": """INSERT INTO iris VALUES(?, ?, ?, ?, ?)""",
"mysql": """INSERT INTO iris VALUES(%s, %s, %s, %s, "%s");""",
"postgresql": """INSERT INTO iris VALUES(%s, %s, %s, %s, %s);""",
},
"create_test_types": {
"sqlite": """CREATE TABLE types_test_data (
"TextCol" TEXT,
"DateCol" TEXT,
"IntDateCol" INTEGER,
"IntDateOnlyCol" INTEGER,
"FloatCol" REAL,
"IntCol" INTEGER,
"BoolCol" INTEGER,
"IntColWithNull" INTEGER,
"BoolColWithNull" INTEGER
)""",
"mysql": """CREATE TABLE types_test_data (
`TextCol` TEXT,
`DateCol` DATETIME,
`IntDateCol` INTEGER,
`IntDateOnlyCol` INTEGER,
`FloatCol` DOUBLE,
`IntCol` INTEGER,
`BoolCol` BOOLEAN,
`IntColWithNull` INTEGER,
`BoolColWithNull` BOOLEAN
)""",
"postgresql": """CREATE TABLE types_test_data (
"TextCol" TEXT,
"DateCol" TIMESTAMP,
"DateColWithTz" TIMESTAMP WITH TIME ZONE,
"IntDateCol" INTEGER,
"IntDateOnlyCol" INTEGER,
"FloatCol" DOUBLE PRECISION,
"IntCol" INTEGER,
"BoolCol" BOOLEAN,
"IntColWithNull" INTEGER,
"BoolColWithNull" BOOLEAN
)""",
},
"insert_test_types": {
"sqlite": {
"query": """
INSERT INTO types_test_data
VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?)
""",
"fields": (
"TextCol",
"DateCol",
"IntDateCol",
"IntDateOnlyCol",
"FloatCol",
"IntCol",
"BoolCol",
"IntColWithNull",
"BoolColWithNull",
),
},
"mysql": {
"query": """
INSERT INTO types_test_data
VALUES("%s", %s, %s, %s, %s, %s, %s, %s, %s)
""",
"fields": (
"TextCol",
"DateCol",
"IntDateCol",
"IntDateOnlyCol",
"FloatCol",
"IntCol",
"BoolCol",
"IntColWithNull",
"BoolColWithNull",
),
},
"postgresql": {
"query": """
INSERT INTO types_test_data
VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
""",
"fields": (
"TextCol",
"DateCol",
"DateColWithTz",
"IntDateCol",
"IntDateOnlyCol",
"FloatCol",
"IntCol",
"BoolCol",
"IntColWithNull",
"BoolColWithNull",
),
},
},
"read_parameters": {
"sqlite": "SELECT * FROM iris WHERE Name=? AND SepalLength=?",
"mysql": 'SELECT * FROM iris WHERE `Name`="%s" AND `SepalLength`=%s',
"postgresql": 'SELECT * FROM iris WHERE "Name"=%s AND "SepalLength"=%s',
},
"read_named_parameters": {
"sqlite": """
SELECT * FROM iris WHERE Name=:name AND SepalLength=:length
""",
"mysql": """
SELECT * FROM iris WHERE
`Name`="%(name)s" AND `SepalLength`=%(length)s
""",
"postgresql": """
SELECT * FROM iris WHERE
"Name"=%(name)s AND "SepalLength"=%(length)s
""",
},
"create_view": {
"sqlite": """
CREATE VIEW iris_view AS
SELECT * FROM iris
"""
},
}
class MixInBase:
def teardown_method(self, method):
# if setup fails, there may not be a connection to close.
if hasattr(self, "conn"):
for tbl in self._get_all_tables():
self.drop_table(tbl)
self._close_conn()
class MySQLMixIn(MixInBase):
def drop_table(self, table_name):
cur = self.conn.cursor()
cur.execute(f"DROP TABLE IF EXISTS {sql._get_valid_mysql_name(table_name)}")
self.conn.commit()
def _get_all_tables(self):
cur = self.conn.cursor()
cur.execute("SHOW TABLES")
return [table[0] for table in cur.fetchall()]
def _close_conn(self):
from pymysql.err import Error
try:
self.conn.close()
except Error:
pass
class SQLiteMixIn(MixInBase):
def drop_table(self, table_name):
self.conn.execute(
f"DROP TABLE IF EXISTS {sql._get_valid_sqlite_name(table_name)}"
)
self.conn.commit()
def _get_all_tables(self):
c = self.conn.execute("SELECT name FROM sqlite_master WHERE type='table'")
return [table[0] for table in c.fetchall()]
def _close_conn(self):
self.conn.close()
class SQLAlchemyMixIn(MixInBase):
def drop_table(self, table_name):
sql.SQLDatabase(self.conn).drop_table(table_name)
def _get_all_tables(self):
meta = sqlalchemy.schema.MetaData(bind=self.conn)
meta.reflect()
table_list = meta.tables.keys()
return table_list
def _close_conn(self):
pass
class PandasSQLTest:
"""
Base class with common private methods for SQLAlchemy and fallback cases.
"""
def _get_exec(self):
if hasattr(self.conn, "execute"):
return self.conn
else:
return self.conn.cursor()
@pytest.fixture(params=[("data", "iris.csv")])
def load_iris_data(self, datapath, request):
import io
iris_csv_file = datapath(*request.param)
if not hasattr(self, "conn"):
self.setup_connect()
self.drop_table("iris")
self._get_exec().execute(SQL_STRINGS["create_iris"][self.flavor])
with io.open(iris_csv_file, mode="r", newline=None) as iris_csv:
r = csv.reader(iris_csv)
next(r) # skip header row
ins = SQL_STRINGS["insert_iris"][self.flavor]
for row in r:
self._get_exec().execute(ins, row)
def _load_iris_view(self):
self.drop_table("iris_view")
self._get_exec().execute(SQL_STRINGS["create_view"][self.flavor])
def _check_iris_loaded_frame(self, iris_frame):
pytype = iris_frame.dtypes[0].type
row = iris_frame.iloc[0]
assert issubclass(pytype, np.floating)
tm.equalContents(row.values, [5.1, 3.5, 1.4, 0.2, "Iris-setosa"])
def _load_test1_data(self):
columns = ["index", "A", "B", "C", "D"]
data = [
(
"2000-01-03 00:00:00",
0.980268513777,
3.68573087906,
-0.364216805298,
-1.15973806169,
),
(
"2000-01-04 00:00:00",
1.04791624281,
-0.0412318367011,
-0.16181208307,
0.212549316967,
),
(
"2000-01-05 00:00:00",
0.498580885705,
0.731167677815,
-0.537677223318,
1.34627041952,
),
(
"2000-01-06 00:00:00",
1.12020151869,
1.56762092543,
0.00364077397681,
0.67525259227,
),
]
self.test_frame1 = DataFrame(data, columns=columns)
def _load_test2_data(self):
df = DataFrame(
dict(
A=[4, 1, 3, 6],
B=["asd", "gsq", "ylt", "jkl"],
C=[1.1, 3.1, 6.9, 5.3],
D=[False, True, True, False],
E=["1990-11-22", "1991-10-26", "1993-11-26", "1995-12-12"],
)
)
df["E"] = to_datetime(df["E"])
self.test_frame2 = df
def _load_test3_data(self):
columns = ["index", "A", "B"]
data = [
("2000-01-03 00:00:00", 2 ** 31 - 1, -1.987670),
("2000-01-04 00:00:00", -29, -0.0412318367011),
("2000-01-05 00:00:00", 20000, 0.731167677815),
("2000-01-06 00:00:00", -290867, 1.56762092543),
]
self.test_frame3 = DataFrame(data, columns=columns)
def _load_raw_sql(self):
self.drop_table("types_test_data")
self._get_exec().execute(SQL_STRINGS["create_test_types"][self.flavor])
ins = SQL_STRINGS["insert_test_types"][self.flavor]
data = [
{
"TextCol": "first",
"DateCol": "2000-01-03 00:00:00",
"DateColWithTz": "2000-01-01 00:00:00-08:00",
"IntDateCol": 535852800,
"IntDateOnlyCol": 20101010,
"FloatCol": 10.10,
"IntCol": 1,
"BoolCol": False,
"IntColWithNull": 1,
"BoolColWithNull": False,
},
{
"TextCol": "first",
"DateCol": "2000-01-04 00:00:00",
"DateColWithTz": "2000-06-01 00:00:00-07:00",
"IntDateCol": 1356998400,
"IntDateOnlyCol": 20101212,
"FloatCol": 10.10,
"IntCol": 1,
"BoolCol": False,
"IntColWithNull": None,
"BoolColWithNull": None,
},
]
for d in data:
self._get_exec().execute(
ins["query"], [d[field] for field in ins["fields"]]
)
def _count_rows(self, table_name):
result = (
self._get_exec()
.execute(f"SELECT count(*) AS count_1 FROM {table_name}")
.fetchone()
)
return result[0]
def _read_sql_iris(self):
iris_frame = self.pandasSQL.read_query("SELECT * FROM iris")
self._check_iris_loaded_frame(iris_frame)
def _read_sql_iris_parameter(self):
query = SQL_STRINGS["read_parameters"][self.flavor]
params = ["Iris-setosa", 5.1]
iris_frame = self.pandasSQL.read_query(query, params=params)
self._check_iris_loaded_frame(iris_frame)
def _read_sql_iris_named_parameter(self):
query = SQL_STRINGS["read_named_parameters"][self.flavor]
params = {"name": "Iris-setosa", "length": 5.1}
iris_frame = self.pandasSQL.read_query(query, params=params)
self._check_iris_loaded_frame(iris_frame)
def _to_sql(self, method=None):
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", method=method)
assert self.pandasSQL.has_table("test_frame1")
num_entries = len(self.test_frame1)
num_rows = self._count_rows("test_frame1")
assert num_rows == num_entries
# Nuke table
self.drop_table("test_frame1")
def _to_sql_empty(self):
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1.iloc[:0], "test_frame1")
def _to_sql_fail(self):
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="fail")
assert self.pandasSQL.has_table("test_frame1")
msg = "Table 'test_frame1' already exists"
with pytest.raises(ValueError, match=msg):
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="fail")
self.drop_table("test_frame1")
def _to_sql_replace(self):
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="fail")
# Add to table again
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="replace")
assert self.pandasSQL.has_table("test_frame1")
num_entries = len(self.test_frame1)
num_rows = self._count_rows("test_frame1")
assert num_rows == num_entries
self.drop_table("test_frame1")
def _to_sql_append(self):
# Nuke table just in case
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="fail")
# Add to table again
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="append")
assert self.pandasSQL.has_table("test_frame1")
num_entries = 2 * len(self.test_frame1)
num_rows = self._count_rows("test_frame1")
assert num_rows == num_entries
self.drop_table("test_frame1")
def _to_sql_method_callable(self):
check = [] # used to double check function below is really being used
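# to_sql passes the callable the table wrapper, the connection, the list
# of column names and an iterator over the data rows, in that order.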
def sample(pd_table, conn, keys, data_iter):
check.append(1)
data = [dict(zip(keys, row)) for row in data_iter]
conn.execute(pd_table.table.insert(), data)
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", method=sample)
assert self.pandasSQL.has_table("test_frame1")
assert check == [1]
num_entries = len(self.test_frame1)
num_rows = self._count_rows("test_frame1")
assert num_rows == num_entries
# Nuke table
self.drop_table("test_frame1")
def _roundtrip(self):
self.drop_table("test_frame_roundtrip")
self.pandasSQL.to_sql(self.test_frame1, "test_frame_roundtrip")
result = self.pandasSQL.read_query("SELECT * FROM test_frame_roundtrip")
result.set_index("level_0", inplace=True)
# result.index.astype(int)
result.index.name = None
tm.assert_frame_equal(result, self.test_frame1)
def _execute_sql(self):
# drop_sql = "DROP TABLE IF EXISTS test" # should already be done
iris_results = self.pandasSQL.execute("SELECT * FROM iris")
row = iris_results.fetchone()
tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, "Iris-setosa"])
def _to_sql_save_index(self):
df = DataFrame.from_records(
[(1, 2.1, "line1"), (2, 1.5, "line2")], columns=["A", "B", "C"], index=["A"]
)
self.pandasSQL.to_sql(df, "test_to_sql_saves_index")
ix_cols = self._get_index_columns("test_to_sql_saves_index")
assert ix_cols == [["A"]]
def _transaction_test(self):
with self.pandasSQL.run_transaction() as trans:
trans.execute("CREATE TABLE test_trans (A INT, B TEXT)")
class DummyException(Exception):
pass
# Make sure when transaction is rolled back, no rows get inserted
ins_sql = "INSERT INTO test_trans (A,B) VALUES (1, 'blah')"
try:
with self.pandasSQL.run_transaction() as trans:
trans.execute(ins_sql)
raise DummyException("error")
except DummyException:
# ignore raised exception
pass
res = self.pandasSQL.read_query("SELECT * FROM test_trans")
assert len(res) == 0
# Make sure when transaction is committed, rows do get inserted
with self.pandasSQL.run_transaction() as trans:
trans.execute(ins_sql)
res2 = self.pandasSQL.read_query("SELECT * FROM test_trans")
assert len(res2) == 1
# -----------------------------------------------------------------------------
# -- Testing the public API
class _TestSQLApi(PandasSQLTest):
"""
Base class to test the public API.
From this two classes are derived to run these tests for both the
sqlalchemy mode (`TestSQLApi`) and the fallback mode
(`TestSQLiteFallbackApi`). These tests are run with sqlite3. Specific
tests for the different sql flavours are included in `_TestSQLAlchemy`.
Notes:
flavor can always be passed even in SQLAlchemy mode,
should be correctly ignored.
we don't use drop_table because that isn't part of the public api
"""
flavor = "sqlite"
mode: str
def setup_connect(self):
self.conn = self.connect()
@pytest.fixture(autouse=True)
def setup_method(self, load_iris_data):
self.load_test_data_and_sql()
def load_test_data_and_sql(self):
self._load_iris_view()
self._load_test1_data()
self._load_test2_data()
self._load_test3_data()
self._load_raw_sql()
def test_read_sql_iris(self):
iris_frame = sql.read_sql_query("SELECT * FROM iris", self.conn)
self._check_iris_loaded_frame(iris_frame)
def test_read_sql_view(self):
iris_frame = sql.read_sql_query("SELECT * FROM iris_view", self.conn)
self._check_iris_loaded_frame(iris_frame)
def test_to_sql(self):
sql.to_sql(self.test_frame1, "test_frame1", self.conn)
assert sql.has_table("test_frame1", self.conn)
def test_to_sql_fail(self):
sql.to_sql(self.test_frame1, "test_frame2", self.conn, if_exists="fail")
assert sql.has_table("test_frame2", self.conn)
msg = "Table 'test_frame2' already exists"
with pytest.raises(ValueError, match=msg):
sql.to_sql(self.test_frame1, "test_frame2", self.conn, if_exists="fail")
def test_to_sql_replace(self):
sql.to_sql(self.test_frame1, "test_frame3", self.conn, if_exists="fail")
# Add to table again
sql.to_sql(self.test_frame1, "test_frame3", self.conn, if_exists="replace")
assert sql.has_table("test_frame3", self.conn)
num_entries = len(self.test_frame1)
num_rows = self._count_rows("test_frame3")
assert num_rows == num_entries
def test_to_sql_append(self):
sql.to_sql(self.test_frame1, "test_frame4", self.conn, if_exists="fail")
# Add to table again
sql.to_sql(self.test_frame1, "test_frame4", self.conn, if_exists="append")
assert sql.has_table("test_frame4", self.conn)
num_entries = 2 * len(self.test_frame1)
num_rows = self._count_rows("test_frame4")
assert num_rows == num_entries
def test_to_sql_type_mapping(self):
sql.to_sql(self.test_frame3, "test_frame5", self.conn, index=False)
result = sql.read_sql("SELECT * FROM test_frame5", self.conn)
tm.assert_frame_equal(self.test_frame3, result)
def test_to_sql_series(self):
s = Series(np.arange(5, dtype="int64"), name="series")
sql.to_sql(s, "test_series", self.conn, index=False)
s2 = sql.read_sql_query("SELECT * FROM test_series", self.conn)
tm.assert_frame_equal(s.to_frame(), s2)
def test_roundtrip(self):
sql.to_sql(self.test_frame1, "test_frame_roundtrip", con=self.conn)
result = sql.read_sql_query("SELECT * FROM test_frame_roundtrip", con=self.conn)
# HACK!
result.index = self.test_frame1.index
result.set_index("level_0", inplace=True)
result.index.astype(int)
result.index.name = None
tm.assert_frame_equal(result, self.test_frame1)
def test_roundtrip_chunksize(self):
sql.to_sql(
self.test_frame1,
"test_frame_roundtrip",
con=self.conn,
index=False,
chunksize=2,
)
result = sql.read_sql_query("SELECT * FROM test_frame_roundtrip", con=self.conn)
tm.assert_frame_equal(result, self.test_frame1)
def test_execute_sql(self):
# drop_sql = "DROP TABLE IF EXISTS test" # should already be done
iris_results = sql.execute("SELECT * FROM iris", con=self.conn)
row = iris_results.fetchone()
tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, "Iris-setosa"])
def test_date_parsing(self):
# Test date parsing in read_sql
# No Parsing
df = sql.read_sql_query("SELECT * FROM types_test_data", self.conn)
assert not issubclass(df.DateCol.dtype.type, np.datetime64)
df = sql.read_sql_query(
"SELECT * FROM types_test_data", self.conn, parse_dates=["DateCol"]
)
assert issubclass(df.DateCol.dtype.type, np.datetime64)
assert df.DateCol.tolist() == [
pd.Timestamp(2000, 1, 3, 0, 0, 0),
pd.Timestamp(2000, 1, 4, 0, 0, 0),
]
df = sql.read_sql_query(
"SELECT * FROM types_test_data",
self.conn,
parse_dates={"DateCol": "%Y-%m-%d %H:%M:%S"},
)
assert issubclass(df.DateCol.dtype.type, np.datetime64)
assert df.DateCol.tolist() == [
pd.Timestamp(2000, 1, 3, 0, 0, 0),
pd.Timestamp(2000, 1, 4, 0, 0, 0),
]
df = sql.read_sql_query(
"SELECT * FROM types_test_data", self.conn, parse_dates=["IntDateCol"]
)
assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
assert df.IntDateCol.tolist() == [
pd.Timestamp(1986, 12, 25, 0, 0, 0),
pd.Timestamp(2013, 1, 1, 0, 0, 0),
]
df = sql.read_sql_query(
"SELECT * FROM types_test_data", self.conn, parse_dates={"IntDateCol": "s"}
)
assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
assert df.IntDateCol.tolist() == [
pd.Timestamp(1986, 12, 25, 0, 0, 0),
pd.Timestamp(2013, 1, 1, 0, 0, 0),
]
df = sql.read_sql_query(
"SELECT * FROM types_test_data",
self.conn,
parse_dates={"IntDateOnlyCol": "%Y%m%d"},
)
assert issubclass(df.IntDateOnlyCol.dtype.type, np.datetime64)
assert df.IntDateOnlyCol.tolist() == [
pd.Timestamp("2010-10-10"),
pd.Timestamp("2010-12-12"),
]
def test_date_and_index(self):
# Test case where same column appears in parse_date and index_col
df = sql.read_sql_query(
"SELECT * FROM types_test_data",
self.conn,
index_col="DateCol",
parse_dates=["DateCol", "IntDateCol"],
)
assert issubclass(df.index.dtype.type, np.datetime64)
assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
def test_timedelta(self):
# see #6921
df = to_timedelta(Series(["00:00:01", "00:00:03"], name="foo")).to_frame()
with tm.assert_produces_warning(UserWarning):
df.to_sql("test_timedelta", self.conn)
result = sql.read_sql_query("SELECT * FROM test_timedelta", self.conn)
tm.assert_series_equal(result["foo"], df["foo"].astype("int64"))
def test_complex_raises(self):
df = DataFrame({"a": [1 + 1j, 2j]})
msg = "Complex datatypes not supported"
with pytest.raises(ValueError, match=msg):
df.to_sql("test_complex", self.conn)
@pytest.mark.parametrize(
"index_name,index_label,expected",
[
# no index name, defaults to 'index'
(None, None, "index"),
# specifying index_label
(None, "other_label", "other_label"),
# using the index name
("index_name", None, "index_name"),
# has index name, but specifying index_label
("index_name", "other_label", "other_label"),
# index name is integer
(0, None, "0"),
# index name is None but index label is integer
(None, 0, "0"),
],
)
def test_to_sql_index_label(self, index_name, index_label, expected):
temp_frame = DataFrame({"col1": range(4)})
temp_frame.index.name = index_name
query = "SELECT * FROM test_index_label"
sql.to_sql(temp_frame, "test_index_label", self.conn, index_label=index_label)
frame = sql.read_sql_query(query, self.conn)
assert frame.columns[0] == expected
def test_to_sql_index_label_multiindex(self):
temp_frame = DataFrame(
{"col1": range(4)},
index=MultiIndex.from_product([("A0", "A1"), ("B0", "B1")]),
)
# no index name, defaults to 'level_0' and 'level_1'
sql.to_sql(temp_frame, "test_index_label", self.conn)
frame = sql.read_sql_query("SELECT * FROM test_index_label", self.conn)
assert frame.columns[0] == "level_0"
assert frame.columns[1] == "level_1"
# specifying index_label
sql.to_sql(
temp_frame,
"test_index_label",
self.conn,
if_exists="replace",
index_label=["A", "B"],
)
frame = sql.read_sql_query("SELECT * FROM test_index_label", self.conn)
assert frame.columns[:2].tolist() == ["A", "B"]
# using the index name
temp_frame.index.names = ["A", "B"]
sql.to_sql(temp_frame, "test_index_label", self.conn, if_exists="replace")
frame = sql.read_sql_query("SELECT * FROM test_index_label", self.conn)
assert frame.columns[:2].tolist() == ["A", "B"]
# has index name, but specifying index_label
sql.to_sql(
temp_frame,
"test_index_label",
self.conn,
if_exists="replace",
index_label=["C", "D"],
)
frame = sql.read_sql_query("SELECT * FROM test_index_label", self.conn)
assert frame.columns[:2].tolist() == ["C", "D"]
msg = "Length of 'index_label' should match number of levels, which is 2"
with pytest.raises(ValueError, match=msg):
sql.to_sql(
temp_frame,
"test_index_label",
self.conn,
if_exists="replace",
index_label="C",
)
def test_multiindex_roundtrip(self):
df = DataFrame.from_records(
[(1, 2.1, "line1"), (2, 1.5, "line2")],
columns=["A", "B", "C"],
index=["A", "B"],
)
df.to_sql("test_multiindex_roundtrip", self.conn)
result = sql.read_sql_query(
"SELECT * FROM test_multiindex_roundtrip", self.conn, index_col=["A", "B"]
)
tm.assert_frame_equal(df, result, check_index_type=True)
def test_integer_col_names(self):
df = DataFrame([[1, 2], [3, 4]], columns=[0, 1])
sql.to_sql(df, "test_frame_integer_col_names", self.conn, if_exists="replace")
def test_get_schema(self):
create_sql = sql.get_schema(self.test_frame1, "test", con=self.conn)
assert "CREATE" in create_sql
def test_get_schema_dtypes(self):
float_frame = DataFrame({"a": [1.1, 1.2], "b": [2.1, 2.2]})
dtype = sqlalchemy.Integer if self.mode == "sqlalchemy" else "INTEGER"
create_sql = sql.get_schema(
float_frame, "test", con=self.conn, dtype={"b": dtype}
)
assert "CREATE" in create_sql
assert "INTEGER" in create_sql
def test_get_schema_keys(self):
frame = DataFrame({"Col1": [1.1, 1.2], "Col2": [2.1, 2.2]})
create_sql = sql.get_schema(frame, "test", con=self.conn, keys="Col1")
constraint_sentence = 'CONSTRAINT test_pk PRIMARY KEY ("Col1")'
assert constraint_sentence in create_sql
# multiple columns as key (GH10385)
create_sql = sql.get_schema(
self.test_frame1, "test", con=self.conn, keys=["A", "B"]
)
constraint_sentence = 'CONSTRAINT test_pk PRIMARY KEY ("A", "B")'
assert constraint_sentence in create_sql
def test_chunksize_read(self):
df = DataFrame(np.random.randn(22, 5), columns=list("abcde"))
df.to_sql("test_chunksize", self.conn, index=False)
# reading the query in one time
res1 = sql.read_sql_query("select * from test_chunksize", self.conn)
# reading the query in chunks with read_sql_query
res2 = DataFrame()
i = 0
sizes = [5, 5, 5, 5, 2]
for chunk in sql.read_sql_query(
"select * from test_chunksize", self.conn, chunksize=5
):
res2 = concat([res2, chunk], ignore_index=True)
assert len(chunk) == sizes[i]
i += 1
tm.assert_frame_equal(res1, res2)
# reading the query in chunks with read_sql_query
if self.mode == "sqlalchemy":
res3 = DataFrame()
i = 0
sizes = [5, 5, 5, 5, 2]
for chunk in sql.read_sql_table("test_chunksize", self.conn, chunksize=5):
res3 = concat([res3, chunk], ignore_index=True)
assert len(chunk) == sizes[i]
i += 1
tm.assert_frame_equal(res1, res3)
def test_categorical(self):
# GH8624
# test that categorical gets written correctly as dense column
df = DataFrame(
{
"person_id": [1, 2, 3],
"person_name": ["<NAME>", "<NAME>", "<NAME>"],
}
)
df2 = df.copy()
df2["person_name"] = df2["person_name"].astype("category")
df2.to_sql("test_categorical", self.conn, index=False)
res = sql.read_sql_query("SELECT * FROM test_categorical", self.conn)
tm.assert_frame_equal(res, df)
def test_unicode_column_name(self):
# GH 11431
df = DataFrame([[1, 2], [3, 4]], columns=["\xe9", "b"])
df.to_sql("test_unicode", self.conn, index=False)
def test_escaped_table_name(self):
# GH 13206
df = DataFrame({"A": [0, 1, 2], "B": [0.2, np.nan, 5.6]})
df.to_sql("d1187b08-4943-4c8d-a7f6", self.conn, index=False)
res = sql.read_sql_query("SELECT * FROM `d1187b08-4943-4c8d-a7f6`", self.conn)
tm.assert_frame_equal(res, df)
@pytest.mark.single
@pytest.mark.skipif(not SQLALCHEMY_INSTALLED, reason="SQLAlchemy not installed")
class TestSQLApi(SQLAlchemyMixIn, _TestSQLApi):
"""
Test the public API as it would be used directly
Tests for `read_sql_table` are included here, as this is specific for the
sqlalchemy mode.
"""
flavor = "sqlite"
mode = "sqlalchemy"
def connect(self):
return sqlalchemy.create_engine("sqlite:///:memory:")
def test_read_table_columns(self):
# test columns argument in read_table
sql.to_sql(self.test_frame1, "test_frame", self.conn)
cols = ["A", "B"]
result = sql.read_sql_table("test_frame", self.conn, columns=cols)
assert result.columns.tolist() == cols
def test_read_table_index_col(self):
# test columns argument in read_table
sql.to_sql(self.test_frame1, "test_frame", self.conn)
result = sql.read_sql_table("test_frame", self.conn, index_col="index")
assert result.index.names == ["index"]
result = sql.read_sql_table("test_frame", self.conn, index_col=["A", "B"])
assert result.index.names == ["A", "B"]
result = sql.read_sql_table(
"test_frame", self.conn, index_col=["A", "B"], columns=["C", "D"]
)
assert result.index.names == ["A", "B"]
assert result.columns.tolist() == ["C", "D"]
def test_read_sql_delegate(self):
iris_frame1 = sql.read_sql_query("SELECT * FROM iris", self.conn)
iris_frame2 = sql.read_sql("SELECT * FROM iris", self.conn)
tm.assert_frame_equal(iris_frame1, iris_frame2)
iris_frame1 = sql.read_sql_table("iris", self.conn)
iris_frame2 = sql.read_sql("iris", self.conn)
tm.assert_frame_equal(iris_frame1, iris_frame2)
def test_not_reflect_all_tables(self):
# create invalid table
qry = """CREATE TABLE invalid (x INTEGER, y UNKNOWN);"""
self.conn.execute(qry)
qry = """CREATE TABLE other_table (x INTEGER, y INTEGER);"""
self.conn.execute(qry)
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# Trigger a warning.
sql.read_sql_table("other_table", self.conn)
sql.read_sql_query("SELECT * FROM other_table", self.conn)
# Verify some things
assert len(w) == 0
def test_warning_case_insensitive_table_name(self):
# see gh-7815
#
# We can't test that this warning is triggered, as the database
# configuration would have to be altered. But here we test that
# the warning is certainly NOT triggered in a normal case.
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# This should not trigger a Warning
self.test_frame1.to_sql("CaseSensitive", self.conn)
# Verify some things
assert len(w) == 0
def _get_index_columns(self, tbl_name):
from sqlalchemy.engine import reflection
insp = reflection.Inspector.from_engine(self.conn)
ixs = insp.get_indexes("test_index_saved")
ixs = [i["column_names"] for i in ixs]
return ixs
def test_sqlalchemy_type_mapping(self):
# Test Timestamp objects (no datetime64 because of timezone) (GH9085)
df = DataFrame(
{"time": to_datetime(["201412120154", "201412110254"], utc=True)}
)
db = sql.SQLDatabase(self.conn)
table = sql.SQLTable("test_type", db, frame=df)
# GH 9086: TIMESTAMP is the suggested type for datetimes with timezones
assert isinstance(table.table.c["time"].type, sqltypes.TIMESTAMP)
def test_database_uri_string(self):
# Test read_sql and .to_sql method with a database URI (GH10654)
test_frame1 = self.test_frame1
# db_uri = 'sqlite:///:memory:' # raises
# sqlalchemy.exc.OperationalError: (sqlite3.OperationalError) near
# "iris": syntax error [SQL: 'iris']
with tm.ensure_clean() as name:
db_uri = "sqlite:///" + name
table = "iris"
test_frame1.to_sql(table, db_uri, if_exists="replace", index=False)
test_frame2 = sql.read_sql(table, db_uri)
test_frame3 = sql.read_sql_table(table, db_uri)
query = "SELECT * FROM iris"
test_frame4 = sql.read_sql_query(query, db_uri)
tm.assert_frame_equal(test_frame1, test_frame2)
tm.assert_frame_equal(test_frame1, test_frame3)
tm.assert_frame_equal(test_frame1, test_frame4)
# using driver that will not be installed on Travis to trigger error
# in sqlalchemy.create_engine -> test passing of this error to user
try:
# the rest of this test depends on pg8000's being absent
import pg8000 # noqa
pytest.skip("pg8000 is installed")
except ImportError:
pass
db_uri = "postgresql+pg8000://user:pass@host/dbname"
with pytest.raises(ImportError, match="pg8000"):
sql.read_sql("select * from table", db_uri)
def _make_iris_table_metadata(self):
sa = sqlalchemy
metadata = sa.MetaData()
iris = sa.Table(
"iris",
metadata,
sa.Column("SepalLength", sa.REAL),
sa.Column("SepalWidth", sa.REAL),
sa.Column("PetalLength", sa.REAL),
sa.Column("PetalWidth", sa.REAL),
sa.Column("Name", sa.TEXT),
)
return iris
def test_query_by_text_obj(self):
# WIP : GH10846
name_text = sqlalchemy.text("select * from iris where name=:name")
iris_df = sql.read_sql(name_text, self.conn, params={"name": "Iris-versicolor"})
all_names = set(iris_df["Name"])
assert all_names == {"Iris-versicolor"}
def test_query_by_select_obj(self):
# WIP : GH10846
iris = self._make_iris_table_metadata()
name_select = sqlalchemy.select([iris]).where(
iris.c.Name == sqlalchemy.bindparam("name")
)
iris_df = sql.read_sql(name_select, self.conn, params={"name": "Iris-setosa"})
all_names = set(iris_df["Name"])
assert all_names == {"Iris-setosa"}
class _EngineToConnMixin:
"""
A mixin that causes setup_connect to create a conn rather than an engine.
"""
@pytest.fixture(autouse=True)
def setup_method(self, load_iris_data):
super().load_test_data_and_sql()
engine = self.conn
conn = engine.connect()
self.__tx = conn.begin()
self.pandasSQL = sql.SQLDatabase(conn)
self.__engine = engine
self.conn = conn
yield
self.__tx.rollback()
self.conn.close()
self.conn = self.__engine
self.pandasSQL = sql.SQLDatabase(self.__engine)
# XXX:
# super().teardown_method(method)
@pytest.mark.single
class TestSQLApiConn(_EngineToConnMixin, TestSQLApi):
pass
@pytest.mark.single
class TestSQLiteFallbackApi(SQLiteMixIn, _TestSQLApi):
"""
Test the public sqlite connection fallback API
"""
flavor = "sqlite"
mode = "fallback"
def connect(self, database=":memory:"):
return sqlite3.connect(database)
def test_sql_open_close(self):
# Test if the IO in the database still work if the connection closed
# between the writing and reading (as in many real situations).
with tm.ensure_clean() as name:
conn = self.connect(name)
sql.to_sql(self.test_frame3, "test_frame3_legacy", conn, index=False)
conn.close()
conn = self.connect(name)
result = sql.read_sql_query("SELECT * FROM test_frame3_legacy;", conn)
conn.close()
tm.assert_frame_equal(self.test_frame3, result)
@pytest.mark.skipif(SQLALCHEMY_INSTALLED, reason="SQLAlchemy is installed")
def test_con_string_import_error(self):
conn = "mysql://root@localhost/pandas_nosetest"
msg = "Using URI string without sqlalchemy installed"
with pytest.raises(ImportError, match=msg):
sql.read_sql("SELECT * FROM iris", conn)
def test_read_sql_delegate(self):
iris_frame1 = sql.read_sql_query("SELECT * FROM iris", self.conn)
iris_frame2 = sql.read_sql("SELECT * FROM iris", self.conn)
tm.assert_frame_equal(iris_frame1, iris_frame2)
msg = "Execution failed on sql 'iris': near \"iris\": syntax error"
with pytest.raises(sql.DatabaseError, match=msg):
sql.read_sql("iris", self.conn)
def test_safe_names_warning(self):
# GH 6798
df = DataFrame([[1, 2], [3, 4]], columns=["a", "b "]) # has a space
# warns on create table with spaces in names
with tm.assert_produces_warning():
sql.to_sql(df, "test_frame3_legacy", self.conn, index=False)
def test_get_schema2(self):
# without providing a connection object (available for backwards compatibility)
create_sql = sql.get_schema(self.test_frame1, "test")
assert "CREATE" in create_sql
def _get_sqlite_column_type(self, schema, column):
for col in schema.split("\n"):
if col.split()[0].strip('""') == column:
return col.split()[1]
raise ValueError(f"Column {column} not found")
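# For reference, the schema string produced by sql_schema() for the frame used
# below looks roughly like
#   CREATE TABLE "test_type" (
#   "time" TIMESTAMP
#   )
# so the helper above matches the quoted column name and returns the following
# whitespace-separated token (the declared SQL type).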
def test_sqlite_type_mapping(self):
# Test Timestamp objects (no datetime64 because of timezone) (GH9085)
df = DataFrame(
{"time": to_datetime(["201412120154", "201412110254"], utc=True)}
)
db = sql.SQLiteDatabase(self.conn)
table = sql.SQLiteTable("test_type", db, frame=df)
schema = table.sql_schema()
assert self._get_sqlite_column_type(schema, "time") == "TIMESTAMP"
# -----------------------------------------------------------------------------
# -- Database flavor specific tests
class _TestSQLAlchemy(SQLAlchemyMixIn, PandasSQLTest):
"""
Base class for testing the sqlalchemy backend.
Subclasses for specific database types are created below. Tests that
deviate for each flavor are overwritten there.
"""
flavor: str
@pytest.fixture(autouse=True, scope="class")
def setup_class(cls):
cls.setup_import()
cls.setup_driver()
conn = cls.connect()
conn.connect()
def load_test_data_and_sql(self):
self._load_raw_sql()
self._load_test1_data()
@pytest.fixture(autouse=True)
def setup_method(self, load_iris_data):
self.load_test_data_and_sql()
@classmethod
def setup_import(cls):
# Skip this test if SQLAlchemy not available
if not SQLALCHEMY_INSTALLED:
pytest.skip("SQLAlchemy not installed")
@classmethod
def setup_driver(cls):
raise NotImplementedError()
@classmethod
def connect(cls):
raise NotImplementedError()
def setup_connect(self):
try:
self.conn = self.connect()
self.pandasSQL = sql.SQLDatabase(self.conn)
# to test if connection can be made:
self.conn.connect()
except sqlalchemy.exc.OperationalError:
pytest.skip(f"Can't connect to {self.flavor} server")
def test_read_sql(self):
self._read_sql_iris()
def test_read_sql_parameter(self):
self._read_sql_iris_parameter()
def test_read_sql_named_parameter(self):
self._read_sql_iris_named_parameter()
def test_to_sql(self):
self._to_sql()
def test_to_sql_empty(self):
self._to_sql_empty()
def test_to_sql_fail(self):
self._to_sql_fail()
def test_to_sql_replace(self):
self._to_sql_replace()
def test_to_sql_append(self):
self._to_sql_append()
def test_to_sql_method_multi(self):
self._to_sql(method="multi")
def test_to_sql_method_callable(self):
self._to_sql_method_callable()
def test_create_table(self):
temp_conn = self.connect()
temp_frame = DataFrame(
{"one": [1.0, 2.0, 3.0, 4.0], "two": [4.0, 3.0, 2.0, 1.0]}
)
pandasSQL = sql.SQLDatabase(temp_conn)
pandasSQL.to_sql(temp_frame, "temp_frame")
assert temp_conn.has_table("temp_frame")
def test_drop_table(self):
temp_conn = self.connect()
temp_frame = DataFrame(
{"one": [1.0, 2.0, 3.0, 4.0], "two": [4.0, 3.0, 2.0, 1.0]}
)
pandasSQL = sql.SQLDatabase(temp_conn)
pandasSQL.to_sql(temp_frame, "temp_frame")
assert temp_conn.has_table("temp_frame")
pandasSQL.drop_table("temp_frame")
assert not temp_conn.has_table("temp_frame")
def test_roundtrip(self):
self._roundtrip()
def test_execute_sql(self):
self._execute_sql()
def test_read_table(self):
iris_frame = sql.read_sql_table("iris", con=self.conn)
self._check_iris_loaded_frame(iris_frame)
def test_read_table_columns(self):
iris_frame = sql.read_sql_table(
"iris", con=self.conn, columns=["SepalLength", "SepalLength"]
)
tm.equalContents(iris_frame.columns.values, ["SepalLength", "SepalLength"])
def test_read_table_absent_raises(self):
msg = "Table this_doesnt_exist not found"
with pytest.raises(ValueError, match=msg):
sql.read_sql_table("this_doesnt_exist", con=self.conn)
def test_default_type_conversion(self):
df = sql.read_sql_table("types_test_data", self.conn)
assert issubclass(df.FloatCol.dtype.type, np.floating)
assert issubclass(df.IntCol.dtype.type, np.integer)
assert issubclass(df.BoolCol.dtype.type, np.bool_)
# Int column with NA values stays as float
assert issubclass(df.IntColWithNull.dtype.type, np.floating)
# Bool column with NA values becomes object
assert issubclass(df.BoolColWithNull.dtype.type, np.object)
def test_bigint(self):
# int64 should be converted to BigInteger, GH7433
df = DataFrame(data={"i64": [2 ** 62]})
df.to_sql("test_bigint", self.conn, index=False)
result = sql.read_sql_table("test_bigint", self.conn)
tm.assert_frame_equal(df, result)
def test_default_date_load(self):
df = sql.read_sql_table("types_test_data", self.conn)
# IMPORTANT - sqlite has no native date type, so dates shouldn't be parsed, but
# MySQL dates SHOULD be converted.
assert issubclass(df.DateCol.dtype.type, np.datetime64)
def test_datetime_with_timezone(self):
# edge case that converts postgresql datetime with time zone types
# to datetime64[ns,psycopg2.tz.FixedOffsetTimezone..], which is ok
# but should be more natural, so coerce to datetime64[ns] for now
def check(col):
# check that a column is either datetime64[ns]
# or datetime64[ns, UTC]
if is_datetime64_dtype(col.dtype):
# "2000-01-01 00:00:00-08:00" should convert to
# "2000-01-01 08:00:00"
assert col[0] == Timestamp("2000-01-01 08:00:00")
# "2000-06-01 00:00:00-07:00" should convert to
# "2000-06-01 07:00:00"
assert col[1] == Timestamp("2000-06-01 07:00:00")
elif is_datetime64tz_dtype(col.dtype):
assert str(col.dt.tz) == "UTC"
# "2000-01-01 00:00:00-08:00" should convert to
# "2000-01-01 08:00:00"
# "2000-06-01 00:00:00-07:00" should convert to
# "2000-06-01 07:00:00"
# GH 6415
expected_data = [
Timestamp("2000-01-01 08:00:00", tz="UTC"),
Timestamp("2000-06-01 07:00:00", tz="UTC"),
]
expected = Series(expected_data, name=col.name)
tm.assert_series_equal(col, expected)
else:
raise AssertionError(
f"DateCol loaded with incorrect type -> {col.dtype}"
)
# GH11216
df = pd.read_sql_query("select * from types_test_data", self.conn)
if not hasattr(df, "DateColWithTz"):
pytest.skip("no column with datetime with time zone")
# this is parsed on Travis (linux), but not on macosx for some reason
# even with the same versions of psycopg2 & sqlalchemy, possibly a
# Postgresql server version difference
col = df.DateColWithTz
assert is_datetime64tz_dtype(col.dtype)
df = pd.read_sql_query(
"select * from types_test_data", self.conn, parse_dates=["DateColWithTz"]
)
if not hasattr(df, "DateColWithTz"):
pytest.skip("no column with datetime with time zone")
col = df.DateColWithTz
assert is_datetime64tz_dtype(col.dtype)
assert str(col.dt.tz) == "UTC"
check(df.DateColWithTz)
df = pd.concat(
list(
pd.read_sql_query(
"select * from types_test_data", self.conn, chunksize=1
)
),
ignore_index=True,
)
col = df.DateColWithTz
assert is_datetime64tz_dtype(col.dtype)
assert str(col.dt.tz) == "UTC"
expected = sql.read_sql_table("types_test_data", self.conn)
col = expected.DateColWithTz
assert is_datetime64tz_dtype(col.dtype)
tm.assert_series_equal(df.DateColWithTz, expected.DateColWithTz)
# xref #7139
# this might or might not be converted depending on the postgres driver
df = sql.read_sql_table("types_test_data", self.conn)
check(df.DateColWithTz)
def test_datetime_with_timezone_roundtrip(self):
# GH 9086
# Write datetimetz data to a db and read it back
# For dbs that support timestamps with timezones, should get back UTC
# otherwise naive data should be returned
expected = DataFrame(
{"A": date_range("2013-01-01 09:00:00", periods=3, tz="US/Pacific")}
)
expected.to_sql("test_datetime_tz", self.conn, index=False)
if self.flavor == "postgresql":
# SQLAlchemy "timezones" (i.e. offsets) are coerced to UTC
expected["A"] = expected["A"].dt.tz_convert("UTC")
else:
# Otherwise, timestamps are returned as local, naive
expected["A"] = expected["A"].dt.tz_localize(None)
result = sql.read_sql_table("test_datetime_tz", self.conn)
tm.assert_frame_equal(result, expected)
result = sql.read_sql_query("SELECT * FROM test_datetime_tz", self.conn)
if self.flavor == "sqlite":
# read_sql_query does not return datetime type like read_sql_table
assert isinstance(result.loc[0, "A"], str)
result["A"] = to_datetime(result["A"])
tm.assert_frame_equal(result, expected)
def test_naive_datetimeindex_roundtrip(self):
# GH 23510
# Ensure that a naive DatetimeIndex isn't converted to UTC
dates = date_range("2018-01-01", periods=5, freq="6H")
expected = DataFrame({"nums": range(5)}, index=dates)
expected.to_sql("foo_table", self.conn, index_label="info_date")
result = sql.read_sql_table("foo_table", self.conn, index_col="info_date")
# the result index will gain a name from the set_index operation; expected will not
tm.assert_frame_equal(result, expected, check_names=False)
def test_date_parsing(self):
# No Parsing
df = sql.read_sql_table("types_test_data", self.conn)
expected_type = object if self.flavor == "sqlite" else np.datetime64
assert issubclass(df.DateCol.dtype.type, expected_type)
df = sql.read_sql_table("types_test_data", self.conn, parse_dates=["DateCol"])
assert issubclass(df.DateCol.dtype.type, np.datetime64)
df = sql.read_sql_table(
"types_test_data", self.conn, parse_dates={"DateCol": "%Y-%m-%d %H:%M:%S"}
)
assert issubclass(df.DateCol.dtype.type, np.datetime64)
df = sql.read_sql_table(
"types_test_data",
self.conn,
parse_dates={"DateCol": {"format": "%Y-%m-%d %H:%M:%S"}},
)
assert issubclass(df.DateCol.dtype.type, np.datetime64)
df = sql.read_sql_table(
"types_test_data", self.conn, parse_dates=["IntDateCol"]
)
assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
df = sql.read_sql_table(
"types_test_data", self.conn, parse_dates={"IntDateCol": "s"}
)
assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
df = sql.read_sql_table(
"types_test_data", self.conn, parse_dates={"IntDateCol": {"unit": "s"}}
)
assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
def test_datetime(self):
df = DataFrame(
{"A": date_range("2013-01-01 09:00:00", periods=3), "B": np.arange(3.0)}
)
df.to_sql("test_datetime", self.conn)
# with read_table -> type information from schema used
result = sql.read_sql_table("test_datetime", self.conn)
result = result.drop("index", axis=1)
tm.assert_frame_equal(result, df)
# with read_sql -> no type information -> sqlite has no native
result = sql.read_sql_query("SELECT * FROM test_datetime", self.conn)
result = result.drop("index", axis=1)
if self.flavor == "sqlite":
assert isinstance(result.loc[0, "A"], str)
result["A"] = to_datetime(result["A"])
tm.assert_frame_equal(result, df)
else:
tm.assert_frame_equal(result, df)
def test_datetime_NaT(self):
df = DataFrame(
{"A": date_range("2013-01-01 09:00:00", periods=3), "B": np.arange(3.0)}
)
df.loc[1, "A"] = np.nan
df.to_sql("test_datetime", self.conn, index=False)
# with read_table -> type information from schema used
result = sql.read_sql_table("test_datetime", self.conn)
tm.assert_frame_equal(result, df)
# with read_sql -> no type information -> sqlite has no native
result = sql.read_sql_query("SELECT * FROM test_datetime", self.conn)
if self.flavor == "sqlite":
assert isinstance(result.loc[0, "A"], str)
result["A"] = to_datetime(result["A"], errors="coerce")
tm.assert_frame_equal(result, df)
else:
tm.assert_frame_equal(result, df)
def test_datetime_date(self):
# test support for datetime.date
df = DataFrame([date(2014, 1, 1), date(2014, 1, 2)], columns=["a"])
df.to_sql("test_date", self.conn, index=False)
res = read_sql_table("test_date", self.conn)
result = res["a"]
expected = to_datetime(df["a"])
# comes back as datetime64
tm.assert_series_equal(result, expected)
def test_datetime_time(self):
# test support for datetime.time
df = DataFrame([time(9, 0, 0), time(9, 1, 30)], columns=["a"])
df.to_sql("test_time", self.conn, index=False)
res = read_sql_table("test_time", self.conn)
tm.assert_frame_equal(res, df)
# GH8341
# first, use the fallback to have the sqlite adapter put in place
sqlite_conn = TestSQLiteFallback.connect()
sql.to_sql(df, "test_time2", sqlite_conn, index=False)
res = sql.read_sql_query("SELECT * FROM test_time2", sqlite_conn)
ref = df.applymap(lambda _: _.strftime("%H:%M:%S.%f"))
tm.assert_frame_equal(ref, res) # check if adapter is in place
# then test if sqlalchemy is unaffected by the sqlite adapter
sql.to_sql(df, "test_time3", self.conn, index=False)
if self.flavor == "sqlite":
res = sql.read_sql_query("SELECT * FROM test_time3", self.conn)
ref = df.applymap(lambda _: _.strftime("%H:%M:%S.%f"))
tm.assert_frame_equal(ref, res)
res = sql.read_sql_table("test_time3", self.conn)
tm.assert_frame_equal(df, res)
def test_mixed_dtype_insert(self):
# see GH6509
s1 = Series(2 ** 25 + 1, dtype=np.int32)
s2 = Series(0.0, dtype=np.float32)
df = DataFrame({"s1": s1, "s2": s2})
# write and read again
df.to_sql("test_read_write", self.conn, index=False)
df2 = sql.read_sql_table("test_read_write", self.conn)
tm.assert_frame_equal(df, df2, check_dtype=False, check_exact=True)
def test_nan_numeric(self):
# NaNs in numeric float column
df = DataFrame({"A": [0, 1, 2], "B": [0.2, np.nan, 5.6]})
df.to_sql("test_nan", self.conn, index=False)
# with read_table
result = sql.read_sql_table("test_nan", self.conn)
tm.assert_frame_equal(result, df)
# with read_sql
result = sql.read_sql_query("SELECT * FROM test_nan", self.conn)
tm.assert_frame_equal(result, df)
def test_nan_fullcolumn(self):
# full NaN column (numeric float column)
df = DataFrame({"A": [0, 1, 2], "B": [np.nan, np.nan, np.nan]})
df.to_sql("test_nan", self.conn, index=False)
# with read_table
result = sql.read_sql_table("test_nan", self.conn)
tm.assert_frame_equal(result, df)
# with read_sql -> no type info from table -> stays None
df["B"] = df["B"].astype("object")
df["B"] = None
result = sql.read_sql_query("SELECT * FROM test_nan", self.conn)
tm.assert_frame_equal(result, df)
def test_nan_string(self):
# NaNs in string column
df = DataFrame({"A": [0, 1, 2], "B": ["a", "b", np.nan]})
df.to_sql("test_nan", self.conn, index=False)
# NaNs are coming back as None
df.loc[2, "B"] = None
# with read_table
result = sql.read_sql_table("test_nan", self.conn)
tm.assert_frame_equal(result, df)
# with read_sql
result = sql.read_sql_query("SELECT * FROM test_nan", self.conn)
tm.assert_frame_equal(result, df)
def _get_index_columns(self, tbl_name):
from sqlalchemy.engine import reflection
insp = reflection.Inspector.from_engine(self.conn)
ixs = insp.get_indexes(tbl_name)
ixs = [i["column_names"] for i in ixs]
return ixs
def test_to_sql_save_index(self):
self._to_sql_save_index()
def test_transactions(self):
self._transaction_test()
def test_get_schema_create_table(self):
# Use a dataframe without a bool column, since MySQL converts bool to
# TINYINT (which read_sql_table returns as an int and causes a dtype
# mismatch)
self._load_test3_data()
tbl = "test_get_schema_create_table"
create_sql = sql.get_schema(self.test_frame3, tbl, con=self.conn)
blank_test_df = self.test_frame3.iloc[:0]
self.drop_table(tbl)
self.conn.execute(create_sql)
returned_df = sql.read_sql_table(tbl, self.conn)
tm.assert_frame_equal(returned_df, blank_test_df, check_index_type=False)
self.drop_table(tbl)
def test_dtype(self):
cols = ["A", "B"]
data = [(0.8, True), (0.9, None)]
df = DataFrame(data, columns=cols)
df.to_sql("dtype_test", self.conn)
df.to_sql("dtype_test2", self.conn, dtype={"B": sqlalchemy.TEXT})
meta = sqlalchemy.schema.MetaData(bind=self.conn)
meta.reflect()
sqltype = meta.tables["dtype_test2"].columns["B"].type
assert isinstance(sqltype, sqlalchemy.TEXT)
msg = "The type of B is not a SQLAlchemy type"
with pytest.raises(ValueError, match=msg):
df.to_sql("error", self.conn, dtype={"B": str})
# GH9083
df.to_sql("dtype_test3", self.conn, dtype={"B": sqlalchemy.String(10)})
meta.reflect()
sqltype = meta.tables["dtype_test3"].columns["B"].type
assert isinstance(sqltype, sqlalchemy.String)
assert sqltype.length == 10
# single dtype
df.to_sql("single_dtype_test", self.conn, dtype=sqlalchemy.TEXT)
meta = sqlalchemy.schema.MetaData(bind=self.conn)
meta.reflect()
sqltypea = meta.tables["single_dtype_test"].columns["A"].type
sqltypeb = meta.tables["single_dtype_test"].columns["B"].type
assert isinstance(sqltypea, sqlalchemy.TEXT)
assert isinstance(sqltypeb, sqlalchemy.TEXT)
def test_notna_dtype(self):
cols = {
"Bool": Series([True, None]),
"Date": Series([datetime(2012, 5, 1), None]),
"Int": Series([1, None], dtype="object"),
"Float": Series([1.1, None]),
}
df = DataFrame(cols)
tbl = "notna_dtype_test"
df.to_sql(tbl, self.conn)
returned_df = sql.read_sql_table(tbl, self.conn) # noqa
meta = sqlalchemy.schema.MetaData(bind=self.conn)
meta.reflect()
if self.flavor == "mysql":
my_type = sqltypes.Integer
else:
my_type = sqltypes.Boolean
col_dict = meta.tables[tbl].columns
assert isinstance(col_dict["Bool"].type, my_type)
assert isinstance(col_dict["Date"].type, sqltypes.DateTime)
assert isinstance(col_dict["Int"].type, sqltypes.Integer)
assert isinstance(col_dict["Float"].type, sqltypes.Float)
def test_double_precision(self):
V = 1.23456789101112131415
df = DataFrame(
{
"f32": Series([V], dtype="float32"),
"f64": Series([V], dtype="float64"),
"f64_as_f32": Series([V], dtype="float64"),
"i32": Series([5], dtype="int32"),
"i64": Series([5], dtype="int64"),
}
)
df.to_sql(
"test_dtypes",
self.conn,
index=False,
if_exists="replace",
dtype={"f64_as_f32": sqlalchemy.Float(precision=23)},
)
res = sql.read_sql_table("test_dtypes", self.conn)
# check precision of float64
assert np.round(df["f64"].iloc[0], 14) == np.round(res["f64"].iloc[0], 14)
# check sql types
meta = sqlalchemy.schema.MetaData(bind=self.conn)
meta.reflect()
col_dict = meta.tables["test_dtypes"].columns
assert str(col_dict["f32"].type) == str(col_dict["f64_as_f32"].type)
assert isinstance(col_dict["f32"].type, sqltypes.Float)
assert isinstance(col_dict["f64"].type, sqltypes.Float)
assert isinstance(col_dict["i32"].type, sqltypes.Integer)
assert isinstance(col_dict["i64"].type, sqltypes.BigInteger)
def test_connectable_issue_example(self):
# This tests the example raised in issue
# https://github.com/pandas-dev/pandas/issues/10104
def foo(connection):
query = "SELECT test_foo_data FROM test_foo_data"
return sql.read_sql_query(query, con=connection)
def bar(connection, data):
data.to_sql(name="test_foo_data", con=connection, if_exists="append")
def main(connectable):
with connectable.connect() as conn:
with conn.begin():
foo_data = conn.run_callable(foo)
conn.run_callable(bar, foo_data)
DataFrame({"test_foo_data": [0, 1, 2]}).to_sql("test_foo_data", self.conn)
main(self.conn)
def test_temporary_table(self):
test_data = "Hello, World!"
expected = DataFrame({"spam": [test_data]})
Base = declarative.declarative_base()
class Temporary(Base):
__tablename__ = "temp_test"
__table_args__ = {"prefixes": ["TEMPORARY"]}
id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
spam = sqlalchemy.Column(sqlalchemy.Unicode(30), nullable=False)
Session = sa_session.sessionmaker(bind=self.conn)
session = Session()
with session.transaction:
conn = session.connection()
Temporary.__table__.create(conn)
session.add(Temporary(spam=test_data))
session.flush()
df = sql.read_sql_query(sql=sqlalchemy.select([Temporary.spam]), con=conn)
tm.assert_frame_equal(df, expected)
class _TestSQLAlchemyConn(_EngineToConnMixin, _TestSQLAlchemy):
def test_transactions(self):
pytest.skip("Nested transactions rollbacks don't work with Pandas")
class _TestSQLiteAlchemy:
"""
Test the sqlalchemy backend against an in-memory sqlite database.
"""
flavor = "sqlite"
@classmethod
def connect(cls):
return sqlalchemy.create_engine("sqlite:///:memory:")
@classmethod
def setup_driver(cls):
# sqlite3 is built-in
cls.driver = None
def test_default_type_conversion(self):
df = sql.read_sql_table("types_test_data", self.conn)
assert issubclass(df.FloatCol.dtype.type, np.floating)
assert issubclass(df.IntCol.dtype.type, np.integer)
# sqlite has no boolean type, so integer type is returned
assert issubclass(df.BoolCol.dtype.type, np.integer)
# Int column with NA values stays as float
assert issubclass(df.IntColWithNull.dtype.type, np.floating)
# Non-native Bool column with NA values stays as float
assert issubclass(df.BoolColWithNull.dtype.type, np.floating)
def test_default_date_load(self):
df = sql.read_sql_table("types_test_data", self.conn)
# IMPORTANT - sqlite has no native date type, so dates shouldn't be parsed
assert not issubclass(df.DateCol.dtype.type, np.datetime64)
def test_bigint_warning(self):
# test that no warning is raised for BIGINT (used to support int64) (GH7433)
df = DataFrame({"a": [1, 2]}, dtype="int64")
df.to_sql("test_bigintwarning", self.conn, index=False)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
sql.read_sql_table("test_bigintwarning", self.conn)
assert len(w) == 0
class _TestMySQLAlchemy:
"""
Test the sqlalchemy backend against a MySQL database.
"""
flavor = "mysql"
@classmethod
def connect(cls):
url = "mysql+{driver}://root@localhost/pandas_nosetest"
return sqlalchemy.create_engine(
url.format(driver=cls.driver), connect_args=cls.connect_args
)
@classmethod
def setup_driver(cls):
pymysql = pytest.importorskip("pymysql")
cls.driver = "pymysql"
cls.connect_args = {"client_flag": pymysql.constants.CLIENT.MULTI_STATEMENTS}
def test_default_type_conversion(self):
df = sql.read_sql_table("types_test_data", self.conn)
assert issubclass(df.FloatCol.dtype.type, np.floating)
assert issubclass(df.IntCol.dtype.type, np.integer)
# MySQL has no real BOOL type (it's an alias for TINYINT)
assert issubclass(df.BoolCol.dtype.type, np.integer)
# Int column with NA values stays as float
assert issubclass(df.IntColWithNull.dtype.type, np.floating)
# Bool column with NA = int column with NA values => becomes float
assert issubclass(df.BoolColWithNull.dtype.type, np.floating)
def test_read_procedure(self):
import pymysql
# see GH7324. Although it is more an api test, it is added to the
# mysql tests as sqlite does not have stored procedures
df = DataFrame({"a": [1, 2, 3], "b": [0.1, 0.2, 0.3]})
df.to_sql("test_procedure", self.conn, index=False)
proc = """DROP PROCEDURE IF EXISTS get_testdb;
CREATE PROCEDURE get_testdb ()
BEGIN
SELECT * FROM test_procedure;
END"""
connection = self.conn.connect()
trans = connection.begin()
try:
r1 = connection.execute(proc) # noqa
trans.commit()
except pymysql.Error:
trans.rollback()
raise
res1 = sql.read_sql_query("CALL get_testdb();", self.conn)
tm.assert_frame_equal(df, res1)
# test delegation to read_sql_query
res2 = sql.read_sql("CALL get_testdb();", self.conn)
tm.assert_frame_equal(df, res2)
class _TestPostgreSQLAlchemy:
"""
Test the sqlalchemy backend against a PostgreSQL database.
"""
flavor = "postgresql"
@classmethod
def connect(cls):
url = "postgresql+{driver}://postgres@localhost/pandas_nosetest"
return sqlalchemy.create_engine(url.format(driver=cls.driver))
@classmethod
def setup_driver(cls):
pytest.importorskip("psycopg2")
cls.driver = "psycopg2"
def test_schema_support(self):
# only test this for postgresql (schemas are not supported in
# mysql/sqlite)
df = DataFrame({"col1": [1, 2], "col2": [0.1, 0.2], "col3": ["a", "n"]})
# create a schema
self.conn.execute("DROP SCHEMA IF EXISTS other CASCADE;")
self.conn.execute("CREATE SCHEMA other;")
# write dataframe to different schemas
df.to_sql("test_schema_public", self.conn, index=False)
df.to_sql(
"test_schema_public_explicit", self.conn, index=False, schema="public"
)
df.to_sql("test_schema_other", self.conn, index=False, schema="other")
# read dataframes back in
res1 = sql.read_sql_table("test_schema_public", self.conn)
tm.assert_frame_equal(df, res1)
res2 = sql.read_sql_table("test_schema_public_explicit", self.conn)
tm.assert_frame_equal(df, res2)
res3 = sql.read_sql_table(
"test_schema_public_explicit", self.conn, schema="public"
)
tm.assert_frame_equal(df, res3)
res4 = sql.read_sql_table("test_schema_other", self.conn, schema="other")
tm.assert_frame_equal(df, res4)
msg = "Table test_schema_other not found"
with pytest.raises(ValueError, match=msg):
sql.read_sql_table("test_schema_other", self.conn, schema="public")
# different if_exists options
# create a schema
self.conn.execute("DROP SCHEMA IF EXISTS other CASCADE;")
self.conn.execute("CREATE SCHEMA other;")
# write dataframe with different if_exists options
df.to_sql("test_schema_other", self.conn, schema="other", index=False)
df.to_sql(
"test_schema_other",
self.conn,
schema="other",
index=False,
if_exists="replace",
)
df.to_sql(
"test_schema_other",
self.conn,
schema="other",
index=False,
if_exists="append",
)
res = sql.read_sql_table("test_schema_other", self.conn, schema="other")
tm.assert_frame_equal(concat([df, df], ignore_index=True), res)
# specifying schema in user-provided meta
# The schema won't be applied on another Connection
# because of transactional schemas
if isinstance(self.conn, sqlalchemy.engine.Engine):
engine2 = self.connect()
meta = sqlalchemy.MetaData(engine2, schema="other")
pdsql = | sql.SQLDatabase(engine2, meta=meta) | pandas.io.sql.SQLDatabase |
import argparse
import pandas as pd
import os
from random import shuffle
def parse_args():
parser = argparse.ArgumentParser(description="Takes the meta_data, l4, and l4_no_pca files for the train, val and test sets "
"of students and returns them in libsvc format.")
parser.add_argument("--in_path", dest="in_path", help="Path to folder with meta_data, l4 and l4_no_pca", type=str,
required=True)
parser.add_argument("--test", dest="test", help="Id of VP for test", type=int, required=True)
parser.add_argument("--train", dest="train", help="String of train vp ids, comma separated: 1,12,15,9,2", type=str,
required=True)
parser.add_argument("--val", dest="val", help="String of val vp ids, comma separated: 1,12,15,9,2", type=str,
required=True)
parser.add_argument("--out_path", dest="out_path", help="Path for outputting the formatted files.", type=str,
required=True)
return parser.parse_args()
def to_libsvm_format(df: pd.DataFrame, labels: pd.DataFrame):
out = ""
for idx, row in df.iterrows():
out += "{} ".format(int(labels.iloc[idx].values[0]))
row_list = row.values.tolist()
for i in range(len(row_list)):
out += "{}:{} ".format(i+1, row_list[i])
out += "\n"
return out
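# Illustrative sketch (not part of the original script): the helper above emits one
# line per row in the usual libsvm layout, "<label> 1:<feat1> 2:<feat2> ...". The tiny
# frames below are hypothetical.
def _demo_to_libsvm_format():
    feats = pd.DataFrame([[0.1, 0.2], [0.3, 0.4]])
    labels = pd.DataFrame([[1], [0]])
    # yields "1 1:0.1 2:0.2 \n0 1:0.3 2:0.4 \n"
    return to_libsvm_format(feats, labels)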
# args = parse_args()
# test = "{}.csv".format(args.test)
# vals = args.val.split(",")
# val = ["{}.csv".format(x) for x in vals]
# trains = args.train.split(",")
# train = ["{}.csv".format(x) for x in trains]
in_path = "../../source/train_val_test_sets/" # args.in_path
out_path = "../../source/libsvm_train_test_val/" # args.out_path
out_df = pd.DataFrame([], columns=["test", "val", "train"])
files = os.listdir(in_path + "meta_data/")
count = 1
for file in files:
print("{} {}/{}".format(file, count, len(files)))
count += 1
test = file
copy = files.copy()
copy.remove(test)
shuffle(copy)
train = copy[:9]
val = copy[9:]
out_df = out_df.append(pd.DataFrame([[test[:-4], str([x[:-4] for x in val])[1:-1], str([x[:-4] for x in train])[1:-1]]],
columns=out_df.columns))
test_label_df = pd.read_csv("{}labels/{}".format(in_path, test))
test_hp_df = pd.read_csv("{}hp/{}".format(in_path, test))
test_l4_df = pd.read_csv("{}l4/{}".format(in_path, test))
test_l4_no_pca_df = pd.read_csv("{}l4_no_pca/{}".format(in_path, test))
val_label_df = pd.DataFrame([])
val_hp_df = pd.DataFrame([])
val_l4_df = pd.DataFrame([])
val_l4_no_pca_df = pd.DataFrame([])
for vvp in val:
val_label_df = val_label_df.append(pd.read_csv("{}labels/{}".format(in_path, vvp)))
val_hp_df = val_hp_df.append(pd.read_csv("{}hp/{}".format(in_path, vvp)))
val_l4_df = val_l4_df.append(pd.read_csv("{}l4/{}".format(in_path, vvp)))
val_l4_no_pca_df = val_l4_no_pca_df.append(pd.read_csv("{}l4_no_pca/{}".format(in_path, vvp)))
train_label_df = pd.DataFrame([])
train_hp_df = pd.DataFrame([])
train_l4_df = pd.DataFrame([])
train_l4_no_pca_df = | pd.DataFrame([]) | pandas.DataFrame |
# Copyright 2018 <NAME> <EMAIL>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
import numpy as np
import os
import warnings
import datetime
from .dataset import Dataset
from .dataframe_tools import *
from .exceptions import FailedReindexWarning, PublicationEmbargoWarning, ReindexMapError
class Pdac(Dataset):
def __init__(self, version="latest", no_internet=False):
"""Load all of the dataframes as values in the self._data dict variable, with names as keys, and format them properly.
Parameters:
version (str, optional): The version number to load, or the string "latest" to just load the latest build. Default is "latest".
no_internet (bool, optional): Whether to skip the index update step because it requires an internet connection. This will be skipped automatically if there is no internet at all, but you may want to manually skip it if you have a spotty internet connection. Default is False.
"""
# Set some needed variables, and pass them to the parent Dataset class __init__ function
# This keeps a record of all versions that the code is equipped to handle. That way, if there's a new data release but they didn't update their package, it won't try to parse the new data version it isn't equipped to handle.
valid_versions = ["1.0"]
data_files = {
"1.0": [
"clinical_table_140.tsv.gz",
"microRNA_TPM_log2_Normal.cct.gz",
"microRNA_TPM_log2_Tumor.cct.gz",
"meta_table_140.tsv.gz",
"mRNA_RSEM_UQ_log2_Normal.cct.gz",
"mRNA_RSEM_UQ_log2_Tumor.cct.gz",
"PDAC_mutation.maf.gz",
"phosphoproteomics_site_level_MD_abundance_normal.cct.gz",
"phosphoproteomics_site_level_MD_abundance_tumor.cct.gz",
"proteomics_gene_level_MD_abundance_normal.cct.gz",
"proteomics_gene_level_MD_abundance_tumor.cct.gz",
"RNA_fusion_unfiltered_normal.tsv.gz",
"RNA_fusion_unfiltered_tumor.tsv.gz",
"SCNA_log2_gene_level.cct.gz"],
}
# Call the parent class __init__ function
super().__init__(cancer_type="pdac", version=version, valid_versions=valid_versions, data_files=data_files, no_internet=no_internet)
# Load the data into dataframes in the self._data dict
loading_msg = f"Loading {self.get_cancer_type()} v{self.version()}"
for file_path in self._data_files_paths: # Loops through files variable
# Print a loading message. We add a dot every time, so the user knows it's not frozen.
loading_msg = loading_msg + "."
print(loading_msg, end='\r')
path_elements = file_path.split(os.sep) # Get a list of the levels of the path
file_name = path_elements[-1] # The last element will be the name of the file. We'll use this to identify files for parsing in the if/elif statements below
mark_normal = lambda s: s + ".N"
remove_type_tag = lambda s: s[:-2] # remove _T and similar tags from end of string
if file_name == "clinical_table_140.tsv.gz": # Note that we use the "file_name" variable to identify files. That way we don't have to use the whole path.
df = pd.read_csv(file_path, sep='\t', index_col=0)
df = df.rename_axis("Patient_ID", axis="index")
df = df.sort_index()
df.columns.name = "Name"
df["Sample_Tumor_Normal"] = "Tumor"
self._data["clinical"] = df
elif file_name == "meta_table_140.tsv.gz":
df = pd.read_csv(file_path, sep='\t', index_col=0)
df = df.sort_index()
df.index.name = "Patient_ID"
df.columns.name = "Name"
self._data["derived_molecular"] = df
elif file_name == "microRNA_TPM_log2_Normal.cct.gz":
df_normal = pd.read_csv(file_path, sep='\t', index_col=0)
df_normal = df_normal.sort_index()
df_normal = df_normal.transpose()
df_normal = df_normal.rename(index=mark_normal)
# merge tumor and normal if tumor data has already been read
if "miRNA" in self._data:
df_tumor = self._data["miRNA"]
df_combined = pd.concat([df_normal, df_tumor])
df_combined.index.name = "Patient_ID"
df_combined.columns.name = "Name"
self._data["miRNA"] = df_combined
else:
self._data["miRNA"] = df_normal
elif file_name == "microRNA_TPM_log2_Tumor.cct.gz":
df_tumor = pd.read_csv(file_path, sep='\t', index_col=0)
df_tumor = df_tumor.sort_index()
df_tumor = df_tumor.transpose()
# merge tumor and normal if normal data has already been read
if "miRNA" in self._data:
df_normal = self._data["miRNA"]
df_combined = pd.concat([df_normal, df_tumor])
df_combined.index.name = "Patient_ID"
df_combined.columns.name = "Name"
self._data["miRNA"] = df_combined
else:
self._data["miRNA"] = df_tumor
elif file_name == "mRNA_RSEM_UQ_log2_Normal.cct.gz":
# create df for normal data
df_normal = pd.read_csv(file_path, sep='\t', index_col=0)
df_normal = df_normal.sort_index()
df_normal = df_normal.transpose()
df_normal = df_normal.rename(index=mark_normal)
# merge tumor and normal if tumor data has already been read
if "transcriptomics" in self._data:
df_tumor = self._data["transcriptomics"]
df_combined = pd.concat([df_normal, df_tumor])
df_combined.index.name = "Patient_ID"
df_combined.columns.name = "Name"
self._data["transcriptomics"] = df_combined
else:
self._data["transcriptomics"] = df_normal
elif file_name == "mRNA_RSEM_UQ_log2_Tumor.cct.gz":
# create df for tumor data
df_tumor = pd.read_csv(file_path, sep='\t', index_col=0)
df_tumor = df_tumor.sort_index()
df_tumor = df_tumor.transpose()
# merge tumor and normal if normal data has already been read
if "transcriptomics" in self._data:
df_normal = self._data["transcriptomics"]
df_combined = pd.concat([df_normal, df_tumor])
df_combined.index.name = "Patient_ID"
df_combined.columns.name = "Name"
self._data["transcriptomics"] = df_combined
else:
self._data["transcriptomics"] = df_tumor
elif file_name == "PDAC_mutation.maf.gz":
df = pd.read_csv(file_path, sep='\t')
df = df[["Hugo_Symbol", "Variant_Classification", "HGVSp_Short", "Tumor_Sample_Barcode"]]
df = df.rename({"Tumor_Sample_Barcode":"Patient_ID","Hugo_Symbol":"Gene","Variant_Classification":"Mutation","HGVSp_Short":"Location"}, axis='columns')
df = df.sort_values(by=["Patient_ID", "Gene"])
df = df.set_index("Patient_ID")
df = df.rename(index=remove_type_tag)
df.columns.name = "Name"
self._data["somatic_mutation"] = df
elif file_name == "phosphoproteomics_site_level_MD_abundance_normal.cct.gz":
# create df form normal data
df_normal = pd.read_csv(file_path, sep='\t')
column_split = df_normal["Index"].str.rsplit("_", n=1, expand=True)
df_normal = df_normal.assign(
Site = column_split[1],
Database_ID = column_split[0]
)
df_normal = df_normal.drop(columns="Index")
df_normal = df_normal.rename(columns={"Gene":"Name"})
df_normal = df_normal.set_index(["Name", "Site", "Peptide", "Database_ID"])
df_normal = df_normal.sort_index()
df_normal = df_normal.transpose()
df_normal = df_normal.rename(index=mark_normal)
# merge tumor and normal if tumor data has already been read
if "phosphoproteomics" in self._data:
df_tumor = self._data["phosphoproteomics"]
df_combined = pd.concat([df_normal, df_tumor])
df_combined.index.name = "Patient_ID"
#df_combined.columns.name = "Name"
self._data["phosphoproteomics"] = df_combined
else:
self._data["phosphoproteomics"] = df_normal
elif file_name == "phosphoproteomics_site_level_MD_abundance_tumor.cct.gz":
df_tumor = pd.read_csv(file_path, sep='\t')
column_split = df_tumor["Index"].str.rsplit("_", n=1, expand=True)
df_tumor = df_tumor.assign(
Site = column_split[1],
Database_ID = column_split[0]
)
df_tumor = df_tumor.drop(columns="Index")
df_tumor = df_tumor.rename(columns={"Gene":"Name"})
df_tumor = df_tumor.set_index(["Name", "Site", "Peptide", "Database_ID"])
df_tumor = df_tumor.sort_index()
df_tumor = df_tumor.transpose()
# merge tumor and normal if normal data has already been read
if "phosphoproteomics" in self._data:
df_normal = self._data["phosphoproteomics"]
df_combined = pd.concat([df_normal, df_tumor])
df_combined.index.name = "Patient_ID"
#df_combined.columns.name = "Name"
self._data["phosphoproteomics"] = df_combined
else:
self._data["phosphoproteomics"] = df_tumor
elif file_name == "proteomics_gene_level_MD_abundance_normal.cct.gz":
df_normal = pd.read_csv(file_path, sep='\t', index_col=0)
df_normal = df_normal.sort_index()
df_normal = df_normal.transpose()
df_normal = df_normal.rename(index=mark_normal)
# merge tumor and normal if tumor data has already been read
if "proteomics" in self._data:
df_tumor = self._data["proteomics"]
df_combined = pd.concat([df_normal, df_tumor])
df_combined.index.name = "Patient_ID"
df_combined.columns.name = "Name"
self._data["proteomics"] = df_combined
else:
self._data["proteomics"] = df_normal
elif file_name == "proteomics_gene_level_MD_abundance_tumor.cct.gz":
df_tumor = pd.read_csv(file_path, sep='\t', index_col=0)
df_tumor = df_tumor.sort_index()
df_tumor = df_tumor.transpose()
# merge tumor and normal if normal data has already been read
if "proteomics" in self._data:
df_normal = self._data["proteomics"]
df_combined = pd.concat([df_normal, df_tumor])
df_combined.index.name = "Patient_ID"
df_combined.columns.name = "Name"
self._data["proteomics"] = df_combined
else:
self._data["proteomics"] = df_tumor
elif file_name == "RNA_fusion_unfiltered_normal.tsv.gz":
df_normal = pd.read_csv(file_path, sep='\t', index_col=0)
df_normal = df_normal.rename(columns={"Sample": "Patient_ID"})
df_normal = df_normal.set_index("Patient_ID")
df_normal = df_normal.rename(index=mark_normal)
if "gene_fusion" in self._data:
df_tumor = self._data ["gene_fusion"]
df_combined = pd.concat([df_normal, df_tumor])
df_combined.index.name = "Patient_ID"
df_combined.columns.name = "Name"
self._data["gene_fusion"] = df_combined
else:
self._data["gene_fusion"] = df_normal
elif file_name == "RNA_fusion_unfiltered_tumor.tsv.gz":
df_tumor = | pd.read_csv(file_path, sep='\t', index_col=0) | pandas.read_csv |
import argparse
import logging
import pickle
from os import mkdir
from os.path import exists, join
from sklearn.linear_model import LogisticRegression
import numpy as np
import pandas as pd
# import tensorflow as tf
from typing import List, Tuple, Iterable, TypeVar
import regex
import nltk
T = TypeVar('T')
def flatten_list(iterable_of_lists: Iterable[Iterable[T]]) -> List[T]:
"""Unpack lists into a single list."""
return [x for sublist in iterable_of_lists for x in sublist]
# class Tokenizer(Configured):
# def tokenize(self, text: str) -> List[str]:
# raise NotImplementedError()
# def tokenize_with_inverse(self, text: str) -> Tuple[List[str], np.ndarray]:
# """Tokenize the text, and return start/end character mapping of each token within `text`"""
# raise NotImplementedError()
_double_quote_re = regex.compile(u"\"|``|''")
def convert_to_spans(raw_text: str, text: List[str]) -> np.ndarray:
""" Convert a tokenized version of `raw_text` into a series character
spans referencing the `raw_text` """
cur_idx = 0
all_spans = np.zeros((len(text), 2), dtype=np.int32)
for i, token in enumerate(text):
if _double_quote_re.match(token):
span = _double_quote_re.search(raw_text[cur_idx:])
tmp = cur_idx + span.start()
l = span.end() - span.start()
else:
tmp = raw_text.find(token, cur_idx)
l = len(token)
if tmp < cur_idx:
raise ValueError(token)
cur_idx = tmp
all_spans[i] = (cur_idx, cur_idx + l)
cur_idx += l
return all_spans
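# Illustrative sketch (not part of the original module): convert_to_spans maps each
# token of a pre-tokenized text back to its (start, end) character offsets in the
# raw string; the example below is hypothetical.
def _demo_convert_to_spans():
    raw = 'The "cat" sat.'
    tokens = ['The', '"', 'cat', '"', 'sat', '.']
    spans = convert_to_spans(raw, tokens)
    # spans[2] is [5, 8], the offsets of 'cat' inside raw
    return spans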
class NltkAndPunctTokenizer():
"""Tokenize ntlk, but additionally split on most punctuations symbols"""
def __init__(self, split_dash=True, split_single_quote=False, split_period=False, split_comma=False):
self.split_dash = split_dash
self.split_single_quote = split_single_quote
self.split_period = split_period
self.split_comma = split_comma
# Unix character classes to split on
resplit = r"\p{Pd}\p{Po}\p{Pe}\p{S}\p{Pc}"
# A list of optional exceptions; we will trust nltk to split them correctly
# unless otherwise specified by the __init__ arguments
dont_split = ""
if not split_dash:
dont_split += "\-"
if not split_single_quote:
dont_split += "'"
if not split_period:
dont_split += "\."
if not split_comma:
dont_split += ","
resplit = "([" + resplit + "]|'')"
if len(dont_split) > 0:
split_regex = r"(?![" + dont_split + "])" + resplit
else:
split_regex = resplit
self.split_regex = regex.compile(split_regex)
try:
self.sent_tokenzier = nltk.load('tokenizers/punkt/english.pickle')
except LookupError:
logging.info("Downloading NLTK punkt tokenizer")
nltk.download('punkt')
self.sent_tokenzier = nltk.load('tokenizers/punkt/english.pickle')
self.word_tokenizer = nltk.TreebankWordTokenizer()
def retokenize(self, x):
if _double_quote_re.match(x):
# Never split isolated double quotes (TODO: just integrate this into the regex?)
return (x, )
return (x.strip() for x in self.split_regex.split(x) if len(x) > 0)
def tokenize(self, text: str) -> List[str]:
out = []
for s in self.sent_tokenzier.tokenize(text):
out += flatten_list(self.retokenize(w) for w in self.word_tokenizer.tokenize(s))
return out
def tokenize_with_inverse(self, paragraph: str):
text = self.tokenize(paragraph)
inv = convert_to_spans(paragraph, text)
return text, inv
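# Illustrative usage sketch (not part of the original module); it assumes the NLTK
# punkt model can be loaded or downloaded.
def _demo_tokenizer_usage():
    tok = NltkAndPunctTokenizer()
    tokens, spans = tok.tokenize_with_inverse("Dr. Smith re-entered the lab.")
    # With the default split_dash=True, "re-entered" comes back as 're', '-', 'entered',
    # and spans holds the character offsets of every token in the original string.
    return tokens, spans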
STOP_WORDS = frozenset([
'i', 'me', 'my', 'myself', 'we', 'our', 'ours', 'ourselves', 'you', 'your',
'yours', 'yourself', 'yourselves', 'he', 'him', 'his', 'himself', 'she', 'her',
'hers', 'herself', 'it', 'its', 'itself', 'they', 'them', 'their', 'theirs', 'themselves',
'what', 'which', 'who', 'whom', 'this', 'that', 'these', 'those', 'am', 'is', 'are', 'was',
'were', 'be', 'been', 'being', 'have', 'has', 'had', 'having', 'do', 'does', 'did', 'doing',
'a', 'an', 'the', 'and', 'but', 'if', 'or', 'because', 'as', 'until', 'while', 'of', 'at', 'by',
'for', 'with', 'about', 'against', 'between', 'into', 'through', 'during', 'before', 'after', 'above',
'below', 'to', 'from', 'up', 'down', 'in', 'out', 'on', 'off', 'over', 'under', 'again', 'further',
'then', 'once', 'here', 'there', 'when', 'where', 'why', 'how', 'all', 'any', 'both', 'each', 'few',
'more', 'most', 'other', 'some', 'such', 'no', 'nor', 'not', 'only', 'own', 'same', 'so', 'than',
'too', 'very', 's', 't', 'can', 'will', 'just', 'don', 'should', 'now', 'd', 'll', 'm', 'o', 're',
've', 'y', 'ain', 'aren', 'couldn', 'didn', 'doesn', 'hadn', 'hasn', 'haven', 'isn', 'ma',
'mightn', 'mustn', 'needn', 'shan', 'shouldn', 'wasn', 'weren', 'won', 'wouldn',
"many", "how", "de"
])
def is_subseq(needle, haystack):
l = len(needle)
if l > len(haystack):
return False
else:
return any(haystack[i:i+l] == needle for i in range(len(haystack)-l + 1))
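# Illustrative sketch (not part of the original script): is_subseq tests for a
# contiguous token sub-sequence, not a mere subset.
def _demo_is_subseq():
    assert is_subseq(["quick", "brown"], ["the", "quick", "brown", "fox"])
    assert not is_subseq(["quick", "fox"], ["the", "quick", "brown", "fox"])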
def build_mnli_bias_only(out_dir):
"""Builds our bias-only MNLI model and saves its predictions
:param out_dir: Directory to save the predictions
"""
tok = NltkAndPunctTokenizer()
def read_tsv(path, quotechar=None):
import csv
with open(path, "r") as f:
reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
lines = []
for line in reader:
lines.append(line)
return lines
def _create_mnli_examples(lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
labelmap = {"contradiction":0, "entailment":1, "neutral":2}
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = line[0]
text_a = line[8]
text_b = line[9]
if set_type == "test":
label = "contradiction"
else:
label = line[-1]
examples.append([guid, text_a, text_b, label])
return examples
def _create_qqp_examples(lines, set_type):
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = i - 1
if set_type.startswith("test"):
label = line[3]
text_a = line[1]
text_b = line[2]
else:
text_a = line[3]
text_b = line[4]
label = line[5]
examples.append([guid, text_a, text_b, label])
return examples
dataset_to_examples = {}
QQP_training = read_tsv("/local/jyzhao/data/paws/paws_qqp/output/dev_and_test.tsv")
dataset_to_examples["paws_qqp_dev_test"] = _create_qqp_examples(QQP_training, "test")
# QQP_dev = read_tsv("/home/data/QQP/dev.tsv")
# dataset_to_examples["qqp_dev"] = _create_qqp_examples(QQP_dev, "dev")
# MNLI_training = read_tsv("/home/data/MNLI/dev.tsv")
# dataset_to_examples["mnli_dev"] = _create_mnli_examples(MNLI_training, "dev")
# Our models will only distinguish entailment vs (neutral/contradict)
for examples in dataset_to_examples.values():
for i, ex in enumerate(examples):
if ex[3] == 2:
examples[i][3] = 0
negations = ["not", "no", "n't", "never", "nothing", "none", "nobody", "nowhere", "neither"]
# Build the features, store as a pandas dataset
dataset_to_features = {}
for name, examples in dataset_to_examples.items():
# tf.logging.info("Building features for %s.." % name)
print("Building features for %s.." % name)
features = []
for example in examples:
h = [x.lower() for x in tok.tokenize(example[2])]
p = [x.lower() for x in tok.tokenize(example[1])]
p_words = set(p)
neg_in_h = sum(x in h for x in negations)
n_words_in_p = sum(x in p_words for x in h)
fe = {
"h-is-subseq": 1 if is_subseq(h, p) else 0,
"all-in-p": 1 if n_words_in_p == len(h) else 0,
"percent-in-p": n_words_in_p / len(h),
"log-len-diff": np.log(max(len(p) - len(h), 1)),
"neg-in-h": 1 if neg_in_h > 0 else 0,
"label": example[-1],
}
# h_vecs = [w2v[w] for w in example.hypothesis if w in w2v]
# p_vecs = [w2v[w] for w in example.premise if w in w2v]
# if len(h_vecs) > 0 and len(p_vecs) > 0:
# h_vecs = np.stack(h_vecs, 0)
# p_vecs = np.stack(p_vecs, 0)
# # [h_size, p_size]
# similarities = np.matmul(h_vecs, p_vecs.T)
# # [h_size]
# similarities = np.max(similarities, 1)
# similarities.sort()
# fe["average-sim"] = similarities.sum() / len(h)
# fe["min-similarity"] = similarities[0]
# if len(similarities) > 1:
# fe["min2-similarity"] = similarities[1]
fe["average-sim"] = fe["min-similarity"] = fe["min2-similarity"] = 0
features.append(fe)  # word-embedding similarity is disabled above; only the pseudo value 0 is used for now
dataset_to_features[name] = | pd.DataFrame(features) | pandas.DataFrame |
import plotly.graph_objs as go
from dash import Dash
from dash.dependencies import Input, Output, State
from dash_extensions.snippets import send_data_frame
from pandas import DataFrame
def download_callback(app: Dash, card_id: str, graph_id: str, download_id: str) -> None:
@app.callback(
Output(download_id, "data"),
Input(card_id, "action_click"),
State(graph_id, "figure"),
State(card_id, "title"),
prevent_initial_call=True
)
def __download_callback(action_click: str, figure: go.Figure, title: str):
if figure:
d = figure['data']
# Fell free to extend the download callback to support more graphs types
if d[0]['type'] == 'scattergl':
output = __download_scatter(d)
else:
output = __download_bar(d)
if action_click.startswith('download_excel'):
return send_data_frame(output.to_excel, title + '.xlsx', index=False)
return send_data_frame(output.to_csv, title + '.csv', index=False)
return ''
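# The helpers below rebuild a DataFrame from the traces of the figure so that it
# can be handed to send_data_frame above.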
def __download_scatter(d):
output = DataFrame()
for element in d:
df = | DataFrame() | pandas.DataFrame |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import argparse
from misc import *
import pandas as pd
DEFPATH = "/home/bakirillov/HDD/weights/fasttext/aligned/wiki.en.align.vec"
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"-v", "--vectors",
dest="vectors",
action="store",
help="Path to file with aligned vectors",
default=DEFPATH
)
parser.add_argument(
"-s", "--study",
dest="study",
action="store",
help="Path to study data file",
default="en_study.pkl"
)
parser.add_argument(
"-p", "--participant",
dest="participant",
action="store",
default="all",
help="the participant id"
)
parser.add_argument(
"-o", "--output",
dest="output",
action="store",
help="set the path of output file"
)
parser.add_argument(
"-w", "--what",
dest="what",
action="store",
choices=["wv", "1hot"],
default="wv",
help="set the type of output"
)
args = parser.parse_args()
study = Study.load_from_file(args.study)
if args.participant == "all":
word_aucs = study.compute_word_aucs()
words = word_aucs.index
else:
words = study[int(args.participant)][1][2]
if args.what == "wv":
if "vec" in args.vectors:
data = load_vectors(
args.vectors, words
)
else:
data = pd.read_csv(args.vectors, index_col=0).T[0:-1]
elif args.what == "1hot":
if "_1hot_" not in args.output:
data = {a: Study.onehot(a) for a in words}
else:
data = {a: [a] for a in words}
if args.participant == "all":
| pd.DataFrame(data) | pandas.DataFrame |
import torch
import pandas as pd
from tqdm import tqdm
import cv2
import numpy as np
from torch.utils.data import DataLoader, Dataset
from data import TestDataset,mask2rle
from albumentations import Compose,Normalize,HorizontalFlip,VerticalFlip
from model_kaggle import Unet
import torch.nn.functional as F
import warnings
warnings.filterwarnings("ignore")
def TTA(image,model):
# average the predictions over multiple augmented versions of the test image
# batch size must be 1
# input: image: [256, 1600, 3], torch.Tensor
# output: pred_result: [4, 256, 1600], np.array
h,w,c = image.shape
horizon_trans = Compose([HorizontalFlip(p=1)])
vertical_trans = Compose([VerticalFlip(p=1)])
rotate_trans = Compose([HorizontalFlip(p=1),VerticalFlip(p=1)])
none_trans = Compose([])
trans_zoo = [horizon_trans,vertical_trans,rotate_trans,none_trans]
pred_total = np.empty((len(trans_zoo),h,w,4))
for i,tran in enumerate(trans_zoo):
#img->norm+trans->predict->pred_mask->re-trans
#numpy.array
img_aug = tran(image=image.numpy())['image'].squeeze() #[256,1600,3]
#img_aug = normal_trans(image=img_aug)['image'].squeeze()
img_aug = torch.from_numpy(img_aug).permute((2,0,1)).unsqueeze(0).cuda() #[1,3,256,1600]
pred_aug = model(img_aug)
pred_aug = F.sigmoid(pred_aug).detach().cpu().numpy()#[1,4,256,1600]
pred_aug = pred_aug.squeeze().transpose((1,2,0)) #[256,1600,4]
pred_recover = tran(image=pred_aug)['image'].squeeze() #[256,1600,4]
pred_total[i] = pred_recover
pred_result = np.mean(pred_total,axis=0) #[256,1600,4]
return pred_result.transpose((2,0,1)) #[4,256,1600]
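# Illustrative usage sketch (not part of the original script); the model and image
# are assumed to be supplied by the caller.
def _demo_tta(model, image_hwc):
    # image_hwc: torch.Tensor of shape [256, 1600, 3], already normalized;
    # model: a segmentation network already moved to the GPU (assumed in eval mode).
    probs = TTA(image_hwc, model)  # numpy array of shape (4, 256, 1600)
    return probs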
def post_process(probability, threshold, min_size):
'''Post processing of each predicted mask; connected components with fewer
pixels than `min_size` are ignored'''
mask = (probability>threshold)
#cv2.threshold(probability, threshold, 1, cv2.THRESH_BINARY)[1]
num_component, component = cv2.connectedComponents(mask.astype(np.uint8))
predictions = np.zeros((256, 1600), np.float32)
num = 0
for c in range(1, num_component):
p = (component == c)
if p.sum() > min_size:
predictions[p] = 1
num += 1
return predictions, num
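# Illustrative sketch (not part of the original script); the threshold and min_size
# values below are hypothetical.
def _demo_post_process():
    probability = np.random.rand(256, 1600).astype(np.float32)
    predictions, num_components = post_process(probability, threshold=0.5, min_size=800)
    # predictions is a binary float32 mask; num_components counts the kept regions
    return predictions, num_components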
if __name__ == '__main__':
sample_submission_path = 'input/severstal-steel-defect-detection/sample_submission.csv'
test_data_folder = "input/severstal-steel-defect-detection/test_images"
# initialize test dataloader
best_threshold = [0.5,0.5,0.55,0.55]
num_workers = 6
batch_size = 4
print('best_threshold', best_threshold)
min_size = [800,2200,1000,3800]
mean = (0.485, 0.456, 0.406), # (0.39, 0.39, 0.39),
std = (0.229, 0.224, 0.225), # (0.17, 0.17, 0.17),
df = | pd.read_csv(sample_submission_path) | pandas.read_csv |
import gc
import re
import sys
import warnings
import os
import time
from datetime import datetime
import warnings
import pandas as pd
import numpy as np
import hashlib
from collections import defaultdict,Counter
from .sequence_functions import list_diff_outer_join, lcs, filter_lcs
from .os_functions import *
from .df_functions import *
from .config_table import ConfigReader
from .excel_functions import write_format_columns
from .regex_functions import replace_re_special, replace_punctuations
from .decorator_functions import *
from .data_handle_func import *
from pandas.core.indexes.multi import MultiIndex
class CsvSheetClass(object):
def __init__(self, table_path):
self.name = '{}'.format(table_path)
self.visibility = 0
class Handler(object):
def __init__(self, require_file_dir, input_dir,table_dict):
# With the following two attributes we can read all of the raw data and the rule tables
#original data files
self.input_dir = input_dir
#config table dict
self.table_dict = table_dict
self.require_file_dir = require_file_dir
# Records the min/max date range of the input data, filled in by concat_data
self.min_max_date_range = ''
# After concat_data, keep a complete original table for the later matching/extraction step
self.original_complete_header_df = None
@catch_and_print
def get_original2cn_dict(self, header_table_df, file_tag):
"""
Map all raw headers to the Chinese target headers; a separate mapping dict is built per country (file tag).
"""
original2cn_dict_list = []
original2cn_dict = defaultdict(str)
fillna_dict = {}
dtype_dict = {}
if file_tag.lower() not in [ x.lower() for x in header_table_df.columns.get_level_values(0) ] :
file_tag = 'Without file tag'
header_column_index = header_table_df.columns.get_level_values(
0) == file_tag.lower()
header_table_df_c = header_table_df.iloc[:, header_column_index]
header_table_first_three_c = header_table_df.loc[:, header_table_df.columns.get_level_values(0)[0]]
# Also collect the fill-before-concat values
for row, last_three in zip(header_table_df_c.iterrows(), header_table_first_three_c.iterrows()):
# Normalize headers: lowercase them and strip newlines and spaces
row_list = row[1].values
last_three_list = last_three[1].values
a_list = list(row_list)
b_list = list(last_three_list)
a_list = [str(x).lower().strip().replace('\n', '').replace('\xa0', '').replace(' ', '').replace('\t', '')
for x in a_list if x.strip() != '无' and x.strip().lower() != 'none' and x.strip() != '/' and x.strip() != '']
if a_list:
for x in a_list:
original2cn_dict[x] = b_list[2]
# Build the dict of values to fill in before concatenation
c_list = [x for x in a_list if split_colon(x)[0].lower().strip() == 'fillbeforeconcat' or split_colon(x)[0].strip() == '合并前填充']
if c_list:
for x in c_list:
fillna_dict[b_list[2]] = split_colon(x)[1]
if (b_list[1] != '默认' and b_list[1].lower() != 'default' and b_list[1] != '') and b_list[2] != '':
dtype_dict.update({b_list[2]: b_list[1]})
return original2cn_dict, fillna_dict, dtype_dict
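# For illustration (hypothetical values): original2cn_dict maps a cleaned raw header
# to its Chinese target column, e.g. {"modelname": "机型"}; fillna_dict maps a target
# column to the value to fill before concatenation; dtype_dict maps a target column
# to any non-default dtype declared in the configuration sheet.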
# Concatenate the data tables that were read. This function needs table_dict because it reads complete_header_df and target_cn_columns from it.
@get_run_time
def concat_data(self):
# This function reads the input data tables and must always be run
for keys in self.table_dict.keys():
if 'mapping' in keys.lower():
mapping_key = keys
try:
header_table_df = self.table_dict[mapping_key]
except KeyError:
enter_exit('Cannot find mapping configuration sheet!')
complete_header_df = self.table_dict['complete_header_df']
target_cn_columns = self.table_dict['target_cn_columns']
header_table_df = df_fillna_str(header_table_df)
info_path_list = get_walk_abs_files(self.input_dir)
# Check whether raw data files for the different countries were found
info_path_list = [x for x in info_path_list if '~$' not in x and (
x[-5:].lower() == '.xlsx' or x[-4:].lower() in ['.xls', '.csv'])]
if len(info_path_list) == 0:
enter_exit(f'Cannot find any data file in folder "{self.input_dir}" !\n')
success_sheet_df_list = []
for table_path in info_path_list:
table_p = Path(table_path)
table_stem = table_p.stem
table_suffix = table_p.suffix
# Read information from the file name
file_tag = table_stem.split('-')[0].split('_')[0].strip()
# Get the mapping dicts for this file, mapping raw headers to the Chinese target headers
original2cn_dict, fillna_dict, dtype_dict = self.get_original2cn_dict(header_table_df, file_tag)
if not original2cn_dict:
enter_exit('"Data_processing_configuration" required mapping field "{}" not found !'.format(file_tag))
# If it is a CSV file
is_csv = False
is_xls_special = False
if table_suffix == '.csv':
is_csv = True
csv_sheet_class = CsvSheetClass(table_stem)
sheets_property_list = [csv_sheet_class]
else:
try:
df_workbook = pd.ExcelFile(table_path)
sheets_property_list = df_workbook.book.sheets()
# Check whether the first sheet can be read
df_workbook.parse(str(sheets_property_list[0].name))
except:  # If reading fails, fall back to the xls format used by some other countries' files
is_xls_special = True
xls_sheet_class = CsvSheetClass(table_stem)
sheets_property_list = [xls_sheet_class]
# Filter out template sheets
for sheets_property in sheets_property_list:
sheet = sheets_property.name
sheet_visibility = sheets_property.visibility
if sheet_visibility == 0:  # only read visible sheets
if is_csv:
df_worksheet = read_csv_data(table_path)
if df_worksheet.empty:
continue
elif is_xls_special:  # for this format, only the first sheet is read
df_worksheet = read_xls_special(table_path)
if df_worksheet.empty:
continue
else:
df_worksheet = df_workbook.parse(str(sheet), na_values='')
# Lowercase the headers, strip internal whitespace and meaningless symbols, then rename them via the mapping dict
df_worksheet.columns = [str(x).lower().strip().replace('\n', '').replace('\xa0', '')
.replace(' ', '').replace('\t', '')if x == x else x for x in
df_worksheet.columns]
df_worksheet = dropping_not_mapping(df_worksheet, original2cn_dict, target_cn_columns)
# Handle mappings that contain a '+' sign (several raw columns combined into one)
df_worksheet = combine_multi_plus(df_worksheet, original2cn_dict)
# Before renaming, check for duplicated columns: if another column already maps to "机型", the original "机型" column in the raw table should be dropped
df_work_sheet = drop_duplicated_columns_before_rename(df_worksheet, original2cn_dict)
df_worksheet = df_worksheet.rename(original2cn_dict, axis=1)
# also confirm that no mapped field is duplicated, otherwise later data columns are affected; returns a frame with de-duplicated columns
df_worksheet = check_mapping_duplicates(df_worksheet, target_cn_columns, table_stem=table_stem)
# after renaming and before concatenation, fill in the default values
df_worksheet = fillna_with_dict(df_worksheet, fillna_dict)
# after the duplicate-mapping check, select the required fields once more; the processing order matters
df_worksheet = func_loc(df_worksheet, target_cn_columns)
if not df_worksheet.empty:
check_mapping_complete(df_worksheet, complete_header_df, original2cn_dict,file_tag=file_tag)
#run one dtype-conversion pass so a bad conversion can be traced to its source file; the later concat still turns dates into object, so they are converted again afterwards
complete_header_df = dtype_handle(complete_header_df, dtype_dict)
# record the successfully processed sheet
success_sheet_df_list.append([table_stem, sheet, df_worksheet.shape[0]])
#complete_header_df is a complete header frame that keeps concat from scrambling or dropping columns,
#but the concatenation still turns every column into object dtype (CSV input; xlsx input is unaffected)
complete_header_df = | pd.concat([complete_header_df, df_worksheet], axis=0, sort=False, ignore_index=True) | pandas.concat |
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 3 11:22:49 2017
@author: tkc
"""
import pandas as pd
import os
import datetime
import sys
import numpy as np
import pkg.SC_messaging_functions as SCmess
import pkg.SC_schedule_functions as SCsch
import pkg.SC_config as cnf # specifies input/output file directories
#%%
from importlib import reload
reload(SCsch)
reload(SCmess)
#%% Download from google sheets Cabrini basketball schedule
sheetID = '1-uX2XfX5Jw-WPw3YBm-Ao8d2DOzou18Upw-Jb6UiPWg'
rangeName = 'Cabrini!A:G'
cabsched = SCapi.downloadSheet(sheetID, rangeName)
#%% Load of other commonly needed info sheets
teams=pd.read_csv(cnf._INPUT_DIR +'\\Teams_2019.csv', encoding='cp437')
coaches=pd.read_csv(cnf._INPUT_DIR +'\\coaches.csv', encoding='cp437')
fields=pd.read_csv(cnf._INPUT_DIR+'\\fields.csv', encoding='cp437')
Mastersignups = pd.read_csv(cnf._INPUT_DIR +'\\master_signups.csv', encoding='cp437')
players, famcontact = SC.loadProcessPlayerInfo() # version w/o signup processing
season='Winter'
year=2019
#%% Create all schedules and write to text log (non-auto emailed version)
emailtitle='Game Schedules for $TEAMNAME'
blankmess=SCmess.readMessage() # choose and read blank message from chosen *.txt
cabsched=SCsch.alterSchedule(cabsched) # day and division changes for consistency to previous
# write all team schedules to parent_email_log (default choice)
SCmess.sendschedule(teams, cabsched, fields, Mastersignups, coaches, year, famcontact, emailtitle, blankmess)
#%%
# Read tentative google drive Pat Moore schedules (after deleting the columns before the header row)
sched= | pd.read_csv(cnf._OUTPUT_DIR+'\\Schedules\\Bball2019_full_schedule.csv') | pandas.read_csv |
# This script creates a data set consisting of county-year observations of pollutants
# Importing required modules
import pandas as pd
# Defining the directory in which the data resides
path = r'C:\Users\User\Documents\Data\PMxCounty\conreport'
# Initializing the data structure
data = | pd.read_csv(path + '1980.csv') | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 9 23:51:08 2020
@author: Pavan
"""
import pandas as pd
pd.set_option('mode.chained_assignment', None)
import numpy as np
import math
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.ticker as mtick
mpl.rcParams['font.family'] = 'serif'
import scipy.stats as stats
import itertools
from datetime import datetime, date
import os
import yfinance as yf
from functools import partial
from american_option_pricing import american_option
"""
#######################################################################################
Import Data
#######################################################################################
"""
data = pd.read_excel('data_v3.xlsx', index_col=None)
current_date = date(2020,7,24)
expiry_date = date(2020,8,7)
days_to_expiry = np.busday_count( current_date, expiry_date)-1
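# e.g. for the dates above, np.busday_count(2020-07-24, 2020-08-07) counts 10 business
# days (start inclusive, end exclusive), so days_to_expiry works out to 9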
max_quantity_per_leg = 5
min_e_pnl = 0
min_p_profit = 30
max_cost = 750
max_loss = 750
mode = "rule_based" #"all_combinations"/"rule_based" - Always keep rule based
save_results = False
Strategies = ["Bear Call Spread","Bull Call Spread", \
"Bull Put Spread", "Bear Put Spread",\
"Bull Put Ladder", "Bear Call Ladder",\
"Long Straddle", "Long Strangle", \
"Long Strap", "Long Strip",\
# "Short Straddle", "Short Strangle", \
"Long Call Butterfly", "Long Put Butterfly",\
"Short Call Butterfly", "Short Put Butterfly",\
"Long Iron Butterfly", "Short Iron Butterfly",\
"Long Call Condor", "Long Put Condor", \
"Short Call Condor", "Short Put Condor", \
"Long Iron Condor", "Short Iron Condor", \
"Long Box"\
]
Strategies = []
"""
#######################################################################################
Get Risk Free Date
#######################################################################################
"""
#rf_eod_data = yf.download("^IRX", start="1993-01-01", end="2019-11-15")
rf_eod_data = yf.download("^IRX", start="2020-07-01", end= current_date.strftime("%Y-%m-%d"))
for col in rf_eod_data.columns:
rf_eod_data[col] = pd.to_numeric(rf_eod_data[col],errors='coerce')
rf_eod_data=rf_eod_data.fillna(method='ffill')
rf_eod_data['interest']=((1+(rf_eod_data['Adj Close']/100))**(1/252))-1
rf_eod_data['annualized_interest']=252*(((1+(rf_eod_data['Adj Close']/100))**(1/252))-1)
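# ^IRX is the 13-week T-bill rate quoted in percent; 'interest' converts it to an
# equivalent per-trading-day compounded rate and 'annualized_interest' scales that
# daily rate back up by the 252 trading days in a year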
rf_value =rf_eod_data['annualized_interest'].iloc[-1]
print("Current Risk Free Rate is :",'{:.3f}%'.format(rf_value*100))
"""
#######################################################################################
Data Cleaning
#######################################################################################
"""
def wrang_1(df, col_names):
for col in col_names:
df[col] = df[col].str.rstrip('%')
df[col] = pd.to_numeric(df[col],errors='coerce')
df[col] = [float(x)/100.0 for x in df[col].values]
return df
convert_cols = ["Impl Vol", "Prob.ITM","Prob.OTM","Prob.Touch"]
data = wrang_1(data,convert_cols)
def label_type(row):
if row['Symbol'][0] == "." :
return 'Option'
return 'Stock'
data['Type']=data.apply(lambda row: label_type(row), axis=1)
data['Expiry_Date']= data.Symbol.str.extract(r'(\d+)')
data['Expiry_Date'] = data['Expiry_Date'].apply(lambda x: pd.to_datetime(str(x), format='%y%m%d'))
expiry_date_str = expiry_date.strftime("%Y%m%d")
data['Expiry_Date'] = data['Expiry_Date'].fillna( | pd.Timestamp(expiry_date_str) | pandas.Timestamp |
import os
import numpy as np
import scipy as sc
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.dates import DateFormatter
import seaborn as sns
from scipy.interpolate import interp1d
from scipy.integrate import cumtrapz
from scipy.spatial import distance
from scipy.stats import gaussian_kde, binom
from numpy.random import RandomState
rand = RandomState()
import pickle
import pystan
from mycolours import *
def my_plot_configs():
plt.style.use('seaborn-paper')
plt.rcParams["figure.frameon"] = False
plt.rcParams['ps.useafm'] = True
plt.rcParams['pdf.use14corefonts'] = True
plt.rcParams['text.usetex'] = True
plt.rcParams['axes.linewidth'] = 2
plt.rcParams['font.family'] = 'sans-serif'
plt.rcParams['font.sans-serif'] = 'Helvetica'
plt.rcParams['axes.labelweight'] = 'bold'
def fig_save(fig, Plot_Folder, fname):
fig.savefig(os.path.join (Plot_Folder, fname),dpi=500)
fig.savefig(os.path.join (Plot_Folder, fname + "." + 'pdf'), format='pdf', Transparent=True)
# fig.savefig(os.path.join(Plot_Folder, fname + "." + 'pdf'), format='pdf')
fig.savefig(os.path.join (Plot_Folder, fname + "." + 'svg'), format='svg')
class DSA1():
def __init__(self, df=None, df_main=None, a=0.1, b=0.6, g=0.2, d=0.4, l=0.0, r_I=1e-6, r_V=1e-6, parent=None, **kwargs):
self.df = df
self.df_main = df_main
self.a = a
self.b = b
self.g = g
self.d = d
self.l = l
self.r_I = r_I
self.r_V = r_V
self.parent = parent
self.T = np.ceil(self.df['exit_time'].max())
if kwargs.get('timepoints') is None:
self.timepoints = np.linspace(0.0, self.T, 10000)
else:
self.timepoints = kwargs.get('timepoints')
@classmethod
def SVIR_ODE(cls, t, y, a, b, g, d, l):
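# State vector and parameters (assumed interpretation from the terms below):
# y[0]=S susceptible, y[1]=V vaccinated, y[2]=I infectious, y[3]=R removed;
# b: transmission rate, g: recovery rate, d: vaccination rate (S -> V),
# a: vaccine efficacy, l: rate at which effectively vaccinated reach R,
# so vaccinated individuals are infected at the reduced rate (1-a)*b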
dydt = np.zeros(4)
dydt[0] = -b*y[0]*y[2] - d*y[0]
dydt[1] = d*y[0] - a*l*y[1] - (1-a)*b*y[1]*y[2]
dydt[2] = b*y[0]*y[2] + (1-a)*b*y[1]*y[2] - g*y[2]
dydt[3] = a*l*y[1] + g*y[2]
return dydt
@classmethod
def SVIR_Extended_ODE(cls, t, y, a, b, g, d, l):
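# Same system as SVIR_ODE plus a bookkeeping state y[4] that starts at 1 and
# decreases with cumulative new infections from S and V; prob_test_positive
# below reads this trajectory (S_I) to approximate recent-infection fractions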
dydt = np.zeros(5)
dydt[0] = -b*y[0]*y[2] - d*y[0]
dydt[1] = d*y[0] - a*l*y[1] - (1 - a)*b*y[1]*y[2]
dydt[2] = b*y[0]*y[2] + (1 - a)*b*y[1]*y[2] - g*y[2]
dydt[3] = a*l*y[1] + g*y[2]
dydt[4] = -b*y[0]*y[2] - (1 - a)*b*y[1]*y[2]
# dydt[4] = -b * y[0] * y[2]
return dydt
@classmethod
def draw_parms_prior(cls, a_bound=(0.09, 0.11),
b_bound=(1 / 5.6, 0.75),
g_bound=(0.5 / 5.6, 2 / 5.6),
d_bound=(0.4, 1.0),
l_bound=(0, 1e-3),
r_V_bound=(0.15, 0.25),
r_I_bound=(1e-6, 5e-1),
nSample=1):
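# draw nSample independent uniform samples for each model parameter within the
# given bounds; these serve as prior draws for the downstream fitting code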
a_sample = np.random.uniform(low=a_bound[0], high=a_bound[1], size=nSample)
b_sample = np.random.uniform(low=b_bound[0], high=b_bound[1], size=nSample)
g_sample = np.random.uniform(low=g_bound[0], high=g_bound[1], size=nSample)
d_sample = np.random.uniform(low=d_bound[0], high=d_bound[1], size=nSample)
l_sample = np.random.uniform(low=l_bound[0], high=l_bound[1], size=nSample)
r_V_sample = np.random.uniform(low=r_V_bound[0], high=r_V_bound[1], size=nSample)
r_I_sample = np.random.uniform(low=r_I_bound[0], high=r_I_bound[1], size=nSample)
return a_sample, b_sample, g_sample, d_sample, l_sample, r_V_sample, r_I_sample
@property
def R0(self):
return 1.0 * self.b/self.g
@property
def kT(self):
if self.parent is None:
return self.df['exit_time'].shape[0]
else:
return self.parent.kT
@property
def rescale(self):
return 1 - self.S(self.T)
@property
def n(self):
return self.kT / self.rescale
@property
def sT(self):
return self.n - self.kT
@property
def theta(self):
return [self.a, self.b, self.g, self.d, self.l, self.r_I, self.r_V]
@property
def S(self):
a, b, g, d, l, r_I, r_V = self.theta
t_span = [0, self.T]
t_eval = np.linspace(0.0, self.T, 100000)
y0 = [1.0, self.r_V, self.r_I, 0.0]
ode_fun = lambda t, y: DSA1.SVIR_ODE(t, y, a=a, b=b, g=g, d=d, l=l)
sol = sc.integrate.solve_ivp(ode_fun, t_span, y0, method='RK45', t_eval=t_eval, dense_output=True)
S = interp1d(t_eval, sol.y[0])
return S
def add_fits(self, samples):
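# wrap each row of `samples` (a, b, g, d, l, r_I, r_V) in a child DSA1 bound to the
# same data; the parent supplies kT so per-sample quantities such as n and sT
# can be evaluated for every parameter draw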
fits = []
l = np.size(samples, axis=0)
for i in range(l):
a, b, g, d, l, r_I, r_V = samples[i]
fit = DSA1(df=self.df, a=a, b=b, g=g, d=d, l=l, r_I=r_I, r_V=r_V, parent=self)
fits.append(fit)
self.fits = fits
return self
def compute_density(self, theta):
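# density of the exit times under the dynamic-survival-analysis view:
# -dS_t/dt = b*S_t*I_t + d*S_t (infection plus vaccination outflow from S),
# normalised by 1 - S(T) so it integrates to one over [0, T]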
a, b, g, d, l, r_I, r_V = theta
t_span = [0, self.T]
t_eval = np.linspace(0.0, self.T, 100000)
y0 = [1.0, self.r_V, self.r_I, 0.0]
ode_fun = lambda t, y: DSA1.SVIR_ODE(t, y, a=a, b=b, g=g, d=d, l=l)
sol = sc.integrate.solve_ivp(ode_fun, t_span, y0, method='RK45', t_eval=t_eval, dense_output=True)
S = interp1d(t_eval, sol.y[0])
I = interp1d(t_eval, sol.y[2])
out = []
ST = S(self.T)
for x in self.timepoints:
Sx = S(x)
Ix = I(x)
out.append((b*Sx*Ix + d*Sx)/(1-ST))
return out
def plot_density_fit_posterior(self, samples):
nSamples = np.size(samples, axis=0)
Ds = np.zeros((nSamples, len(self.timepoints)), dtype=np.float)
for idx in range(nSamples):
Ds[idx] = self.compute_density(samples[idx])
Dslow = np.quantile(Ds, q=0.025, axis=0)
Dshigh = np.quantile(Ds, q=0.975, axis=0)
Dmean = np.mean(Ds, axis=0)
fig = plt.figure()
plt.plot(self.timepoints, Dmean, '-', color=forrest['forrest3'].get_rgb(), lw=3)
plt.plot(self.timepoints, Dslow, '--', color=forrest['forrest3'].get_rgb(), lw=1)
plt.plot(self.timepoints, Dshigh, '--', color=forrest['forrest3'].get_rgb(), lw=1)
# plt.axvline(x=self.T, color=junglegreen['green3'].get_rgb(), linestyle='-')
mirrored_data = (2 * self.T - self.df['exit_time'].values).tolist()
combined_data = self.df['exit_time'].values.tolist() + mirrored_data
dense = gaussian_kde(combined_data)
denseval = list(dense(x) * 2 for x in self.timepoints)
plt.plot(self.timepoints, denseval, '-', color=purplybrown['purplybrown4'].get_rgb(), lw=3)
plt.fill_between(self.timepoints, Dslow, Dshigh, alpha=.3, color=forrest['forrest1'].get_rgb())
plt.legend()
plt.ylabel('$-\dot{S}_t/(1-S_T)$')
plt.xlabel('t')
c = cumtrapz(Dmean, self.timepoints)
ind = np.argmax(c >= 0.001)
plt.xlim((self.timepoints[ind], self.timepoints[-1] + 1))
sns.despine()
return fig
@classmethod
def prob_test_positive(cls, t, T, theta, lag=60):
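# rough probability that a test taken on day t is positive: the fraction of the
# population infected within the trailing `lag` days, S_I(t-lag) - S_I(t), with
# 1 - S_I(t) used while t < lag (S_I is the y[4] state of the extended ODE)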
a, b, g, d, l, r_I, r_V = theta
# T = self.T
t_span = [0, T + 1]
t_eval = np.linspace(0.0, T + 1, 100000)
y0 = [1.0, r_V, r_I, 0.0, 1.0]
ode_fun = lambda t, y: DSA1.SVIR_Extended_ODE(t, y, a=a, b=b, g=g, d=d, l=l)
sol = sc.integrate.solve_ivp(ode_fun, t_span, y0, method='RK45', t_eval=t_eval,
events=None, vectorized=False, args=None)
S = interp1d(t_eval, sol.y[0])
S_I = interp1d(t_eval, sol.y[4])
if t < lag:
test_pos_prob = (1.0 - S_I(t))
# test_pos_prob = (1.0 - S_I(t))/(1-S(T))
else:
test_pos_prob = (S_I(t - lag) - S_I(t))
# test_pos_prob = (S_I(t-21) - S_I(t))/(1-S(T))
return test_pos_prob
@classmethod
def binom_likelihood(cls, df_main, theta):
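# negative log-likelihood of the observed daily positives: each day's count is
# modelled as Binomial(daily_test, p_d) with p_d = prob_test_positive(d)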
nDates = df_main.time.size
total_tests = df_main.daily_test.values
daily_pos = df_main.daily_positive.values
T = (df_main.time.max() - df_main.time.min()).days + 1
loglikelihood = 0.0
for d in range(nDates):
test_pos_prob = DSA1.prob_test_positive(d + 1, T, theta=theta)
loglikelihood = loglikelihood + binom.logpmf(daily_pos[d], total_tests[d], test_pos_prob, loc=0)
return -loglikelihood
def children_daily_test_pos_prediction(self, sample=None):
df_main = self.df_main
if sample is None:
sample = self.theta
nDates = df_main.time.size
total_tests = df_main.children_daily_test.values
predicted_test_pos = np.zeros(nDates, dtype=np.int64)
T = (df_main.time.max() - df_main.time.min()).days + 1
for d in range(nDates):
test_pos_prob = DSA1.prob_test_positive(d + 1, T, sample)
# print(test_pos_prob)
predicted_test_pos[d] = np.random.binomial(total_tests[d], test_pos_prob, size=1)
return predicted_test_pos
def daily_test_pos_prediction(self, sample=None):
df_main = self.df_main
if sample is None:
sample = self.theta
nDates = df_main.time.size
# dates = df_main.time.values
total_tests = df_main.daily_test.values
predicted_test_pos = np.zeros(nDates, dtype=np.int64)
T = (df_main.time.max() - df_main.time.min()).days + 1
for d in range(nDates):
test_pos_prob = DSA1.prob_test_positive(d+1, T, sample)
# print(test_pos_prob)
predicted_test_pos[d] = np.random.binomial(total_tests[d], test_pos_prob, size=1)
return predicted_test_pos
def daily_test_pos_probabilities(self, sample=None):
df_main = self.df_main
if sample is None:
sample = self.theta
nDates = df_main.time.size
test_pos_probabilities = np.zeros(nDates, dtype=np.float64)
T = (df_main.time.max() - df_main.time.min()).days + 1
for d in range(nDates):
test_pos_probabilities[d] = DSA1.prob_test_positive(d+1, T, sample)
return test_pos_probabilities
def compare_test_pos_probabilities(self, samples, theta=None):
nSamples = np.size(samples, axis=0)
dates = self.df_main.time
nDays = len(dates)
test_pos_probabilities = np.zeros((nSamples, nDays), dtype=np.float64)
if theta is None:
theta = np.mean(samples, axis=0)
for i in range(nSamples):
sample = samples[i]
test_pos_probabilities[i] = self.daily_test_pos_probabilities(sample=sample)
m = np.mean(test_pos_probabilities, axis=0)
median = np.quantile(test_pos_probabilities, q=0.5, axis=0)
low = np.quantile(test_pos_probabilities, q=0.025, axis=0)
high = np.quantile(test_pos_probabilities, q=0.975, axis=0)
my_plot_configs()
fig = plt.figure()
lmedian, = plt.plot(self.df_main['time'].values, median, '-.', color=forrest['forrest5'].get_rgb(), lw=3,
label='Median')
lm, = plt.plot(self.df_main['time'].values, median, '-', color=forrest['forrest3'].get_rgb(), lw=3,
label='Mean')
l3, = plt.plot(self.df_main['time'].values, low, '--', color=forrest['forrest2'].get_rgb(), lw=1.5)
l4, = plt.plot(self.df_main['time'].values, high, '--', color=forrest['forrest2'].get_rgb(), lw=1.5)
# l5, = plt.fill_between(self.df_main['time'].values, low, high, alpha=.1, color=forrest['forrest1'].get_rgb())
l7, = plt.plot(self.df_main['time'].values, self.df_main['daily_pct_positive'].values, '-',
color=maroons['maroon3'].get_rgb(),
lw=2, label='Actual')
plt.xlabel('Dates')
plt.ylabel('Daily percent positive')
# plt.ylim(0.0, 1.0)
plt.legend(handles=[lmedian, l7])
ax = plt.gca()
date_form = DateFormatter("%m-%d")
ax.xaxis.set_major_formatter(date_form)
sns.despine()
my_dict = {}
# my_dict['Dates'] = dates['d']
my_dict['Dates'] = dates
my_dict['Mean'] = m
# my_dict['MLE'] = mle
my_dict['Median'] = median
my_dict['High'] = high
my_dict['Low'] = low
my_dict = pd.DataFrame(my_dict)
# my_dict.to_csv(os.path.join(Plot_Folder, fname + '.csv'), index=False)
return fig, my_dict
def compare_fit_binomial(self, samples, theta=None):
nSamples = np.size(samples, axis=0)
dates = self.df_main.time
nDays = len(dates)
time_points = np.arange(nDays)
daily_positive = np.zeros((nSamples, nDays), dtype=np.int64)
if theta is None:
theta = np.mean(samples, axis=0)
for i in range(nSamples):
sample = samples[i]
daily_positive[i] = self.daily_test_pos_prediction(sample)
m = np.int64(np.mean(daily_positive, axis=0))
median = np.int64(np.quantile(daily_positive, q=0.5, axis=0))
low = np.int64(np.quantile(daily_positive, q=0.025, axis=0))
high = np.int64(np.quantile(daily_positive, q=0.975, axis=0))
my_plot_configs()
fig = plt.figure()
lmedian, = plt.plot(self.df_main['time'].values, median, '-.', color=forrest['forrest5'].get_rgb(), lw=3, label='Median')
lm, = plt.plot(self.df_main['time'].values, median, '-', color=forrest['forrest3'].get_rgb(), lw=3, label='Mean')
l3, = plt.plot(self.df_main['time'].values, low, '--', color=forrest['forrest2'].get_rgb(), lw=1.5)
l4, = plt.plot(self.df_main['time'].values, high, '--', color=forrest['forrest2'].get_rgb(), lw=1.5)
# l5, = plt.fill_between(self.df_main['time'].values, low, high, alpha=.1, color=forrest['forrest1'].get_rgb())
l7, = plt.plot(self.df_main['time'].values, self.df_main['daily_positive'].values, '-', color=maroons['maroon3'].get_rgb(),
lw=2, label='Actual')
plt.xlabel('Dates')
plt.ylabel('Daily cases')
# plt.ylim(0, 2000000)
plt.legend(handles=[lmedian, l7])
ax = plt.gca()
date_form = DateFormatter("%m-%d")
ax.xaxis.set_major_formatter(date_form)
sns.despine()
my_dict = {}
# my_dict['Dates'] = dates['d']
my_dict['Dates'] = dates
my_dict['Mean'] = m
# my_dict['MLE'] = mle
my_dict['Median'] = median
my_dict['High'] = high
my_dict['Low'] = low
my_dict = pd.DataFrame(my_dict)
# my_dict.to_csv(os.path.join(Plot_Folder, fname + '.csv'), index=False)
return fig, my_dict
def compare_I(self, samples, df, dates, n0=1, d0=0, theta=None):
nSamples = np.size(samples, axis=0)
nDays = len(dates)
time_points = np.arange(nDays)
mean = np.zeros((nSamples, nDays), dtype=np.float)
mean_daily = np.zeros((nSamples, nDays), dtype=np.float)
if theta is not None:
theta = np.mean(samples, axis=0)
t_span = [0, nDays + 1]
t_eval = np.linspace(0.0, nDays + 1, 100000)
my_plot_configs()
fig_a = plt.figure()
for i in range(nSamples):
a, b, g, d, l, r_I, r_V = samples[i]
epi = DSA1(df=self.df, a=a, b=b, g=g, d=d, l=l, r_I=r_I, r_V=r_V)
n = epi.n
y0 = [1.0, r_V, r_I, 0.0]
ode_fun = lambda t, y: DSA1.SVIR_ODE(t, y, a=a, b=b, g=g, d=d, l=l)
sol = sc.integrate.solve_ivp(ode_fun, t_span, y0, method='RK45', t_eval=t_eval)
I = interp1d(t_eval, sol.y[2])
mean_daily[i] = np.asarray(list(n * I(x) for x in time_points))
mean[i] = np.cumsum(mean_daily[i]) + n0
m = np.int64(np.ceil(np.mean(mean_daily, axis=0)))
median = np.int64(np.percentile(mean_daily, 50.0, axis=0))
low = np.int64(np.ceil(np.quantile(mean_daily, q=0.025, axis=0)))
high = np.int64(np.ceil(np.quantile(mean_daily, q=0.975, axis=0)))
# a, b, g, d, l, r_I, r_V = theta
# epi = DSA(df=self.df, a=a, b=b, g=g, d=d, l=l, r_I=r_I, r_V=r_V)
# n = epi.n
# y0 = [1.0, r_I, r_I, 0.0]
# ode_fun = lambda t, y: DSA.SVIR_ODE(t, y, a=a, b=b, g=g, d=d, l=l)
# sol = sc.integrate.solve_ivp(ode_fun, t_span, y0, method='RK45', t_eval=t_eval)
# I = interp1d(t_eval, sol.y[2])
# mle = np.asarray(list(n * I(x) for x in time_points))
# l2mle, = plt.plot(dates['d'].dt.date, mle, '-.', color=greys['grey2'].get_rgb(), lw=3, label='Prediction')
l2median, = plt.plot(dates['d'].dt.date, median, '-.', color=cyans['cyan5'].get_rgb(), lw=3, label='Median')
l2, = plt.plot(dates['d'].dt.date, m, '-', color=cyans['cyan5'].get_rgb(), lw=3, label="Mean")
l3 = plt.plot(dates['d'].dt.date, low, '--', color=cyans['cyan3'].get_rgb(), lw=1.5)
l4 = plt.plot(dates['d'].dt.date, high, '--', color=cyans['cyan3'].get_rgb(), lw=1.5)
l5 = plt.fill_between(dates['d'].dt.date, low, high, alpha=.1, color=cyans['cyan1'].get_rgb())
l7 = plt.plot(df['time'].values, df['daily_positive'].values, '-', color=maroons['maroon3'].get_rgb(),
lw=2, label='Actual')
plt.xlabel('Dates')
plt.ylabel('Daily cases')
# plt.ylim(0, 2000000)
# plt.legend(handles=[l2mle, l7])
ax = plt.gca()
date_form = DateFormatter("%m-%d")
ax.xaxis.set_major_formatter(date_form)
sns.despine()
my_dict = {}
my_dict['Dates'] = dates['d']
# my_dict['Dates'] = dates
my_dict['Mean'] = m
# my_dict['MLE'] = mle
my_dict['Median'] = median
my_dict['High'] = high
my_dict['Low'] = low
my_dict = pd.DataFrame(my_dict)
# my_dict.to_csv(os.path.join(Plot_Folder, fname + '.csv'), index=False)
return fig_a, my_dict
def compare_IV(self, samples, df, dates, n0=1, d0=0, theta=None):
nSamples = np.size(samples, axis=0)
nDays = len(dates)
time_points = np.arange(nDays)
mean = np.zeros((nSamples, nDays), dtype=np.float)
mean_daily = np.zeros((nSamples, nDays), dtype=np.float)
if theta is not None:
theta = np.mean(samples, axis=0)
t_span = [0, nDays + 1]
t_eval = np.linspace(0.0, nDays + 1, 100000)
my_plot_configs()
fig_a = plt.figure()
for i in range(nSamples):
a, b, g, d, l, r_I, r_V = samples[i]
epi = DSA1(df=self.df, a=a, b=b, g=g, d=d, l=l, r_I=r_I, r_V=r_V)
n = epi.n
y0 = [1.0, r_V, r_I, 0.0]
ode_fun = lambda t, y: DSA1.SVIR_ODE(t, y, a=a, b=b, g=g, d=d, l=l)
sol = sc.integrate.solve_ivp(ode_fun, t_span, y0, method='RK45', t_eval=t_eval)
S = interp1d(t_eval, sol.y[0])
I = interp1d(t_eval, sol.y[2])
mean_daily[i] = np.asarray(list(n * I(x) for x in time_points))
mean[i] = np.asarray(list(n * (1-S(x)) + n0 for x in time_points))
m = np.int64(np.ceil(np.mean(mean, axis=0)))
median = np.int64(np.percentile(mean, 50.0, axis=0))
low = np.int64(np.ceil(np.quantile(mean, q=0.025, axis=0)))
high = np.int64(np.ceil(np.quantile(mean, q=0.975, axis=0)))
# a, b, g, d, l, r_I, r_V = theta
# epi = DSA(df=self.df, a=a, b=b, g=g, d=d, l=l, r_I=r_I, r_V=r_V)
# n = epi.n
# y0 = [1.0, r_I, r_I, 0.0]
# ode_fun = lambda t, y: DSA.SVIR_ODE(t, y, a=a, b=b, g=g, d=d, l=l)
# sol = sc.integrate.solve_ivp(ode_fun, t_span, y0, method='RK45', t_eval=t_eval)
# S = interp1d(t_eval, sol.y[0])
# I = interp1d(t_eval, sol.y[2])
# # mle = np.cumsum(np.asarray(list(n * I(x) for x in time_points))) + n0
# mle = np.asarray(list(n * (1-S(x)) + n0 for x in time_points))
# l2mle, = plt.plot(dates['d'].dt.date, mle, '-.', color=greys['grey2'].get_rgb(), lw=3, label='Prediction')
l2median, = plt.plot(dates['d'].dt.date, median, '-.', color=cyans['cyan5'].get_rgb(), lw=3, label='Median')
l2, = plt.plot(dates['d'].dt.date, m, '-', color=cyans['cyan5'].get_rgb(), lw=3, label="Mean")
l3 = plt.plot(dates['d'].dt.date, low, '--', color=cyans['cyan3'].get_rgb(), lw=1.5)
l4 = plt.plot(dates['d'].dt.date, high, '--', color=cyans['cyan3'].get_rgb(), lw=1.5)
l5 = plt.fill_between(dates['d'].dt.date, low, high, alpha=.1, color=cyans['cyan1'].get_rgb())
l7 = plt.plot(df['time'].values, df['cumulative_positive'].values + df['cumulative_dose1'].values, '-', color=maroons['maroon3'].get_rgb(),
lw=2, label='Actual')
plt.xlabel('Dates')
plt.ylabel('Cumulative transfers')
# plt.ylim(0, 2000000)
# plt.legend(handles=[l2mle, l7])
ax = plt.gca()
date_form = DateFormatter("%m-%d")
ax.xaxis.set_major_formatter(date_form)
sns.despine()
my_dict = {}
my_dict['Dates'] = dates['d']
my_dict['Mean'] = m
# my_dict['MLE'] = mle
my_dict['Median'] = median
my_dict['High'] = high
my_dict['Low'] = low
my_dict = pd.DataFrame(my_dict)
# my_dict.to_csv(os.path.join(Plot_Folder, fname + '.csv'), index=False)
return fig_a, my_dict
def no_vaccination_scenario(self, samples, df, dates, n0=1, d0=0, theta=None):
nSamples = np.size(samples, axis=0)
nDays = len(dates)
time_points = np.arange(nDays)
mean = np.zeros((nSamples, nDays), dtype=np.float)
mean_daily = np.zeros((nSamples, nDays), dtype=np.float)
mean_no_vaccination = np.zeros((nSamples, nDays), dtype=np.float)
mean_daily_no_vaccination = np.zeros((nSamples, nDays), dtype=np.float)
if theta is not None:
theta = np.mean(samples, axis=0)
t_span = [0, nDays + 1]
t_eval = np.linspace(0.0, nDays + 1, 100000)
my_plot_configs()
fig_a = plt.figure()
for i in range(nSamples):
a, b, g, d, l, r_I, r_V = samples[i]
epi = DSA1(df=self.df, a=a, b=b, g=g, d=d, l=l, r_I=r_I, r_V=r_V)
n = epi.n
y0 = [1.0, r_V, r_I, 0.0]
ode_fun = lambda t, y: DSA1.SVIR_ODE(t, y, a=a, b=b, g=g, d=d, l=l)
sol = sc.integrate.solve_ivp(ode_fun, t_span, y0, method='RK45', t_eval=t_eval)
I = interp1d(t_eval, sol.y[2])
mean_daily[i] = np.asarray(list(n * I(x) for x in time_points))
mean[i] = np.cumsum(mean_daily[i]) + n0
epi = DSA1(df=self.df, a=a, b=b, g=g, d=0.0, l=l, r_I=r_I, r_V=r_V)
n = epi.n
y0 = [1.0, r_V, r_I, 0.0]
ode_fun = lambda t, y: DSA1.SVIR_ODE(t, y, a=a, b=b, g=g, d=0.0, l=l)
sol = sc.integrate.solve_ivp(ode_fun, t_span, y0, method='RK45', t_eval=t_eval)
I = interp1d(t_eval, sol.y[2])
mean_daily_no_vaccination[i] = np.asarray(list(n * I(x) for x in time_points))
mean_no_vaccination[i] = np.cumsum(mean_daily[i]) + n0
m = np.int64(np.ceil(np.mean(mean_daily, axis=0)))
median = np.int64(np.percentile(mean_daily, 50.0, axis=0))
low = np.int64(np.ceil(np.quantile(mean_daily, q=0.025, axis=0)))
high = np.int64(np.ceil(np.quantile(mean_daily, q=0.975, axis=0)))
m_nv = np.int64(np.ceil(np.mean(mean_daily_no_vaccination, axis=0)))
median_nv = np.int64(np.percentile(mean_daily_no_vaccination, 50.0, axis=0))
low_nv = np.int64(np.ceil(np.quantile(mean_daily_no_vaccination, q=0.025, axis=0)))
high_nv = np.int64(np.ceil(np.quantile(mean_daily_no_vaccination, q=0.975, axis=0)))
# l2mle, = plt.plot(dates['d'].dt.date, mle, '-.', color=greys['grey2'].get_rgb(), lw=3, label='Prediction')
l2median, = plt.plot(dates['d'].dt.date, median, '-.', color=cyans['cyan5'].get_rgb(), lw=3, label='Median')
l2, = plt.plot(dates['d'].dt.date, m, '-', color=cyans['cyan5'].get_rgb(), lw=3, label="Mean")
l3 = plt.plot(dates['d'].dt.date, low, '--', color=cyans['cyan3'].get_rgb(), lw=1.5)
l4 = plt.plot(dates['d'].dt.date, high, '--', color=cyans['cyan3'].get_rgb(), lw=1.5)
l5 = plt.fill_between(dates['d'].dt.date, low, high, alpha=.2, color=cyans['cyan1'].get_rgb())
# l2mle_nv, = plt.plot(dates['d'].dt.date, mle_nv, '-.', color=greys['grey2'].get_rgb(), lw=3, label='Prediction')
l2median_nv, = plt.plot(dates['d'].dt.date, median_nv, '-.', color=coffee['coffee4'].get_rgb(), lw=3, label='Median')
l2, = plt.plot(dates['d'].dt.date, m_nv, '-', color=coffee['coffee4'].get_rgb(), lw=3, label="Mean")
l3 = plt.plot(dates['d'].dt.date, low_nv, '--', color=coffee['coffee2'].get_rgb(), lw=1.5)
l4 = plt.plot(dates['d'].dt.date, high_nv, '--', color=coffee['coffee2'].get_rgb(), lw=1.5)
l5 = plt.fill_between(dates['d'].dt.date, low_nv, high_nv, alpha=.2, color=coffee['coffee1'].get_rgb())
# l7 = plt.plot(df['time'].values, df['daily_positive'].values + n0, '-', color=maroons['maroon3'].get_rgb(),lw=2, label='Actual')
plt.xlabel('Dates')
plt.ylabel('Daily cases')
# plt.ylim(0, 2000000)
# plt.legend(handles=[l2mle])
ax = plt.gca()
date_form = DateFormatter("%m-%d")
ax.xaxis.set_major_formatter(date_form)
sns.despine()
my_dict = {}
my_dict['Dates'] = dates['d']
my_dict['Mean'] = m
# my_dict['Prediction'] = mle
my_dict['Median'] = median
my_dict['High'] = high
my_dict['Low'] = low
my_dict['Mean_NoVac'] = m_nv
# my_dict['Prediction_NoVac'] = mle_nv
my_dict['Median_NoVac'] = median_nv
my_dict['High_NoVac'] = high_nv
my_dict['Low_NoVac'] = low_nv
my_dict = pd.DataFrame(my_dict)
# my_dict.to_csv(os.path.join(Plot_Folder, fname + '.csv'), index=False)
return fig_a, my_dict
class DSA2():
def __init__(self, df=None, df_main=None, a=0.1, b=0.6, g=0.2, d=0.4, l=0.0, k=1/28, r_I=1e-6, r_V1=1e-6, r_V2=0.0, drop=0.5, parent=None, **kwargs):
self.df = df
self.df_main = df_main
self.a = a
self.b = b
self.g = g
self.d = d
self.l = l
self.k = k
self.r_I = r_I
self.r_V1 = r_V1
self.r_V2 = r_V2
self.drop = drop
self.parent = parent
self.T = np.ceil(self.df['exit_time'].max())
if kwargs.get('timepoints') is None:
self.timepoints = np.linspace(0.0, self.T, 10000)
else:
self.timepoints = kwargs.get('timepoints')
@classmethod
def SVIR_ODE(cls, t, y, a, b, g, d, l, k, drop=0.5):
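# Two-dose variant (assumed interpretation): y[0]=S, y[1]=V1 one dose,
# y[2]=V2 both doses, y[3]=I, y[4]=R; d: first-dose rate (S -> V1),
# k: dose-1 to dose-2 rate (default 1/28, i.e. roughly a 28-day gap),
# drop*a: effective one-dose efficacy versus a after two doses, so the two
# vaccinated compartments transmit at (1-drop*a)*b and (1-a)*b respectively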
dydt = np.zeros(5)
dydt[0] = -b*y[0]*y[3] - d*y[0]
dydt[1] = d*y[0] - k*y[1] - (1-drop*a)*b*y[1]*y[3] - drop*a*l*y[1]
dydt[2] = k*y[1] - (1-a)*b*y[2]*y[3] - a*l*y[2]
dydt[3] = b*y[0]*y[3] + (1-drop*a)*b*y[1]*y[3] + (1-a)*b*y[2]*y[3] - g*y[3]
dydt[4] = drop*a*l*y[1] + a*l*y[2] + g*y[3]
return dydt
@classmethod
def SVIR_Extended_ODE(cls, t, y, a, b, g, d, l, k, drop=0.5):
dydt = np.zeros(6)
dydt[0] = -b*y[0]*y[3] - d*y[0]
dydt[1] = d*y[0] - k*y[1] - (1-drop*a)*b*y[1]*y[3] - drop*a*l*y[1]
dydt[2] = k*y[1] - (1-a)*b*y[2]*y[3] - a*l*y[2]
dydt[3] = b*y[0]*y[3] + (1-drop*a)*b*y[1]*y[3] + (1-a)*b*y[2]*y[3] - g*y[3]
dydt[4] = drop*a*l*y[1] + a*l*y[2] + g*y[3]
# dydt[5] = -b*y[0]*y[3] - (1-0.5*a)*b*y[1]*y[3] - (1-a)*b*y[2]*y[3]
dydt[5] = y[3]
return dydt
@classmethod
def draw_parms_prior(cls, a_bound=(0.09, 0.11),
b_bound=(1 / 5.6, 0.75),
g_bound=(0.5 / 5.6, 2 / 5.6),
d_bound=(0.4, 1.0),
l_bound=(0, 1e-3),
k_bound=(0.9/28, 1.1/28),
r_V1_bound=(0.15, 0.25),
r_V2_bound=(0, 0.2),
r_I_bound=(1e-6, 5e-1),
nSample=1):
a_sample = np.random.uniform(low=a_bound[0], high=a_bound[1], size=nSample)
b_sample = np.random.uniform(low=b_bound[0], high=b_bound[1], size=nSample)
g_sample = np.random.uniform(low=g_bound[0], high=g_bound[1], size=nSample)
d_sample = np.random.uniform(low=d_bound[0], high=d_bound[1], size=nSample)
l_sample = np.random.uniform(low=l_bound[0], high=l_bound[1], size=nSample)
k_sample = np.random.uniform(low=k_bound[0], high=k_bound[1], size=nSample)
r_V1_sample = np.random.uniform(low=r_V1_bound[0], high=r_V1_bound[1], size=nSample)
# r_V2_sample = np.random.uniform(low=r_V2_bound[0], high=r_V2_bound[1], size=nSample)
r_V2_sample = np.random.uniform(low=r_V2_bound[0], high=r_V1_sample, size=nSample)
r_I_sample = np.random.uniform(low=r_I_bound[0], high=r_I_bound[1], size=nSample)
return a_sample, b_sample, g_sample, d_sample, l_sample, k_sample, r_V1_sample, r_V2_sample, r_I_sample
@property
def R0(self):
return 1.0 * self.b/self.g
@property
def kT(self):
if self.parent is None:
return self.df['exit_time'].shape[0]
else:
return self.parent.kT
@property
def rescale(self):
return 1 - self.S(self.T)
@property
def n(self):
return self.kT / self.rescale
@property
def sT(self):
return self.n - self.kT
@property
def theta(self):
return [self.a, self.b, self.g, self.d, self.l, self.k, self.r_I, self.r_V1, self.r_V2]
@property
def S(self):
a, b, g, d, l, k, r_I, r_V1, r_V2 = self.theta
t_span = [0, self.T]
t_eval = np.linspace(0.0, self.T, 100000)
y0 = [1.0, self.r_V1, self.r_V2, self.r_I, 0.0]
ode_fun = lambda t, y: DSA2.SVIR_ODE(t, y, a=a, b=b, g=g, d=d, l=l, k=k, drop=self.drop)
sol = sc.integrate.solve_ivp(ode_fun, t_span, y0, method='RK45', t_eval=t_eval, dense_output=True)
S = interp1d(t_eval, sol.y[0])
return S
def add_fits(self, samples):
fits = []
l = np.size(samples, axis=0)
for i in range(l):
a, b, g, d, l, k, r_I, r_V1, r_V2 = samples[i]
fit = DSA2(df=self.df, a=a, b=b, g=g, d=d, l=l, k=k, r_I=r_I, r_V1=r_V1, r_V2=r_V2, drop=self.drop, parent=self)
fits.append(fit)
self.fits = fits
return self
def compute_density(self, theta):
a, b, g, d, l, k, r_I, r_V1, r_V2 = theta
t_span = [0, self.T]
t_eval = np.linspace(0.0, self.T, 100000)
y0 = [1.0, self.r_V1, self.r_V2, self.r_I, 0.0]
ode_fun = lambda t, y: DSA2.SVIR_ODE(t, y, a=a, b=b, g=g, d=d, l=l, k=k, drop=self.drop)
sol = sc.integrate.solve_ivp(ode_fun, t_span, y0, method='RK45', t_eval=t_eval, dense_output=True)
S = interp1d(t_eval, sol.y[0])
I = interp1d(t_eval, sol.y[3])
out = []
ST = S(self.T)
for x in self.timepoints:
Sx = S(x)
Ix = I(x)
out.append((b*Sx*Ix + d*Sx)/(1-ST))
return out
def plot_density_fit_posterior(self, samples):
nSamples = np.size(samples, axis=0)
Ds = np.zeros((nSamples, len(self.timepoints)), dtype=np.float)
for idx in range(nSamples):
Ds[idx] = self.compute_density(samples[idx])
Dslow = np.quantile(Ds, q=0.025, axis=0)
Dshigh = np.quantile(Ds, q=0.975, axis=0)
Dmean = np.mean(Ds, axis=0)
fig = plt.figure()
plt.plot(self.timepoints, Dmean, '-', color=forrest['forrest3'].get_rgb(), lw=3)
plt.plot(self.timepoints, Dslow, '--', color=forrest['forrest3'].get_rgb(), lw=1)
plt.plot(self.timepoints, Dshigh, '--', color=forrest['forrest3'].get_rgb(), lw=1)
# plt.axvline(x=self.T, color=junglegreen['green3'].get_rgb(), linestyle='-')
mirrored_data = (2 * self.T - self.df['exit_time'].values).tolist()
combined_data = self.df['exit_time'].values.tolist() + mirrored_data
dense = gaussian_kde(combined_data)
denseval = list(dense(x) * 2 for x in self.timepoints)
plt.plot(self.timepoints, denseval, '-', color=purplybrown['purplybrown4'].get_rgb(), lw=3)
plt.fill_between(self.timepoints, Dslow, Dshigh, alpha=.3, color=forrest['forrest1'].get_rgb())
plt.legend()
plt.ylabel('$-\dot{S}_t/(1-S_T)$')
plt.xlabel('t')
c = cumtrapz(Dmean, self.timepoints)
ind = np.argmax(c >= 0.001)
plt.xlim((self.timepoints[ind], self.timepoints[-1] + 1))
sns.despine()
return fig
@classmethod
def prob_test_positive(cls, t, T, theta, lag=22, drop=0.5):
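# piecewise test-positive probability (assumed derivation): before `lag` days it
# sums, for initially susceptible, one-dose and two-dose individuals, the chance
# of having been infected by t (1 - exp of the accumulated infection pressure
# b*S_I, where S_I integrates I) times the chance of still being in that
# compartment; after `lag` days only infections acquired in [t-lag, t] count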
a, b, g, d, l, k, r_I, r_V1, r_V2 = theta
# T = self.T
t_span = [0, T + 1]
t_eval = np.linspace(0.0, T + 1, 100000)
y0 = [1.0, r_V1, r_V2, r_I, 0.0, 0.0]
ode_fun = lambda t, y: DSA2.SVIR_Extended_ODE(t, y, a=a, b=b, g=g, d=d, l=l, k=k, drop=drop)
sol = sc.integrate.solve_ivp(ode_fun, t_span, y0, method='RK45', t_eval=t_eval)
S = interp1d(t_eval, sol.y[0])
V_1 = interp1d(t_eval, sol.y[1])
V_2 = interp1d(t_eval, sol.y[2])
I = interp1d(t_eval, sol.y[3])
S_I = interp1d(t_eval, sol.y[5])
dose_1_efficacy = drop*a
factor = 1 - dose_1_efficacy
if t < lag:
prob1 = (1-np.exp(- b*S_I(t)))*np.exp(-d*t)/(1+r_V1+r_V2+r_I)
prob2 = r_V1*(1-np.exp(-factor*b*S_I(t)))*np.exp(-(k+dose_1_efficacy*l)*t)/(1+r_V1+r_V2+r_I)
prob3 = r_V2*(1-np.exp(-(1-a)*b*S_I(t)))*np.exp(-a*l*t)/(1+r_V1+r_V2+r_I)
# prob4 = r_I*np.exp(-g*t)/(1+r_V1+r_V2+r_I)
prob4 = r_I/(1+r_V1+r_V2+r_I)
test_pos_prob = prob1 + prob2 + prob3 + prob4
# test_pos_prob = (1.0 - S_I(t))
# test_pos_prob = (1.0 - S_I(t))/(1-S(T))
else:
prob1 = (np.exp(- b*S_I(t-lag)) - np.exp(- b * S_I(t)))*np.exp(-d * t)/(1 + r_V1 + r_V2 + r_I)
prob2 = r_V1*(np.exp(-factor*b*S_I(t-lag)) - np.exp(-factor*b*S_I(t)))*np.exp(-(k+dose_1_efficacy*l)*t)/(1+r_V1+r_V2+r_I)
prob3 = r_V2*(np.exp(-(1-a)*b*S_I(t-lag)) - np.exp(-(1-a)*b*S_I(t))) * np.exp(-a*l*t)/(1+r_V1+r_V2+r_I)
# prob4 = r_I * np.exp(-g * t) / (1 + r_V1 + r_V2 + r_I)
prob4 = 0.0
# prob4 = (b*S(t-21)+factor*b*V_1 + (1-a)*b*V_2)*I(t-21)*np.exp(-21*g)
# pressure = S_I(t) - S_I(t-21)
test_pos_prob = prob1 + prob2 + prob3 + prob4
# test_pos_prob = (S_I(t - lag) - S_I(t))
# test_pos_prob = (S_I(t-21) - S_I(t))/(1-S(T))
# if test_pos_prob > 0:
# return test_pos_prob
# else:
# return 0
return test_pos_prob
@classmethod
def binom_likelihood(cls, df_main, theta, drop=0.5):
nDates = df_main.time.size
total_tests = df_main.daily_test.values
daily_pos = df_main.daily_positive.values
T = (df_main.time.max() - df_main.time.min()).days + 1
loglikelihood = 0.0
for d in range(nDates):
test_pos_prob = DSA2.prob_test_positive(d + 1, T, theta=theta, drop=drop)
loglikelihood = loglikelihood + binom.logpmf(daily_pos[d], total_tests[d], max(0,min(test_pos_prob,1)), loc=0)
return -loglikelihood
def children_daily_test_pos_prediction(self, sample=None):
df_main = self.df_main
if sample is None:
sample = self.theta
nDates = df_main.time.size
total_tests = df_main.children_daily_test.values
predicted_test_pos = np.zeros(nDates, dtype=np.int64)
T = (df_main.time.max() - df_main.time.min()).days + 1
for d in range(nDates):
test_pos_prob = DSA2.prob_test_positive(d + 1, T, sample, drop=self.drop)
# print(test_pos_prob)
predicted_test_pos[d] = np.random.binomial(total_tests[d], max(0,min(test_pos_prob,1)), size=1)
return predicted_test_pos
def children_daily_test_pos_prediction_smooth(self, sample=None):
df_main = self.df_main
if sample is None:
sample = self.theta
nDates = df_main.time.size
total_tests = df_main.children_daily_test.values
predicted_test_pos = np.zeros(nDates, dtype=np.int64)
res = np.zeros(nDates)
T = (df_main.time.max() - df_main.time.min()).days + 1
for d in range(nDates):
test_pos_prob = DSA2.prob_test_positive(d + 1, T, sample, drop=self.drop)
# print(test_pos_prob)
predicted_test_pos[d] = np.random.binomial(total_tests[d], max(0,min(test_pos_prob,1)), size=1)
res = | pd.DataFrame(predicted_test_pos, columns=['daily_positive']) | pandas.DataFrame |
import numpy as np
import pytest
from pandas._libs.tslibs.period import IncompatibleFrequency
from pandas.core.dtypes.dtypes import PeriodDtype
import pandas as pd
from pandas import Index, Period, PeriodIndex, Series, date_range, offsets, period_range
import pandas.core.indexes.period as period
import pandas.util.testing as tm
class TestPeriodIndex:
def setup_method(self, method):
pass
def test_construction_base_constructor(self):
# GH 13664
arr = [pd.Period("2011-01", freq="M"), pd.NaT, pd.Period("2011-03", freq="M")]
tm.assert_index_equal(pd.Index(arr), pd.PeriodIndex(arr))
tm.assert_index_equal(pd.Index(np.array(arr)), pd.PeriodIndex(np.array(arr)))
arr = [np.nan, pd.NaT, pd.Period("2011-03", freq="M")]
tm.assert_index_equal(pd.Index(arr), pd.PeriodIndex(arr))
tm.assert_index_equal(pd.Index(np.array(arr)), pd.PeriodIndex(np.array(arr)))
arr = [pd.Period("2011-01", freq="M"), pd.NaT, pd.Period("2011-03", freq="D")]
tm.assert_index_equal(pd.Index(arr), pd.Index(arr, dtype=object))
tm.assert_index_equal(
pd.Index(np.array(arr)), pd.Index(np.array(arr), dtype=object)
)
def test_constructor_use_start_freq(self):
# GH #1118
p = Period("4/2/2012", freq="B")
with tm.assert_produces_warning(FutureWarning):
index = PeriodIndex(start=p, periods=10)
expected = period_range(start="4/2/2012", periods=10, freq="B")
tm.assert_index_equal(index, expected)
index = period_range(start=p, periods=10)
tm.assert_index_equal(index, expected)
def test_constructor_field_arrays(self):
# GH #1264
years = np.arange(1990, 2010).repeat(4)[2:-2]
quarters = np.tile(np.arange(1, 5), 20)[2:-2]
index = PeriodIndex(year=years, quarter=quarters, freq="Q-DEC")
expected = period_range("1990Q3", "2009Q2", freq="Q-DEC")
tm.assert_index_equal(index, expected)
index2 = PeriodIndex(year=years, quarter=quarters, freq="2Q-DEC")
tm.assert_numpy_array_equal(index.asi8, index2.asi8)
index = PeriodIndex(year=years, quarter=quarters)
tm.assert_index_equal(index, expected)
years = [2007, 2007, 2007]
months = [1, 2]
msg = "Mismatched Period array lengths"
with pytest.raises(ValueError, match=msg):
PeriodIndex(year=years, month=months, freq="M")
with pytest.raises(ValueError, match=msg):
PeriodIndex(year=years, month=months, freq="2M")
msg = "Can either instantiate from fields or endpoints, but not both"
with pytest.raises(ValueError, match=msg):
PeriodIndex(
year=years, month=months, freq="M", start=Period("2007-01", freq="M")
)
years = [2007, 2007, 2007]
months = [1, 2, 3]
idx = PeriodIndex(year=years, month=months, freq="M")
exp = period_range("2007-01", periods=3, freq="M")
tm.assert_index_equal(idx, exp)
def test_constructor_U(self):
# U was used as undefined period
with pytest.raises(ValueError, match="Invalid frequency: X"):
period_range("2007-1-1", periods=500, freq="X")
def test_constructor_nano(self):
idx = period_range(
start=Period(ordinal=1, freq="N"), end=Period(ordinal=4, freq="N"), freq="N"
)
exp = PeriodIndex(
[
Period(ordinal=1, freq="N"),
Period(ordinal=2, freq="N"),
Period(ordinal=3, freq="N"),
Period(ordinal=4, freq="N"),
],
freq="N",
)
tm.assert_index_equal(idx, exp)
def test_constructor_arrays_negative_year(self):
years = np.arange(1960, 2000, dtype=np.int64).repeat(4)
quarters = np.tile(np.array([1, 2, 3, 4], dtype=np.int64), 40)
pindex = PeriodIndex(year=years, quarter=quarters)
tm.assert_index_equal(pindex.year, pd.Index(years))
tm.assert_index_equal(pindex.quarter, pd.Index(quarters))
def test_constructor_invalid_quarters(self):
msg = "Quarter must be 1 <= q <= 4"
with pytest.raises(ValueError, match=msg):
PeriodIndex(year=range(2000, 2004), quarter=list(range(4)), freq="Q-DEC")
def test_constructor_corner(self):
msg = "Not enough parameters to construct Period range"
with pytest.raises(ValueError, match=msg):
PeriodIndex(periods=10, freq="A")
start = Period("2007", freq="A-JUN")
end = Period("2010", freq="A-DEC")
msg = "start and end must have same freq"
with pytest.raises(ValueError, match=msg):
PeriodIndex(start=start, end=end)
msg = (
"Of the three parameters: start, end, and periods, exactly two"
" must be specified"
)
with pytest.raises(ValueError, match=msg):
PeriodIndex(start=start)
with pytest.raises(ValueError, match=msg):
PeriodIndex(end=end)
result = period_range("2007-01", periods=10.5, freq="M")
exp = period_range("2007-01", periods=10, freq="M")
tm.assert_index_equal(result, exp)
def test_constructor_fromarraylike(self):
idx = period_range("2007-01", periods=20, freq="M")
# values is an array of Period, thus can retrieve freq
tm.assert_index_equal(PeriodIndex(idx.values), idx)
tm.assert_index_equal(PeriodIndex(list(idx.values)), idx)
msg = "freq not specified and cannot be inferred"
with pytest.raises(ValueError, match=msg):
PeriodIndex(idx._ndarray_values)
with pytest.raises(ValueError, match=msg):
PeriodIndex(list(idx._ndarray_values))
msg = "'Period' object is not iterable"
with pytest.raises(TypeError, match=msg):
PeriodIndex(data=Period("2007", freq="A"))
result = PeriodIndex(iter(idx))
tm.assert_index_equal(result, idx)
result = PeriodIndex(idx)
tm.assert_index_equal(result, idx)
result = PeriodIndex(idx, freq="M")
tm.assert_index_equal(result, idx)
result = PeriodIndex(idx, freq=offsets.MonthEnd())
tm.assert_index_equal(result, idx)
assert result.freq == "M"
result = PeriodIndex(idx, freq="2M")
tm.assert_index_equal(result, idx.asfreq("2M"))
assert result.freq == "2M"
result = PeriodIndex(idx, freq=offsets.MonthEnd(2))
tm.assert_index_equal(result, idx.asfreq("2M"))
assert result.freq == "2M"
result = PeriodIndex(idx, freq="D")
exp = idx.asfreq("D", "e")
tm.assert_index_equal(result, exp)
def test_constructor_datetime64arr(self):
vals = np.arange(100000, 100000 + 10000, 100, dtype=np.int64)
vals = vals.view(np.dtype("M8[us]"))
msg = r"Wrong dtype: datetime64\[us\]"
with pytest.raises(ValueError, match=msg):
PeriodIndex(vals, freq="D")
@pytest.mark.parametrize("box", [None, "series", "index"])
def test_constructor_datetime64arr_ok(self, box):
# https://github.com/pandas-dev/pandas/issues/23438
data = pd.date_range("2017", periods=4, freq="M")
if box is None:
data = data._values
elif box == "series":
data = pd.Series(data)
result = PeriodIndex(data, freq="D")
expected = PeriodIndex(
["2017-01-31", "2017-02-28", "2017-03-31", "2017-04-30"], freq="D"
)
tm.assert_index_equal(result, expected)
def test_constructor_dtype(self):
# passing a dtype with a tz should localize
idx = PeriodIndex(["2013-01", "2013-03"], dtype="period[M]")
exp = PeriodIndex(["2013-01", "2013-03"], freq="M")
tm.assert_index_equal(idx, exp)
assert idx.dtype == "period[M]"
idx = PeriodIndex(["2013-01-05", "2013-03-05"], dtype="period[3D]")
exp = PeriodIndex(["2013-01-05", "2013-03-05"], freq="3D")
tm.assert_index_equal(idx, exp)
assert idx.dtype == "period[3D]"
# if we already have a freq and its not the same, then asfreq
# (not changed)
idx = PeriodIndex(["2013-01-01", "2013-01-02"], freq="D")
res = PeriodIndex(idx, dtype="period[M]")
exp = PeriodIndex(["2013-01", "2013-01"], freq="M")
tm.assert_index_equal(res, exp)
assert res.dtype == "period[M]"
res = PeriodIndex(idx, freq="M")
tm.assert_index_equal(res, exp)
assert res.dtype == "period[M]"
msg = "specified freq and dtype are different"
with pytest.raises(period.IncompatibleFrequency, match=msg):
PeriodIndex(["2011-01"], freq="M", dtype="period[D]")
def test_constructor_empty(self):
idx = pd.PeriodIndex([], freq="M")
assert isinstance(idx, PeriodIndex)
assert len(idx) == 0
assert idx.freq == "M"
with pytest.raises(ValueError, match="freq not specified"):
pd.PeriodIndex([])
def test_constructor_pi_nat(self):
idx = PeriodIndex(
[Period("2011-01", freq="M"), pd.NaT, Period("2011-01", freq="M")]
)
exp = PeriodIndex(["2011-01", "NaT", "2011-01"], freq="M")
tm.assert_index_equal(idx, exp)
idx = PeriodIndex(
np.array([Period("2011-01", freq="M"), pd.NaT, Period("2011-01", freq="M")])
)
tm.assert_index_equal(idx, exp)
idx = PeriodIndex(
[pd.NaT, pd.NaT, Period("2011-01", freq="M"), Period("2011-01", freq="M")]
)
exp = PeriodIndex(["NaT", "NaT", "2011-01", "2011-01"], freq="M")
tm.assert_index_equal(idx, exp)
idx = PeriodIndex(
np.array(
[
pd.NaT,
pd.NaT,
Period("2011-01", freq="M"),
Period("2011-01", freq="M"),
]
)
)
tm.assert_index_equal(idx, exp)
idx = PeriodIndex([pd.NaT, pd.NaT, "2011-01", "2011-01"], freq="M")
tm.assert_index_equal(idx, exp)
with pytest.raises(ValueError, match="freq not specified"):
PeriodIndex([pd.NaT, pd.NaT])
with pytest.raises(ValueError, match="freq not specified"):
PeriodIndex(np.array([pd.NaT, pd.NaT]))
with pytest.raises(ValueError, match="freq not specified"):
PeriodIndex(["NaT", "NaT"])
with pytest.raises(ValueError, match="freq not specified"):
PeriodIndex(np.array(["NaT", "NaT"]))
def test_constructor_incompat_freq(self):
msg = "Input has different freq=D from PeriodIndex\\(freq=M\\)"
with pytest.raises(period.IncompatibleFrequency, match=msg):
PeriodIndex(
[Period("2011-01", freq="M"), pd.NaT, Period("2011-01", freq="D")]
)
with pytest.raises(period.IncompatibleFrequency, match=msg):
PeriodIndex(
np.array(
[Period("2011-01", freq="M"), pd.NaT, Period("2011-01", freq="D")]
)
)
# first element is pd.NaT
with pytest.raises(period.IncompatibleFrequency, match=msg):
PeriodIndex(
[pd.NaT, Period("2011-01", freq="M"), Period("2011-01", freq="D")]
)
with pytest.raises(period.IncompatibleFrequency, match=msg):
PeriodIndex(
np.array(
[pd.NaT, Period("2011-01", freq="M"), Period("2011-01", freq="D")]
)
)
def test_constructor_mixed(self):
idx = PeriodIndex(["2011-01", pd.NaT, Period("2011-01", freq="M")])
exp = PeriodIndex(["2011-01", "NaT", "2011-01"], freq="M")
tm.assert_index_equal(idx, exp)
idx = PeriodIndex(["NaT", pd.NaT, Period("2011-01", freq="M")])
exp = PeriodIndex(["NaT", "NaT", "2011-01"], freq="M")
tm.assert_index_equal(idx, exp)
idx = PeriodIndex([Period("2011-01-01", freq="D"), pd.NaT, "2012-01-01"])
exp = PeriodIndex(["2011-01-01", "NaT", "2012-01-01"], freq="D")
tm.assert_index_equal(idx, exp)
def test_constructor_simple_new(self):
idx = period_range("2007-01", name="p", periods=2, freq="M")
result = idx._simple_new(idx, name="p", freq=idx.freq)
tm.assert_index_equal(result, idx)
result = idx._simple_new(idx.astype("i8"), name="p", freq=idx.freq)
tm.assert_index_equal(result, idx)
def test_constructor_simple_new_empty(self):
# GH13079
idx = PeriodIndex([], freq="M", name="p")
result = idx._simple_new(idx, name="p", freq="M")
tm.assert_index_equal(result, idx)
@pytest.mark.parametrize("floats", [[1.1, 2.1], np.array([1.1, 2.1])])
def test_constructor_floats(self, floats):
msg = r"PeriodIndex\._simple_new does not accept floats"
with pytest.raises(TypeError, match=msg):
pd.PeriodIndex._simple_new(floats, freq="M")
msg = "PeriodIndex does not allow floating point in construction"
with pytest.raises(TypeError, match=msg):
pd.PeriodIndex(floats, freq="M")
def test_constructor_nat(self):
msg = "start and end must not be NaT"
with pytest.raises(ValueError, match=msg):
period_range(start="NaT", end="2011-01-01", freq="M")
with pytest.raises(ValueError, match=msg):
period_range(start="2011-01-01", end="NaT", freq="M")
def test_constructor_year_and_quarter(self):
year = pd.Series([2001, 2002, 2003])
quarter = year - 2000
idx = PeriodIndex(year=year, quarter=quarter)
strs = ["%dQ%d" % t for t in zip(quarter, year)]
lops = list(map(Period, strs))
p = PeriodIndex(lops)
tm.assert_index_equal(p, idx)
@pytest.mark.parametrize(
"func, warning", [(PeriodIndex, FutureWarning), (period_range, None)]
)
def test_constructor_freq_mult(self, func, warning):
# GH #7811
with tm.assert_produces_warning(warning):
# must be the same, but for sure...
pidx = func(start="2014-01", freq="2M", periods=4)
expected = PeriodIndex(["2014-01", "2014-03", "2014-05", "2014-07"], freq="2M")
tm.assert_index_equal(pidx, expected)
with tm.assert_produces_warning(warning):
pidx = func(start="2014-01-02", end="2014-01-15", freq="3D")
expected = PeriodIndex(
["2014-01-02", "2014-01-05", "2014-01-08", "2014-01-11", "2014-01-14"],
freq="3D",
)
tm.assert_index_equal(pidx, expected)
with tm.assert_produces_warning(warning):
pidx = func(end="2014-01-01 17:00", freq="4H", periods=3)
expected = PeriodIndex(
["2014-01-01 09:00", "2014-01-01 13:00", "2014-01-01 17:00"], freq="4H"
)
tm.assert_index_equal(pidx, expected)
msg = "Frequency must be positive, because it" " represents span: -1M"
with pytest.raises(ValueError, match=msg):
PeriodIndex(["2011-01"], freq="-1M")
msg = "Frequency must be positive, because it" " represents span: 0M"
with pytest.raises(ValueError, match=msg):
PeriodIndex(["2011-01"], freq="0M")
msg = "Frequency must be positive, because it" " represents span: 0M"
with pytest.raises(ValueError, match=msg):
period_range("2011-01", periods=3, freq="0M")
@pytest.mark.parametrize("freq", ["A", "M", "D", "T", "S"])
@pytest.mark.parametrize("mult", [1, 2, 3, 4, 5])
def test_constructor_freq_mult_dti_compat(self, mult, freq):
freqstr = str(mult) + freq
pidx = period_range(start="2014-04-01", freq=freqstr, periods=10)
expected = date_range(start="2014-04-01", freq=freqstr, periods=10).to_period(
freqstr
)
tm.assert_index_equal(pidx, expected)
def test_constructor_freq_combined(self):
for freq in ["1D1H", "1H1D"]:
pidx = PeriodIndex(["2016-01-01", "2016-01-02"], freq=freq)
expected = PeriodIndex(["2016-01-01 00:00", "2016-01-02 00:00"], freq="25H")
for freq in ["1D1H", "1H1D"]:
pidx = period_range(start="2016-01-01", periods=2, freq=freq)
expected = PeriodIndex(["2016-01-01 00:00", "2016-01-02 01:00"], freq="25H")
tm.assert_index_equal(pidx, expected)
def test_constructor_range_based_deprecated(self):
with tm.assert_produces_warning(FutureWarning):
pi = PeriodIndex(freq="A", start="1/1/2001", end="12/1/2009")
assert len(pi) == 9
def test_constructor_range_based_deprecated_different_freq(self):
with tm.assert_produces_warning(FutureWarning) as m:
PeriodIndex(start="2000", periods=2)
warning, = m
assert 'freq="A-DEC"' in str(warning.message)
def test_constructor(self):
pi = period_range(freq="A", start="1/1/2001", end="12/1/2009")
assert len(pi) == 9
pi = period_range(freq="Q", start="1/1/2001", end="12/1/2009")
assert len(pi) == 4 * 9
pi = period_range(freq="M", start="1/1/2001", end="12/1/2009")
assert len(pi) == 12 * 9
pi = period_range(freq="D", start="1/1/2001", end="12/31/2009")
assert len(pi) == 365 * 9 + 2
pi = period_range(freq="B", start="1/1/2001", end="12/31/2009")
assert len(pi) == 261 * 9
pi = period_range(freq="H", start="1/1/2001", end="12/31/2001 23:00")
assert len(pi) == 365 * 24
pi = period_range(freq="Min", start="1/1/2001", end="1/1/2001 23:59")
assert len(pi) == 24 * 60
pi = period_range(freq="S", start="1/1/2001", end="1/1/2001 23:59:59")
assert len(pi) == 24 * 60 * 60
start = Period("02-Apr-2005", "B")
i1 = period_range(start=start, periods=20)
assert len(i1) == 20
assert i1.freq == start.freq
assert i1[0] == start
end_intv = Period("2006-12-31", "W")
i1 = period_range(end=end_intv, periods=10)
assert len(i1) == 10
assert i1.freq == end_intv.freq
assert i1[-1] == end_intv
end_intv = Period("2006-12-31", "1w")
i2 = period_range(end=end_intv, periods=10)
assert len(i1) == len(i2)
assert (i1 == i2).all()
assert i1.freq == i2.freq
end_intv = Period("2006-12-31", ("w", 1))
i2 = period_range(end=end_intv, periods=10)
assert len(i1) == len(i2)
assert (i1 == i2).all()
assert i1.freq == i2.freq
end_intv = Period("2005-05-01", "B")
i1 = period_range(start=start, end=end_intv)
# infer freq from first element
i2 = PeriodIndex([end_intv, Period("2005-05-05", "B")])
assert len(i2) == 2
assert i2[0] == end_intv
i2 = PeriodIndex(np.array([end_intv, Period("2005-05-05", "B")]))
assert len(i2) == 2
assert i2[0] == end_intv
# Mixed freq should fail
vals = [end_intv, Period("2006-12-31", "w")]
msg = r"Input has different freq=W-SUN from PeriodIndex\(freq=B\)"
with pytest.raises(IncompatibleFrequency, match=msg):
| PeriodIndex(vals) | pandas.PeriodIndex |
#coding=utf-8
import pandas as pd
import numpy as np
import sys
import os
from sklearn import preprocessing
import datetime
import scipy as sc
from sklearn.preprocessing import MinMaxScaler,StandardScaler
from sklearn.externals import joblib
#import joblib
import FEsingle  # local feature-engineering helper module used by the classes below
class FEbase(object):
"""description of class"""
def __init__(self, **kwargs):
pass
def create(self,*DataSetName):
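# build the feature CSV for this subclass if it is not cached yet: the output path
# is <input dir>/<input name>_<class name><ext>, the actual feature engineering is
# delegated to self.core(), and the output path is returned either way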
#print (self.__class__.__name__)
(filepath, tempfilename) = os.path.split(DataSetName[0])
(filename, extension) = os.path.splitext(tempfilename)
#bufferstring='savetest2017.csv'
bufferstringoutput=filepath+'/'+filename+'_'+self.__class__.__name__+extension
if(os.path.exists(bufferstringoutput)==False):
#df_all=pd.read_csv(bufferstring,index_col=0,header=0,nrows=100000)
df_all=self.core(DataSetName)
df_all.to_csv(bufferstringoutput)
return bufferstringoutput
def core(self,df_all,Data_adj_name=''):
return df_all
def real_FE():
return 0
class FEg30eom0110network(FEbase):
#this version switches to a 3-day prediction
def __init__(self):
pass
def core(self,DataSetName):
intflag=True
df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
df_limit_all=pd.read_csv(DataSetName[2],index_col=0,header=0)
df_long_all=pd.read_csv(DataSetName[4],index_col=0,header=0)
df_all=pd.merge(df_data, df_adj_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_limit_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='inner', on=['ts_code','trade_date'])
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
df_all['limit_percent']=df_all['down_limit']/df_all['up_limit']
#flag ST or otherwise abnormal stocks
df_all['st_or_otherwrong']=0
df_all.loc[(df_all['limit_percent']<0.85) & (0.58<df_all['limit_percent']),'st_or_otherwrong']=1
df_all.drop(['up_limit','down_limit','limit_percent'],axis=1,inplace=True)
##exclude STAR Market (688) listings
#print(df_all)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
#price after applying the adjustment factor
df_all['real_price']=df_all['close']*df_all['adj_factor']
#df_all['real_open']=df_all['adj_factor']*df_all['open']
#===================================================================================================================================#
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
if(intflag):
df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
if(intflag):
df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
if(intflag):
df_all['ps_ttm']=df_all['ps_ttm']*10//1
#===================================================================================================================================#
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min',True)
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'min',True)
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max',True)
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'max',True)
df_all,_=FEsingle.HighLowRange(df_all,8,True)
df_all,_=FEsingle.HighLowRange(df_all,25,True)
df_all.drop(['change','vol'],axis=1,inplace=True)
#===================================================================================================================================#
#df_all['mvadj']=1
#df_all.loc[df_all['total_mv_rank']<11,'mvadj']=0.9
#df_all.loc[df_all['total_mv_rank']<7,'mvadj']=0.85
#df_all.loc[df_all['total_mv_rank']<4,'mvadj']=0.6
#df_all.loc[df_all['total_mv_rank']<2,'mvadj']=0.45
#df_all.loc[df_all['total_mv_rank']<1,'mvadj']=0.35
#Flag limit-up / trading-halt days
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
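# pct_chg > 9.4 approximates a limit-up close on +-10% boards; such rows are dropped later,
# presumably because the stock could not actually have been bought at that point.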
#df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
###Real price range (distinguishes high- vs low-priced stocks)
#df_all['price_real_rank']=df_all.groupby('trade_date')['pre_close'].rank(pct=True)
#df_all['price_real_rank']=df_all['price_real_rank']*10//1
#1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
if(intflag):
df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
if(intflag):
df_all['pct_chg_abs_rank']=df_all['pct_chg_abs_rank']*10//2
df_all=FEsingle.PctChgAbsSumRank(df_all,6,True)
df_all=FEsingle.PctChgSumRank(df_all,3,True)
df_all=FEsingle.PctChgSumRank(df_all,6,True)
df_all=FEsingle.PctChgSumRank(df_all,12,True)
df_all=FEsingle.AmountChgRank(df_all,12,True)
#df_all=FEsingle.AmountChgRank(df_all,30)
#Rank the three price ratios (open/high/low vs. previous close)
dolist=['open','high','low']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
if(intflag):
df_all[curc]=df_all[curc]*9.9//2
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],2)
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],3)
df_all.drop(['pre_close','adj_factor','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
df_all=FEsingle.PredictDaysTrend(df_all,5)
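# FEsingle.PredictDaysTrend (defined elsewhere in this project) presumably builds the label
# (e.g. 'tomorrow_chg_rank') from the return over the next 5 trading days.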
#df_all['tomorrow_chg_rank'] = np.random.randint(0, 10, df_all.shape[0])
#df_all.drop(['mvadj'],axis=1,inplace=True)
df_all.drop(['pct_chg'],axis=1,inplace=True)
#Drop stocks whose price is too low
df_all=df_all[df_all['close']>3]
#df_all=df_all[df_all['8_pct_rank_min']>0.1]
#df_all=df_all[df_all['25_pct_rank_max']>0.1]
#df_all=df_all[df_all['total_mv_rank']>18]
#df_all=df_all[df_all['total_mv_rank']>2]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['circ_mv_pct']>3]
#df_all=df_all[df_all['ps_ttm']>3]
#df_all=df_all[df_all['pb_rank']>3]
#Columns not used for now
df_all=df_all[df_all['high_stop']==0]
df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop','amount','close','real_price'],axis=1,inplace=True)
df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
print(df_all)
df_all=df_all.reset_index(drop=True)
return df_all
class FEg30eom0110onlinew6d(FEbase):
#This version was changed to a 3-day prediction
def __init__(self):
pass
def core(self,DataSetName):
df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
df_limit_all=pd.read_csv(DataSetName[2],index_col=0,header=0)
df_money_all=pd.read_csv(DataSetName[3],index_col=0,header=0)
df_long_all=pd.read_csv(DataSetName[4],index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
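# sm_amount / lg_amount are the net small-order and large-order money flows (buy minus sell);
# the remaining raw moneyflow columns are dropped above.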
print(df_money_all)
df_all=pd.merge(df_data, df_adj_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_limit_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='inner', on=['ts_code','trade_date'])
df_all['sm_amount_pos']=df_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['lg_amount_pos']=df_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['net_mf_amount_pos']=df_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['sm_amount_pos']=df_all.groupby('ts_code')['sm_amount_pos'].shift(1)
df_all['lg_amount_pos']=df_all.groupby('ts_code')['lg_amount_pos'].shift(1)
df_all['net_mf_amount_pos']=df_all.groupby('ts_code')['net_mf_amount_pos'].shift(1)
df_all['sm_amount']=df_all.groupby('ts_code')['sm_amount'].shift(1)
df_all['lg_amount']=df_all.groupby('ts_code')['lg_amount'].shift(1)
df_all['net_mf_amount']=df_all.groupby('ts_code')['net_mf_amount'].shift(1)
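# All moneyflow features are lagged one day per stock so that only information available
# before the prediction day enters the feature set.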
df_all=pd.merge(df_all, df_long_all, how='inner', on=['ts_code','trade_date'])
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
df_all['limit_percent']=df_all['down_limit']/df_all['up_limit']
#Flag ST (special treatment) or otherwise abnormal stocks
df_all['st_or_otherwrong']=0
df_all.loc[(df_all['limit_percent']<0.85) & (0.58<df_all['limit_percent']),'st_or_otherwrong']=1
df_all.drop(['up_limit','down_limit','limit_percent'],axis=1,inplace=True)
##Exclude STAR Market (688-prefixed) stocks
#print(df_all)
#df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
#Price adjusted by the adjustment factor
df_all['real_price']=df_all['close']*df_all['adj_factor']
#df_all['real_open']=df_all['adj_factor']*df_all['open']
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
#===================================================================================================================================#
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'max')
df_all,_=FEsingle.HighLowRange(df_all,8)
df_all,_=FEsingle.HighLowRange(df_all,25)
df_all.drop(['change','vol'],axis=1,inplace=True)
#===================================================================================================================================#
#df_all['mvadj']=1
#df_all.loc[df_all['total_mv_rank']<11,'mvadj']=0.9
#df_all.loc[df_all['total_mv_rank']<7,'mvadj']=0.85
#df_all.loc[df_all['total_mv_rank']<4,'mvadj']=0.6
#df_all.loc[df_all['total_mv_rank']<2,'mvadj']=0.45
#df_all.loc[df_all['total_mv_rank']<1,'mvadj']=0.35
#Flag limit-up / trading-halt days
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
#df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
###Real price range (distinguishes high- vs low-priced stocks)
#df_all['price_real_rank']=df_all.groupby('trade_date')['pre_close'].rank(pct=True)
#df_all['price_real_rank']=df_all['price_real_rank']*10//1
#1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
#df_all=FEsingle.PctChgAbsSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.AmountChgRank(df_all,12)
#df_all=FEsingle.AmountChgRank(df_all,30)
#Rank the three price ratios (open/high/low vs. previous close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*9.9//2
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12','real_price_pos'],1)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],3)
df_all.drop(['pre_close','adj_factor','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
df_all=FEsingle.PredictDaysTrend(df_all,5)
#df_all['tomorrow_chg_rank'] = np.random.randint(0, 10, df_all.shape[0])
#df_all.drop(['mvadj'],axis=1,inplace=True)
df_all.drop(['pct_chg'],axis=1,inplace=True)
#Drop stocks whose price is too low
df_all=df_all[df_all['close']>3]
#df_all=df_all[df_all['8_pct_rank_min']>0.1]
#df_all=df_all[df_all['25_pct_rank_max']>0.1]
#df_all=df_all[df_all['total_mv_rank']>18]
#df_all=df_all[df_all['total_mv_rank']>2]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['circ_mv_pct']>3]
#df_all=df_all[df_all['ps_ttm']>3]
#df_all=df_all[df_all['pb_rank']>3]
#Columns not used for now
df_all=df_all[df_all['high_stop']==0]
df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop','amount','close','real_price'],axis=1,inplace=True)
df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
print(df_all)
df_all=df_all.reset_index(drop=True)
return df_all
class FE_a23(FEbase):
#This version was changed to a 3-day prediction
def __init__(self):
pass
def core(self,DataSetName):
df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
df_limit_all=pd.read_csv(DataSetName[2],index_col=0,header=0)
df_money_all=pd.read_csv(DataSetName[3],index_col=0,header=0)
df_long_all=pd.read_csv(DataSetName[4],index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos']
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos']
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos']
#df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
#df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
#df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_money_all=FEsingle.InputChgSum(df_money_all,5,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'net_mf_amount')
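# FEsingle.InputChgSum presumably accumulates each moneyflow series over rolling 5-, 12- and
# 25-day windows, giving short-, mid- and long-horizon flow features.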
print(df_money_all)
df_all=pd.merge(df_data, df_adj_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_limit_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='inner', on=['ts_code','trade_date'])
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
df_all['limit_percent']=df_all['down_limit']/df_all['up_limit']
#Flag ST (special treatment) or otherwise abnormal stocks
df_all['st_or_otherwrong']=0
df_all.loc[(df_all['limit_percent']<0.85) & (0.58<df_all['limit_percent']),'st_or_otherwrong']=1
df_all.drop(['up_limit','down_limit','limit_percent'],axis=1,inplace=True)
df_all['dayofweek']=pd.to_datetime(df_all['trade_date'],format='%Y%m%d')
df_all['dayofweek']=df_all['dayofweek'].dt.dayofweek
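# Calendar feature: pandas dt.dayofweek encodes Monday as 0 through Sunday as 6.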
##Exclude STAR Market (688-prefixed) stocks
#print(df_all)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
#Price adjusted by the adjustment factor
df_all['real_price']=df_all['close']*df_all['adj_factor']
#df_all['real_open']=df_all['adj_factor']*df_all['open']
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
#===================================================================================================================================#
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'max')
df_all,_=FEsingle.HighLowRange(df_all,5)
df_all,_=FEsingle.HighLowRange(df_all,12)
df_all,_=FEsingle.HighLowRange(df_all,25)
df_all.drop(['change','vol'],axis=1,inplace=True)
#===================================================================================================================================#
#df_all['mvadj']=1
#df_all.loc[df_all['total_mv_rank']<11,'mvadj']=0.9
#df_all.loc[df_all['total_mv_rank']<7,'mvadj']=0.85
#df_all.loc[df_all['total_mv_rank']<4,'mvadj']=0.6
#df_all.loc[df_all['total_mv_rank']<2,'mvadj']=0.45
#df_all.loc[df_all['total_mv_rank']<1,'mvadj']=0.35
#Flag limit-up / trading-halt days
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
#df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
###Real price range (distinguishes high- vs low-priced stocks)
#df_all['price_real_rank']=df_all.groupby('trade_date')['pre_close'].rank(pct=True)
#df_all['price_real_rank']=df_all['price_real_rank']*10//1
#1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgAbsSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSumRank(df_all,24)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.PctChgSum(df_all,24)
#df_all=FEsingle.AmountChgRank(df_all,12)
#df_all=FEsingle.AmountChgRank(df_all,30)
#Rank the three price ratios (open/high/low vs. previous close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*9.9//2
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','real_price_pos'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],3)
df_all.drop(['pre_close','adj_factor','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
df_all=FEsingle.PredictDaysTrend(df_all,5)
#df_all['tomorrow_chg_rank'] = np.random.randint(0, 10, df_all.shape[0])
#df_all.drop(['mvadj'],axis=1,inplace=True)
df_all.drop(['pct_chg'],axis=1,inplace=True)
#Drop stocks whose price is too low
df_all=df_all[df_all['close']>2]
#df_all=df_all[df_all['8_pct_rank_min']>0.1]
#df_all=df_all[df_all['25_pct_rank_max']>0.1]
#df_all=df_all[df_all['total_mv_rank']>18]
#df_all=df_all[df_all['total_mv_rank']>2]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['circ_mv_pct']>3]
#df_all=df_all[df_all['ps_ttm']>3]
#df_all=df_all[df_all['pb_rank']>3]
#Columns not used for now
df_all=df_all[df_all['high_stop']==0]
df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop','amount','close','real_price'],axis=1,inplace=True)
df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
print(df_all)
df_all=df_all.reset_index(drop=True)
return df_all
def real_FE(self):
#Planned version for the new model
df_data=pd.read_csv('real_now.csv',index_col=0,header=0)
df_adj_all=pd.read_csv('real_adj_now.csv',index_col=0,header=0)
df_money_all=pd.read_csv('real_moneyflow_now.csv',index_col=0,header=0)
df_long_all=pd.read_csv('real_long_now.csv',index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos'].shift(1)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos'].shift(1)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos'].shift(1)
df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_all=pd.merge(df_data, df_adj_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='left', on=['ts_code','trade_date'])
print(df_all)
#df_all.drop(['turnover_rate','volume_ratio','pe','pb'],axis=1,inplace=True)
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
#Open question here (the 688 exclusion below is commented out)
#df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
#df_all=pd.read_csv(bufferstring,index_col=0,header=0,nrows=100000)
#df_all.drop(['change','vol'],axis=1,inplace=True)
df_all['ts_code'] = df_all['ts_code'].astype('str') #convert the original int dtype to string
df_all['ts_code'] = df_all['ts_code'].str.zfill(6) #zero-pad to 6 digits; the .str accessor is required here
print(df_all)
##Exclude STAR Market (688-prefixed) stocks
#print(df_all)
df_all[["ts_code"]]=df_all[["ts_code"]].astype(str)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
#Price adjusted by the adjustment factor
df_all['adj_factor']=df_all['adj_factor'].fillna(0)
df_all['real_price']=df_all['close']*df_all['adj_factor']
df_all['real_price']=df_all.groupby('ts_code')['real_price'].shift(1)
df_all['real_price']=df_all['real_price']*(1+df_all['pct_chg']/100)
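# Live-data quirk: adj_factor is filled with 0 where missing, and the latest adjusted price is
# rebuilt from the previous day's close*adj_factor scaled by (1 + today's pct_chg/100), so the
# newest row does not depend on today's adjustment factor being available.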
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'max')
df_all,_=FEsingle.HighLowRange(df_all,8)
df_all,_=FEsingle.HighLowRange(df_all,25)
#===================================================================================================================================#
#Flag limit-up / trading-halt days
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
#1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.AmountChgRank(df_all,12)
#Rank the three price ratios (open/high/low vs. previous close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*10//2
#df_all=FEsingle.PctChgSumRank_Common(df_all,5,'high')
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12','real_price_pos'],1)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],3)
#Drop stocks with too-low price / turnover
df_all=df_all[df_all['close']>3]
#df_all=df_all[df_all['chg_rank']>0.7]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['total_mv_rank']<12]
df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price','amount','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#Columns not used for now
df_all=df_all[df_all['high_stop']==0]
#df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop'],axis=1,inplace=True)
#df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
month_sec=df_all['trade_date'].max()
df_all=df_all[df_all['trade_date']==month_sec]
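# Keep only the most recent trading day so the exported today_train.csv contains just the
# rows needed for live scoring.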
print(df_all)
df_all=df_all.reset_index(drop=True)
df_all.to_csv('today_train.csv')
dwdw=1 #no-op placeholder, probably left in as a debugger breakpoint anchor
class FE_a29(FEbase):
#This version was changed to a 3-day prediction
def __init__(self):
pass
def core(self,DataSetName):
df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
df_limit_all=pd.read_csv(DataSetName[2],index_col=0,header=0)
df_money_all=pd.read_csv(DataSetName[3],index_col=0,header=0)
df_long_all=pd.read_csv(DataSetName[4],index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
#df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos']
#df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos']
#df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos']
#df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
#df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
#df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_money_all=FEsingle.InputChgSum(df_money_all,5,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,5,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,12,'net_mf_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'sm_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'lg_amount')
df_money_all=FEsingle.InputChgSum(df_money_all,25,'net_mf_amount')
#df_money_all['sm_amount_25_diff']=df_money_all['sm_amount_25']-df_money_all['sm_amount_12']
#df_money_all['sm_amount_12_diff']=df_money_all['sm_amount_12']-df_money_all['sm_amount_5']
#df_money_all['lg_amount_25_diff']=df_money_all['lg_amount_25']-df_money_all['lg_amount_12']
#df_money_all['lg_amount_12_diff']=df_money_all['lg_amount_12']-df_money_all['lg_amount_5']
#df_money_all['net_mf_amount_25_diff']=df_money_all['net_mf_amount_25']-df_money_all['net_mf_amount_12']
#df_money_all['net_mf_amount_12_diff']=df_money_all['net_mf_amount_12']-df_money_all['net_mf_amount_5']
print(df_money_all)
df_all=pd.merge(df_data, df_adj_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_limit_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='inner', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='inner', on=['ts_code','trade_date'])
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
df_all['limit_percent']=df_all['down_limit']/df_all['up_limit']
#Flag ST (special treatment) or otherwise abnormal stocks
df_all['st_or_otherwrong']=0
df_all.loc[(df_all['limit_percent']<0.85) & (0.58<df_all['limit_percent']),'st_or_otherwrong']=1
df_all.drop(['up_limit','down_limit','limit_percent'],axis=1,inplace=True)
df_all['dayofweek']=pd.to_datetime(df_all['trade_date'],format='%Y%m%d')
df_all['dayofweek']=df_all['dayofweek'].dt.dayofweek
##Exclude STAR Market (688-prefixed) stocks
#print(df_all)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
#Price adjusted by the adjustment factor
df_all['real_price']=df_all['close']*df_all['adj_factor']
#df_all['real_open']=df_all['adj_factor']*df_all['open']
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
#===================================================================================================================================#
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,12,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,5,'max')
df_all,_=FEsingle.HighLowRange(df_all,5)
df_all,_=FEsingle.HighLowRange(df_all,12)
df_all,_=FEsingle.HighLowRange(df_all,25)
df_all['25_pct_rank_min_diff']=df_all['25_pct_rank_min']-df_all['12_pct_rank_min']
df_all['12_pct_rank_min_diff']=df_all['12_pct_rank_min']-df_all['5_pct_rank_min']
df_all['25_pct_rank_max_diff']=df_all['25_pct_rank_max']-df_all['12_pct_rank_max']
df_all['12_pct_rank_max_diff']=df_all['12_pct_rank_max']-df_all['5_pct_rank_max']
df_all['25_pct_Rangerank_diff']=df_all['25_pct_Rangerank']-df_all['12_pct_Rangerank']
df_all['12_pct_Rangerank_diff']=df_all['12_pct_Rangerank']-df_all['5_pct_Rangerank']
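# The *_diff features measure how the 25-, 12- and 5-day closeness-to-high/low ranks diverge,
# i.e. whether the short-term position within the range differs from the longer-term one.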
df_all.drop(['change','vol'],axis=1,inplace=True)
#===================================================================================================================================#
#df_all['mvadj']=1
#df_all.loc[df_all['total_mv_rank']<11,'mvadj']=0.9
#df_all.loc[df_all['total_mv_rank']<7,'mvadj']=0.85
#df_all.loc[df_all['total_mv_rank']<4,'mvadj']=0.6
#df_all.loc[df_all['total_mv_rank']<2,'mvadj']=0.45
#df_all.loc[df_all['total_mv_rank']<1,'mvadj']=0.35
#Flag limit-up / trading-halt days
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
#df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
###Real price range (distinguishes high- vs low-priced stocks)
#df_all['price_real_rank']=df_all.groupby('trade_date')['pre_close'].rank(pct=True)
#df_all['price_real_rank']=df_all['price_real_rank']*10//1
#1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgAbsSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSumRank(df_all,24)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.PctChgSum(df_all,24)
df_all['chg_rank_24_diff']=df_all['chg_rank_24']-df_all['chg_rank_12']
df_all['chg_rank_12_diff']=df_all['chg_rank_12']-df_all['chg_rank_6']
df_all['chg_rank_6_diff']=df_all['chg_rank_6']-df_all['chg_rank_3']
df_all['pct_chg_24_diff']=df_all['pct_chg_24']-df_all['pct_chg_12']
df_all['pct_chg_12_diff']=df_all['pct_chg_12']-df_all['pct_chg_6']
df_all['pct_chg_6_diff']=df_all['pct_chg_6']-df_all['pct_chg_3']
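# Momentum-difference features: gaps between the 24/12/6/3-day return sums and their ranks
# capture acceleration or deceleration of the recent trend.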
#df_all=FEsingle.AmountChgRank(df_all,12)
#df_all=FEsingle.AmountChgRank(df_all,30)
#Rank the three price ratios (open/high/low vs. previous close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*9.9//2
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','real_price_pos'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],1)
df_all=FEsingle.OldFeaturesRank(df_all,['sm_amount','lg_amount','net_mf_amount'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12'],3)
df_all.drop(['pre_close','adj_factor','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
df_all=FEsingle.PredictDaysTrend(df_all,5)
#df_all['tomorrow_chg_rank'] = np.random.randint(0, 10, df_all.shape[0])
#df_all.drop(['mvadj'],axis=1,inplace=True)
df_all.drop(['pct_chg'],axis=1,inplace=True)
#Drop stocks whose price is too low
df_all=df_all[df_all['close']>2]
#df_all=df_all[df_all['8_pct_rank_min']>0.1]
#df_all=df_all[df_all['25_pct_rank_max']>0.1]
#df_all=df_all[df_all['total_mv_rank']>15]
#df_all=df_all[df_all['total_mv_rank']>2]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['circ_mv_pct']>3]
#df_all=df_all[df_all['ps_ttm']>3]
#df_all=df_all[df_all['pb_rank']>3]
#Columns not used for now
df_all=df_all[df_all['high_stop']==0]
df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop','amount','close','real_price'],axis=1,inplace=True)
df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
#df_all.dropna(axis=0,how='any',inplace=True)
print(df_all)
df_all=df_all.reset_index(drop=True)
return df_all
def real_FE(self):
#Planned version for the new model
df_data=pd.read_csv('real_now.csv',index_col=0,header=0)
df_adj_all=pd.read_csv('real_adj_now.csv',index_col=0,header=0)
df_money_all=pd.read_csv('real_moneyflow_now.csv',index_col=0,header=0)
df_long_all=pd.read_csv('real_long_now.csv',index_col=0,header=0)
df_money_all.drop(['buy_sm_vol','sell_sm_vol','buy_md_vol','sell_md_vol','buy_lg_vol','sell_lg_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_elg_vol','buy_elg_amount','sell_elg_vol','sell_elg_amount','net_mf_vol'],axis=1,inplace=True)
df_money_all.drop(['buy_md_amount','sell_md_amount'],axis=1,inplace=True)
df_money_all['sm_amount']=df_money_all['buy_sm_amount']-df_money_all['sell_sm_amount']
df_money_all['lg_amount']=df_money_all['buy_lg_amount']-df_money_all['sell_lg_amount']
df_money_all.drop(['buy_sm_amount','sell_sm_amount'],axis=1,inplace=True)
df_money_all.drop(['buy_lg_amount','sell_lg_amount'],axis=1,inplace=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_money_all['sm_amount_pos']=df_money_all.groupby('ts_code')['sm_amount_pos'].shift(1)
df_money_all['lg_amount_pos']=df_money_all.groupby('ts_code')['lg_amount_pos'].shift(1)
df_money_all['net_mf_amount_pos']=df_money_all.groupby('ts_code')['net_mf_amount_pos'].shift(1)
df_money_all['sm_amount']=df_money_all.groupby('ts_code')['sm_amount'].shift(1)
df_money_all['lg_amount']=df_money_all.groupby('ts_code')['lg_amount'].shift(1)
df_money_all['net_mf_amount']=df_money_all.groupby('ts_code')['net_mf_amount'].shift(1)
df_all=pd.merge(df_data, df_adj_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_money_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_long_all, how='left', on=['ts_code','trade_date'])
print(df_all)
#df_all.drop(['turnover_rate','volume_ratio','pe','pb'],axis=1,inplace=True)
df_all.drop(['turnover_rate','volume_ratio','pe','dv_ttm'],axis=1,inplace=True)
#Open question here (the 688 exclusion below is commented out)
#df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
#df_all=pd.read_csv(bufferstring,index_col=0,header=0,nrows=100000)
#df_all.drop(['change','vol'],axis=1,inplace=True)
df_all['ts_code'] = df_all['ts_code'].astype('str') #convert the original int dtype to string
df_all['ts_code'] = df_all['ts_code'].str.zfill(6) #zero-pad to 6 digits; the .str accessor is required here
print(df_all)
##Exclude STAR Market (688-prefixed) stocks
#print(df_all)
df_all[["ts_code"]]=df_all[["ts_code"]].astype(str)
df_all=df_all[df_all['ts_code'].str.startswith('688')==False]
df_all['class1']=0
df_all.loc[df_all['ts_code'].str.startswith('30')==True,'class1']=1
df_all.loc[df_all['ts_code'].str.startswith('60')==True,'class1']=2
df_all.loc[df_all['ts_code'].str.startswith('00')==True,'class1']=3
#===================================================================================================================================#
#Price adjusted by the adjustment factor
df_all['adj_factor']=df_all['adj_factor'].fillna(0)
df_all['real_price']=df_all['close']*df_all['adj_factor']
df_all['real_price']=df_all.groupby('ts_code')['real_price'].shift(1)
df_all['real_price']=df_all['real_price']*(1+df_all['pct_chg']/100)
#===================================================================================================================================#
df_all['real_price_pos']=df_all.groupby('ts_code')['real_price'].rolling(20).apply(lambda x: rollingRankSciPyB(x)).reset_index(0,drop=True)
df_all['total_mv_rank']=df_all.groupby('trade_date')['total_mv'].rank(pct=True)
df_all['total_mv_rank']=df_all.groupby('ts_code')['total_mv_rank'].shift(1)
df_all['total_mv_rank']=df_all['total_mv_rank']*19.9//1
df_all['pb_rank']=df_all.groupby('trade_date')['pb'].rank(pct=True)
df_all['pb_rank']=df_all.groupby('ts_code')['pb_rank'].shift(1)
#df_all['pb_rank']=df_all['pb_rank']*10//1
df_all['circ_mv_pct']=(df_all['total_mv']-df_all['circ_mv'])/df_all['total_mv']
df_all['circ_mv_pct']=df_all.groupby('trade_date')['circ_mv_pct'].rank(pct=True)
df_all['circ_mv_pct']=df_all.groupby('ts_code')['circ_mv_pct'].shift(1)
#df_all['circ_mv_pct']=df_all['circ_mv_pct']*10//1
df_all['ps_ttm']=df_all.groupby('trade_date')['ps_ttm'].rank(pct=True)
df_all['ps_ttm']=df_all.groupby('ts_code')['ps_ttm'].shift(1)
#df_all['ps_ttm']=df_all['ps_ttm']*10//1
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'min')
df_all,_=FEsingle.CloseWithHighLow(df_all,25,'max')
df_all,_=FEsingle.CloseWithHighLow(df_all,8,'max')
df_all,_=FEsingle.HighLowRange(df_all,8)
df_all,_=FEsingle.HighLowRange(df_all,25)
#===================================================================================================================================#
#Flag limit-up / trading-halt days
df_all['high_stop']=0
df_all.loc[df_all['pct_chg']>9.4,'high_stop']=1
df_all.loc[(df_all['pct_chg']<5.2) & (4.8<df_all['pct_chg']),'high_stop']=1
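# In the live pipeline, moves between 4.8% and 5.2% are also flagged, presumably to catch
# limit-up closes on +-5% (ST) boards.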
#1-day features
df_all['chg_rank']=df_all.groupby('trade_date')['pct_chg'].rank(pct=True)
#df_all['chg_rank']=df_all['chg_rank']*10//2
df_all['pct_chg_abs']=df_all['pct_chg'].abs()
df_all['pct_chg_abs_rank']=df_all.groupby('trade_date')['pct_chg_abs'].rank(pct=True)
df_all=FEsingle.PctChgSumRank(df_all,3)
df_all=FEsingle.PctChgSumRank(df_all,6)
df_all=FEsingle.PctChgSumRank(df_all,12)
df_all=FEsingle.PctChgSum(df_all,3)
df_all=FEsingle.PctChgSum(df_all,6)
df_all=FEsingle.PctChgSum(df_all,12)
df_all=FEsingle.AmountChgRank(df_all,12)
#Rank the three price ratios (open/high/low vs. previous close)
dolist=['open','high','low']
df_all['pct_chg_r']=df_all['pct_chg']
for curc in dolist:
buffer=((df_all[curc]-df_all['pre_close'])*100)/df_all['pre_close']
df_all[curc]=buffer
df_all[curc]=df_all.groupby('trade_date')[curc].rank(pct=True)
#df_all[curc]=df_all[curc]*10//2
#df_all=FEsingle.PctChgSumRank_Common(df_all,5,'high')
df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pct_chg_r','pst_amount_rank_12','real_price_pos'],1)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],2)
#df_all=FEsingle.OldFeaturesRank(df_all,['open','high','low','pst_amount_rank_12'],3)
#Drop stocks with too-low price / turnover
df_all=df_all[df_all['close']>3]
#df_all=df_all[df_all['chg_rank']>0.7]
df_all=df_all[df_all['amount']>15000]
#df_all=df_all[df_all['total_mv_rank']<12]
df_all.drop(['close','pre_close','pct_chg','adj_factor','real_price','amount','total_mv','pb','circ_mv','pct_chg_abs'],axis=1,inplace=True)
#Columns not used for now
df_all=df_all[df_all['high_stop']==0]
#df_all=df_all[df_all['st_or_otherwrong']==1]
#'tomorrow_chg'
df_all.drop(['high_stop'],axis=1,inplace=True)
#df_all.drop(['st_or_otherwrong'],axis=1,inplace=True)
df_all.dropna(axis=0,how='any',inplace=True)
month_sec=df_all['trade_date'].max()
df_all=df_all[df_all['trade_date']==month_sec]
print(df_all)
df_all=df_all.reset_index(drop=True)
df_all.to_csv('today_train.csv')
dwdw=1 #no-op placeholder, probably left in as a debugger breakpoint anchor
class FE_a29_Volatility(FEbase):
#This version was changed to a 3-day prediction
def __init__(self):
pass
def core(self,DataSetName):
df_data=pd.read_csv(DataSetName[0],index_col=0,header=0)
df_adj_all=pd.read_csv(DataSetName[1],index_col=0,header=0)
df_limit_all=pd.read_csv(DataSetName[2],index_col=0,header=0)
df_money_all=pd.read_csv(DataSetName[3],index_col=0,header=0)
# -*- coding: utf-8 -*-
"""
Tests for abagen.images module
"""
import gzip
from pkg_resources import resource_filename
import nibabel as nib
import numpy as np
import pandas as pd
import pytest
from abagen import images
from abagen.matching import AtlasTree
@pytest.fixture(scope='module')
def annotation(tmp_path_factory):
labels = np.tile([0, 1, 2, 3, 4], 5)
ctab = np.asarray([
[25, 5, 25, 0, 1639705],
[25, 100, 40, 0, 2647065],
[125, 100, 160, 0, 10511485],
[100, 25, 0, 0, 6500],
[120, 70, 50, 0, 3294840]
])
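# Each ctab row is [R, G, B, transparency, packed label value]; the packed value is
# R + G*256 + B*65536 (e.g. 25 + 5*256 + 25*65536 == 1639705).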
names = [b'background', b'label1', b'label2', b'label3', b'label4']
fname = tmp_path_factory.mktemp('annot') / 'test.annot'
nib.freesurfer.write_annot(fname, labels, ctab, names, False)
return fname
@pytest.fixture(scope='module')
def fsgeometry():
return (
resource_filename('abagen', 'data/fsaverage5-pial-lh.surf.gii.gz'),
resource_filename('abagen', 'data/fsaverage5-pial-rh.surf.gii.gz'),
)
def test_leftify_atlas(atlas):
out = images.leftify_atlas(atlas['image'])
assert len(np.unique(out.dataobj)) == 44
assert np.all(np.asarray(out.dataobj)[98:] == 0)
def test_relabel_gifti(surface):
surface = surface['image']
# basic usage (`surface` has gap between left + right hemi for subcortex)
lh, rh = images.relabel_gifti(surface, background=None)
data = np.hstack((lh.agg_data(), rh.agg_data()))
assert np.allclose(np.unique(data), np.arange(69))
# usage with unique "background"
lh, rh = images.relabel_gifti(surface, background=['bankssts'])
data = np.hstack((lh.agg_data(), rh.agg_data()))
assert np.allclose(np.unique(data), np.arange(67))
# usage with offset
lh, rh = images.relabel_gifti(surface, offset=100)
assert np.allclose(np.unique(lh.agg_data())[1:], np.arange(1, 35))
assert np.allclose(np.unique(rh.agg_data())[1:], np.arange(100, 134))
def test_annot_to_gifti(annotation):
labels = [
'background', 'label1', 'label2', 'label3', 'label4'
]
gii = images.annot_to_gifti(annotation)
assert np.allclose(np.unique(gii.agg_data()), np.arange(5))
lt = gii.labeltable.get_labels_as_dict()
assert np.allclose(list(lt.keys()), np.arange(5))
assert np.all(list(lt.values()) == labels)
def test_check_img(atlas):
# some really basic, silly checks
out = images.check_img(atlas['image'])
assert out.header.get_data_dtype() == np.dtype('int32')
assert len(out.shape) == 3
with pytest.raises(TypeError):
images.check_img('doesnotexist.nii.gz')
with pytest.raises(ValueError):
images.check_img(nib.Nifti1Image(np.zeros((5, 5, 5, 2)), np.eye(4)))
def test_check_surface(surface):
surface = surface['image']
# default; load images
atlas, info = images.check_surface(surface)
assert atlas.shape == (20484,)
assert isinstance(info, pd.DataFrame)
assert info.shape == (68, 3)
assert all(info.columns == ['label', 'hemisphere', 'structure'])
assert info.index.name == 'id'
assert all(info['structure'] == 'cortex')
# load pre-loaded images
imgs = []
for hemi in surface:
with gzip.GzipFile(hemi) as gz:
imgs.append(nib.GiftiImage.from_bytes(gz.read()))
atlas2, info2 = images.check_surface(imgs)
assert np.allclose(atlas, atlas2)
pd.testing.assert_frame_equal(info, info2)
# array is simply returned
atlas3, info3 = images.check_surface(atlas)
assert np.allclose(atlas, atlas3)
assert info3 is None
with pytest.raises(TypeError):
images.check_surface(surface[0])
with pytest.raises(TypeError):
images.check_surface(('lh.nii.gz', 'rh.nii.gz'))
def test_check_atlas(atlas, surface, fsgeometry):
# check loading volumetric atlas
tree = images.check_atlas(atlas['image'])
assert isinstance(tree, AtlasTree)
assert tree.atlas_info is None
assert tree.volumetric
assert len(tree.coords) == 819621
# check loading volumetric atlas with info
tree = images.check_atlas(atlas['image'], atlas['info'])
assert isinstance(tree, AtlasTree)
assert isinstance(tree.atlas_info, pd.DataFrame)
assert tree.volumetric
assert len(tree.coords) == 819621
# check loading surface (info is intuited)
tree = images.check_atlas(surface['image'])
assert isinstance(tree, AtlasTree)
assert isinstance(tree.atlas_info, pd.DataFrame)
assert not tree.volumetric
assert len(tree.coords) == 18426
tree = images.check_atlas(surface['image'], geometry=fsgeometry,
space='fsaverage')
assert isinstance(tree, AtlasTree)
assert isinstance(tree.atlas_info, pd.DataFrame)
assert not tree.volumetric
assert len(tree.coords) == 18426
with pytest.raises(ValueError):
images.check_atlas(surface['image'], geometry=fsgeometry)
# check loading donor-specific surface file
fp = 'data/native_dk/12876/atlas-desikankilliany-{}.label.gii.gz'
surf = [
resource_filename('abagen', fp.format(hemi)) for hemi in ('lh', 'rh')
]
tree = images.check_atlas(surf, donor='12876')
assert isinstance(tree, AtlasTree)
assert isinstance(tree.atlas_info, pd.DataFrame)
assert not tree.volumetric
assert len(tree.coords) == 386566
def test_check_geometry(fsgeometry):
coords, triangles = images.check_geometry(fsgeometry, 'fsaverage')
assert len(coords) == 20484
assert len(triangles) == 40960
with pytest.raises(ValueError):
images.check_geometry(fsgeometry, 'notaspace')
with pytest.raises(ValueError):
images.check_geometry(fsgeometry, 'fsnative', donor=None)
with pytest.raises(TypeError):
images.check_geometry(fsgeometry[0], 'fsaverage')
def test_check_atlas_info(atlas):
labels = np.trim_zeros(np.unique(nib.load(atlas['image']).dataobj))
# general usage (providing two filenames) works as expected
out = images.check_atlas_info(atlas['info'], labels)
assert all(out.columns == ['label', 'hemisphere', 'structure'])
assert out.index.name == 'id'
# can accept dataframe as input
atlas_df = pd.read_csv(atlas['info'])
out2 = images.check_atlas_info(atlas_df, labels)
pd.testing.assert_frame_equal(out, out2)
# setting ID as index of dataframe is acceptable usage
atlas_df = atlas_df.set_index('id')
out3 = images.check_atlas_info(atlas_df, labels)
pd.testing.assert_frame_equal(out, out3)
# check that coercion of different hemisphere designations works
atlas_df.loc[atlas_df['hemisphere'] == "L", 'hemisphere'] = "lh"
atlas_df.loc[atlas_df['hemisphere'] == "R", 'hemisphere'] = "r"
out4 = images.check_atlas_info(atlas_df, labels)
| pd.testing.assert_frame_equal(out, out4) | pandas.testing.assert_frame_equal |
import pandas as pd
import numpy as np
import copy
import json
import os
from io import BytesIO
import xlsxwriter
import yaml
settings = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'settings.yaml')
with open(settings, 'r') as f:
settings = yaml.load(f.read(),Loader=yaml.SafeLoader)
class _Workbook:
"""
Excel Workbook level configurations. This is not part of the end_user API.
This class facilitates:
1. Crawling the xlcompose object on which `to_excel` is called
2. Writing the each nested object to an Excel file
"""
def __init__(self, workbook_path, exhibits, default_formats):
""" Initialize the writer object
"""
self.formats = {}
self.writer = pd.ExcelWriter(workbook_path, engine='xlsxwriter')  # assumed completion: an xlsxwriter-backed pandas ExcelWriter, consistent with the imports above
import unittest
import pandas as pd
from featurefilter import TargetCorrelationFilter
def test_low_continuous_correlation():
train_df = pd.DataFrame({'A': [0, 0, 1, 1], 'Y': [0, 1, 0, 1]})  # assumed example data: 'A' is uncorrelated with the target 'Y'
import math
from typing import Callable, Dict, Generator
import mock
import pandas
import pytest
from snowflake.connector.pandas_tools import write_pandas
MYPY = False
if MYPY: # from typing import TYPE_CHECKING once 3.5 is deprecated
from snowflake.connector import SnowflakeConnection
sf_connector_version_data = [
('snowflake-connector-python', '1.2.23'),
('snowflake-sqlalchemy', '1.1.1'),
('snowflake-connector-go', '0.0.1'),
('snowflake-go', '1.0.1'),
('snowflake-odbc', '3.12.3'),
]
sf_connector_version_df = pandas.DataFrame(sf_connector_version_data, columns=['name', 'newest_version'])  # column names are assumed
"""Unit tests for the reading functionality in dframeio.parquet"""
# pylint: disable=redefined-outer-name
from pathlib import Path
import pandas as pd
import pandera as pa
import pandera.typing
import pytest
from pandas.testing import assert_frame_equal
import dframeio
class SampleDataSchema(pa.SchemaModel):
"""pandera schema of the parquet test dataset"""
registration_dttm: pa.typing.Series[pa.typing.DateTime]
id: pa.typing.Series[pd.Int64Dtype] = pa.Field(nullable=True, coerce=True)
first_name: pa.typing.Series[pa.typing.String]
last_name: pa.typing.Series[pa.typing.String]
email: pa.typing.Series[pa.typing.String]
gender: pa.typing.Series[pa.typing.String] = pa.Field(coerce=True)
ip_address: pa.typing.Series[pa.typing.String]
cc: pa.typing.Series[pa.typing.String]
country: pa.typing.Series[pa.typing.String]
birthdate: pa.typing.Series[pa.typing.String]
salary: pa.typing.Series[pa.typing.Float64] = pa.Field(nullable=True)
title: pa.typing.Series[pa.typing.String]
comments: pa.typing.Series[pa.typing.String] = pa.Field(nullable=True)
@staticmethod
def length():
"""Known length of the data"""
return 5000
@staticmethod
def n_salary_over_150000():
"""Number of rows with salary > 150000"""
return 2384
@pytest.fixture(params=["multifile", "singlefile.parquet", "multifolder"])
def sample_data_path(request):
"""Path of a parquet dataset for testing"""
return Path(__file__).parent / "data" / "parquet" / request.param
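# The fixture is parametrized over three on-disk layouts of the same data (a directory of parquet
# files, a single parquet file, and a nested multi-folder dataset), so every test below runs
# against all three.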
def read_sample_dataframe():
"""Read the sample dataframe to pandas and return a cached copy"""
if not hasattr(read_sample_dataframe, "df"):
parquet_file = Path(__file__).parent / "data" / "parquet" / "singlefile.parquet"
backend = dframeio.ParquetBackend(str(parquet_file.parent))
read_sample_dataframe.df = backend.read_to_pandas(parquet_file.name)
return read_sample_dataframe.df.copy()
@pytest.fixture(scope="function")
def sample_dataframe():
"""Provide the sample dataframe"""
return read_sample_dataframe()
@pytest.fixture(scope="function")
def sample_dataframe_dict():
"""Provide the sample dataframe"""
parquet_file = Path(__file__).parent / "data" / "parquet" / "singlefile.parquet"
backend = dframeio.ParquetBackend(str(parquet_file.parent))
return backend.read_to_dict(parquet_file.name)
@pytest.mark.parametrize(
"kwargs, exception",
[
({"base_path": "/some/dir", "partitions": -1}, TypeError),
({"base_path": "/some/dir", "partitions": 2.2}, TypeError),
({"base_path": "/some/dir", "partitions": "abc"}, TypeError),
({"base_path": "/some/dir", "partitions": b"abc"}, TypeError),
({"base_path": "/some/dir", "rows_per_file": b"abc"}, TypeError),
({"base_path": "/some/dir", "rows_per_file": 1.1}, TypeError),
({"base_path": "/some/dir", "rows_per_file": -5}, ValueError),
],
)
def test_init_argchecks(kwargs, exception):
"""Challenge the argument validation of the constructor"""
with pytest.raises(exception):
dframeio.ParquetBackend(**kwargs)
def test_read_to_pandas(sample_data_path):
"""Read a sample dataset into a pandas dataframe"""
backend = dframeio.ParquetBackend(str(sample_data_path.parent))
df = backend.read_to_pandas(sample_data_path.name)
SampleDataSchema.to_schema().validate(df)
assert len(df) == SampleDataSchema.length()
def test_read_to_pandas_some_columns(sample_data_path):
"""Read a sample dataset into a pandas dataframe, selecting some columns"""
backend = dframeio.ParquetBackend(str(sample_data_path.parent))
df = backend.read_to_pandas(sample_data_path.name, columns=["id", "first_name"])
SampleDataSchema.to_schema().select_columns(["id", "first_name"]).validate(df)
assert len(df) == SampleDataSchema.length()
def test_read_to_pandas_some_rows(sample_data_path):
"""Read a sample dataset into a pandas dataframe, filtering some rows"""
backend = dframeio.ParquetBackend(str(sample_data_path.parent))
df = backend.read_to_pandas(sample_data_path.name, row_filter="salary > 150000")
SampleDataSchema.to_schema().validate(df)
assert len(df) == SampleDataSchema.n_salary_over_150000()
def test_read_to_pandas_sample(sample_data_path):
"""Read a sample dataset into a pandas dataframe, filtering some rows"""
backend = dframeio.ParquetBackend(str(sample_data_path.parent))
df = backend.read_to_pandas(sample_data_path.name, sample=10)
SampleDataSchema.to_schema().validate(df)
assert len(df) == 10
@pytest.mark.parametrize("limit", [0, 10])
def test_read_to_pandas_limit(sample_data_path, limit):
"""Read a sample dataset into a pandas dataframe, filtering some rows"""
backend = dframeio.ParquetBackend(str(sample_data_path.parent))
df = backend.read_to_pandas(sample_data_path.name, limit=limit)
SampleDataSchema.to_schema().validate(df)
assert len(df) == limit
def test_read_to_pandas_base_path_check(sample_data_path):
"""Try if it isn't possible to read from outside the base path"""
backend = dframeio.ParquetBackend(str(sample_data_path.parent))
with pytest.raises(ValueError):
backend.read_to_pandas("/tmp")
def test_read_to_dict(sample_data_path):
"""Read a sample dataset into a dictionary"""
backend = dframeio.ParquetBackend(str(sample_data_path.parent))
df = backend.read_to_dict(sample_data_path.name)
assert isinstance(df, dict)
assert set(df.keys()) == SampleDataSchema.to_schema().columns.keys()
df = pd.DataFrame(df)
SampleDataSchema.to_schema().validate(df)
assert len(df) == SampleDataSchema.length()
def test_read_to_dict_some_columns(sample_data_path):
"""Read a sample dataset into a dictionary, filtering some columns"""
backend = dframeio.ParquetBackend(str(sample_data_path.parent))
df = backend.read_to_dict(sample_data_path.name, columns=["id", "first_name"])
assert isinstance(df, dict)
assert set(df.keys()) == {"id", "first_name"}
df = pd.DataFrame(df)
SampleDataSchema.to_schema().select_columns(["id", "first_name"]).validate(df)
assert len(df) == SampleDataSchema.length()
def test_read_to_dict_some_rows(sample_data_path):
"""Read a sample dataset into a dictionary, filtering some rows"""
backend = dframeio.ParquetBackend(str(sample_data_path.parent))
df = backend.read_to_dict(sample_data_path.name, row_filter="salary > 150000")
assert isinstance(df, dict)
assert set(df.keys()) == SampleDataSchema.to_schema().columns.keys()
df = pd.DataFrame(df)
SampleDataSchema.to_schema().validate(df)
assert len(df) == SampleDataSchema.n_salary_over_150000()  # assertions reconstructed to mirror the preceding read_to_dict tests