# ---------------------------------------------------------------------------
# zipline-live 1.1.0.5 :: zipline/finance/risk/cumulative.py
# ---------------------------------------------------------------------------
import functools
import logbook
import numpy as np
import pandas as pd
from pandas.tseries.tools import normalize_date
from six import iteritems
from . risk import (
check_entry,
choose_treasury
)
from empyrical import (
alpha_beta_aligned,
annual_volatility,
cum_returns,
downside_risk,
max_drawdown,
sharpe_ratio,
sortino_ratio,
)
log = logbook.Logger('Risk Cumulative')
choose_treasury = functools.partial(choose_treasury, lambda *args: '10year',
compound=False)
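# Illustrative sketch (not part of the original module): the class below
# leans on empyrical's vectorized helpers; for instance, cum_returns
# compounds a series of simple returns. `_demo_cum_returns` is a hypothetical
# name used only for this example.
def _demo_cum_returns():
    rets = np.array([0.01, 0.02])
    # (1 + 0.01) * (1 + 0.02) - 1 == 0.0302
    assert np.isclose(cum_returns(rets)[-1], 0.0302)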
class RiskMetricsCumulative(object):
"""
:Usage:
Instantiate RiskMetricsCumulative once.
Call update() method on each dt to update the metrics.
"""
METRIC_NAMES = (
'alpha',
'beta',
'sharpe',
'algorithm_volatility',
'benchmark_volatility',
'downside_risk',
'sortino',
)
def __init__(self, sim_params, treasury_curves, trading_calendar,
create_first_day_stats=False):
self.treasury_curves = treasury_curves
self.trading_calendar = trading_calendar
self.start_session = sim_params.start_session
self.end_session = sim_params.end_session
self.sessions = trading_calendar.sessions_in_range(
self.start_session, self.end_session
)
# Hold on to the trading day before the start,
# used for index of the zero return value when forcing returns
# on the first day.
self.day_before_start = self.start_session - self.sessions.freq
last_day = normalize_date(sim_params.end_session)
if last_day not in self.sessions:
last_day = pd.tseries.index.DatetimeIndex(
[last_day]
)
self.sessions = self.sessions.append(last_day)
self.sim_params = sim_params
self.create_first_day_stats = create_first_day_stats
cont_index = self.sessions
self.cont_index = cont_index
self.cont_len = len(self.cont_index)
empty_cont = np.full(self.cont_len, np.nan)
self.algorithm_returns_cont = empty_cont.copy()
self.benchmark_returns_cont = empty_cont.copy()
self.algorithm_cumulative_leverages_cont = empty_cont.copy()
self.mean_returns_cont = empty_cont.copy()
self.annualized_mean_returns_cont = empty_cont.copy()
self.mean_benchmark_returns_cont = empty_cont.copy()
self.annualized_mean_benchmark_returns_cont = empty_cont.copy()
# The returns at a given time are read and reset from the respective
# returns container.
self.algorithm_returns = None
self.benchmark_returns = None
self.mean_returns = None
self.annualized_mean_returns = None
self.mean_benchmark_returns = None
self.annualized_mean_benchmark_returns = None
self.algorithm_cumulative_returns = empty_cont.copy()
self.benchmark_cumulative_returns = empty_cont.copy()
self.algorithm_cumulative_leverages = empty_cont.copy()
self.excess_returns = empty_cont.copy()
self.latest_dt_loc = 0
self.latest_dt = cont_index[0]
self.benchmark_volatility = empty_cont.copy()
self.algorithm_volatility = empty_cont.copy()
self.beta = empty_cont.copy()
self.alpha = empty_cont.copy()
self.sharpe = empty_cont.copy()
self.downside_risk = empty_cont.copy()
self.sortino = empty_cont.copy()
self.drawdowns = empty_cont.copy()
self.max_drawdowns = empty_cont.copy()
self.max_drawdown = 0
self.max_leverages = empty_cont.copy()
self.max_leverage = 0
self.current_max = -np.inf
self.daily_treasury = pd.Series(index=self.sessions)
self.treasury_period_return = np.nan
self.num_trading_days = 0
def update(self, dt, algorithm_returns, benchmark_returns, leverage):
# Keep track of latest dt for use in to_dict and other methods
# that report current state.
self.latest_dt = dt
dt_loc = self.cont_index.get_loc(dt)
self.latest_dt_loc = dt_loc
self.algorithm_returns_cont[dt_loc] = algorithm_returns
self.algorithm_returns = self.algorithm_returns_cont[:dt_loc + 1]
self.num_trading_days = len(self.algorithm_returns)
if self.create_first_day_stats:
if len(self.algorithm_returns) == 1:
self.algorithm_returns = np.append(0.0, self.algorithm_returns)
self.algorithm_cumulative_returns[dt_loc] = cum_returns(
self.algorithm_returns
)[-1]
algo_cumulative_returns_to_date = \
self.algorithm_cumulative_returns[:dt_loc + 1]
self.mean_returns_cont[dt_loc] = \
algo_cumulative_returns_to_date[dt_loc] / self.num_trading_days
self.mean_returns = self.mean_returns_cont[:dt_loc + 1]
self.annualized_mean_returns_cont[dt_loc] = \
self.mean_returns_cont[dt_loc] * 252
self.annualized_mean_returns = \
self.annualized_mean_returns_cont[:dt_loc + 1]
if self.create_first_day_stats:
if len(self.mean_returns) == 1:
self.mean_returns = np.append(0.0, self.mean_returns)
self.annualized_mean_returns = np.append(
0.0, self.annualized_mean_returns)
self.benchmark_returns_cont[dt_loc] = benchmark_returns
self.benchmark_returns = self.benchmark_returns_cont[:dt_loc + 1]
if self.create_first_day_stats:
if len(self.benchmark_returns) == 1:
self.benchmark_returns = np.append(0.0, self.benchmark_returns)
self.benchmark_cumulative_returns[dt_loc] = cum_returns(
self.benchmark_returns
)[-1]
benchmark_cumulative_returns_to_date = \
self.benchmark_cumulative_returns[:dt_loc + 1]
self.mean_benchmark_returns_cont[dt_loc] = \
benchmark_cumulative_returns_to_date[dt_loc] / \
self.num_trading_days
        self.mean_benchmark_returns = \
            self.mean_benchmark_returns_cont[:dt_loc + 1]
self.annualized_mean_benchmark_returns_cont[dt_loc] = \
self.mean_benchmark_returns_cont[dt_loc] * 252
self.annualized_mean_benchmark_returns = \
self.annualized_mean_benchmark_returns_cont[:dt_loc + 1]
self.algorithm_cumulative_leverages_cont[dt_loc] = leverage
self.algorithm_cumulative_leverages = \
self.algorithm_cumulative_leverages_cont[:dt_loc + 1]
if self.create_first_day_stats:
if len(self.algorithm_cumulative_leverages) == 1:
self.algorithm_cumulative_leverages = np.append(
0.0,
self.algorithm_cumulative_leverages)
        if len(self.algorithm_returns) != len(self.benchmark_returns):
message = "Mismatch between benchmark_returns ({bm_count}) and \
algorithm_returns ({algo_count}) in range {start} : {end} on {dt}"
message = message.format(
bm_count=len(self.benchmark_returns),
algo_count=len(self.algorithm_returns),
start=self.start_session,
end=self.end_session,
dt=dt
)
raise Exception(message)
self.update_current_max()
self.benchmark_volatility[dt_loc] = annual_volatility(
self.benchmark_returns
)
self.algorithm_volatility[dt_loc] = annual_volatility(
self.algorithm_returns
)
# caching the treasury rates for the minutely case is a
# big speedup, because it avoids searching the treasury
# curves on every minute.
# In both minutely and daily, the daily curve is always used.
treasury_end = dt.replace(hour=0, minute=0)
if np.isnan(self.daily_treasury[treasury_end]):
treasury_period_return = choose_treasury(
self.treasury_curves,
self.start_session,
treasury_end,
self.trading_calendar,
)
self.daily_treasury[treasury_end] = treasury_period_return
self.treasury_period_return = self.daily_treasury[treasury_end]
self.excess_returns[dt_loc] = (
self.algorithm_cumulative_returns[dt_loc] -
self.treasury_period_return)
self.alpha[dt_loc], self.beta[dt_loc] = alpha_beta_aligned(
self.algorithm_returns,
self.benchmark_returns,
)
self.sharpe[dt_loc] = sharpe_ratio(
self.algorithm_returns,
)
self.downside_risk[dt_loc] = downside_risk(
self.algorithm_returns
)
self.sortino[dt_loc] = sortino_ratio(
self.algorithm_returns,
_downside_risk=self.downside_risk[dt_loc]
)
self.max_drawdown = max_drawdown(
self.algorithm_returns
)
self.max_drawdowns[dt_loc] = self.max_drawdown
self.max_leverage = self.calculate_max_leverage()
self.max_leverages[dt_loc] = self.max_leverage
def to_dict(self):
"""
Creates a dictionary representing the state of the risk report.
        Returns a dict mapping metric names to their current values.
"""
dt = self.latest_dt
dt_loc = self.latest_dt_loc
period_label = dt.strftime("%Y-%m")
rval = {
'trading_days': self.num_trading_days,
'benchmark_volatility':
self.benchmark_volatility[dt_loc],
'algo_volatility':
self.algorithm_volatility[dt_loc],
'treasury_period_return': self.treasury_period_return,
# Though the two following keys say period return,
# they would be more accurately called the cumulative return.
# However, the keys need to stay the same, for now, for backwards
# compatibility with existing consumers.
'algorithm_period_return':
self.algorithm_cumulative_returns[dt_loc],
'benchmark_period_return':
self.benchmark_cumulative_returns[dt_loc],
'beta': self.beta[dt_loc],
'alpha': self.alpha[dt_loc],
'sharpe': self.sharpe[dt_loc],
'sortino': self.sortino[dt_loc],
'excess_return': self.excess_returns[dt_loc],
'max_drawdown': self.max_drawdown,
'max_leverage': self.max_leverage,
'period_label': period_label
}
return {k: (None if check_entry(k, v) else v)
for k, v in iteritems(rval)}
def __repr__(self):
statements = []
for metric in self.METRIC_NAMES:
value = getattr(self, metric)[-1]
if isinstance(value, list):
if len(value) == 0:
value = np.nan
else:
value = value[-1]
statements.append("{m}:{v}".format(m=metric, v=value))
return '\n'.join(statements)
def update_current_max(self):
if len(self.algorithm_cumulative_returns) == 0:
return
current_cumulative_return = \
self.algorithm_cumulative_returns[self.latest_dt_loc]
if self.current_max < current_cumulative_return:
self.current_max = current_cumulative_return
def calculate_max_leverage(self):
# The leverage is defined as: the gross_exposure/net_liquidation
# gross_exposure = long_exposure + abs(short_exposure)
# net_liquidation = ending_cash + long_exposure + short_exposure
cur_leverage = self.algorithm_cumulative_leverages_cont[
self.latest_dt_loc]
        return max(cur_leverage, self.max_leverage)

# ---------------------------------------------------------------------------
# zipline-live 1.1.0.5 :: zipline/finance/risk/risk.py
# ---------------------------------------------------------------------------
import logbook
import numpy as np
log = logbook.Logger('Risk')
TREASURY_DURATIONS = [
'1month', '3month', '6month',
'1year', '2year', '3year', '5year',
'7year', '10year', '30year'
]
# check if a field in rval is nan, and replace it with
# None.
def check_entry(key, value):
if key != 'period_label':
return np.isnan(value) or np.isinf(value)
else:
return False
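# Illustrative sketch (not part of the original module): check_entry is the
# predicate the to_dict methods use to scrub non-finite metrics to None.
# `_demo_check_entry` is a hypothetical name used only for this example.
def _demo_check_entry():
    assert check_entry('sharpe', float('nan'))          # nan -> scrubbed
    assert not check_entry('sharpe', 1.5)               # finite value kept
    assert not check_entry('period_label', '2012-03')   # labels pass through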
def get_treasury_rate(treasury_curves, treasury_duration, day):
rate = None
curve = treasury_curves.ix[day]
# 1month note data begins in 8/2001,
# so we can use 3month instead.
idx = TREASURY_DURATIONS.index(treasury_duration)
for duration in TREASURY_DURATIONS[idx:]:
rate = curve[duration]
if rate is not None:
break
return rate
def select_treasury_duration(start_date, end_date):
td = end_date - start_date
if td.days <= 31:
treasury_duration = '1month'
elif td.days <= 93:
treasury_duration = '3month'
elif td.days <= 186:
treasury_duration = '6month'
elif td.days <= 366:
treasury_duration = '1year'
elif td.days <= 365 * 2 + 1:
treasury_duration = '2year'
elif td.days <= 365 * 3 + 1:
treasury_duration = '3year'
elif td.days <= 365 * 5 + 2:
treasury_duration = '5year'
elif td.days <= 365 * 7 + 2:
treasury_duration = '7year'
elif td.days <= 365 * 10 + 2:
treasury_duration = '10year'
else:
treasury_duration = '30year'
return treasury_duration
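# Illustrative sketch (not part of the original module): the duration buckets
# above are driven purely by the span's day count.
# `_demo_select_treasury_duration` is a hypothetical name.
def _demo_select_treasury_duration():
    import datetime
    start = datetime.date(2010, 1, 4)
    # 177 days falls in the "<= 186" bucket.
    assert select_treasury_duration(
        start, datetime.date(2010, 6, 30)) == '6month'
    # 725 days falls in the "<= 365 * 2 + 1" bucket.
    assert select_treasury_duration(
        start, datetime.date(2011, 12, 30)) == '2year'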
def choose_treasury(select_treasury, treasury_curves, start_session,
end_session, trading_calendar, compound=True):
"""
Find the latest known interest rate for a given duration within a date
range.
If we find one but it's more than a trading day ago from the date we're
looking for, then we log a warning
"""
treasury_duration = select_treasury(start_session, end_session)
    search_day = None
    search_dist = None
if end_session in treasury_curves.index:
rate = get_treasury_rate(treasury_curves,
treasury_duration,
end_session)
if rate is not None:
search_day = end_session
if not search_day:
# in case end date is not a trading day or there is no treasury
# data, search for the previous day with an interest rate.
search_days = treasury_curves.index
# Find rightmost value less than or equal to end_session
i = search_days.searchsorted(end_session)
for prev_day in search_days[i - 1::-1]:
rate = get_treasury_rate(treasury_curves,
treasury_duration,
prev_day)
if rate is not None:
search_day = prev_day
search_dist = trading_calendar.session_distance(
end_session, prev_day
)
break
if search_day:
if (search_dist is None or search_dist > 1) and \
search_days[0] <= end_session <= search_days[-1]:
message = "No rate within 1 trading day of end date = \
{dt} and term = {term}. Using {search_day}. Check that date doesn't exceed \
treasury history range."
message = message.format(dt=end_session,
term=treasury_duration,
search_day=search_day)
log.warn(message)
if search_day:
td = end_session - start_session
if compound:
return rate * (td.days + 1) / 365
else:
return rate
message = "No rate for end date = {dt} and term = {term}. Check \
that date doesn't exceed treasury history range."
message = message.format(
dt=end_session,
term=treasury_duration
)
    raise Exception(message)

# ---------------------------------------------------------------------------
# zipline-live 1.1.0.5 :: zipline/finance/risk/period.py
# ---------------------------------------------------------------------------
import functools
import logbook
from six import iteritems
import numpy as np
import pandas as pd
from . import risk
from . risk import check_entry
from empyrical import (
alpha_beta_aligned,
annual_volatility,
cum_returns,
downside_risk,
max_drawdown,
sharpe_ratio,
sortino_ratio
)
log = logbook.Logger('Risk Period')
choose_treasury = functools.partial(risk.choose_treasury,
risk.select_treasury_duration)
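# Illustrative sketch (not part of the original module): mirrors the running
# mean computed for mean_algorithm_returns in calculate_metrics below.
# `_demo_running_mean_returns` is a hypothetical name.
def _demo_running_mean_returns():
    returns = np.array([0.01, 0.03, 0.02])
    running_mean = returns.cumsum() / np.arange(1, 4, dtype=np.float64)
    assert np.allclose(running_mean, [0.01, 0.02, 0.02])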
class RiskMetricsPeriod(object):
def __init__(self, start_session, end_session, returns, trading_calendar,
treasury_curves, benchmark_returns, algorithm_leverages=None):
if treasury_curves.index[-1] >= start_session:
mask = ((treasury_curves.index >= start_session) &
(treasury_curves.index <= end_session))
self.treasury_curves = treasury_curves[mask]
else:
# our test is beyond the treasury curve history
# so we'll use the last available treasury curve
self.treasury_curves = treasury_curves[-1:]
self._start_session = start_session
self._end_session = end_session
self.trading_calendar = trading_calendar
trading_sessions = trading_calendar.sessions_in_range(
self._start_session,
self._end_session,
)
self.algorithm_returns = self.mask_returns_to_period(returns,
trading_sessions)
# Benchmark needs to be masked to the same dates as the algo returns
self.benchmark_returns = self.mask_returns_to_period(
benchmark_returns,
self.algorithm_returns.index
)
self.algorithm_leverages = algorithm_leverages
self.calculate_metrics()
def calculate_metrics(self):
self.benchmark_period_returns = \
cum_returns(self.benchmark_returns).iloc[-1]
self.algorithm_period_returns = \
cum_returns(self.algorithm_returns).iloc[-1]
if not self.algorithm_returns.index.equals(
self.benchmark_returns.index
):
message = "Mismatch between benchmark_returns ({bm_count}) and \
algorithm_returns ({algo_count}) in range {start} : {end}"
message = message.format(
bm_count=len(self.benchmark_returns),
algo_count=len(self.algorithm_returns),
start=self._start_session,
end=self._end_session
)
raise Exception(message)
self.num_trading_days = len(self.benchmark_returns)
self.mean_algorithm_returns = (
self.algorithm_returns.cumsum() /
np.arange(1, self.num_trading_days + 1, dtype=np.float64)
)
self.benchmark_volatility = annual_volatility(self.benchmark_returns)
self.algorithm_volatility = annual_volatility(self.algorithm_returns)
self.treasury_period_return = choose_treasury(
self.treasury_curves,
self._start_session,
self._end_session,
self.trading_calendar,
)
self.sharpe = sharpe_ratio(
self.algorithm_returns,
)
# The consumer currently expects a 0.0 value for sharpe in period,
# this differs from cumulative which was np.nan.
# When factoring out the sharpe_ratio, the different return types
# were collapsed into `np.nan`.
# TODO: Either fix consumer to accept `np.nan` or make the
# `sharpe_ratio` return type configurable.
# In the meantime, convert nan values to 0.0
if pd.isnull(self.sharpe):
self.sharpe = 0.0
self.downside_risk = downside_risk(
self.algorithm_returns.values
)
self.sortino = sortino_ratio(
self.algorithm_returns.values,
_downside_risk=self.downside_risk,
)
self.alpha, self.beta = alpha_beta_aligned(
self.algorithm_returns.values,
self.benchmark_returns.values,
)
self.excess_return = self.algorithm_period_returns - \
self.treasury_period_return
self.max_drawdown = max_drawdown(self.algorithm_returns.values)
self.max_leverage = self.calculate_max_leverage()
def to_dict(self):
"""
Creates a dictionary representing the state of the risk report.
        Returns a dict mapping metric names to their values for the period.
"""
period_label = self._end_session.strftime("%Y-%m")
rval = {
'trading_days': self.num_trading_days,
'benchmark_volatility': self.benchmark_volatility,
'algo_volatility': self.algorithm_volatility,
'treasury_period_return': self.treasury_period_return,
'algorithm_period_return': self.algorithm_period_returns,
'benchmark_period_return': self.benchmark_period_returns,
'sharpe': self.sharpe,
'sortino': self.sortino,
'beta': self.beta,
'alpha': self.alpha,
'excess_return': self.excess_return,
'max_drawdown': self.max_drawdown,
'max_leverage': self.max_leverage,
'period_label': period_label
}
return {k: None if check_entry(k, v) else v
for k, v in iteritems(rval)}
def __repr__(self):
statements = []
metrics = [
"algorithm_period_returns",
"benchmark_period_returns",
"excess_return",
"num_trading_days",
"benchmark_volatility",
"algorithm_volatility",
"sharpe",
"sortino",
"beta",
"alpha",
"max_drawdown",
"max_leverage",
"algorithm_returns",
"benchmark_returns",
]
for metric in metrics:
value = getattr(self, metric)
statements.append("{m}:{v}".format(m=metric, v=value))
return '\n'.join(statements)
def mask_returns_to_period(self, daily_returns, trading_days):
if isinstance(daily_returns, list):
returns = pd.Series([x.returns for x in daily_returns],
index=[x.date for x in daily_returns])
        else:  # otherwise we're receiving a Series already
returns = daily_returns
trade_day_mask = returns.index.normalize().isin(trading_days)
mask = ((returns.index >= self._start_session) &
(returns.index <= self._end_session) & trade_day_mask)
returns = returns[mask]
return returns
def calculate_max_leverage(self):
if self.algorithm_leverages is None:
return 0.0
else:
            return max(self.algorithm_leverages)

# ---------------------------------------------------------------------------
# zipline-live 1.1.0.5 :: zipline/finance/risk/report.py
# ---------------------------------------------------------------------------
import logbook
import datetime
from dateutil.relativedelta import relativedelta
from . period import RiskMetricsPeriod
log = logbook.Logger('Risk Report')
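# Illustrative sketch (not part of the original module): mirrors the month
# arithmetic RiskReport.periods_in_range uses to build its rolling windows.
# `_demo_rolling_window_bounds` is a hypothetical name.
def _demo_rolling_window_bounds():
    import datetime
    from dateutil.relativedelta import relativedelta
    one_day = datetime.timedelta(days=1)
    cur_start = datetime.date(2012, 1, 1)
    # A 3-month window starting Jan 1 closes on Mar 31 ...
    assert cur_start + relativedelta(months=3) - one_day == \
        datetime.date(2012, 3, 31)
    # ... and the next window starts one month later, on Feb 1.
    assert cur_start + relativedelta(months=1) == datetime.date(2012, 2, 1)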
class RiskReport(object):
def __init__(self, algorithm_returns, sim_params, trading_calendar,
treasury_curves, benchmark_returns,
algorithm_leverages=None):
"""
algorithm_returns needs to be a list of daily_return objects
sorted in date ascending order
account needs to be a list of account objects sorted in date
ascending order
"""
self.algorithm_returns = algorithm_returns
self.sim_params = sim_params
self.trading_calendar = trading_calendar
self.treasury_curves = treasury_curves
self.benchmark_returns = benchmark_returns
self.algorithm_leverages = algorithm_leverages
if len(self.algorithm_returns) == 0:
start_session = self.sim_params.start_session
end_session = self.sim_params.end_session
else:
start_session = self.algorithm_returns.index[0]
end_session = self.algorithm_returns.index[-1]
self.month_periods = self.periods_in_range(
1, start_session, end_session
)
self.three_month_periods = self.periods_in_range(
3, start_session, end_session
)
self.six_month_periods = self.periods_in_range(
6, start_session, end_session
)
self.year_periods = self.periods_in_range(
12, start_session, end_session
)
def to_dict(self):
"""
RiskMetrics are calculated for rolling windows in four lengths::
- 1_month
- 3_month
- 6_month
- 12_month
The return value of this function is a dictionary keyed by the above
list of durations. The value of each entry is a list of RiskMetric
dicts of the same duration as denoted by the top_level key.
See :py:meth:`RiskMetrics.to_dict` for the detailed list of fields
provided for each period.
"""
return {
'one_month': [x.to_dict() for x in self.month_periods],
'three_month': [x.to_dict() for x in self.three_month_periods],
'six_month': [x.to_dict() for x in self.six_month_periods],
'twelve_month': [x.to_dict() for x in self.year_periods],
}
def periods_in_range(self, months_per, start_session, end_session):
one_day = datetime.timedelta(days=1)
ends = []
cur_start = start_session.replace(day=1)
# in edge cases (all sids filtered out, start/end are adjacent)
# a test will not generate any returns data
if len(self.algorithm_returns) == 0:
return ends
# ensure that we have an end at the end of a calendar month, in case
# the return series ends mid-month...
the_end = end_session.replace(day=1) + relativedelta(months=1) - \
one_day
while True:
cur_end = cur_start + relativedelta(months=months_per) - one_day
if cur_end > the_end:
break
cur_period_metrics = RiskMetricsPeriod(
start_session=cur_start,
end_session=cur_end,
returns=self.algorithm_returns,
benchmark_returns=self.benchmark_returns,
trading_calendar=self.trading_calendar,
treasury_curves=self.treasury_curves,
algorithm_leverages=self.algorithm_leverages,
)
ends.append(cur_period_metrics)
cur_start = cur_start + relativedelta(months=1)
        return ends

# ---------------------------------------------------------------------------
# zipline-live 1.1.0.5 :: zipline/finance/performance/tracker.py
# ---------------------------------------------------------------------------
from __future__ import division
import logbook
import pandas as pd
from pandas.tseries.tools import normalize_date
from zipline.finance.performance.period import PerformancePeriod
from zipline.errors import NoFurtherDataError
import zipline.finance.risk as risk
from . position_tracker import PositionTracker
log = logbook.Logger('Performance')
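# Illustrative sketch (not part of the original module): mirrors how
# handle_minute_close compounds intraday benchmark returns into a single
# since-open return. `_demo_benchmark_since_open` is a hypothetical name.
def _demo_benchmark_since_open():
    minute_returns = pd.Series([0.001, -0.0005, 0.002])
    since_open = (1. + minute_returns).prod() - 1
    assert abs(since_open - (1.001 * 0.9995 * 1.002 - 1)) < 1e-12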
class PerformanceTracker(object):
"""
Tracks the performance of the algorithm.
"""
def __init__(self, sim_params, trading_calendar, env):
self.sim_params = sim_params
self.trading_calendar = trading_calendar
self.asset_finder = env.asset_finder
self.treasury_curves = env.treasury_curves
self.period_start = self.sim_params.start_session
self.period_end = self.sim_params.end_session
self.last_close = self.sim_params.last_close
self._current_session = self.sim_params.start_session
self.market_open, self.market_close = \
self.trading_calendar.open_and_close_for_session(
self._current_session
)
self.total_session_count = len(self.sim_params.sessions)
self.capital_base = self.sim_params.capital_base
self.emission_rate = sim_params.emission_rate
self.position_tracker = PositionTracker(
data_frequency=self.sim_params.data_frequency
)
if self.emission_rate == 'daily':
self.all_benchmark_returns = pd.Series(
index=self.sim_params.sessions
)
self.cumulative_risk_metrics = \
risk.RiskMetricsCumulative(
self.sim_params,
self.treasury_curves,
self.trading_calendar
)
elif self.emission_rate == 'minute':
self.all_benchmark_returns = pd.Series(index=pd.date_range(
self.sim_params.first_open, self.sim_params.last_close,
freq='Min')
)
self.cumulative_risk_metrics = \
risk.RiskMetricsCumulative(
self.sim_params,
self.treasury_curves,
self.trading_calendar,
create_first_day_stats=True
)
# this performance period will span the entire simulation from
# inception.
self.cumulative_performance = PerformancePeriod(
# initial cash is your capital base.
starting_cash=self.capital_base,
data_frequency=self.sim_params.data_frequency,
# the cumulative period will be calculated over the entire test.
period_open=self.period_start,
period_close=self.period_end,
# don't save the transactions for the cumulative
# period
keep_transactions=False,
keep_orders=False,
# don't serialize positions for cumulative period
serialize_positions=False,
name="Cumulative"
)
self.cumulative_performance.position_tracker = self.position_tracker
# this performance period will span just the current market day
self.todays_performance = PerformancePeriod(
# initial cash is your capital base.
starting_cash=self.capital_base,
data_frequency=self.sim_params.data_frequency,
# the daily period will be calculated for the market day
period_open=self.market_open,
period_close=self.market_close,
keep_transactions=True,
keep_orders=True,
serialize_positions=True,
name="Daily"
)
self.todays_performance.position_tracker = self.position_tracker
self.saved_dt = self.period_start
# one indexed so that we reach 100%
self.session_count = 0.0
self.txn_count = 0
self.account_needs_update = True
self._account = None
def __repr__(self):
return "%s(%r)" % (
self.__class__.__name__,
{'simulation parameters': self.sim_params})
@property
def progress(self):
if self.emission_rate == 'minute':
# Fake a value
return 1.0
elif self.emission_rate == 'daily':
return self.session_count / self.total_session_count
def set_date(self, date):
if self.emission_rate == 'minute':
self.saved_dt = date
self.todays_performance.period_close = self.saved_dt
def get_portfolio(self, performance_needs_update):
if performance_needs_update:
self.update_performance()
self.account_needs_update = True
return self.cumulative_performance.as_portfolio()
def update_performance(self):
# calculate performance as of last trade
self.cumulative_performance.calculate_performance()
self.todays_performance.calculate_performance()
def get_account(self, performance_needs_update):
if performance_needs_update:
self.update_performance()
self.account_needs_update = True
if self.account_needs_update:
self._update_account()
return self._account
def _update_account(self):
self._account = self.cumulative_performance.as_account()
self.account_needs_update = False
def to_dict(self, emission_type=None):
"""
Creates a dictionary representing the state of this tracker.
Returns a dict object of the form described in header comments.
"""
# Default to the emission rate of this tracker if no type is provided
if emission_type is None:
emission_type = self.emission_rate
_dict = {
'period_start': self.period_start,
'period_end': self.period_end,
'capital_base': self.capital_base,
'cumulative_perf': self.cumulative_performance.to_dict(),
'progress': self.progress,
'cumulative_risk_metrics': self.cumulative_risk_metrics.to_dict()
}
if emission_type == 'daily':
_dict['daily_perf'] = self.todays_performance.to_dict()
elif emission_type == 'minute':
_dict['minute_perf'] = self.todays_performance.to_dict(
self.saved_dt)
else:
raise ValueError("Invalid emission type: %s" % emission_type)
return _dict
def prepare_capital_change(self, is_interday):
self.cumulative_performance.initialize_subperiod_divider()
if not is_interday:
# Change comes in the middle of day
self.todays_performance.initialize_subperiod_divider()
def process_capital_change(self, capital_change_amount, is_interday):
self.cumulative_performance.set_current_subperiod_starting_values(
capital_change_amount)
if is_interday:
# Change comes between days
self.todays_performance.adjust_period_starting_capital(
capital_change_amount)
else:
# Change comes in the middle of day
self.todays_performance.set_current_subperiod_starting_values(
capital_change_amount)
def process_transaction(self, transaction):
self.txn_count += 1
self.cumulative_performance.handle_execution(transaction)
self.todays_performance.handle_execution(transaction)
self.position_tracker.execute_transaction(transaction)
def handle_splits(self, splits):
leftover_cash = self.position_tracker.handle_splits(splits)
if leftover_cash > 0:
self.cumulative_performance.handle_cash_payment(leftover_cash)
self.todays_performance.handle_cash_payment(leftover_cash)
def process_order(self, event):
self.cumulative_performance.record_order(event)
self.todays_performance.record_order(event)
def process_commission(self, commission):
asset = commission['asset']
cost = commission['cost']
self.position_tracker.handle_commission(asset, cost)
self.cumulative_performance.handle_commission(cost)
self.todays_performance.handle_commission(cost)
def process_close_position(self, asset, dt, data_portal):
txn = self.position_tracker.\
maybe_create_close_position_transaction(asset, dt, data_portal)
if txn:
self.process_transaction(txn)
def check_upcoming_dividends(self, next_session, adjustment_reader):
"""
Check if we currently own any stocks with dividends whose ex_date is
        the next trading day. Track how much we should be paid on those
dividends' pay dates.
Then check if we are owed cash/stock for any dividends whose pay date
is the next trading day. Apply all such benefits, then recalculate
performance.
"""
if adjustment_reader is None:
return
position_tracker = self.position_tracker
held_sids = set(position_tracker.positions)
# Dividends whose ex_date is the next trading day. We need to check if
# we own any of these stocks so we know to pay them out when the pay
# date comes.
if held_sids:
cash_dividends = adjustment_reader.get_dividends_with_ex_date(
held_sids,
next_session,
self.asset_finder
)
stock_dividends = adjustment_reader.\
get_stock_dividends_with_ex_date(
held_sids,
next_session,
self.asset_finder
)
position_tracker.earn_dividends(
cash_dividends,
stock_dividends
)
net_cash_payment = position_tracker.pay_dividends(next_session)
if not net_cash_payment:
return
self.cumulative_performance.handle_dividends_paid(net_cash_payment)
self.todays_performance.handle_dividends_paid(net_cash_payment)
def handle_minute_close(self, dt, data_portal):
"""
Handles the close of the given minute in minute emission.
Parameters
        ----------
dt : Timestamp
The minute that is ending
Returns
        -------
A minute perf packet.
"""
self.position_tracker.sync_last_sale_prices(dt, False, data_portal)
self.update_performance()
todays_date = normalize_date(dt)
account = self.get_account(False)
bench_returns = self.all_benchmark_returns.loc[todays_date:dt]
# cumulative returns
bench_since_open = (1. + bench_returns).prod() - 1
self.cumulative_risk_metrics.update(todays_date,
self.todays_performance.returns,
bench_since_open,
account.leverage)
minute_packet = self.to_dict(emission_type='minute')
return minute_packet
def handle_market_close(self, dt, data_portal):
"""
Handles the close of the given day, in both minute and daily emission.
In daily emission, also updates performance, benchmark and risk metrics
as it would in handle_minute_close if it were minute emission.
Parameters
        ----------
dt : Timestamp
The minute that is ending
Returns
        -------
A daily perf packet.
"""
completed_session = self._current_session
if self.emission_rate == 'daily':
# this method is called for both minutely and daily emissions, but
# this chunk of code here only applies for daily emissions. (since
# it's done every minute, elsewhere, for minutely emission).
self.position_tracker.sync_last_sale_prices(dt, False, data_portal)
self.update_performance()
account = self.get_account(False)
benchmark_value = self.all_benchmark_returns[completed_session]
self.cumulative_risk_metrics.update(
completed_session,
self.todays_performance.returns,
benchmark_value,
account.leverage)
# increment the day counter before we move markers forward.
self.session_count += 1.0
# Get the next trading day and, if it is past the bounds of this
# simulation, return the daily perf packet
try:
next_session = self.trading_calendar.next_session_label(
completed_session
)
except NoFurtherDataError:
next_session = None
# Take a snapshot of our current performance to return to the
# browser.
daily_update = self.to_dict(emission_type='daily')
# On the last day of the test, don't create tomorrow's performance
# period. We may not be able to find the next trading day if we're at
# the end of our historical data
if self.market_close >= self.last_close:
return daily_update
# If the next trading day is irrelevant, then return the daily packet
if (next_session is None) or (next_session >= self.last_close):
return daily_update
# move the market day markers forward
# TODO Is this redundant with next_trading_day above?
self._current_session = next_session
self.market_open, self.market_close = \
self.trading_calendar.open_and_close_for_session(
self._current_session
)
# Roll over positions to current day.
self.todays_performance.rollover()
self.todays_performance.period_open = self.market_open
self.todays_performance.period_close = self.market_close
# Check for any dividends, then return the daily perf packet
self.check_upcoming_dividends(
next_session=next_session,
adjustment_reader=data_portal._adjustment_reader
)
return daily_update
def handle_simulation_end(self):
"""
When the simulation is complete, run the full period risk report
and send it out on the results socket.
"""
log_msg = "Simulated {n} trading days out of {m}."
log.info(log_msg.format(n=int(self.session_count),
m=self.total_session_count))
log.info("first open: {d}".format(
d=self.sim_params.first_open))
log.info("last close: {d}".format(
d=self.sim_params.last_close))
bms = pd.Series(
index=self.cumulative_risk_metrics.cont_index,
data=self.cumulative_risk_metrics.benchmark_returns_cont)
ars = pd.Series(
index=self.cumulative_risk_metrics.cont_index,
data=self.cumulative_risk_metrics.algorithm_returns_cont)
acl = self.cumulative_risk_metrics.algorithm_cumulative_leverages
risk_report = risk.RiskReport(
ars,
self.sim_params,
benchmark_returns=bms,
algorithm_leverages=acl,
trading_calendar=self.trading_calendar,
treasury_curves=self.treasury_curves,
)
        return risk_report.to_dict()

# ---------------------------------------------------------------------------
# zipline-live 1.1.0.5 :: zipline/finance/performance/position_tracker.py
# ---------------------------------------------------------------------------
from __future__ import division
import logbook
import numpy as np
from collections import namedtuple
from math import isnan
from six import iteritems, itervalues
from zipline.finance.performance.position import Position
from zipline.finance.transaction import Transaction
from zipline.utils.input_validation import expect_types
import zipline.protocol as zp
from zipline.assets import (
Future,
Asset
)
from . position import positiondict
log = logbook.Logger('Performance')
PositionStats = namedtuple('PositionStats',
['net_exposure',
'gross_value',
'gross_exposure',
'short_value',
'short_exposure',
'shorts_count',
'long_value',
'long_exposure',
'longs_count',
'net_value'])
def calc_position_values(positions):
values = []
for position in positions:
if isinstance(position.asset, Future):
# Futures don't have an inherent position value.
values.append(0.0)
else:
values.append(position.last_sale_price * position.amount)
return values
def calc_net(values):
# Returns 0.0 if there are no values.
return sum(values, np.float64())
def calc_position_exposures(positions):
exposures = []
for position in positions:
exposure = position.amount * position.last_sale_price
if isinstance(position.asset, Future):
exposure *= position.asset.multiplier
exposures.append(exposure)
return exposures
def calc_long_value(position_values):
return sum(i for i in position_values if i > 0)
def calc_short_value(position_values):
return sum(i for i in position_values if i < 0)
def calc_long_exposure(position_exposures):
return sum(i for i in position_exposures if i > 0)
def calc_short_exposure(position_exposures):
return sum(i for i in position_exposures if i < 0)
def calc_longs_count(position_exposures):
return sum(1 for i in position_exposures if i > 0)
def calc_shorts_count(position_exposures):
return sum(1 for i in position_exposures if i < 0)
def calc_gross_exposure(long_exposure, short_exposure):
return long_exposure + abs(short_exposure)
def calc_gross_value(long_value, short_value):
return long_value + abs(short_value)
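# Illustrative sketch (not part of the original module): exercises the
# long/short helpers above with stand-in positions. _FakeAsset and
# _FakePosition are invented for this example; real positions hold zipline
# Asset objects.
def _demo_long_short_helpers():
    from collections import namedtuple
    _FakeAsset = namedtuple('_FakeAsset', 'symbol')
    _FakePosition = namedtuple('_FakePosition',
                               'asset amount last_sale_price')
    positions = [
        _FakePosition(_FakeAsset('AAPL'), 100, 10.0),   # long:  +1000.0
        _FakePosition(_FakeAsset('MSFT'), -50, 20.0),   # short: -1000.0
    ]
    values = calc_position_values(positions)
    exposures = calc_position_exposures(positions)  # equities: same as values
    assert calc_long_value(values) == 1000.0
    assert calc_short_value(values) == -1000.0
    assert calc_gross_value(1000.0, -1000.0) == 2000.0
    assert calc_net(values) == 0.0
    assert calc_longs_count(exposures) == 1
    assert calc_shorts_count(exposures) == 1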
class PositionTracker(object):
def __init__(self, data_frequency):
# asset => position object
self.positions = positiondict()
self._unpaid_dividends = {}
self._unpaid_stock_dividends = {}
self._positions_store = zp.Positions()
self.data_frequency = data_frequency
@expect_types(asset=Asset)
def update_position(self, asset, amount=None, last_sale_price=None,
last_sale_date=None, cost_basis=None):
if asset not in self.positions:
position = Position(asset)
self.positions[asset] = position
else:
position = self.positions[asset]
if amount is not None:
position.amount = amount
if last_sale_price is not None:
position.last_sale_price = last_sale_price
if last_sale_date is not None:
position.last_sale_date = last_sale_date
if cost_basis is not None:
position.cost_basis = cost_basis
def execute_transaction(self, txn):
# Update Position
# ----------------
asset = txn.asset
if asset not in self.positions:
position = Position(asset)
self.positions[asset] = position
else:
position = self.positions[asset]
position.update(txn)
if position.amount == 0:
del self.positions[asset]
try:
# if this position exists in our user-facing dictionary,
# remove it as well.
del self._positions_store[asset]
except KeyError:
pass
@expect_types(asset=Asset)
def handle_commission(self, asset, cost):
# Adjust the cost basis of the stock if we own it
if asset in self.positions:
self.positions[asset].adjust_commission_cost_basis(asset, cost)
def handle_splits(self, splits):
"""
Processes a list of splits by modifying any positions as needed.
Parameters
----------
splits: list
A list of splits. Each split is a tuple of (asset, ratio).
Returns
-------
        int: The leftover cash from fractional shares after modifying each
position.
"""
total_leftover_cash = 0
for asset, ratio in splits:
if asset in self.positions:
# Make the position object handle the split. It returns the
# leftover cash from a fractional share, if there is any.
position = self.positions[asset]
leftover_cash = position.handle_split(asset, ratio)
total_leftover_cash += leftover_cash
return total_leftover_cash
def earn_dividends(self, dividends, stock_dividends):
"""
Given a list of dividends whose ex_dates are all the next trading day,
calculate and store the cash and/or stock payments to be paid on each
dividend's pay date.
Parameters
----------
dividends: iterable of (asset, amount, pay_date) namedtuples
stock_dividends: iterable of (asset, payment_asset, ratio, pay_date)
namedtuples.
"""
for dividend in dividends:
# Store the earned dividends so that they can be paid on the
# dividends' pay_dates.
div_owed = self.positions[dividend.asset].earn_dividend(dividend)
try:
self._unpaid_dividends[dividend.pay_date].append(div_owed)
except KeyError:
self._unpaid_dividends[dividend.pay_date] = [div_owed]
for stock_dividend in stock_dividends:
div_owed = \
self.positions[stock_dividend.asset].earn_stock_dividend(
stock_dividend)
try:
self._unpaid_stock_dividends[stock_dividend.pay_date].\
append(div_owed)
except KeyError:
self._unpaid_stock_dividends[stock_dividend.pay_date] = \
[div_owed]
def pay_dividends(self, next_trading_day):
"""
Returns a cash payment based on the dividends that should be paid out
according to the accumulated bookkeeping of earned, unpaid, and stock
dividends.
"""
net_cash_payment = 0.0
try:
payments = self._unpaid_dividends[next_trading_day]
# Mark these dividends as paid by dropping them from our unpaid
del self._unpaid_dividends[next_trading_day]
except KeyError:
payments = []
        # The payment amounts can be negative for short positions,
        # representing the fact that we're required to reimburse the owner of
        # the stock for any dividends paid while borrowing.
for payment in payments:
net_cash_payment += payment['amount']
# Add stock for any stock dividends paid. Again, the values here may
# be negative in the case of short positions.
try:
stock_payments = self._unpaid_stock_dividends[next_trading_day]
        except KeyError:
stock_payments = []
for stock_payment in stock_payments:
payment_asset = stock_payment['payment_asset']
share_count = stock_payment['share_count']
# note we create a Position for stock dividend if we don't
# already own the asset
if payment_asset in self.positions:
position = self.positions[payment_asset]
else:
position = self.positions[payment_asset] = \
Position(payment_asset)
position.amount += share_count
return net_cash_payment
def maybe_create_close_position_transaction(self, asset, dt, data_portal):
if not self.positions.get(asset):
return None
amount = self.positions.get(asset).amount
price = data_portal.get_spot_value(
asset, 'price', dt, self.data_frequency)
# Get the last traded price if price is no longer available
if isnan(price):
price = self.positions.get(asset).last_sale_price
txn = Transaction(
asset=asset,
amount=(-1 * amount),
dt=dt,
price=price,
commission=0,
order_id=None,
)
return txn
def get_positions(self):
positions = self._positions_store
for asset, pos in iteritems(self.positions):
if pos.amount == 0:
# Clear out the position if it has become empty since the last
# time get_positions was called. Catching the KeyError is
# faster than checking `if asset in positions`, and this can be
# potentially called in a tight inner loop.
try:
del positions[asset]
except KeyError:
pass
continue
position = zp.Position(asset)
position.amount = pos.amount
position.cost_basis = pos.cost_basis
position.last_sale_price = pos.last_sale_price
position.last_sale_date = pos.last_sale_date
# Adds the new position if we didn't have one before, or overwrite
# one we have currently
positions[asset] = position
return positions
def get_positions_list(self):
positions = []
for asset, pos in iteritems(self.positions):
if pos.amount != 0:
positions.append(pos.to_dict())
return positions
def sync_last_sale_prices(self, dt, handle_non_market_minutes,
data_portal):
if not handle_non_market_minutes:
for asset, position in iteritems(self.positions):
last_sale_price = data_portal.get_spot_value(
asset, 'price', dt, self.data_frequency
)
if not np.isnan(last_sale_price):
position.last_sale_price = last_sale_price
else:
for asset, position in iteritems(self.positions):
last_sale_price = data_portal.get_adjusted_value(
asset,
'price',
data_portal.trading_calendar.previous_minute(dt),
dt,
self.data_frequency
)
if not np.isnan(last_sale_price):
position.last_sale_price = last_sale_price
def stats(self):
amounts = []
last_sale_prices = []
for pos in itervalues(self.positions):
amounts.append(pos.amount)
last_sale_prices.append(pos.last_sale_price)
position_values = calc_position_values(itervalues(self.positions))
position_exposures = calc_position_exposures(
itervalues(self.positions)
)
long_value = calc_long_value(position_values)
short_value = calc_short_value(position_values)
gross_value = calc_gross_value(long_value, short_value)
long_exposure = calc_long_exposure(position_exposures)
short_exposure = calc_short_exposure(position_exposures)
gross_exposure = calc_gross_exposure(long_exposure, short_exposure)
net_exposure = calc_net(position_exposures)
longs_count = calc_longs_count(position_exposures)
shorts_count = calc_shorts_count(position_exposures)
net_value = calc_net(position_values)
return PositionStats(
long_value=long_value,
gross_value=gross_value,
short_value=short_value,
long_exposure=long_exposure,
short_exposure=short_exposure,
gross_exposure=gross_exposure,
net_exposure=net_exposure,
longs_count=longs_count,
shorts_count=shorts_count,
net_value=net_value
        )

# ---------------------------------------------------------------------------
# zipline-live 1.1.0.5 :: zipline/finance/performance/period.py
# ---------------------------------------------------------------------------
from __future__ import division
import logbook
import numpy as np
from collections import namedtuple
from zipline.assets import Future
try:
# optional cython based OrderedDict
from cyordereddict import OrderedDict
except ImportError:
from collections import OrderedDict
from six import itervalues, iteritems
import zipline.protocol as zp
log = logbook.Logger('Performance')
TRADE_TYPE = zp.DATASOURCE_TYPE.TRADE
PeriodStats = namedtuple('PeriodStats',
['net_liquidation',
'gross_leverage',
'net_leverage'])
PrevSubPeriodStats = namedtuple(
'PrevSubPeriodStats', ['returns', 'pnl', 'cash_flow']
)
CurrSubPeriodStats = namedtuple(
'CurrSubPeriodStats', ['starting_value', 'starting_cash']
)
def calc_net_liquidation(ending_cash, long_value, short_value):
return ending_cash + long_value + short_value
def calc_leverage(exposure, net_liq):
if net_liq != 0:
return exposure / net_liq
return np.inf
def calc_period_stats(pos_stats, ending_cash):
net_liq = calc_net_liquidation(ending_cash,
pos_stats.long_value,
pos_stats.short_value)
gross_leverage = calc_leverage(pos_stats.gross_exposure, net_liq)
net_leverage = calc_leverage(pos_stats.net_exposure, net_liq)
return PeriodStats(
net_liquidation=net_liq,
gross_leverage=gross_leverage,
net_leverage=net_leverage)
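# Illustrative sketch (not part of the original module): calc_period_stats
# only reads four fields from its stats argument, so a stand-in namedtuple
# (invented here) is enough to show the leverage arithmetic.
def _demo_period_stats():
    from collections import namedtuple
    _Stats = namedtuple('_Stats',
                        'long_value short_value gross_exposure net_exposure')
    pos_stats = _Stats(long_value=1000.0, short_value=-1000.0,
                       gross_exposure=2000.0, net_exposure=0.0)
    stats = calc_period_stats(pos_stats, ending_cash=500.0)
    assert stats.net_liquidation == 500.0   # 500 + 1000 + (-1000)
    assert stats.gross_leverage == 4.0      # 2000 / 500
    assert stats.net_leverage == 0.0        # 0 / 500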
def calc_payout(multiplier, amount, old_price, price):
return (price - old_price) * multiplier * amount
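# Illustrative sketch (not part of the original module): a futures payout is
# the price move scaled by the contract multiplier and the position size.
def _demo_futures_payout():
    # 2 contracts with multiplier 50; price moves from 100.0 to 101.5.
    assert calc_payout(50, 2, 100.0, 101.5) == 150.0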
class PerformancePeriod(object):
def __init__(
self,
starting_cash,
data_frequency,
period_open=None,
period_close=None,
keep_transactions=True,
keep_orders=False,
serialize_positions=True,
name=None):
self.data_frequency = data_frequency
# Start and end of the entire period
self.period_open = period_open
self.period_close = period_close
self.initialize(starting_cash=starting_cash,
starting_value=0.0,
starting_exposure=0.0)
self.ending_value = 0.0
self.ending_exposure = 0.0
self.ending_cash = starting_cash
self.subperiod_divider = None
# Keyed by asset, the previous last sale price of positions with
# payouts on price differences, e.g. Futures.
#
        # This is not simply the previous minute's price; it is the last sale
        # price either from before the period start, or from the most recent
        # execution.
self._payout_last_sale_prices = {}
self.keep_transactions = keep_transactions
self.keep_orders = keep_orders
self.name = name
# An object to recycle via assigning new values
# when returning portfolio information.
        # So as to avoid creating a new object for each event
self._portfolio_store = zp.Portfolio()
self._account_store = zp.Account()
self.serialize_positions = serialize_positions
_position_tracker = None
def initialize(self, starting_cash, starting_value, starting_exposure):
# Performance stats for the entire period, returned externally
self.pnl = 0.0
self.returns = 0.0
self.cash_flow = 0.0
self.starting_value = starting_value
self.starting_exposure = starting_exposure
self.starting_cash = starting_cash
# The cumulative capital change occurred within the period
self._total_intraperiod_capital_change = 0.0
self.processed_transactions = {}
self.orders_by_modified = {}
self.orders_by_id = OrderedDict()
@property
def position_tracker(self):
return self._position_tracker
@position_tracker.setter
def position_tracker(self, obj):
if obj is None:
raise ValueError("position_tracker can not be None")
self._position_tracker = obj
# we only calculate perf once we inject PositionTracker
self.calculate_performance()
def adjust_period_starting_capital(self, capital_change):
self.ending_cash += capital_change
self.starting_cash += capital_change
def rollover(self):
# We are starting a new period
self.initialize(starting_cash=self.ending_cash,
starting_value=self.ending_value,
starting_exposure=self.ending_exposure)
self.subperiod_divider = None
payout_assets = self._payout_last_sale_prices.keys()
for asset in payout_assets:
if asset in self._payout_last_sale_prices:
self._payout_last_sale_prices[asset] = \
self.position_tracker.positions[asset].last_sale_price
else:
del self._payout_last_sale_prices[asset]
def initialize_subperiod_divider(self):
self.calculate_performance()
# Initialize a subperiod divider to stash the current performance
# values. Current period starting values are set to equal ending values
# of the previous subperiod
self.subperiod_divider = SubPeriodDivider(
prev_returns=self.returns,
prev_pnl=self.pnl,
prev_cash_flow=self.cash_flow,
curr_starting_value=self.ending_value,
curr_starting_cash=self.ending_cash
)
def set_current_subperiod_starting_values(self, capital_change):
# Apply the capital change to the ending cash
self.ending_cash += capital_change
# Increment the total capital change occurred within the period
self._total_intraperiod_capital_change += capital_change
# Update the current subperiod starting cash to reflect the capital
# change
starting_value = self.subperiod_divider.curr_subperiod.starting_value
self.subperiod_divider.curr_subperiod = CurrSubPeriodStats(
starting_value=starting_value,
starting_cash=self.ending_cash)
def handle_dividends_paid(self, net_cash_payment):
if net_cash_payment:
self.handle_cash_payment(net_cash_payment)
self.calculate_performance()
def handle_cash_payment(self, payment_amount):
self.adjust_cash(payment_amount)
def handle_commission(self, cost):
# Deduct from our total cash pool.
self.adjust_cash(-cost)
def adjust_cash(self, amount):
self.cash_flow += amount
def adjust_field(self, field, value):
setattr(self, field, value)
def _get_payout_total(self, positions):
payouts = []
for asset, old_price in iteritems(self._payout_last_sale_prices):
pos = positions[asset]
amount = pos.amount
payout = calc_payout(
asset.multiplier,
amount,
old_price,
pos.last_sale_price)
payouts.append(payout)
return sum(payouts)
def calculate_performance(self):
pt = self.position_tracker
pos_stats = pt.stats()
self.ending_value = pos_stats.net_value
self.ending_exposure = pos_stats.net_exposure
payout = self._get_payout_total(pt.positions)
self.ending_cash = self.starting_cash + self.cash_flow + \
self._total_intraperiod_capital_change + payout
total_at_end = self.ending_cash + self.ending_value
# If there is a previous subperiod, the performance is calculated
# from the previous and current subperiods. Otherwise, the performance
# is calculated based on the start and end values of the whole period
if self.subperiod_divider:
starting_cash = self.subperiod_divider.curr_subperiod.starting_cash
total_at_start = starting_cash + \
self.subperiod_divider.curr_subperiod.starting_value
# Performance for this subperiod
pnl = total_at_end - total_at_start
if total_at_start != 0:
returns = pnl / total_at_start
else:
returns = 0.0
# Performance for this whole period
self.pnl = self.subperiod_divider.prev_subperiod.pnl + pnl
self.returns = \
(1 + self.subperiod_divider.prev_subperiod.returns) * \
(1 + returns) - 1
else:
total_at_start = self.starting_cash + self.starting_value
self.pnl = total_at_end - total_at_start
if total_at_start != 0:
self.returns = self.pnl / total_at_start
else:
self.returns = 0.0
def record_order(self, order):
if self.keep_orders:
try:
dt_orders = self.orders_by_modified[order.dt]
if order.id in dt_orders:
del dt_orders[order.id]
except KeyError:
self.orders_by_modified[order.dt] = dt_orders = OrderedDict()
dt_orders[order.id] = order
# to preserve the order of the orders by modified date
# we delete and add back. (ordered dictionary is sorted by
# first insertion date).
if order.id in self.orders_by_id:
del self.orders_by_id[order.id]
self.orders_by_id[order.id] = order
def handle_execution(self, txn):
self.cash_flow += self._calculate_execution_cash_flow(txn)
asset = txn.asset
if isinstance(asset, Future):
try:
old_price = self._payout_last_sale_prices[asset]
pos = self.position_tracker.positions[asset]
amount = pos.amount
price = txn.price
cash_adj = calc_payout(
asset.multiplier, amount, old_price, price)
self.adjust_cash(cash_adj)
if amount + txn.amount == 0:
del self._payout_last_sale_prices[asset]
else:
self._payout_last_sale_prices[asset] = price
except KeyError:
self._payout_last_sale_prices[asset] = txn.price
if self.keep_transactions:
try:
self.processed_transactions[txn.dt].append(txn)
except KeyError:
self.processed_transactions[txn.dt] = [txn]
@staticmethod
def _calculate_execution_cash_flow(txn):
"""
Calculates the cash flow from executing the given transaction
"""
if isinstance(txn.asset, Future):
return 0.0
return -1 * txn.price * txn.amount
# backwards compat. TODO: remove?
@property
def positions(self):
return self.position_tracker.positions
@property
def position_amounts(self):
return self.position_tracker.position_amounts
def __core_dict(self):
pos_stats = self.position_tracker.stats()
period_stats = calc_period_stats(pos_stats, self.ending_cash)
rval = {
'ending_value': self.ending_value,
'ending_exposure': self.ending_exposure,
# this field is renamed to capital_used for backward
# compatibility.
'capital_used': self.cash_flow,
'starting_value': self.starting_value,
'starting_exposure': self.starting_exposure,
'starting_cash': self.starting_cash,
'ending_cash': self.ending_cash,
'portfolio_value': self.ending_cash + self.ending_value,
'pnl': self.pnl,
'returns': self.returns,
'period_open': self.period_open,
'period_close': self.period_close,
'gross_leverage': period_stats.gross_leverage,
'net_leverage': period_stats.net_leverage,
'short_exposure': pos_stats.short_exposure,
'long_exposure': pos_stats.long_exposure,
'short_value': pos_stats.short_value,
'long_value': pos_stats.long_value,
'longs_count': pos_stats.longs_count,
'shorts_count': pos_stats.shorts_count,
}
return rval
def to_dict(self, dt=None):
"""
Creates a dictionary representing the state of this performance
period. See header comments for a detailed description.
Kwargs:
dt (datetime): If present, only return transactions for the dt.
"""
rval = self.__core_dict()
if self.serialize_positions:
positions = self.position_tracker.get_positions_list()
rval['positions'] = positions
# we want the key to be absent, not just empty
if self.keep_transactions:
if dt:
# Only include transactions for given dt
try:
transactions = [x.to_dict()
for x in self.processed_transactions[dt]]
except KeyError:
transactions = []
else:
transactions = \
[y.to_dict()
for x in itervalues(self.processed_transactions)
for y in x]
rval['transactions'] = transactions
if self.keep_orders:
if dt:
# only include orders modified as of the given dt.
try:
orders = [x.to_dict()
for x in itervalues(self.orders_by_modified[dt])]
except KeyError:
orders = []
else:
orders = [x.to_dict() for x in itervalues(self.orders_by_id)]
rval['orders'] = orders
return rval
def as_portfolio(self):
"""
The purpose of this method is to provide a portfolio
object to algorithms running inside the same trading
client. The data needed is captured raw in a
PerformancePeriod, and in this method we rename some
fields for usability and remove extraneous fields.
"""
# Recycles containing objects' Portfolio object
# which is used for returning values.
# as_portfolio is called in an inner loop,
# so repeated object creation becomes too expensive
portfolio = self._portfolio_store
# maintaining the old name for the portfolio field for
# backward compatibility
portfolio.capital_used = self.cash_flow
portfolio.starting_cash = self.starting_cash
portfolio.portfolio_value = self.ending_cash + self.ending_value
portfolio.pnl = self.pnl
portfolio.returns = self.returns
portfolio.cash = self.ending_cash
portfolio.start_date = self.period_open
portfolio.positions = self.position_tracker.get_positions()
portfolio.positions_value = self.ending_value
portfolio.positions_exposure = self.ending_exposure
return portfolio
def as_account(self):
account = self._account_store
pt = self.position_tracker
pos_stats = pt.stats()
period_stats = calc_period_stats(pos_stats, self.ending_cash)
# If no attribute is found on the PerformancePeriod resort to the
# following default values. If an attribute is found use the existing
# value. For instance, a broker may provide updates to these
        # attributes. In this case we do not want to overwrite the broker
# values with the default values.
account.settled_cash = \
getattr(self, 'settled_cash', self.ending_cash)
account.accrued_interest = \
getattr(self, 'accrued_interest', 0.0)
account.buying_power = \
getattr(self, 'buying_power', float('inf'))
account.equity_with_loan = \
getattr(self, 'equity_with_loan',
self.ending_cash + self.ending_value)
account.total_positions_value = \
getattr(self, 'total_positions_value', self.ending_value)
account.total_positions_exposure = \
getattr(self, 'total_positions_exposure', self.ending_exposure)
account.regt_equity = \
getattr(self, 'regt_equity', self.ending_cash)
account.regt_margin = \
getattr(self, 'regt_margin', float('inf'))
account.initial_margin_requirement = \
getattr(self, 'initial_margin_requirement', 0.0)
account.maintenance_margin_requirement = \
getattr(self, 'maintenance_margin_requirement', 0.0)
account.available_funds = \
getattr(self, 'available_funds', self.ending_cash)
account.excess_liquidity = \
getattr(self, 'excess_liquidity', self.ending_cash)
account.cushion = \
getattr(self, 'cushion',
self.ending_cash / (self.ending_cash + self.ending_value))
account.day_trades_remaining = \
getattr(self, 'day_trades_remaining', float('inf'))
account.leverage = getattr(self, 'leverage',
period_stats.gross_leverage)
account.net_leverage = getattr(self, 'net_leverage',
period_stats.net_leverage)
account.net_liquidation = getattr(self, 'net_liquidation',
period_stats.net_liquidation)
return account
class SubPeriodDivider(object):
"""
A marker for subdividing the period at the latest intraperiod capital
change. prev_subperiod and curr_subperiod hold information respective to
the previous and current subperiods.
"""
def __init__(self, prev_returns, prev_pnl, prev_cash_flow,
curr_starting_value, curr_starting_cash):
self.prev_subperiod = PrevSubPeriodStats(
returns=prev_returns,
pnl=prev_pnl,
cash_flow=prev_cash_flow)
self.curr_subperiod = CurrSubPeriodStats(
starting_value=curr_starting_value,
            starting_cash=curr_starting_cash)

# ---------------------------------------------------------------------------
# zipline-live 1.1.0.5 :: zipline/finance/performance/position.py
# ---------------------------------------------------------------------------
from __future__ import division
from math import copysign
from collections import OrderedDict
import numpy as np
import logbook
from zipline.assets import Future, Asset
from zipline.utils.input_validation import expect_types
log = logbook.Logger('Performance')
class Position(object):
@expect_types(asset=Asset)
def __init__(self, asset, amount=0, cost_basis=0.0,
last_sale_price=0.0, last_sale_date=None):
self.asset = asset
self.amount = amount
self.cost_basis = cost_basis # per share
self.last_sale_price = last_sale_price
self.last_sale_date = last_sale_date
def earn_dividend(self, dividend):
"""
Register the number of shares we held at this dividend's ex date so
that we can pay out the correct amount on the dividend's pay date.
Returns a dict whose ``amount`` entry is the total cash payment.
"""
return {
'amount': self.amount * dividend.amount
}
def earn_stock_dividend(self, stock_dividend):
"""
Register the number of shares we held at this dividend's ex date so
that we can pay out the correct amount on the dividend's pay date.
"""
return {
'payment_asset': stock_dividend.payment_asset,
'share_count': np.floor(
self.amount * float(stock_dividend.ratio)
)
}
@expect_types(asset=Asset)
def handle_split(self, asset, ratio):
"""
Update the position by the split ratio, and return the cash value
of any resulting fractional share.
"""
if self.asset != asset:
raise Exception("updating split with the wrong asset!")
# adjust the # of shares by the ratio
# (if we had 100 shares, and the ratio is 3,
# we now have 33 shares)
# (old_share_count / ratio = new_share_count)
# (old_price * ratio = new_price)
# e.g., 33.333
raw_share_count = self.amount / float(ratio)
# e.g., 33
full_share_count = np.floor(raw_share_count)
# e.g., 0.333
fractional_share_count = raw_share_count - full_share_count
# adjust the cost basis to the nearest cent, e.g., 60.0
new_cost_basis = round(self.cost_basis * ratio, 2)
self.cost_basis = new_cost_basis
self.amount = full_share_count
return_cash = round(float(fractional_share_count * new_cost_basis), 2)
log.info("after split: " + str(self))
log.info("returning cash: " + str(return_cash))
# return the value of the leftover fractional share, which is
# converted into cash (rounded to the nearest cent)
return return_cash
def update(self, txn):
if self.asset != txn.asset:
raise Exception('updating position with txn for a '
'different asset')
total_shares = self.amount + txn.amount
if total_shares == 0:
self.cost_basis = 0.0
else:
prev_direction = copysign(1, self.amount)
txn_direction = copysign(1, txn.amount)
if prev_direction != txn_direction:
# we're covering a short or closing a position
if abs(txn.amount) > abs(self.amount):
# we've closed the position and gone short
# or covered the short position and gone long
self.cost_basis = txn.price
else:
prev_cost = self.cost_basis * self.amount
txn_cost = txn.amount * txn.price
total_cost = prev_cost + txn_cost
self.cost_basis = total_cost / total_shares
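# e.g., holding 10 shares at a $5.00 cost basis and buying 10 more
# at $7.00 yields (10 * 5.00 + 10 * 7.00) / 20 = $6.00 per share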
# Update the last sale price if txn is
# best data we have so far
if self.last_sale_date is None or txn.dt > self.last_sale_date:
self.last_sale_price = txn.price
self.last_sale_date = txn.dt
self.amount = total_shares
@expect_types(asset=Asset)
def adjust_commission_cost_basis(self, asset, cost):
"""
A note about cost-basis in zipline: all positions are considered
to share a cost basis, even if they were executed in different
transactions with different commission costs, different prices, etc.
Due to limitations about how zipline handles positions, zipline will
currently spread an externally-delivered commission charge across
all shares in a position.
"""
if asset != self.asset:
raise Exception('Updating a commission for a different asset?')
if cost == 0.0:
return
# If we no longer hold this position, there is no cost basis to
# adjust.
if self.amount == 0:
return
prev_cost = self.cost_basis * self.amount
if isinstance(asset, Future):
cost_to_use = cost / asset.multiplier
else:
cost_to_use = cost
new_cost = prev_cost + cost_to_use
self.cost_basis = new_cost / self.amount
def __repr__(self):
template = "asset: {asset}, amount: {amount}, cost_basis: {cost_basis}, \
last_sale_price: {last_sale_price}"
return template.format(
asset=self.asset,
amount=self.amount,
cost_basis=self.cost_basis,
last_sale_price=self.last_sale_price
)
def to_dict(self):
"""
Creates a dictionary representing the state of this position.
Returns a dict object with the keys ``sid``, ``amount``,
``cost_basis``, and ``last_sale_price``.
"""
return {
'sid': self.asset,
'amount': self.amount,
'cost_basis': self.cost_basis,
'last_sale_price': self.last_sale_price
}
class positiondict(OrderedDict):
def __missing__(self, key):
return None
# end of file: zipline/finance/performance/position.py
from textwrap import dedent
from numpy import (
bool_,
dtype,
float32,
float64,
int32,
int64,
int16,
uint16,
ndarray,
uint32,
uint8,
)
from zipline.errors import (
WindowLengthNotPositive,
WindowLengthTooLong,
)
from zipline.lib.labelarray import LabelArray
from zipline.utils.numpy_utils import (
datetime64ns_dtype,
float64_dtype,
int64_dtype,
uint8_dtype,
)
from zipline.utils.memoize import lazyval
# These class names are all the same because of our bootleg templating system.
from ._float64window import AdjustedArrayWindow as Float64Window
from ._int64window import AdjustedArrayWindow as Int64Window
from ._labelwindow import AdjustedArrayWindow as LabelWindow
from ._uint8window import AdjustedArrayWindow as UInt8Window
NOMASK = None
BOOL_DTYPES = frozenset(
map(dtype, [bool_]),
)
FLOAT_DTYPES = frozenset(
map(dtype, [float32, float64]),
)
INT_DTYPES = frozenset(
# NOTE: uint64 not supported because it can't be safely cast to int64.
map(dtype, [int16, uint16, int32, int64, uint32]),
)
DATETIME_DTYPES = frozenset(
map(dtype, ['datetime64[ns]', 'datetime64[D]']),
)
# We use object arrays for strings.
OBJECT_DTYPES = frozenset(map(dtype, ['O']))
STRING_KINDS = frozenset(['S', 'U'])
REPRESENTABLE_DTYPES = BOOL_DTYPES.union(
FLOAT_DTYPES,
INT_DTYPES,
DATETIME_DTYPES,
OBJECT_DTYPES,
)
def can_represent_dtype(dtype):
"""
Can we build an AdjustedArray for a baseline of ``dtype``?
"""
return dtype in REPRESENTABLE_DTYPES or dtype.kind in STRING_KINDS
def is_categorical(dtype):
"""
Do we represent this dtype with LabelArrays rather than ndarrays?
"""
return dtype in OBJECT_DTYPES or dtype.kind in STRING_KINDS
CONCRETE_WINDOW_TYPES = {
float64_dtype: Float64Window,
int64_dtype: Int64Window,
uint8_dtype: UInt8Window,
}
def _normalize_array(data, missing_value):
"""
Coerce buffer data for an AdjustedArray into a standard scalar
representation, returning the coerced array and a dict of arguments
to pass to ``ndarray.view`` when providing a user-facing view of the
underlying data.
- float* data is coerced to float64 with viewtype float64.
- int* data (int16, uint16, int32, int64, and uint32) is converted to
  int64 with viewtype int64.
- datetime[*] data is coerced to int64 with a viewtype of datetime64[ns].
- bool_ data is coerced to uint8 with a viewtype of bool_.
Parameters
----------
data : np.ndarray
Returns
-------
coerced, view_kwargs : (np.ndarray, dict)
"""
if isinstance(data, LabelArray):
return data, {}
data_dtype = data.dtype
if data_dtype == bool_:
return data.astype(uint8), {'dtype': dtype(bool_)}
elif data_dtype in FLOAT_DTYPES:
return data.astype(float64), {'dtype': dtype(float64)}
elif data_dtype in INT_DTYPES:
return data.astype(int64), {'dtype': dtype(int64)}
elif is_categorical(data_dtype):
if not isinstance(missing_value, LabelArray.SUPPORTED_SCALAR_TYPES):
raise TypeError(
"Invalid missing_value for categorical array.\n"
"Expected None, bytes or unicode. Got %r." % missing_value,
)
return LabelArray(data, missing_value), {}
elif data_dtype.kind == 'M':
try:
outarray = data.astype('datetime64[ns]').view('int64')
return outarray, {'dtype': datetime64ns_dtype}
except OverflowError:
raise ValueError(
"AdjustedArray received a datetime array "
"not representable as datetime64[ns].\n"
"Min Date: %s\n"
"Max Date: %s\n"
% (data.min(), data.max())
)
else:
raise TypeError(
"Don't know how to construct AdjustedArray "
"on data of type %s." % data_dtype
)
class AdjustedArray(object):
"""
An array that can be iterated with a variable-length window, and which can
provide different views on data from different perspectives.
Parameters
----------
data : np.ndarray
The baseline data values.
mask : np.ndarray[bool]
A boolean mask that is True where data is valid. Entries where the
mask is False are filled with ``missing_value``.
adjustments : dict[int -> list[Adjustment]]
A dict mapping row indices to lists of adjustments to apply when we
reach that row.
missing_value : object
A value to use to fill missing data in yielded windows.
Should be a value coercible to `data.dtype`.
"""
__slots__ = (
'_data',
'_view_kwargs',
'adjustments',
'missing_value',
'__weakref__',
)
def __init__(self, data, mask, adjustments, missing_value):
self._data, self._view_kwargs = _normalize_array(data, missing_value)
self.adjustments = adjustments
self.missing_value = missing_value
if mask is not NOMASK:
if mask.dtype != bool_:
raise ValueError("Mask must be a bool array.")
if data.shape != mask.shape:
raise ValueError(
"Mask shape %s != data shape %s." %
(mask.shape, data.shape),
)
self._data[~mask] = self.missing_value
@lazyval
def data(self):
"""
The data stored in this array.
"""
return self._data.view(**self._view_kwargs)
@lazyval
def dtype(self):
"""
The dtype of the data stored in this array.
"""
return self._view_kwargs.get('dtype') or self._data.dtype
@lazyval
def _iterator_type(self):
"""
The iterator produced when `traverse` is called on this Array.
"""
if isinstance(self._data, LabelArray):
return LabelWindow
return CONCRETE_WINDOW_TYPES[self._data.dtype]
def traverse(self,
window_length,
offset=0,
perspective_offset=0):
"""
Produce an iterator of rolling windows over our data.
Each emitted window will have `window_length` rows.
Parameters
----------
window_length : int
The number of rows in each emitted window.
offset : int, optional
Number of rows to skip before the first window. Default is 0.
perspective_offset : int, optional
Number of rows past the end of the current window from which to
"view" the underlying data.
"""
data = self._data.copy()
_check_window_params(data, window_length)
return self._iterator_type(
data,
self._view_kwargs,
self.adjustments,
offset,
window_length,
perspective_offset,
rounding_places=None,
)
def inspect(self):
"""
Return a string representation of the data stored in this array.
"""
return dedent(
"""\
Adjusted Array ({dtype}):
Data:
{data!r}
Adjustments:
{adjustments}
"""
).format(
dtype=self.dtype.name,
data=self.data,
adjustments=self.adjustments,
)
def ensure_adjusted_array(ndarray_or_adjusted_array, missing_value):
if isinstance(ndarray_or_adjusted_array, AdjustedArray):
return ndarray_or_adjusted_array
elif isinstance(ndarray_or_adjusted_array, ndarray):
return AdjustedArray(
ndarray_or_adjusted_array, NOMASK, {}, missing_value,
)
else:
raise TypeError(
"Can't convert %s to AdjustedArray" %
type(ndarray_or_adjusted_array).__name__
)
def ensure_ndarray(ndarray_or_adjusted_array):
"""
Return the input as a numpy ndarray.
This is a no-op if the input is already an ndarray. If the input is an
adjusted_array, this extracts a read-only view of its internal data buffer.
Parameters
----------
ndarray_or_adjusted_array : numpy.ndarray | zipline.lib.adjusted_array.AdjustedArray
Returns
-------
out : The input, converted to an ndarray.
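Examples
--------
>>> import numpy as np
>>> a = np.arange(3)
>>> ensure_ndarray(a) is a
True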
"""
if isinstance(ndarray_or_adjusted_array, ndarray):
return ndarray_or_adjusted_array
elif isinstance(ndarray_or_adjusted_array, AdjustedArray):
return ndarray_or_adjusted_array.data
else:
raise TypeError(
"Can't convert %s to ndarray" %
type(ndarray_or_adjusted_array).__name__
)
def _check_window_params(data, window_length):
"""
Check that a window of length `window_length` is well-defined on `data`.
Parameters
----------
data : np.ndarray[ndim=2]
The array of data to check.
window_length : int
Length of the desired window.
Returns
-------
None
Raises
------
WindowLengthNotPositive
If window_length < 1.
WindowLengthTooLong
If window_length is greater than the number of rows in `data`.
"""
if window_length < 1:
raise WindowLengthNotPositive(window_length=window_length)
if window_length > data.shape[0]:
raise WindowLengthTooLong(
nrows=data.shape[0],
window_length=window_length,
)
# end of file: zipline/lib/adjusted_array.py
from functools import partial
from operator import eq, ne
import re
import numpy as np
from numpy import ndarray
import pandas as pd
from toolz import compose
from zipline.utils.compat import unicode
from zipline.utils.preprocess import preprocess
from zipline.utils.sentinel import sentinel
from zipline.utils.input_validation import (
coerce,
expect_kinds,
expect_types,
optional,
)
from zipline.utils.numpy_utils import (
bool_dtype,
unsigned_int_dtype_with_size_in_bytes,
is_object,
)
from zipline.utils.pandas_utils import ignore_pandas_nan_categorical_warning
from ._factorize import (
factorize_strings,
factorize_strings_known_categories,
)
def compare_arrays(left, right):
"Eq check with a short-circuit for identical objects."
return (
left is right
or ((left.shape == right.shape) and (left == right).all())
)
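# For example, compare_arrays(a, a) returns True via the identity
# short-circuit, while compare_arrays(a, a.copy()) falls back to the
# element-wise check (illustrative; `a` is any ndarray).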
def _make_unsupported_method(name):
def method(*args, **kwargs):
raise NotImplementedError(
"Method %s is not supported on LabelArrays." % name
)
method.__name__ = name
method.__doc__ = "Unsupported LabelArray Method: %s" % name
return method
class MissingValueMismatch(ValueError):
"""
Error raised on attempt to perform operations between LabelArrays with
mismatched missing_values.
"""
def __init__(self, left, right):
super(MissingValueMismatch, self).__init__(
"LabelArray missing_values don't match:"
" left={}, right={}".format(left, right)
)
class CategoryMismatch(ValueError):
"""
Error raised on attempt to perform operations between LabelArrays with
mismatched category arrays.
"""
def __init__(self, left, right):
(mismatches,) = np.where(left != right)
assert len(mismatches), "Not actually a mismatch!"
super(CategoryMismatch, self).__init__(
"LabelArray categories don't match:\n"
"Mismatched Indices: {mismatches}\n"
"Left: {left}\n"
"Right: {right}".format(
mismatches=mismatches,
left=left[mismatches],
right=right[mismatches],
)
)
_NotPassed = sentinel('_NotPassed')
class LabelArray(ndarray):
"""
An ndarray subclass for working with arrays of strings.
Factorizes the input array into integers, but overloads equality on strings
to check against the factor label.
Parameters
----------
values : array-like
Array of values that can be passed to np.asarray with dtype=object.
missing_value : str
Scalar value to treat as 'missing' for operations on ``self``.
categories : list[str], optional
List of values to use as categories. If not supplied, categories will
be inferred as the unique set of entries in ``values``.
sort : bool, optional
Whether to sort categories. If sort is False and categories is
supplied, they are left in the order provided. If sort is False and
categories is None, categories will be constructed in a random order.
Attributes
----------
categories : ndarray[str]
An array containing the unique labels of self.
reverse_categories : dict[str -> int]
Reverse lookup table for ``categories``. Stores the index in
``categories`` at which each unique entry is found.
missing_value : str or None
A sentinel missing value with NaN semantics for comparisons.
Notes
-----
Consumers should be cautious when passing instances of LabelArray to numpy
functions. We attempt to disallow as many meaningless operations as
possible, but since a LabelArray is just an ndarray of ints with some
additional metadata, many numpy functions (for example, trigonometric) will
happily accept a LabelArray and treat its values as though they were
integers.
In a future change, we may be able to disallow more numerical operations by
creating a wrapper dtype which doesn't register an implementation for most
numpy ufuncs. Until that change is made, consumers of LabelArray should
assume that it is undefined behavior to pass a LabelArray to any numpy
ufunc that operates on semantically-numerical data.
See Also
--------
http://docs.scipy.org/doc/numpy-1.10.0/user/basics.subclassing.html
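Examples
--------
A small illustrative sketch of construction and string equality:
>>> arr = LabelArray(['a', 'b', None, 'a'], missing_value=None)
>>> (arr == 'a').tolist()
[True, False, False, True]
>>> arr.is_missing().tolist()
[False, False, True, False]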
"""
SUPPORTED_SCALAR_TYPES = (bytes, unicode, type(None))
@preprocess(
values=coerce(list, partial(np.asarray, dtype=object)),
categories=coerce(np.ndarray, list),
)
@expect_types(
values=np.ndarray,
missing_value=SUPPORTED_SCALAR_TYPES,
categories=optional(list),
)
@expect_kinds(values=("O", "S", "U"))
def __new__(cls,
values,
missing_value,
categories=None,
sort=True):
# Numpy's fixed-width string types aren't very efficient. Working with
# object arrays is faster than bytes or unicode arrays in almost all
# cases.
if not is_object(values):
values = values.astype(object)
if categories is None:
codes, categories, reverse_categories = factorize_strings(
values.ravel(),
missing_value=missing_value,
sort=sort,
)
else:
codes, categories, reverse_categories = (
factorize_strings_known_categories(
values.ravel(),
categories=categories,
missing_value=missing_value,
sort=sort,
)
)
categories.setflags(write=False)
return cls.from_codes_and_metadata(
codes=codes.reshape(values.shape),
categories=categories,
reverse_categories=reverse_categories,
missing_value=missing_value,
)
@classmethod
def from_codes_and_metadata(cls,
codes,
categories,
reverse_categories,
missing_value):
"""
Rehydrate a LabelArray from the codes and metadata.
Parameters
----------
codes : np.ndarray[integral]
The codes for the label array.
categories : np.ndarray[object]
The unique string categories.
reverse_categories : dict[str, int]
The mapping from category to its code-index.
missing_value : any
The value used to represent missing data.
"""
ret = codes.view(type=cls, dtype=np.void)
ret._categories = categories
ret._reverse_categories = reverse_categories
ret._missing_value = missing_value
return ret
@classmethod
def from_categorical(cls, categorical, missing_value=None):
"""
Create a LabelArray from a pandas categorical.
Parameters
----------
categorical : pd.Categorical
The categorical object to convert.
missing_value : bytes, unicode, or None, optional
The missing value to use for this LabelArray.
Returns
-------
la : LabelArray
The LabelArray representation of this categorical.
"""
return LabelArray(
categorical,
missing_value,
categorical.categories,
)
@property
def categories(self):
# This is a property because it should be immutable.
return self._categories
@property
def reverse_categories(self):
# This is a property because it should be immutable.
return self._reverse_categories
@property
def missing_value(self):
# This is a property because it should be immutable.
return self._missing_value
@property
def missing_value_code(self):
return self.reverse_categories[self.missing_value]
def has_label(self, value):
return value in self.reverse_categories
def __array_finalize__(self, obj):
"""
Called by Numpy after array construction.
There are three cases where this can happen:
1. Someone tries to directly construct a new array by doing::
>>> ndarray.__new__(LabelArray, ...) # doctest: +SKIP
In this case, obj will be None. We treat this as an error case and
fail.
2. Someone (most likely our own __new__) does::
>>> other_array.view(type=LabelArray) # doctest: +SKIP
In this case, ``self`` will be the new LabelArray instance, and
``obj`` will be the array on which ``view`` is being called.
The caller of ``obj.view`` is responsible for setting category
metadata on ``self`` after we exit.
3. Someone creates a new LabelArray by slicing an existing one.
In this case, ``obj`` will be the original LabelArray. We're
responsible for copying over the parent array's category metadata.
"""
if obj is None:
raise TypeError(
"Direct construction of LabelArrays is not supported."
)
# See docstring for an explanation of when these will or will not be
# set.
self._categories = getattr(obj, 'categories', None)
self._reverse_categories = getattr(obj, 'reverse_categories', None)
self._missing_value = getattr(obj, 'missing_value', None)
def as_int_array(self):
"""
Convert self into a regular ndarray of ints.
This is an O(1) operation. It does not copy the underlying data.
"""
return self.view(
type=ndarray,
dtype=unsigned_int_dtype_with_size_in_bytes(self.itemsize),
)
def as_string_array(self):
"""
Convert self back into an array of strings.
This is an O(N) operation.
"""
return self.categories[self.as_int_array()]
def as_categorical(self, name=None):
"""
Coerce self into a pandas categorical.
This is only defined on 1D arrays, since that's all pandas supports.
"""
if len(self.shape) > 1:
raise ValueError("Can't convert a 2D array to a categorical.")
with ignore_pandas_nan_categorical_warning():
return pd.Categorical.from_codes(
self.as_int_array(),
# We need to make a copy because pandas >= 0.17 fails if this
# buffer isn't writeable.
self.categories.copy(),
ordered=False,
name=name,
)
def as_categorical_frame(self, index, columns, name=None):
"""
Coerce self into a pandas DataFrame of Categoricals.
"""
if len(self.shape) != 2:
raise ValueError(
"Can't convert a non-2D LabelArray into a DataFrame."
)
expected_shape = (len(index), len(columns))
if expected_shape != self.shape:
raise ValueError(
"Can't construct a DataFrame with provided indices:\n\n"
"LabelArray shape is {actual}, but index and columns imply "
"that shape should be {expected}.".format(
actual=self.shape,
expected=expected_shape,
)
)
return pd.Series(
index=pd.MultiIndex.from_product([index, columns]),
data=self.ravel().as_categorical(name=name),
).unstack()
def __setitem__(self, indexer, value):
self_categories = self.categories
if isinstance(value, LabelArray):
value_categories = value.categories
if compare_arrays(self_categories, value_categories):
return super(LabelArray, self).__setitem__(indexer, value)
else:
raise CategoryMismatch(self_categories, value_categories)
elif isinstance(value, self.SUPPORTED_SCALAR_TYPES):
value_code = self.reverse_categories.get(value, -1)
if value_code < 0:
raise ValueError("%r is not in LabelArray categories." % value)
self.as_int_array()[indexer] = value_code
else:
raise NotImplementedError(
"Setting into a LabelArray with a value of "
"type {type} is not yet supported.".format(
type=type(value).__name__,
),
)
def __setslice__(self, i, j, sequence):
"""
This method was deprecated in Python 2.0. It predates slice objects,
but Python 2.7.11 still uses it if you implement it, which ndarray
does. In newer Pythons, __setitem__ is always called, but we need to
manually forward in py2.
"""
self.__setitem__(slice(i, j), sequence)
def __getitem__(self, indexer):
result = super(LabelArray, self).__getitem__(indexer)
if result.ndim:
# Result is still a LabelArray, so we can just return it.
return result
# Result is a scalar value, which will be an instance of np.void.
# Map it back to one of our category entries.
index = result.view(
unsigned_int_dtype_with_size_in_bytes(self.itemsize),
)
return self.categories[index]
def is_missing(self):
"""
Like isnan, but checks for locations where we store missing values.
"""
return (
self.as_int_array() == self.reverse_categories[self.missing_value]
)
def not_missing(self):
"""
Like ~isnan, but checks for locations where we store missing values.
"""
return (
self.as_int_array() != self.reverse_categories[self.missing_value]
)
def _equality_check(op):
"""
Shared code for __eq__ and __ne__, parameterized on the actual
comparison operator to use.
"""
def method(self, other):
if isinstance(other, LabelArray):
self_mv = self.missing_value
other_mv = other.missing_value
if self_mv != other_mv:
raise MissingValueMismatch(self_mv, other_mv)
self_categories = self.categories
other_categories = other.categories
if not compare_arrays(self_categories, other_categories):
raise CategoryMismatch(self_categories, other_categories)
return (
op(self.as_int_array(), other.as_int_array())
& self.not_missing()
& other.not_missing()
)
elif isinstance(other, ndarray):
# Compare to ndarrays as though we were an array of strings.
# This is fairly expensive, and should generally be avoided.
return op(self.as_string_array(), other) & self.not_missing()
elif isinstance(other, self.SUPPORTED_SCALAR_TYPES):
i = self._reverse_categories.get(other, -1)
return op(self.as_int_array(), i) & self.not_missing()
return op(super(LabelArray, self), other)
return method
__eq__ = _equality_check(eq)
__ne__ = _equality_check(ne)
del _equality_check
def view(self, dtype=_NotPassed, type=_NotPassed):
if type is _NotPassed and dtype not in (_NotPassed, self.dtype):
raise TypeError("Can't view LabelArray as another dtype.")
# The text signature on ndarray.view makes it look like the default
# values for dtype and type are `None`, but passing None explicitly has
# different semantics than not passing an arg at all, so we reconstruct
# the kwargs dict here to simulate the args not being passed at all.
kwargs = {}
if dtype is not _NotPassed:
kwargs['dtype'] = dtype
if type is not _NotPassed:
kwargs['type'] = type
return super(LabelArray, self).view(**kwargs)
# In general, we support resizing, slicing, and reshaping methods, but not
# numeric methods.
SUPPORTED_NDARRAY_METHODS = frozenset([
'base',
'compress',
'copy',
'data',
'diagonal',
'dtype',
'flat',
'flatten',
'item',
'itemset',
'itemsize',
'nbytes',
'ndim',
'ravel',
'repeat',
'reshape',
'resize',
'setflags',
'shape',
'size',
'squeeze',
'strides',
'swapaxes',
'take',
'trace',
'transpose',
'view'
])
PUBLIC_NDARRAY_METHODS = frozenset([
s for s in dir(ndarray) if not s.startswith('_')
])
# Generate failing wrappers for all unsupported methods.
locals().update(
{
method: _make_unsupported_method(method)
for method in PUBLIC_NDARRAY_METHODS - SUPPORTED_NDARRAY_METHODS
}
)
def __repr__(self):
repr_lines = repr(self.as_string_array()).splitlines()
repr_lines[0] = repr_lines[0].replace('array(', 'LabelArray(', 1)
repr_lines[-1] = repr_lines[-1].rsplit(',', 1)[0] + ')'
# The extra spaces here account for the difference in length between
# 'array(' and 'LabelArray('.
return '\n     '.join(repr_lines)
def empty_like(self, shape):
"""
Make an empty LabelArray with the same categories as ``self``, filled
with ``self.missing_value``.
"""
return type(self).from_codes_and_metadata(
codes=np.full(
shape,
self.reverse_categories[self.missing_value],
dtype=unsigned_int_dtype_with_size_in_bytes(self.itemsize),
),
categories=self.categories,
reverse_categories=self.reverse_categories,
missing_value=self.missing_value,
)
def map_predicate(self, f):
"""
Map a function from str -> bool element-wise over ``self``.
``f`` will be applied exactly once to each non-missing unique value in
``self``. Missing values will always return False.
"""
# Functions passed to this are of type str -> bool. Don't ever call
# them on None, which is the only non-str value we ever store in
# categories.
if self.missing_value is None:
def f_to_use(x):
return False if x is None else f(x)
else:
f_to_use = f
# Call f on each unique value in our categories.
results = np.vectorize(f_to_use, otypes=[bool_dtype])(self.categories)
# missing_value should produce False no matter what
results[self.reverse_categories[self.missing_value]] = False
# unpack the results from each unique value into their corresponding
# locations in our indices.
return results[self.as_int_array()]
def startswith(self, prefix):
"""
Element-wise startswith.
Parameters
----------
prefix : str
Returns
-------
matches : np.ndarray[bool]
An array with the same shape as self indicating whether each
element of self started with ``prefix``.
"""
return self.map_predicate(lambda elem: elem.startswith(prefix))
def endswith(self, suffix):
"""
Elementwise endswith.
Parameters
----------
suffix : str
Returns
-------
matches : np.ndarray[bool]
An array with the same shape as self indicating whether each
element of self ended with ``suffix``.
"""
return self.map_predicate(lambda elem: elem.endswith(suffix))
def has_substring(self, substring):
"""
Elementwise contains.
Parameters
----------
substring : str
Returns
-------
matches : np.ndarray[bool]
An array with the same shape as self indicating whether each
element of self contains ``substring``.
"""
return self.map_predicate(lambda elem: substring in elem)
@preprocess(pattern=coerce(from_=(bytes, unicode), to=re.compile))
def matches(self, pattern):
"""
Elementwise regex match.
Parameters
----------
pattern : str or compiled regex
Returns
-------
matches : np.ndarray[bool]
An array with the same shape as self indicating whether each
element of self was matched by ``pattern``.
"""
return self.map_predicate(compose(bool, pattern.match))
# These types all implement an O(N) __contains__, so pre-emptively
# coerce to `set`.
@preprocess(container=coerce((list, tuple, np.ndarray), set))
def element_of(self, container):
"""
Check if each element of self is an element of ``container``.
Parameters
----------
container : object
An object implementing a __contains__ to call on each element of
``self``.
Returns
-------
is_contained : np.ndarray[bool]
An array with the same shape as self indicating whether each
element of self was an element of ``container``.
"""
return self.map_predicate(container.__contains__)
# end of file: zipline/lib/labelarray.py
from contextlib import contextmanager
import datetime
from functools import partial
import inspect
import re
from nose.tools import ( # noqa
assert_almost_equal,
assert_almost_equals,
assert_dict_contains_subset,
assert_false,
assert_greater,
assert_greater_equal,
assert_in,
assert_is,
assert_is_instance,
assert_is_none,
assert_is_not,
assert_is_not_none,
assert_less,
assert_less_equal,
assert_multi_line_equal,
assert_not_almost_equal,
assert_not_almost_equals,
assert_not_equal,
assert_not_equals,
assert_not_in,
assert_not_is_instance,
assert_raises,
assert_raises_regexp,
assert_regexp_matches,
assert_true,
assert_tuple_equal,
)
import numpy as np
import pandas as pd
from pandas.util.testing import (
assert_frame_equal,
assert_panel_equal,
assert_series_equal,
assert_index_equal,
)
from six import iteritems, viewkeys, PY2
from toolz import dissoc, keyfilter
import toolz.curried.operator as op
from zipline.testing.core import ensure_doctest
from zipline.dispatch import dispatch
from zipline.lib.adjustment import Adjustment
from zipline.lib.labelarray import LabelArray
from zipline.utils.functional import dzip_exact, instance
from zipline.utils.math_utils import tolerant_equals
@instance
@ensure_doctest
class wildcard(object):
"""An object that compares equal to any other object.
This is useful when using :func:`~zipline.testing.predicates.assert_equal`
with a large recursive structure and some fields to be ignored.
Examples
--------
>>> wildcard == 5
True
>>> wildcard == 'ayy'
True
# reflected
>>> 5 == wildcard
True
>>> 'ayy' == wildcard
True
"""
@staticmethod
def __eq__(other):
return True
@staticmethod
def __ne__(other):
return False
def __repr__(self):
return '<%s>' % type(self).__name__
__str__ = __repr__
def keywords(func):
"""Get the argument names of a function
>>> def f(x, y=2):
... pass
>>> keywords(f)
['x', 'y']
Notes
-----
Taken from odo.utils
"""
if isinstance(func, type):
return keywords(func.__init__)
elif isinstance(func, partial):
return keywords(func.func)
return inspect.getargspec(func).args
def filter_kwargs(f, kwargs):
"""Return a dict of valid kwargs for `f` from a subset of `kwargs`
Examples
--------
>>> def f(a, b=1, c=2):
... return a + b + c
...
>>> raw_kwargs = dict(a=1, b=3, d=4)
>>> f(**raw_kwargs)
Traceback (most recent call last):
...
TypeError: f() got an unexpected keyword argument 'd'
>>> kwargs = filter_kwargs(f, raw_kwargs)
>>> f(**kwargs)
6
Notes
-----
Taken from odo.utils
"""
return keyfilter(op.contains(keywords(f)), kwargs)
def _s(word, seq, suffix='s'):
"""Adds a suffix to ``word`` if some sequence has anything other than
exactly one element.
Parameters
----------
word : str
The string to add the suffix to.
seq : sequence
The sequence to check the length of.
suffix : str, optional.
The suffix to add to ``word``
Returns
-------
maybe_plural : str
``word`` with ``suffix`` added if ``len(seq) != 1``.
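Examples
--------
>>> _s('key', ['a'])
'key'
>>> _s('key', ['a', 'b'])
'keys'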
"""
return word + (suffix if len(seq) != 1 else '')
def _fmt_path(path):
"""Format the path for final display.
Parameters
----------
path : iterable of str
The path to the values that are not equal.
Returns
-------
fmtd : str
The formatted path to put into the error message.
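Examples
--------
>>> _fmt_path(('.keys()', '[0]'))
'path: _.keys()[0]'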
"""
if not path:
return ''
return 'path: _' + ''.join(path)
def _fmt_msg(msg):
"""Format the message for final display.
Parameters
----------
msg : str
The message to show to the user to provide additional context.
Returns
-------
fmtd : str
The formatted message to put into the error message.
"""
if not msg:
return ''
return msg + '\n'
def _safe_cls_name(cls):
try:
return cls.__name__
except AttributeError:
return repr(cls)
def assert_is_subclass(subcls, cls, msg=''):
"""Assert that ``subcls`` is a subclass of ``cls``.
Parameters
----------
subcls : type
The type to check.
cls : type
The type to check ``subcls`` against.
msg : str, optional
An extra assertion message to print if this fails.
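Examples
--------
A passing check produces no output:
>>> assert_is_subclass(bool, int)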
"""
assert issubclass(subcls, cls), (
'%s is not a subclass of %s\n%s' % (
_safe_cls_name(subcls),
_safe_cls_name(cls),
msg,
)
)
def assert_regex(result, expected, msg=''):
"""Assert that ``expected`` matches the result.
Parameters
----------
result : str
The string to search.
expected : str or compiled regex
The pattern to search for in ``result``.
msg : str, optional
An extra assertion message to print if this fails.
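Examples
--------
A passing check produces no output:
>>> assert_regex('ayy, lmao', r'lmao$')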
"""
assert re.search(expected, result), (
'%s%r not found in %r' % (_fmt_msg(msg), expected, result)
)
@contextmanager
def assert_raises_regex(exc, pattern, msg=''):
"""Assert that some exception is raised in a context and that the message
matches some pattern.
Parameters
----------
exc : type or tuple[type]
The exception type or types to expect.
pattern : str or compiled regex
The pattern to search for in the str of the raised exception.
msg : str, optional
An extra assertion message to print if this fails.
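Examples
--------
A matching exception is swallowed and produces no output:
>>> with assert_raises_regex(ValueError, 'ayy'):
...     raise ValueError('ayy, lmao')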
"""
try:
yield
except exc as e:
assert re.search(pattern, str(e)), (
'%s%r not found in %r' % (_fmt_msg(msg), pattern, str(e))
)
else:
raise AssertionError('%s%s was not raised' % (_fmt_msg(msg), exc))
@dispatch(object, object)
def assert_equal(result, expected, path=(), msg='', **kwargs):
"""Assert that two objects are equal using the ``==`` operator.
Parameters
----------
result : object
The result that came from the function under test.
expected : object
The expected result.
Raises
------
AssertionError
Raised when ``result`` is not equal to ``expected``.
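Examples
--------
Passing checks produce no output; containers dispatch recursively:
>>> assert_equal(1, 1)
>>> assert_equal({'a': [1, 2]}, {'a': [1, 2]})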
"""
assert result == expected, '%s%s != %s\n%s' % (
_fmt_msg(msg),
result,
expected,
_fmt_path(path),
)
@assert_equal.register(float, float)
def assert_float_equal(result,
expected,
path=(),
msg='',
float_rtol=10e-7,
float_atol=10e-7,
float_equal_nan=True,
**kwargs):
assert tolerant_equals(
result,
expected,
rtol=float_rtol,
atol=float_atol,
equal_nan=float_equal_nan,
), '%s%s != %s with rtol=%s and atol=%s%s\n%s' % (
_fmt_msg(msg),
result,
expected,
float_rtol,
float_atol,
(' (with nan != nan)' if not float_equal_nan else ''),
_fmt_path(path),
)
def _check_sets(result, expected, msg, path, type_):
"""Compare two sets. This is used to check dictionary keys and sets.
Parameters
----------
result : set
expected : set
msg : str
path : tuple
type_ : str
The type of an element. For dict we use ``'key'`` and for set we use
``'element'``.
"""
if result != expected:
if result > expected:
diff = result - expected
msg = 'extra %s in result: %r' % (_s(type_, diff), diff)
elif result < expected:
diff = expected - result
msg = 'result is missing %s: %r' % (_s(type_, diff), diff)
else:
in_result = result - expected
in_expected = expected - result
msg = '%s only in result: %s\n%s only in expected: %s' % (
_s(type_, in_result),
in_result,
_s(type_, in_expected),
in_expected,
)
raise AssertionError(
'%s%ss do not match\n%s' % (
_fmt_msg(msg),
type_,
_fmt_path(path),
),
)
@assert_equal.register(dict, dict)
def assert_dict_equal(result, expected, path=(), msg='', **kwargs):
_check_sets(
viewkeys(result),
viewkeys(expected),
msg,
path + ('.%s()' % ('viewkeys' if PY2 else 'keys'),),
'key',
)
failures = []
for k, (resultv, expectedv) in iteritems(dzip_exact(result, expected)):
try:
assert_equal(
resultv,
expectedv,
path=path + ('[%r]' % k,),
msg=msg,
**kwargs
)
except AssertionError as e:
failures.append(str(e))
if failures:
raise AssertionError('\n'.join(failures))
@assert_equal.register(list, list)
@assert_equal.register(tuple, tuple)
def assert_sequence_equal(result, expected, path=(), msg='', **kwargs):
result_len = len(result)
expected_len = len(expected)
assert result_len == expected_len, (
'%s%s lengths do not match: %d != %d\n%s' % (
_fmt_msg(msg),
type(result).__name__,
result_len,
expected_len,
_fmt_path(path),
)
)
for n, (resultv, expectedv) in enumerate(zip(result, expected)):
assert_equal(
resultv,
expectedv,
path=path + ('[%d]' % n,),
msg=msg,
**kwargs
)
@assert_equal.register(set, set)
def assert_set_equal(result, expected, path=(), msg='', **kwargs):
_check_sets(
result,
expected,
msg,
path,
'element',
)
@assert_equal.register(np.ndarray, np.ndarray)
def assert_array_equal(result,
expected,
path=(),
msg='',
array_verbose=True,
array_decimal=None,
**kwargs):
f = (
np.testing.assert_array_equal
if array_decimal is None else
partial(np.testing.assert_array_almost_equal, decimal=array_decimal)
)
try:
f(
result,
expected,
verbose=array_verbose,
err_msg=msg,
)
except AssertionError as e:
raise AssertionError('\n'.join((str(e), _fmt_path(path))))
@assert_equal.register(LabelArray, LabelArray)
def assert_labelarray_equal(result, expected, path=(), **kwargs):
assert_equal(
result.categories,
expected.categories,
path=path + ('.categories',),
**kwargs
)
assert_equal(
result.as_int_array(),
expected.as_int_array(),
path=path + ('.as_int_array()',),
**kwargs
)
def _register_assert_equal_wrapper(type_, assert_eq):
"""Register a new check for an ndframe object.
Parameters
----------
type_ : type
The class to register an ``assert_equal`` dispatch for.
assert_eq : callable[type_, type_]
The function which checks whether the two ndframes are equal.
Returns
-------
assert_ndframe_equal : callable[type_, type_]
The wrapped function registered with ``assert_equal``.
"""
@assert_equal.register(type_, type_)
def assert_ndframe_equal(result, expected, path=(), msg='', **kwargs):
try:
assert_eq(
result,
expected,
**filter_kwargs(assert_eq, kwargs)
)
except AssertionError as e:
raise AssertionError(
_fmt_msg(msg) + '\n'.join((str(e), _fmt_path(path))),
)
return assert_ndframe_equal
assert_frame_equal = _register_assert_equal_wrapper(
pd.DataFrame,
assert_frame_equal,
)
assert_panel_equal = _register_assert_equal_wrapper(
pd.Panel,
assert_panel_equal,
)
assert_series_equal = _register_assert_equal_wrapper(
pd.Series,
assert_series_equal,
)
assert_index_equal = _register_assert_equal_wrapper(
pd.Index,
assert_index_equal,
)
@assert_equal.register(pd.Categorical, pd.Categorical)
def assert_categorical_equal(result, expected, path=(), msg='', **kwargs):
assert_equal(
result.categories,
expected.categories,
path=path + ('.categories',),
msg=msg,
**kwargs
)
assert_equal(
result.codes,
expected.codes,
path=path + ('.codes',),
msg=msg,
**kwargs
)
@assert_equal.register(Adjustment, Adjustment)
def assert_adjustment_equal(result, expected, path=(), **kwargs):
for attr in ('first_row', 'last_row', 'first_col', 'last_col', 'value'):
assert_equal(
getattr(result, attr),
getattr(expected, attr),
path=path + ('.' + attr,),
**kwargs
)
@assert_equal.register(
(datetime.datetime, np.datetime64),
(datetime.datetime, np.datetime64),
)
def assert_timestamp_and_datetime_equal(result,
expected,
path=(),
msg='',
allow_datetime_coercions=False,
compare_nat_equal=True,
**kwargs):
"""
Branch for comparing python datetime (which includes pandas Timestamp) and
np.datetime64 as equal.
Raises if the result and expected types differ, unless
``allow_datetime_coercions`` is passed as True.
"""
assert allow_datetime_coercions or type(result) == type(expected), (
"%sdatetime types (%s, %s) don't match and "
"allow_datetime_coercions was not set.\n%s" % (
_fmt_msg(msg),
type(result),
type(expected),
_fmt_path(path),
)
)
result = pd.Timestamp(result)
expected = pd.Timestamp(expected)
if compare_nat_equal and pd.isnull(result) and pd.isnull(expected):
return
assert_equal.dispatch(object, object)(
result,
expected,
path=path,
**kwargs
)
@assert_equal.register(slice, slice)
def assert_slice_equal(result, expected, path=(), msg=''):
diff_start = (
('starts are not equal: %s != %s' % (result.start, expected.start))
if result.start != expected.start else
''
)
diff_stop = (
('stops are not equal: %s != %s' % (result.stop, expected.stop))
if result.stop != expected.stop else
''
)
diff_step = (
('steps are not equal: %s != %s' % (result.step, expected.step))
if result.step != expected.step else
''
)
diffs = diff_start, diff_stop, diff_step
assert not any(diffs), '%s%s\n%s' % (
_fmt_msg(msg),
'\n'.join(filter(None, diffs)),
_fmt_path(path),
)
def assert_isidentical(result, expected, msg=''):
assert result.isidentical(expected), (
'%s%s is not identical to %s' % (_fmt_msg(msg), result, expected)
)
try:
# pull the dshape cases in
from datashape.util.testing import assert_dshape_equal
except ImportError:
pass
else:
assert_equal.funcs.update(
dissoc(assert_dshape_equal.funcs, (object, object)),
)
# end of file: zipline/testing/predicates.py
from itertools import repeat
import os
import sqlite3
from unittest import TestCase
from contextlib2 import ExitStack
from logbook import NullHandler, Logger
from six import with_metaclass, iteritems
from toolz import flip
import pandas as pd
import responses
from .core import (
create_daily_bar_data,
create_minute_bar_data,
make_simple_equity_info,
tmp_asset_finder,
tmp_dir,
)
from ..data.data_portal import (
DataPortal,
DEFAULT_MINUTE_HISTORY_PREFETCH,
DEFAULT_DAILY_HISTORY_PREFETCH,
)
from ..data.loader import (
get_benchmark_filename,
INDEX_MAPPING,
)
from ..data.minute_bars import (
BcolzMinuteBarReader,
BcolzMinuteBarWriter,
US_EQUITIES_MINUTES_PER_DAY,
FUTURES_MINUTES_PER_DAY,
)
from ..data.resample import (
minute_frame_to_session_frame,
MinuteResampleSessionBarReader
)
from ..data.us_equity_pricing import (
BcolzDailyBarReader,
BcolzDailyBarWriter,
SQLiteAdjustmentReader,
SQLiteAdjustmentWriter,
)
from ..finance.trading import TradingEnvironment
from ..utils import factory
from ..utils.classproperty import classproperty
from ..utils.final import FinalMeta, final
import zipline
from zipline.assets import Equity, Future
from zipline.finance.asset_restrictions import NoRestrictions
from zipline.pipeline import SimplePipelineEngine
from zipline.pipeline.data import USEquityPricing
from zipline.pipeline.loaders import USEquityPricingLoader
from zipline.pipeline.loaders.testing import make_seeded_random_loader
from zipline.protocol import BarData
from zipline.utils.calendars import (
get_calendar,
register_calendar)
from zipline.utils.paths import ensure_directory
zipline_dir = os.path.dirname(zipline.__file__)
class ZiplineTestCase(with_metaclass(FinalMeta, TestCase)):
"""
Shared extensions to core unittest.TestCase.
Overrides the default unittest setUp/tearDown functions with versions that
use ExitStack to correctly clean up resources, even in the face of
exceptions that occur during setUp/setUpClass.
Subclasses **should not override setUp or setUpClass**!
Instead, they should implement `init_instance_fixtures` for per-test-method
resources, and `init_class_fixtures` for per-class resources.
Resources that need to be cleaned up should be registered using
either `enter_{class,instance}_context` or `add_{class,instance}_callback`.
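A minimal sketch of the intended subclassing pattern (the resource
here is purely illustrative)::
    class MyTestCase(ZiplineTestCase):
        @classmethod
        def init_class_fixtures(cls):
            super(MyTestCase, cls).init_class_fixtures()
            # a class-scoped temporary directory, cleaned up in
            # tearDownClass via the class ExitStack
            cls.tmpdir = cls.enter_class_context(tmp_dir())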
"""
_in_setup = False
@final
@classmethod
def setUpClass(cls):
# Hold a set of all the "static" attributes on the class. These are
# things that are not populated after the class was created like
# methods or other class level attributes.
cls._static_class_attributes = set(vars(cls))
cls._class_teardown_stack = ExitStack()
try:
cls._base_init_fixtures_was_called = False
cls.init_class_fixtures()
assert cls._base_init_fixtures_was_called, (
"ZiplineTestCase.init_class_fixtures() was not called.\n"
"This probably means that you overrode init_class_fixtures"
" without calling super()."
)
except:
cls.tearDownClass()
raise
@classmethod
def init_class_fixtures(cls):
"""
Override and implement this classmethod to register resources that
should be created and/or torn down on a per-class basis.
Subclass implementations of this should always invoke this with super()
to ensure that fixture mixins work properly.
"""
if cls._in_setup:
raise ValueError(
'Called init_class_fixtures from init_instance_fixtures. '
'Did you write super(..., self).init_class_fixtures() instead'
' of super(..., self).init_instance_fixtures()?',
)
cls._base_init_fixtures_was_called = True
@final
@classmethod
def tearDownClass(cls):
# We need to get this before it's deleted by the loop.
stack = cls._class_teardown_stack
for name in set(vars(cls)) - cls._static_class_attributes:
# Remove all of the attributes that were added after the class was
# constructed. This cleans up any large test data that is class
# scoped while still allowing subclasses to access class level
# attributes.
delattr(cls, name)
stack.close()
@final
@classmethod
def enter_class_context(cls, context_manager):
"""
Enter a context manager to be exited during the tearDownClass
"""
if cls._in_setup:
raise ValueError(
'Attempted to enter a class context in init_instance_fixtures.'
'\nDid you mean to call enter_instance_context?',
)
return cls._class_teardown_stack.enter_context(context_manager)
@final
@classmethod
def add_class_callback(cls, callback, *args, **kwargs):
"""
Register a callback to be executed during tearDownClass.
Parameters
----------
callback : callable
The callback to invoke at the end of the test suite.
"""
if cls._in_setup:
raise ValueError(
'Attempted to add a class callback in init_instance_fixtures.'
'\nDid you mean to call add_instance_callback?',
)
return cls._class_teardown_stack.callback(callback, *args, **kwargs)
@final
def setUp(self):
type(self)._in_setup = True
self._pre_setup_attrs = set(vars(self))
self._instance_teardown_stack = ExitStack()
try:
self._init_instance_fixtures_was_called = False
self.init_instance_fixtures()
assert self._init_instance_fixtures_was_called, (
"ZiplineTestCase.init_instance_fixtures() was not"
" called.\n"
"This probably means that you overrode"
" init_instance_fixtures without calling super()."
)
except:
self.tearDown()
raise
finally:
type(self)._in_setup = False
def init_instance_fixtures(self):
self._init_instance_fixtures_was_called = True
@final
def tearDown(self):
# We need to get this before it's deleted by the loop.
stack = self._instance_teardown_stack
for attr in set(vars(self)) - self._pre_setup_attrs:
delattr(self, attr)
stack.close()
@final
def enter_instance_context(self, context_manager):
"""
Enter a context manager that should be exited during tearDown.
"""
return self._instance_teardown_stack.enter_context(context_manager)
@final
def add_instance_callback(self, callback):
"""
Register a callback to be executed during tearDown.
Parameters
----------
callback : callable
The callback to invoke at the end of each test.
"""
return self._instance_teardown_stack.callback(callback)
def alias(attr_name):
"""Make a fixture attribute an alias of another fixture's attribute by
default.
Parameters
----------
attr_name : str
The name of the attribute to alias.
Returns
-------
p : classproperty
A class property that does the property aliasing.
Examples
--------
>>> class C(object):
... attr = 1
...
>>> class D(C):
... attr_alias = alias('attr')
...
>>> D.attr
1
>>> D.attr_alias
1
>>> class E(D):
... attr_alias = 2
...
>>> E.attr
1
>>> E.attr_alias
2
"""
return classproperty(flip(getattr, attr_name))
class WithDefaultDateBounds(object):
"""
ZiplineTestCase mixin which makes it possible to synchronize date bounds
across fixtures.
This fixture should always be the last fixture in bases of any fixture or
test case that uses it.
Attributes
----------
START_DATE : datetime
END_DATE : datetime
The date bounds to be used for fixtures that want to have consistent
dates.
"""
START_DATE = pd.Timestamp('2006-01-03', tz='utc')
END_DATE = pd.Timestamp('2006-12-29', tz='utc')
class WithLogger(object):
"""
ZiplineTestCase mixin providing cls.log_handler as an instance-level
fixture.
After init_instance_fixtures has been called `self.log_handler` will be a
new ``logbook.NullHandler``.
Methods
-------
make_log_handler() -> logbook.LogHandler
A class method which constructs the new log handler object. By default
this will construct a ``NullHandler``.
"""
make_log_handler = NullHandler
@classmethod
def init_class_fixtures(cls):
super(WithLogger, cls).init_class_fixtures()
cls.log = Logger()
cls.log_handler = cls.enter_class_context(
cls.make_log_handler().applicationbound(),
)
class WithAssetFinder(WithDefaultDateBounds):
"""
ZiplineTestCase mixin providing cls.asset_finder as a class-level fixture.
After init_class_fixtures has been called, `cls.asset_finder` is populated
with an AssetFinder.
Attributes
----------
ASSET_FINDER_EQUITY_SIDS : iterable[int]
The default sids to construct equity data for.
ASSET_FINDER_EQUITY_SYMBOLS : iterable[str]
The default symbols to use for the equities.
ASSET_FINDER_EQUITY_START_DATE : datetime
The default start date to create equity data for. This defaults to
``START_DATE``.
ASSET_FINDER_EQUITY_END_DATE : datetime
The default end date to create equity data for. This defaults to
``END_DATE``.
Methods
-------
make_equity_info() -> pd.DataFrame
A class method which constructs the dataframe of equity info to write
to the class's asset db. By default this is empty.
make_futures_info() -> pd.DataFrame
A class method which constructs the dataframe of futures contract info
to write to the class's asset db. By default this is empty.
make_exchanges_info() -> pd.DataFrame
A class method which constructs the dataframe of exchange information
to write to the class's assets db. By default this is empty.
make_root_symbols_info() -> pd.DataFrame
A class method which constructs the dataframe of root symbols
information to write to the class's assets db. By default this is
empty.
make_asset_finder_db_url() -> string
A class method which returns the URL at which to create the SQLAlchemy
engine. By default provides a URL for an in-memory database.
make_asset_finder() -> pd.DataFrame
A class method which constructs the actual asset finder object to use
for the class. If this method is overridden then the ``make_*_info``
methods may not be respected.
See Also
--------
zipline.testing.make_simple_equity_info
zipline.testing.make_jagged_equity_info
zipline.testing.make_rotating_equity_info
zipline.testing.make_future_info
zipline.testing.make_commodity_future_info
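A minimal sketch of a subclass override (the sids and symbols here
are hypothetical)::
    class MyAssetTestCase(WithAssetFinder, ZiplineTestCase):
        ASSET_FINDER_EQUITY_SIDS = (1, 2)
        ASSET_FINDER_EQUITY_SYMBOLS = ('AYY', 'LMAO')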
"""
ASSET_FINDER_EQUITY_SIDS = ord('A'), ord('B'), ord('C')
ASSET_FINDER_EQUITY_SYMBOLS = None
ASSET_FINDER_EQUITY_START_DATE = alias('START_DATE')
ASSET_FINDER_EQUITY_END_DATE = alias('END_DATE')
@classmethod
def _make_info(cls):
return None
make_futures_info = _make_info
make_exchanges_info = _make_info
make_root_symbols_info = _make_info
make_equity_supplementary_mappings = _make_info
del _make_info
@classmethod
def make_equity_info(cls):
register_calendar("TEST", get_calendar("NYSE"), force=True)
return make_simple_equity_info(
cls.ASSET_FINDER_EQUITY_SIDS,
cls.ASSET_FINDER_EQUITY_START_DATE,
cls.ASSET_FINDER_EQUITY_END_DATE,
cls.ASSET_FINDER_EQUITY_SYMBOLS,
)
@classmethod
def make_asset_finder_db_url(cls):
return 'sqlite:///:memory:'
@classmethod
def make_asset_finder(cls):
"""Returns a new AssetFinder
Returns
-------
asset_finder : zipline.assets.AssetFinder
"""
return cls.enter_class_context(tmp_asset_finder(
url=cls.make_asset_finder_db_url(),
equities=cls.make_equity_info(),
futures=cls.make_futures_info(),
exchanges=cls.make_exchanges_info(),
root_symbols=cls.make_root_symbols_info(),
equity_supplementary_mappings=(
cls.make_equity_supplementary_mappings()
),
))
@classmethod
def init_class_fixtures(cls):
super(WithAssetFinder, cls).init_class_fixtures()
cls.asset_finder = cls.make_asset_finder()
class WithTradingCalendars(object):
"""
ZiplineTestCase mixin providing cls.trading_calendar,
cls.all_trading_calendars, cls.trading_calendar_for_asset_type as a
class-level fixture.
After ``init_class_fixtures`` has been called:
- `cls.trading_calendar` is populated with a default of the nyse trading
calendar for compatibility with existing tests
- `cls.all_trading_calendars` is populated with the trading calendars
keyed by name,
- `cls.trading_calendar_for_asset_type` is populated with the trading
calendars keyed by the asset type which uses the respective calendar.
Attributes
----------
TRADING_CALENDAR_STRS : iterable
iterable of identifiers of the calendars to use.
TRADING_CALENDAR_FOR_ASSET_TYPE : dict
A dictionary which maps asset type names to the calendar associated
with that asset type.
"""
TRADING_CALENDAR_STRS = ('NYSE',)
TRADING_CALENDAR_FOR_ASSET_TYPE = {Equity: 'NYSE', Future: 'us_futures'}
TRADING_CALENDAR_FOR_EXCHANGE = {}
# For backwards compatibility, existing tests and fixtures refer to
# `trading_calendar` with the assumption that the value is the NYSE
# calendar.
TRADING_CALENDAR_PRIMARY_CAL = 'NYSE'
@classmethod
def init_class_fixtures(cls):
super(WithTradingCalendars, cls).init_class_fixtures()
cls.trading_calendars = {}
for cal_str in (
set(cls.TRADING_CALENDAR_STRS) |
{cls.TRADING_CALENDAR_PRIMARY_CAL}
):
# Set name to allow aliasing.
calendar = get_calendar(cal_str)
setattr(cls,
'{0}_calendar'.format(cal_str.lower()), calendar)
cls.trading_calendars[cal_str] = calendar
for asset_type, cal_str in iteritems(
cls.TRADING_CALENDAR_FOR_ASSET_TYPE):
calendar = get_calendar(cal_str)
cls.trading_calendars[asset_type] = calendar
for exchange, cal_str in iteritems(cls.TRADING_CALENDAR_FOR_EXCHANGE):
register_calendar(exchange, get_calendar(cal_str))
cls.trading_calendars[exchange] = get_calendar(cal_str)
cls.trading_calendar = cls.trading_calendars[
cls.TRADING_CALENDAR_PRIMARY_CAL]
class WithTradingEnvironment(WithAssetFinder,
WithTradingCalendars,
WithDefaultDateBounds):
"""
ZiplineTestCase mixin providing cls.env as a class-level fixture.
After ``init_class_fixtures`` has been called, `cls.env` is populated
with a trading environment whose `asset_finder` is the result of
`cls.make_asset_finder`.
Attributes
----------
TRADING_ENV_MIN_DATE : datetime
The min_date to forward to the constructed TradingEnvironment.
TRADING_ENV_MAX_DATE : datetime
The max date to forward to the constructed TradingEnvironment.
TRADING_ENV_TRADING_CALENDAR : pd.DatetimeIndex
The trading calendar to use for the class's TradingEnvironment.
TRADING_ENV_FUTURE_CHAIN_PREDICATES : dict
The roll predicates to apply when creating contract chains.
Methods
-------
make_load_function() -> callable
A class method that returns the ``load`` argument to pass to the
constructor of ``TradingEnvironment`` for this class.
The signature for the callable returned is:
``(datetime, pd.DatetimeIndex, str) -> (pd.Series, pd.DataFrame)``
make_trading_environment() -> TradingEnvironment
A class method that constructs the trading environment for the class.
If this is overridden then ``make_load_function`` or the class
attributes may not be respected.
See Also
--------
:class:`zipline.finance.trading.TradingEnvironment`
"""
TRADING_ENV_FUTURE_CHAIN_PREDICATES = None
MARKET_DATA_DIR = os.path.join(zipline_dir, 'resources', 'market_data')
@classmethod
def make_load_function(cls):
def load(*args, **kwargs):
symbol = 'SPY'
filename = get_benchmark_filename(symbol)
source_path = os.path.join(cls.MARKET_DATA_DIR, filename)
benchmark_returns = \
pd.Series.from_csv(source_path).tz_localize('UTC')
filename = INDEX_MAPPING[symbol][1]
source_path = os.path.join(cls.MARKET_DATA_DIR, filename)
treasury_curves = \
pd.DataFrame.from_csv(source_path).tz_localize('UTC')
# The TradingEnvironment ordinarily uses cached benchmark returns
# and treasury curves data, but when running the zipline tests this
# cache is not always updated to include the appropriate dates
# required by both the futures and equity calendars. In order to
# create more reliable and consistent data throughout the entirety
# of the tests, we read static benchmark returns and treasury curve
# csv files from source. If a test using the TradingEnvironment
# fixture attempts to run outside of the static date range of the
# csv files, raise an exception warning the user to either update
# the csv files in source or to use a date range within the current
# bounds.
static_start_date = benchmark_returns.index[0].date()
static_end_date = benchmark_returns.index[-1].date()
warning_message = (
'The TradingEnvironment fixture uses static data between '
'{static_start} and {static_end}. To use a start and end date '
'of {given_start} and {given_end} you will have to update the '
'files in {resource_dir} to include the missing dates.'.format(
static_start=static_start_date,
static_end=static_end_date,
given_start=cls.START_DATE.date(),
given_end=cls.END_DATE.date(),
resource_dir=cls.MARKET_DATA_DIR,
)
)
if cls.START_DATE.date() < static_start_date or \
cls.END_DATE.date() > static_end_date:
raise AssertionError(warning_message)
return benchmark_returns, treasury_curves
return load
@classmethod
def make_trading_environment(cls):
return TradingEnvironment(
load=cls.make_load_function(),
asset_db_path=cls.asset_finder.engine,
trading_calendar=cls.trading_calendar,
future_chain_predicates=cls.TRADING_ENV_FUTURE_CHAIN_PREDICATES,
)
@classmethod
def init_class_fixtures(cls):
super(WithTradingEnvironment, cls).init_class_fixtures()
cls.env = cls.make_trading_environment()
class WithSimParams(WithTradingEnvironment):
"""
ZiplineTestCase mixin providing cls.sim_params as a class level fixture.
    The arguments used to construct the simulation parameters may be
    overridden by putting ``SIM_PARAMS_{argname}`` in the class dict, except
    for the trading environment, which is overridden with the mechanisms
    provided by ``WithTradingEnvironment``.
Attributes
----------
SIM_PARAMS_YEAR : int
SIM_PARAMS_CAPITAL_BASE : float
SIM_PARAMS_NUM_DAYS : int
SIM_PARAMS_DATA_FREQUENCY : {'daily', 'minute'}
SIM_PARAMS_EMISSION_RATE : {'daily', 'minute'}
Forwarded to ``factory.create_simulation_parameters``.
SIM_PARAMS_START : datetime
SIM_PARAMS_END : datetime
Forwarded to ``factory.create_simulation_parameters``. If not
explicitly overridden these will be ``START_DATE`` and ``END_DATE``
See Also
--------
zipline.utils.factory.create_simulation_parameters
"""
SIM_PARAMS_YEAR = None
SIM_PARAMS_CAPITAL_BASE = 1.0e5
SIM_PARAMS_NUM_DAYS = None
SIM_PARAMS_DATA_FREQUENCY = 'daily'
SIM_PARAMS_EMISSION_RATE = 'daily'
SIM_PARAMS_START = alias('START_DATE')
SIM_PARAMS_END = alias('END_DATE')
@classmethod
def make_simparams(cls):
return factory.create_simulation_parameters(
year=cls.SIM_PARAMS_YEAR,
start=cls.SIM_PARAMS_START,
end=cls.SIM_PARAMS_END,
num_days=cls.SIM_PARAMS_NUM_DAYS,
capital_base=cls.SIM_PARAMS_CAPITAL_BASE,
data_frequency=cls.SIM_PARAMS_DATA_FREQUENCY,
emission_rate=cls.SIM_PARAMS_EMISSION_RATE,
trading_calendar=cls.trading_calendar,
)
@classmethod
def init_class_fixtures(cls):
super(WithSimParams, cls).init_class_fixtures()
cls.sim_params = cls.make_simparams()
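# Example (illustrative sketch; names are hypothetical): subclasses customize
# ``cls.sim_params`` by overriding ``SIM_PARAMS_*`` attributes instead of
# reimplementing ``make_simparams``::
#
#     class ExampleMinuteSimParams(WithSimParams, ZiplineTestCase):
#         SIM_PARAMS_CAPITAL_BASE = 1.0e6
#         SIM_PARAMS_DATA_FREQUENCY = 'minute'
#
#         def test_capital_base(self):
#             assert self.sim_params.capital_base == 1.0e6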
class WithTradingSessions(WithTradingCalendars, WithDefaultDateBounds):
"""
    ZiplineTestCase mixin providing cls.trading_days and cls.trading_sessions
    as class-level fixtures.
    After init_class_fixtures has been called, `cls.trading_sessions`
    is populated with a dictionary mapping calendar name to the DatetimeIndex
    containing the calendar trading days ranging from:
    (DATA_MAX_DAY - cls.TRADING_DAY_COUNT) -> DATA_MAX_DAY
    `cls.trading_days`, for compatibility with existing tests that assume
    trading days are equity-only, defaults to the NYSE trading sessions.
Attributes
----------
DATA_MAX_DAY : datetime
The most recent trading day in the calendar.
TRADING_DAY_COUNT : int
The number of days to put in the calendar. The default value of
``TRADING_DAY_COUNT`` is 126 (half a trading-year). Inheritors can
override TRADING_DAY_COUNT to request more or less data.
"""
DATA_MIN_DAY = alias('START_DATE')
DATA_MAX_DAY = alias('END_DATE')
    # For backwards compatibility, existing tests and fixtures refer to
# `trading_days` with the assumption that the value is days of the NYSE
# calendar.
trading_days = alias('nyse_sessions')
@classmethod
def init_class_fixtures(cls):
super(WithTradingSessions, cls).init_class_fixtures()
cls.trading_sessions = {}
for cal_str in cls.TRADING_CALENDAR_STRS:
trading_calendar = cls.trading_calendars[cal_str]
sessions = trading_calendar.sessions_in_range(
cls.DATA_MIN_DAY, cls.DATA_MAX_DAY)
# Set name for aliasing.
setattr(cls,
'{0}_sessions'.format(cal_str.lower()), sessions)
cls.trading_sessions[cal_str] = sessions
for exchange, cal_str in iteritems(cls.TRADING_CALENDAR_FOR_EXCHANGE):
trading_calendar = cls.trading_calendars[cal_str]
sessions = trading_calendar.sessions_in_range(
cls.DATA_MIN_DAY, cls.DATA_MAX_DAY)
cls.trading_sessions[exchange] = sessions
class WithTmpDir(object):
"""
    ZiplineTestCase mixin providing cls.tmpdir as a class-level fixture.
After init_class_fixtures has been called, `cls.tmpdir` is populated with
a `testfixtures.TempDirectory` object whose path is `cls.TMP_DIR_PATH`.
Attributes
----------
TMP_DIR_PATH : str
The path to the new directory to create. By default this is None
which will create a unique directory in /tmp.
"""
TMP_DIR_PATH = None
@classmethod
def init_class_fixtures(cls):
super(WithTmpDir, cls).init_class_fixtures()
cls.tmpdir = cls.enter_class_context(
tmp_dir(path=cls.TMP_DIR_PATH),
)
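# Example (illustrative sketch; names are hypothetical): because ``cls.tmpdir``
# is a ``testfixtures.TempDirectory``, tests can write scratch files through it
# and rely on the fixture machinery for cleanup::
#
#     class ExampleWithScratchFile(WithTmpDir, ZiplineTestCase):
#         def test_write(self):
#             path = self.tmpdir.write('scratch.txt', b'payload')
#             assert os.path.exists(path)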
class WithInstanceTmpDir(object):
"""
    ZiplineTestCase mixin providing self.instance_tmpdir as an instance-level
    fixture.
    After init_instance_fixtures has been called, `self.instance_tmpdir` is
    populated with a `testfixtures.TempDirectory` object whose path is
    `cls.INSTANCE_TMP_DIR_PATH`.
Attributes
----------
INSTANCE_TMP_DIR_PATH : str
The path to the new directory to create. By default this is None
which will create a unique directory in /tmp.
"""
INSTANCE_TMP_DIR_PATH = None
def init_instance_fixtures(self):
super(WithInstanceTmpDir, self).init_instance_fixtures()
self.instance_tmpdir = self.enter_instance_context(
tmp_dir(path=self.INSTANCE_TMP_DIR_PATH),
)
class WithEquityDailyBarData(WithTradingEnvironment):
"""
ZiplineTestCase mixin providing cls.make_equity_daily_bar_data.
Attributes
----------
EQUITY_DAILY_BAR_START_DATE : Timestamp
        The date at which to start creating data. This defaults to
        ``START_DATE``.
    EQUITY_DAILY_BAR_END_DATE : Timestamp
The end date up to which to create data. This defaults to ``END_DATE``.
EQUITY_DAILY_BAR_SOURCE_FROM_MINUTE : bool
If this flag is set, `make_equity_daily_bar_data` will read data from
the minute bars defined by `WithEquityMinuteBarData`.
The current default is `False`, but could be `True` in the future.
Methods
-------
make_equity_daily_bar_data() -> iterable[(int, pd.DataFrame)]
A class method that returns an iterator of (sid, dataframe) pairs
which will be written to the bcolz files that the class's
``BcolzDailyBarReader`` will read from. By default this creates
        some simple synthetic data with
:func:`~zipline.testing.create_daily_bar_data`
See Also
--------
WithEquityMinuteBarData
zipline.testing.create_daily_bar_data
"""
EQUITY_DAILY_BAR_USE_FULL_CALENDAR = False
EQUITY_DAILY_BAR_START_DATE = alias('START_DATE')
EQUITY_DAILY_BAR_END_DATE = alias('END_DATE')
EQUITY_DAILY_BAR_SOURCE_FROM_MINUTE = None
@classproperty
def EQUITY_DAILY_BAR_LOOKBACK_DAYS(cls):
# If we're sourcing from minute data, then we almost certainly want the
# minute bar calendar to be aligned with the daily bar calendar, so
# re-use the same lookback parameter.
if cls.EQUITY_DAILY_BAR_SOURCE_FROM_MINUTE:
return cls.EQUITY_MINUTE_BAR_LOOKBACK_DAYS
else:
return 0
@classmethod
def _make_equity_daily_bar_from_minute(cls):
assert issubclass(cls, WithEquityMinuteBarData), \
"Can't source daily data from minute without minute data!"
assets = cls.asset_finder.retrieve_all(cls.asset_finder.equities_sids)
minute_data = dict(cls.make_equity_minute_bar_data())
for asset in assets:
yield asset.sid, minute_frame_to_session_frame(
minute_data[asset.sid],
cls.trading_calendars[Equity])
@classmethod
def make_equity_daily_bar_data(cls):
# Requires a WithEquityMinuteBarData to come before in the MRO.
# Resample that data so that daily and minute bar data are aligned.
if cls.EQUITY_DAILY_BAR_SOURCE_FROM_MINUTE:
return cls._make_equity_daily_bar_from_minute()
else:
return create_daily_bar_data(
cls.equity_daily_bar_days,
cls.asset_finder.equities_sids,
)
@classmethod
def init_class_fixtures(cls):
super(WithEquityDailyBarData, cls).init_class_fixtures()
trading_calendar = cls.trading_calendars[Equity]
if cls.EQUITY_DAILY_BAR_USE_FULL_CALENDAR:
days = trading_calendar.all_sessions
else:
if trading_calendar.is_session(cls.EQUITY_DAILY_BAR_START_DATE):
first_session = cls.EQUITY_DAILY_BAR_START_DATE
else:
first_session = trading_calendar.minute_to_session_label(
pd.Timestamp(cls.EQUITY_DAILY_BAR_START_DATE)
)
if cls.EQUITY_DAILY_BAR_LOOKBACK_DAYS > 0:
first_session = trading_calendar.sessions_window(
first_session,
-1 * cls.EQUITY_DAILY_BAR_LOOKBACK_DAYS
)[0]
days = trading_calendar.sessions_in_range(
first_session,
cls.EQUITY_DAILY_BAR_END_DATE,
)
cls.equity_daily_bar_days = days
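# Example (illustrative sketch; the constant values are arbitrary): a subclass
# replaces the synthetic daily bars by overriding ``make_equity_daily_bar_data``
# to yield one frame per sid indexed by ``cls.equity_daily_bar_days``::
#
#     class ExampleFlatDailyBars(WithEquityDailyBarData, ZiplineTestCase):
#         @classmethod
#         def make_equity_daily_bar_data(cls):
#             for sid in cls.asset_finder.equities_sids:
#                 yield sid, pd.DataFrame(
#                     {'open': 10.0, 'high': 11.0, 'low': 9.0,
#                      'close': 10.5, 'volume': 100},
#                     index=cls.equity_daily_bar_days,
#                 )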
class WithBcolzEquityDailyBarReader(WithEquityDailyBarData, WithTmpDir):
"""
ZiplineTestCase mixin providing cls.bcolz_daily_bar_path,
cls.bcolz_daily_bar_ctable, and cls.bcolz_equity_daily_bar_reader
class level fixtures.
After init_class_fixtures has been called:
- `cls.bcolz_daily_bar_path` is populated with
`cls.tmpdir.getpath(cls.BCOLZ_DAILY_BAR_PATH)`.
- `cls.bcolz_daily_bar_ctable` is populated with data returned from
`cls.make_equity_daily_bar_data`. By default this calls
:func:`zipline.pipeline.loaders.synthetic.make_equity_daily_bar_data`.
- `cls.bcolz_equity_daily_bar_reader` is a daily bar reader
pointing to the directory that was just written to.
Attributes
----------
BCOLZ_DAILY_BAR_PATH : str
The path inside the tmpdir where this will be written.
EQUITY_DAILY_BAR_LOOKBACK_DAYS : int
The number of days of data to add before the first day. This is used
when a test needs to use history, in which case this should be set to
        the largest history window that will be requested.
EQUITY_DAILY_BAR_USE_FULL_CALENDAR : bool
If this flag is set the ``equity_daily_bar_days`` will be the full
set of trading days from the trading environment. This flag overrides
``EQUITY_DAILY_BAR_LOOKBACK_DAYS``.
BCOLZ_DAILY_BAR_READ_ALL_THRESHOLD : int
If this flag is set, use the value as the `read_all_threshold`
parameter to BcolzDailyBarReader, otherwise use the default
value.
EQUITY_DAILY_BAR_SOURCE_FROM_MINUTE : bool
If this flag is set, `make_equity_daily_bar_data` will read data from
the minute bar reader defined by a `WithBcolzEquityMinuteBarReader`.
Methods
-------
make_bcolz_daily_bar_rootdir_path() -> string
A class method that returns the path for the rootdir of the daily
bars ctable. By default this is a subdirectory BCOLZ_DAILY_BAR_PATH in
the shared temp directory.
See Also
--------
WithBcolzEquityMinuteBarReader
WithDataPortal
zipline.testing.create_daily_bar_data
"""
BCOLZ_DAILY_BAR_PATH = 'daily_equity_pricing.bcolz'
BCOLZ_DAILY_BAR_READ_ALL_THRESHOLD = None
EQUITY_DAILY_BAR_SOURCE_FROM_MINUTE = False
# allows WithBcolzEquityDailyBarReaderFromCSVs to call the
    # `write_csvs` method without needing to reimplement `init_class_fixtures`
_write_method_name = 'write'
# What to do when data being written is invalid, e.g. nan, inf, etc.
# options are: 'warn', 'raise', 'ignore'
INVALID_DATA_BEHAVIOR = 'warn'
@classmethod
def make_bcolz_daily_bar_rootdir_path(cls):
return cls.tmpdir.makedir(cls.BCOLZ_DAILY_BAR_PATH)
@classmethod
def init_class_fixtures(cls):
super(WithBcolzEquityDailyBarReader, cls).init_class_fixtures()
cls.bcolz_daily_bar_path = p = cls.make_bcolz_daily_bar_rootdir_path()
days = cls.equity_daily_bar_days
trading_calendar = cls.trading_calendars[Equity]
cls.bcolz_daily_bar_ctable = t = getattr(
BcolzDailyBarWriter(p, trading_calendar, days[0], days[-1]),
cls._write_method_name,
)(
cls.make_equity_daily_bar_data(),
invalid_data_behavior=cls.INVALID_DATA_BEHAVIOR
)
if cls.BCOLZ_DAILY_BAR_READ_ALL_THRESHOLD is not None:
cls.bcolz_equity_daily_bar_reader = BcolzDailyBarReader(
t, cls.BCOLZ_DAILY_BAR_READ_ALL_THRESHOLD)
else:
cls.bcolz_equity_daily_bar_reader = BcolzDailyBarReader(t)
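# Example (illustrative sketch; names are hypothetical): once the class
# fixtures have run, tests can query the bars that were written through the
# reader, e.g.::
#
#     class ExampleReadsDailyBars(WithBcolzEquityDailyBarReader,
#                                 ZiplineTestCase):
#         def test_first_close(self):
#             sid = self.asset_finder.equities_sids[0]
#             day = self.equity_daily_bar_days[0]
#             close = self.bcolz_equity_daily_bar_reader.get_value(
#                 sid, day, 'close')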
class WithBcolzEquityDailyBarReaderFromCSVs(WithBcolzEquityDailyBarReader):
"""
ZiplineTestCase mixin that provides
cls.bcolz_equity_daily_bar_reader from a mapping of sids to CSV
file paths.
"""
_write_method_name = 'write_csvs'
def _trading_days_for_minute_bars(calendar,
start_date,
end_date,
lookback_days):
first_session = calendar.minute_to_session_label(start_date)
if lookback_days > 0:
first_session = calendar.sessions_window(
first_session,
-1 * lookback_days
)[0]
return calendar.sessions_in_range(first_session, end_date)
class _WithMinuteBarDataBase(WithTradingEnvironment):
MINUTE_BAR_LOOKBACK_DAYS = 0
MINUTE_BAR_START_DATE = alias('START_DATE')
MINUTE_BAR_END_DATE = alias('END_DATE')
class WithEquityMinuteBarData(_WithMinuteBarDataBase):
"""
ZiplineTestCase mixin providing cls.equity_minute_bar_days.
After init_class_fixtures has been called:
- `cls.equity_minute_bar_days` has the range over which data has been
generated.
Attributes
----------
EQUITY_MINUTE_BAR_LOOKBACK_DAYS : int
The number of days of data to add before the first day.
This is used when a test needs to use history, in which case this
should be set to the largest history window that will be requested.
EQUITY_MINUTE_BAR_START_DATE : Timestamp
        The date at which to start creating data. This defaults to
        ``START_DATE``.
    EQUITY_MINUTE_BAR_END_DATE : Timestamp
The end date up to which to create data. This defaults to ``END_DATE``.
Methods
-------
make_equity_minute_bar_data() -> iterable[(int, pd.DataFrame)]
Classmethod producing an iterator of (sid, minute_data) pairs.
The default implementation invokes
zipline.testing.core.create_minute_bar_data.
See Also
--------
WithEquityDailyBarData
zipline.testing.create_minute_bar_data
"""
EQUITY_MINUTE_BAR_LOOKBACK_DAYS = alias('MINUTE_BAR_LOOKBACK_DAYS')
EQUITY_MINUTE_BAR_START_DATE = alias('MINUTE_BAR_START_DATE')
EQUITY_MINUTE_BAR_END_DATE = alias('MINUTE_BAR_END_DATE')
@classmethod
def make_equity_minute_bar_data(cls):
trading_calendar = cls.trading_calendars[Equity]
return create_minute_bar_data(
trading_calendar.minutes_for_sessions_in_range(
cls.equity_minute_bar_days[0],
cls.equity_minute_bar_days[-1],
),
cls.asset_finder.equities_sids,
)
@classmethod
def init_class_fixtures(cls):
super(WithEquityMinuteBarData, cls).init_class_fixtures()
trading_calendar = cls.trading_calendars[Equity]
cls.equity_minute_bar_days = _trading_days_for_minute_bars(
trading_calendar,
pd.Timestamp(cls.EQUITY_MINUTE_BAR_START_DATE),
pd.Timestamp(cls.EQUITY_MINUTE_BAR_END_DATE),
cls.EQUITY_MINUTE_BAR_LOOKBACK_DAYS
)
class WithFutureMinuteBarData(_WithMinuteBarDataBase):
"""
ZiplineTestCase mixin providing cls.future_minute_bar_days.
After init_class_fixtures has been called:
- `cls.future_minute_bar_days` has the range over which data has been
generated.
Attributes
----------
FUTURE_MINUTE_BAR_LOOKBACK_DAYS : int
The number of days of data to add before the first day.
This is used when a test needs to use history, in which case this
should be set to the largest history window that will be requested.
FUTURE_MINUTE_BAR_START_DATE : Timestamp
        The date at which to start creating data. This defaults to
        ``START_DATE``.
    FUTURE_MINUTE_BAR_END_DATE : Timestamp
The end date up to which to create data. This defaults to ``END_DATE``.
Methods
-------
make_future_minute_bar_data() -> iterable[(int, pd.DataFrame)]
        A class method that returns an iterable of (sid, dataframe) pairs
        which will be written, by the inheriting class, into the format used
        by its minute bar reader.
        By default this creates some simple synthetic data with
:func:`~zipline.testing.create_minute_bar_data`
See Also
--------
zipline.testing.create_minute_bar_data
"""
FUTURE_MINUTE_BAR_LOOKBACK_DAYS = alias('MINUTE_BAR_LOOKBACK_DAYS')
FUTURE_MINUTE_BAR_START_DATE = alias('MINUTE_BAR_START_DATE')
FUTURE_MINUTE_BAR_END_DATE = alias('MINUTE_BAR_END_DATE')
@classmethod
def make_future_minute_bar_data(cls):
trading_calendar = get_calendar('us_futures')
return create_minute_bar_data(
trading_calendar.minutes_for_sessions_in_range(
cls.future_minute_bar_days[0],
cls.future_minute_bar_days[-1],
),
cls.asset_finder.futures_sids,
)
@classmethod
def init_class_fixtures(cls):
super(WithFutureMinuteBarData, cls).init_class_fixtures()
trading_calendar = get_calendar('us_futures')
cls.future_minute_bar_days = _trading_days_for_minute_bars(
trading_calendar,
pd.Timestamp(cls.FUTURE_MINUTE_BAR_START_DATE),
pd.Timestamp(cls.FUTURE_MINUTE_BAR_END_DATE),
cls.FUTURE_MINUTE_BAR_LOOKBACK_DAYS
)
class WithBcolzEquityMinuteBarReader(WithEquityMinuteBarData, WithTmpDir):
"""
    ZiplineTestCase mixin providing cls.bcolz_equity_minute_bar_path and
    cls.bcolz_equity_minute_bar_reader class level fixtures.
    After init_class_fixtures has been called:
    - `cls.bcolz_equity_minute_bar_path` is populated with
      `cls.tmpdir.getpath(cls.BCOLZ_EQUITY_MINUTE_BAR_PATH)`.
    - the data returned from `cls.make_equity_minute_bar_data` is written
      into that directory. By default this calls
      :func:`~zipline.testing.create_minute_bar_data`.
- `cls.bcolz_equity_minute_bar_reader` is a minute bar reader
pointing to the directory that was just written to.
Attributes
----------
    BCOLZ_EQUITY_MINUTE_BAR_PATH : str
The path inside the tmpdir where this will be written.
Methods
-------
    make_bcolz_equity_minute_bar_rootdir_path() -> string
        A class method that returns the path for the directory that contains
        the minute bar ctables. By default this is a subdirectory
        BCOLZ_EQUITY_MINUTE_BAR_PATH in the shared temp directory.
See Also
--------
WithBcolzEquityDailyBarReader
WithDataPortal
zipline.testing.create_minute_bar_data
"""
BCOLZ_EQUITY_MINUTE_BAR_PATH = 'minute_equity_pricing'
@classmethod
def make_bcolz_equity_minute_bar_rootdir_path(cls):
return cls.tmpdir.makedir(cls.BCOLZ_EQUITY_MINUTE_BAR_PATH)
@classmethod
def init_class_fixtures(cls):
super(WithBcolzEquityMinuteBarReader, cls).init_class_fixtures()
cls.bcolz_equity_minute_bar_path = p = \
cls.make_bcolz_equity_minute_bar_rootdir_path()
days = cls.equity_minute_bar_days
writer = BcolzMinuteBarWriter(
p,
cls.trading_calendars[Equity],
days[0],
days[-1],
US_EQUITIES_MINUTES_PER_DAY
)
writer.write(cls.make_equity_minute_bar_data())
cls.bcolz_equity_minute_bar_reader = \
BcolzMinuteBarReader(p)
class WithBcolzFutureMinuteBarReader(WithFutureMinuteBarData, WithTmpDir):
"""
    ZiplineTestCase mixin providing cls.bcolz_future_minute_bar_path and
    cls.bcolz_future_minute_bar_reader class level fixtures.
    After init_class_fixtures has been called:
    - `cls.bcolz_future_minute_bar_path` is populated with
      `cls.tmpdir.getpath(cls.BCOLZ_FUTURE_MINUTE_BAR_PATH)`.
    - the data returned from `cls.make_future_minute_bar_data` is written
      into that directory. By default this calls
      :func:`~zipline.testing.create_minute_bar_data`.
    - `cls.bcolz_future_minute_bar_reader` is a minute bar reader
      pointing to the directory that was just written to.
Attributes
----------
BCOLZ_FUTURE_MINUTE_BAR_PATH : str
The path inside the tmpdir where this will be written.
Methods
-------
    make_bcolz_future_minute_bar_rootdir_path() -> string
        A class method that returns the path for the directory that contains
        the minute bar ctables. By default this is a subdirectory
        BCOLZ_FUTURE_MINUTE_BAR_PATH in the shared temp directory.
See Also
--------
WithBcolzEquityDailyBarReader
WithDataPortal
zipline.testing.create_minute_bar_data
"""
BCOLZ_FUTURE_MINUTE_BAR_PATH = 'minute_future_pricing'
OHLC_RATIOS_PER_SID = None
@classmethod
def make_bcolz_future_minute_bar_rootdir_path(cls):
return cls.tmpdir.makedir(cls.BCOLZ_FUTURE_MINUTE_BAR_PATH)
@classmethod
def init_class_fixtures(cls):
super(WithBcolzFutureMinuteBarReader, cls).init_class_fixtures()
trading_calendar = get_calendar('us_futures')
cls.bcolz_future_minute_bar_path = p = \
cls.make_bcolz_future_minute_bar_rootdir_path()
days = cls.future_minute_bar_days
writer = BcolzMinuteBarWriter(
p,
trading_calendar,
days[0],
days[-1],
FUTURES_MINUTES_PER_DAY,
ohlc_ratios_per_sid=cls.OHLC_RATIOS_PER_SID,
)
writer.write(cls.make_future_minute_bar_data())
cls.bcolz_future_minute_bar_reader = \
BcolzMinuteBarReader(p)
class WithConstantEquityMinuteBarData(WithEquityMinuteBarData):
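    """
    ZiplineTestCase mixin whose ``make_equity_minute_bar_data`` emits the same
    OHLCV values, taken from the ``EQUITY_MINUTE_CONSTANT_*`` attributes, for
    every minute and every equity sid.
    """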
EQUITY_MINUTE_CONSTANT_LOW = 3.0
EQUITY_MINUTE_CONSTANT_OPEN = 4.0
EQUITY_MINUTE_CONSTANT_CLOSE = 5.0
EQUITY_MINUTE_CONSTANT_HIGH = 6.0
EQUITY_MINUTE_CONSTANT_VOLUME = 100.0
@classmethod
def make_equity_minute_bar_data(cls):
trading_calendar = cls.trading_calendars[Equity]
sids = cls.asset_finder.equities_sids
minutes = trading_calendar.minutes_for_sessions_in_range(
cls.equity_minute_bar_days[0],
cls.equity_minute_bar_days[-1],
)
frame = pd.DataFrame(
{
'open': cls.EQUITY_MINUTE_CONSTANT_OPEN,
'high': cls.EQUITY_MINUTE_CONSTANT_HIGH,
'low': cls.EQUITY_MINUTE_CONSTANT_LOW,
'close': cls.EQUITY_MINUTE_CONSTANT_CLOSE,
'volume': cls.EQUITY_MINUTE_CONSTANT_VOLUME,
},
index=minutes,
)
return ((sid, frame) for sid in sids)
class WithConstantFutureMinuteBarData(WithFutureMinuteBarData):
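    """
    ZiplineTestCase mixin whose ``make_future_minute_bar_data`` emits the same
    OHLCV values, taken from the ``FUTURE_MINUTE_CONSTANT_*`` attributes, for
    every minute and every future sid.
    """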
FUTURE_MINUTE_CONSTANT_LOW = 3.0
FUTURE_MINUTE_CONSTANT_OPEN = 4.0
FUTURE_MINUTE_CONSTANT_CLOSE = 5.0
FUTURE_MINUTE_CONSTANT_HIGH = 6.0
FUTURE_MINUTE_CONSTANT_VOLUME = 100.0
@classmethod
def make_future_minute_bar_data(cls):
trading_calendar = cls.trading_calendars[Future]
sids = cls.asset_finder.futures_sids
minutes = trading_calendar.minutes_for_sessions_in_range(
cls.future_minute_bar_days[0],
cls.future_minute_bar_days[-1],
)
frame = pd.DataFrame(
{
'open': cls.FUTURE_MINUTE_CONSTANT_OPEN,
'high': cls.FUTURE_MINUTE_CONSTANT_HIGH,
'low': cls.FUTURE_MINUTE_CONSTANT_LOW,
'close': cls.FUTURE_MINUTE_CONSTANT_CLOSE,
'volume': cls.FUTURE_MINUTE_CONSTANT_VOLUME,
},
index=minutes,
)
return ((sid, frame) for sid in sids)
class WithAdjustmentReader(WithBcolzEquityDailyBarReader):
"""
ZiplineTestCase mixin providing cls.adjustment_reader as a class level
fixture.
After init_class_fixtures has been called, `cls.adjustment_reader` will be
populated with a new SQLiteAdjustmentReader object. The data that will be
written can be passed by overriding `make_{field}_data` where field may
    be `splits`, `mergers`, `dividends`, or `stock_dividends`.
The daily bar reader used for this adjustment reader may be customized
by overriding `make_adjustment_writer_equity_daily_bar_reader`.
    This is useful for providing a `MockDailyBarReader`.
Methods
-------
make_splits_data() -> pd.DataFrame
A class method that returns a dataframe of splits data to write to the
class's adjustment db. By default this is empty.
make_mergers_data() -> pd.DataFrame
A class method that returns a dataframe of mergers data to write to the
class's adjustment db. By default this is empty.
make_dividends_data() -> pd.DataFrame
A class method that returns a dataframe of dividends data to write to
the class's adjustment db. By default this is empty.
make_stock_dividends_data() -> pd.DataFrame
A class method that returns a dataframe of stock dividends data to
write to the class's adjustment db. By default this is empty.
make_adjustment_db_conn_str() -> string
A class method that returns the sqlite3 connection string for the
        database into which the adjustments will be written. By default this
is an in-memory database.
make_adjustment_writer_equity_daily_bar_reader() -> pd.DataFrame
A class method that returns the daily bar reader to use for the class's
adjustment writer. By default this is the class's actual
``bcolz_equity_daily_bar_reader`` as inherited from
``WithBcolzEquityDailyBarReader``. This should probably not be
overridden; however, some tests used a ``MockDailyBarReader``
for this.
make_adjustment_writer(conn: sqlite3.Connection) -> AdjustmentWriter
        A class method that constructs the adjustment writer which will be
        used to write the data into the connection to be used by the class's
to write the data into the connection to be used by the class's
adjustment reader.
See Also
--------
zipline.testing.MockDailyBarReader
"""
@classmethod
def _make_data(cls):
return None
make_splits_data = _make_data
make_mergers_data = _make_data
make_dividends_data = _make_data
make_stock_dividends_data = _make_data
del _make_data
@classmethod
def make_adjustment_writer(cls, conn):
return SQLiteAdjustmentWriter(
conn,
cls.make_adjustment_writer_equity_daily_bar_reader(),
cls.equity_daily_bar_days,
)
@classmethod
def make_adjustment_writer_equity_daily_bar_reader(cls):
return cls.bcolz_equity_daily_bar_reader
@classmethod
def make_adjustment_db_conn_str(cls):
return ':memory:'
@classmethod
def init_class_fixtures(cls):
super(WithAdjustmentReader, cls).init_class_fixtures()
conn = sqlite3.connect(cls.make_adjustment_db_conn_str())
cls.make_adjustment_writer(conn).write(
splits=cls.make_splits_data(),
mergers=cls.make_mergers_data(),
dividends=cls.make_dividends_data(),
stock_dividends=cls.make_stock_dividends_data(),
)
cls.adjustment_reader = SQLiteAdjustmentReader(conn)
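# Example (illustrative sketch; the date and class name are hypothetical, and
# a ratio of 0.5 encodes a 2-for-1 split): adjustment data is injected by
# overriding the ``make_{field}_data`` hooks::
#
#     class ExampleWithSplit(WithAdjustmentReader, ZiplineTestCase):
#         @classmethod
#         def make_splits_data(cls):
#             return pd.DataFrame({
#                 'effective_date': [str_to_seconds('2015-01-06')],
#                 'ratio': [0.5],
#                 'sid': [cls.asset_finder.equities_sids[0]],
#             })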
class WithEquityPricingPipelineEngine(WithAdjustmentReader,
WithTradingSessions):
"""
    Mixin providing the following as class-level fixtures.
- cls.data_root_dir
- cls.findata_dir
- cls.pipeline_engine
- cls.adjustments_db_path
"""
@classmethod
def init_class_fixtures(cls):
cls.data_root_dir = cls.enter_class_context(tmp_dir())
cls.findata_dir = cls.data_root_dir.makedir('findata')
super(WithEquityPricingPipelineEngine, cls).init_class_fixtures()
loader = USEquityPricingLoader(
cls.bcolz_equity_daily_bar_reader,
SQLiteAdjustmentReader(cls.adjustments_db_path),
)
dispatcher = dict(
zip(USEquityPricing.columns, repeat(loader))
).__getitem__
cls.pipeline_engine = SimplePipelineEngine(
get_loader=dispatcher,
calendar=cls.nyse_sessions,
asset_finder=cls.asset_finder,
)
@classmethod
def make_adjustment_db_conn_str(cls):
cls.adjustments_db_path = os.path.join(
cls.findata_dir,
'adjustments',
cls.END_DATE.strftime("%Y-%m-%d-adjustments.db")
)
ensure_directory(os.path.dirname(cls.adjustments_db_path))
return cls.adjustments_db_path
class WithSeededRandomPipelineEngine(WithTradingSessions, WithAssetFinder):
"""
ZiplineTestCase mixin providing class-level fixtures for running pipelines
against deterministically-generated random data.
Attributes
----------
SEEDED_RANDOM_PIPELINE_SEED : int
Fixture input. Random seed used to initialize the random state loader.
seeded_random_loader : SeededRandomLoader
Fixture output. Loader capable of providing columns for
zipline.pipeline.data.testing.TestingDataSet.
seeded_random_engine : SimplePipelineEngine
Fixture output. A pipeline engine that will use seeded_random_loader
as its only data provider.
Methods
-------
run_pipeline(start_date, end_date)
Run a pipeline with self.seeded_random_engine.
See Also
--------
zipline.pipeline.loaders.synthetic.SeededRandomLoader
zipline.pipeline.loaders.testing.make_seeded_random_loader
zipline.pipeline.engine.SimplePipelineEngine
"""
SEEDED_RANDOM_PIPELINE_SEED = 42
@classmethod
def init_class_fixtures(cls):
super(WithSeededRandomPipelineEngine, cls).init_class_fixtures()
cls._sids = cls.asset_finder.sids
cls.seeded_random_loader = loader = make_seeded_random_loader(
cls.SEEDED_RANDOM_PIPELINE_SEED,
cls.trading_days,
cls._sids,
)
cls.seeded_random_engine = SimplePipelineEngine(
get_loader=lambda column: loader,
calendar=cls.trading_days,
asset_finder=cls.asset_finder,
)
def raw_expected_values(self, column, start_date, end_date):
"""
Get an array containing the raw values we expect to be produced for the
given dates between start_date and end_date, inclusive.
"""
all_values = self.seeded_random_loader.values(
column.dtype,
self.trading_days,
self._sids,
)
row_slice = self.trading_days.slice_indexer(start_date, end_date)
return all_values[row_slice]
def run_pipeline(self, pipeline, start_date, end_date):
"""
Run a pipeline with self.seeded_random_engine.
"""
if start_date not in self.trading_days:
raise AssertionError("Start date not in calendar: %s" % start_date)
if end_date not in self.trading_days:
raise AssertionError("End date not in calendar: %s" % end_date)
return self.seeded_random_engine.run_pipeline(
pipeline,
start_date,
end_date,
)
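# Example (illustrative sketch; assumes ``Pipeline`` and ``TestingDataSet``
# are importable from zipline.pipeline and zipline.pipeline.data.testing):
# a test built on this mixin runs a pipeline over part of the calendar::
#
#     class ExampleSeededPipeline(WithSeededRandomPipelineEngine,
#                                 ZiplineTestCase):
#         def test_pipeline(self):
#             result = self.run_pipeline(
#                 Pipeline(columns={'f': TestingDataSet.float_col.latest}),
#                 self.trading_days[10],
#                 self.trading_days[-1],
#             )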
class WithDataPortal(WithAdjustmentReader,
# Ordered so that bcolz minute reader is used first.
WithBcolzEquityMinuteBarReader,
WithBcolzFutureMinuteBarReader):
"""
ZiplineTestCase mixin providing self.data_portal as an instance level
fixture.
After init_instance_fixtures has been called, `self.data_portal` will be
populated with a new data portal created by passing in the class's
trading env, `cls.bcolz_equity_minute_bar_reader`,
`cls.bcolz_equity_daily_bar_reader`, and `cls.adjustment_reader`.
Attributes
----------
DATA_PORTAL_USE_DAILY_DATA : bool
Should the daily bar reader be used? Defaults to True.
DATA_PORTAL_USE_MINUTE_DATA : bool
Should the minute bar reader be used? Defaults to True.
DATA_PORTAL_USE_ADJUSTMENTS : bool
Should the adjustment reader be used? Defaults to True.
Methods
-------
make_data_portal() -> DataPortal
Method which returns the data portal to be used for each test case.
If this is overridden, the ``DATA_PORTAL_USE_*`` attributes may not
be respected.
"""
DATA_PORTAL_USE_DAILY_DATA = True
DATA_PORTAL_USE_MINUTE_DATA = True
DATA_PORTAL_USE_ADJUSTMENTS = True
DATA_PORTAL_FIRST_TRADING_DAY = None
DATA_PORTAL_LAST_AVAILABLE_SESSION = None
DATA_PORTAL_LAST_AVAILABLE_MINUTE = None
DATA_PORTAL_MINUTE_HISTORY_PREFETCH = DEFAULT_MINUTE_HISTORY_PREFETCH
DATA_PORTAL_DAILY_HISTORY_PREFETCH = DEFAULT_DAILY_HISTORY_PREFETCH
def make_data_portal(self):
if self.DATA_PORTAL_FIRST_TRADING_DAY is None:
if self.DATA_PORTAL_USE_MINUTE_DATA:
self.DATA_PORTAL_FIRST_TRADING_DAY = (
self.bcolz_equity_minute_bar_reader.
first_trading_day)
elif self.DATA_PORTAL_USE_DAILY_DATA:
self.DATA_PORTAL_FIRST_TRADING_DAY = (
self.bcolz_equity_daily_bar_reader.
first_trading_day)
return DataPortal(
self.env.asset_finder,
self.trading_calendar,
first_trading_day=self.DATA_PORTAL_FIRST_TRADING_DAY,
equity_daily_reader=(
self.bcolz_equity_daily_bar_reader
if self.DATA_PORTAL_USE_DAILY_DATA else
None
),
equity_minute_reader=(
self.bcolz_equity_minute_bar_reader
if self.DATA_PORTAL_USE_MINUTE_DATA else
None
),
adjustment_reader=(
self.adjustment_reader
if self.DATA_PORTAL_USE_ADJUSTMENTS else
None
),
future_minute_reader=(
self.bcolz_future_minute_bar_reader
if self.DATA_PORTAL_USE_MINUTE_DATA else
None
),
future_daily_reader=(
MinuteResampleSessionBarReader(
self.bcolz_future_minute_bar_reader.trading_calendar,
self.bcolz_future_minute_bar_reader)
if self.DATA_PORTAL_USE_MINUTE_DATA else None
),
last_available_session=self.DATA_PORTAL_LAST_AVAILABLE_SESSION,
last_available_minute=self.DATA_PORTAL_LAST_AVAILABLE_MINUTE,
minute_history_prefetch_length=self.
DATA_PORTAL_MINUTE_HISTORY_PREFETCH,
daily_history_prefetch_length=self.
DATA_PORTAL_DAILY_HISTORY_PREFETCH,
)
def init_instance_fixtures(self):
super(WithDataPortal, self).init_instance_fixtures()
self.data_portal = self.make_data_portal()
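# Example (illustrative sketch; names are hypothetical): with the portal in
# place, instance-level tests can ask for spot values or history windows::
#
#     class ExampleUsesDataPortal(WithDataPortal, ZiplineTestCase):
#         def test_spot_value(self):
#             asset = self.asset_finder.retrieve_asset(
#                 self.asset_finder.sids[0])
#             dt = self.trading_calendar.session_close(self.END_DATE)
#             price = self.data_portal.get_spot_value(
#                 asset, 'close', dt, 'minute')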
class WithResponses(object):
"""
ZiplineTestCase mixin that provides self.responses as an instance
fixture.
After init_instance_fixtures has been called, `self.responses` will be
a new `responses.RequestsMock` object. Users may add new endpoints to this
with the `self.responses.add` method.
"""
def init_instance_fixtures(self):
super(WithResponses, self).init_instance_fixtures()
self.responses = self.enter_instance_context(
responses.RequestsMock(),
)
class WithCreateBarData(WithDataPortal):
CREATE_BARDATA_DATA_FREQUENCY = 'minute'
def create_bardata(self, simulation_dt_func, restrictions=None):
return BarData(
self.data_portal,
simulation_dt_func,
self.CREATE_BARDATA_DATA_FREQUENCY,
self.trading_calendar,
restrictions or NoRestrictions()
        )
from abc import ABCMeta, abstractmethod, abstractproperty
from contextlib import contextmanager
from functools import wraps
import gzip
from inspect import getargspec
from itertools import (
combinations,
count,
product,
)
import operator
import os
from os.path import abspath, dirname, join, realpath
import shutil
from sys import _getframe
import tempfile
from logbook import TestHandler
from mock import patch
from nose.tools import nottest
from numpy.testing import assert_allclose, assert_array_equal
import pandas as pd
from six import itervalues, iteritems, with_metaclass
from six.moves import filter, map
from sqlalchemy import create_engine
from testfixtures import TempDirectory
from toolz import concat, curry
from zipline.assets import AssetFinder, AssetDBWriter
from zipline.assets.synthetic import make_simple_equity_info
from zipline.data.data_portal import DataPortal
from zipline.data.loader import get_benchmark_filename, INDEX_MAPPING
from zipline.data.minute_bars import (
BcolzMinuteBarReader,
BcolzMinuteBarWriter,
US_EQUITIES_MINUTES_PER_DAY
)
from zipline.data.us_equity_pricing import (
BcolzDailyBarReader,
BcolzDailyBarWriter,
SQLiteAdjustmentWriter,
)
from zipline.finance.blotter import Blotter
from zipline.finance.trading import TradingEnvironment
from zipline.finance.order import ORDER_STATUS
from zipline.lib.labelarray import LabelArray
from zipline.pipeline.data import USEquityPricing
from zipline.pipeline.engine import SimplePipelineEngine
from zipline.pipeline.factors import CustomFactor
from zipline.pipeline.loaders.testing import make_seeded_random_loader
from zipline.utils import security_list
from zipline.utils.calendars import get_calendar
from zipline.utils.input_validation import expect_dimensions
from zipline.utils.numpy_utils import as_column, isnat
from zipline.utils.pandas_utils import timedelta_to_integral_seconds
from zipline.utils.paths import ensure_directory
from zipline.utils.sentinel import sentinel
import numpy as np
from numpy import float64
EPOCH = pd.Timestamp(0, tz='UTC')
def seconds_to_timestamp(seconds):
return pd.Timestamp(seconds, unit='s', tz='UTC')
def to_utc(time_str):
"""Convert a string in US/Eastern time to UTC"""
return pd.Timestamp(time_str, tz='US/Eastern').tz_convert('UTC')
def str_to_seconds(s):
"""
Convert a pandas-intelligible string to (integer) seconds since UTC.
>>> from pandas import Timestamp
>>> (Timestamp('2014-01-01') - Timestamp(0)).total_seconds()
1388534400.0
>>> str_to_seconds('2014-01-01')
1388534400
"""
return timedelta_to_integral_seconds(pd.Timestamp(s, tz='UTC') - EPOCH)
def drain_zipline(test, zipline):
output = []
transaction_count = 0
msg_counter = 0
# start the simulation
for update in zipline:
msg_counter += 1
output.append(update)
if 'daily_perf' in update:
transaction_count += \
len(update['daily_perf']['transactions'])
return output, transaction_count
def check_algo_results(test,
results,
expected_transactions_count=None,
expected_order_count=None,
expected_positions_count=None,
sid=None):
if expected_transactions_count is not None:
txns = flatten_list(results["transactions"])
test.assertEqual(expected_transactions_count, len(txns))
if expected_positions_count is not None:
raise NotImplementedError
if expected_order_count is not None:
# de-dup orders on id, because orders are put back into perf packets
        # whenever a txn is filled
orders = set([order['id'] for order in
flatten_list(results["orders"])])
test.assertEqual(expected_order_count, len(orders))
def flatten_list(xs):
    return [item for sublist in xs for item in sublist]
def assert_single_position(test, zipline):
output, transaction_count = drain_zipline(test, zipline)
if 'expected_transactions' in test.zipline_test_config:
test.assertEqual(
test.zipline_test_config['expected_transactions'],
transaction_count
)
else:
test.assertEqual(
test.zipline_test_config['order_count'],
transaction_count
)
# the final message is the risk report, the second to
# last is the final day's results. Positions is a list of
# dicts.
closing_positions = output[-2]['daily_perf']['positions']
# confirm that all orders were filled.
# iterate over the output updates, overwriting
# orders when they are updated. Then check the status on all.
orders_by_id = {}
for update in output:
if 'daily_perf' in update:
if 'orders' in update['daily_perf']:
for order in update['daily_perf']['orders']:
orders_by_id[order['id']] = order
for order in itervalues(orders_by_id):
test.assertEqual(
order['status'],
ORDER_STATUS.FILLED,
"")
test.assertEqual(
len(closing_positions),
1,
"Portfolio should have one position."
)
sid = test.zipline_test_config['sid']
test.assertEqual(
closing_positions[0]['sid'],
sid,
"Portfolio should have one position in " + str(sid)
)
return output, transaction_count
@contextmanager
def security_list_copy():
old_dir = security_list.SECURITY_LISTS_DIR
new_dir = tempfile.mkdtemp()
try:
for subdir in os.listdir(old_dir):
shutil.copytree(os.path.join(old_dir, subdir),
os.path.join(new_dir, subdir))
with patch.object(security_list, 'SECURITY_LISTS_DIR', new_dir), \
patch.object(security_list, 'using_copy', True,
create=True):
yield
finally:
shutil.rmtree(new_dir, True)
def add_security_data(adds, deletes):
if not hasattr(security_list, 'using_copy'):
raise Exception('add_security_data must be used within '
'security_list_copy context')
directory = os.path.join(
security_list.SECURITY_LISTS_DIR,
"leveraged_etf_list/20150127/20150125"
)
if not os.path.exists(directory):
os.makedirs(directory)
del_path = os.path.join(directory, "delete")
with open(del_path, 'w') as f:
for sym in deletes:
f.write(sym)
f.write('\n')
add_path = os.path.join(directory, "add")
with open(add_path, 'w') as f:
for sym in adds:
f.write(sym)
f.write('\n')
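# Example (illustrative sketch; the symbol is arbitrary): the two helpers
# above are meant to be used together so that only a throwaway copy of the
# security lists directory is mutated::
#
#     with security_list_copy():
#         add_security_data(['AAPL'], [])
#         # ... exercise code that consults zipline.utils.security_list ...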
def all_pairs_matching_predicate(values, pred):
"""
Return an iterator of all pairs, (v0, v1) from values such that
`pred(v0, v1) == True`
Parameters
----------
values : iterable
pred : function
Returns
-------
pairs_iterator : generator
Generator yielding pairs matching `pred`.
Examples
--------
>>> from zipline.testing import all_pairs_matching_predicate
>>> from operator import eq, lt
>>> list(all_pairs_matching_predicate(range(5), eq))
[(0, 0), (1, 1), (2, 2), (3, 3), (4, 4)]
>>> list(all_pairs_matching_predicate("abcd", lt))
[('a', 'b'), ('a', 'c'), ('a', 'd'), ('b', 'c'), ('b', 'd'), ('c', 'd')]
"""
return filter(lambda pair: pred(*pair), product(values, repeat=2))
def product_upper_triangle(values, include_diagonal=False):
"""
Return an iterator over pairs, (v0, v1), drawn from values.
If `include_diagonal` is True, returns all pairs such that v0 <= v1.
If `include_diagonal` is False, returns all pairs such that v0 < v1.
"""
return all_pairs_matching_predicate(
values,
operator.le if include_diagonal else operator.lt,
)
def all_subindices(index):
"""
Return all valid sub-indices of a pandas Index.
"""
return (
index[start:stop]
for start, stop in product_upper_triangle(range(len(index) + 1))
)
def chrange(start, stop):
"""
Construct an iterable of length-1 strings beginning with `start` and ending
with `stop`.
Parameters
----------
start : str
The first character.
stop : str
The last character.
Returns
-------
chars: iterable[str]
Iterable of strings beginning with start and ending with stop.
Example
-------
>>> chrange('A', 'C')
['A', 'B', 'C']
"""
return list(map(chr, range(ord(start), ord(stop) + 1)))
def make_trade_data_for_asset_info(dates,
asset_info,
price_start,
price_step_by_date,
price_step_by_sid,
volume_start,
volume_step_by_date,
volume_step_by_sid,
frequency,
writer=None):
"""
Convert the asset info dataframe into a dataframe of trade data for each
sid, and write to the writer if provided. Write NaNs for locations where
assets did not exist. Return a dict of the dataframes, keyed by sid.
"""
trade_data = {}
sids = asset_info.index
price_sid_deltas = np.arange(len(sids), dtype=float64) * price_step_by_sid
price_date_deltas = (np.arange(len(dates), dtype=float64) *
price_step_by_date)
prices = (price_sid_deltas + as_column(price_date_deltas)) + price_start
volume_sid_deltas = np.arange(len(sids)) * volume_step_by_sid
volume_date_deltas = np.arange(len(dates)) * volume_step_by_date
volumes = volume_sid_deltas + as_column(volume_date_deltas) + volume_start
for j, sid in enumerate(sids):
start_date, end_date = asset_info.loc[sid, ['start_date', 'end_date']]
        # Normalize here so that we still generate non-NaN values on the
# for an asset's last trading day.
for i, date in enumerate(dates.normalize()):
if not (start_date <= date <= end_date):
prices[i, j] = 0
volumes[i, j] = 0
df = pd.DataFrame(
{
"open": prices[:, j],
"high": prices[:, j],
"low": prices[:, j],
"close": prices[:, j],
"volume": volumes[:, j],
},
index=dates,
)
if writer:
writer.write_sid(sid, df)
trade_data[sid] = df
return trade_data
def check_allclose(actual,
desired,
rtol=1e-07,
atol=0,
err_msg='',
verbose=True):
"""
Wrapper around np.testing.assert_allclose that also verifies that inputs
are ndarrays.
See Also
--------
    np.testing.assert_allclose
"""
if type(actual) != type(desired):
raise AssertionError("%s != %s" % (type(actual), type(desired)))
return assert_allclose(
actual,
desired,
atol=atol,
rtol=rtol,
err_msg=err_msg,
verbose=verbose,
)
def check_arrays(x, y, err_msg='', verbose=True, check_dtypes=True):
"""
Wrapper around np.testing.assert_array_equal that also verifies that inputs
are ndarrays.
See Also
--------
    np.testing.assert_array_equal
"""
assert type(x) == type(y), "{x} != {y}".format(x=type(x), y=type(y))
assert x.dtype == y.dtype, "{x.dtype} != {y.dtype}".format(x=x, y=y)
if isinstance(x, LabelArray):
# Check that both arrays have missing values in the same locations...
assert_array_equal(
x.is_missing(),
y.is_missing(),
err_msg=err_msg,
verbose=verbose,
)
# ...then check the actual values as well.
x = x.as_string_array()
y = y.as_string_array()
elif x.dtype.kind in 'mM':
x_isnat = isnat(x)
y_isnat = isnat(y)
assert_array_equal(
x_isnat,
y_isnat,
err_msg="NaTs not equal",
verbose=verbose,
)
# Fill NaTs with zero for comparison.
x = np.where(x_isnat, np.zeros_like(x), x)
y = np.where(y_isnat, np.zeros_like(y), y)
return assert_array_equal(x, y, err_msg=err_msg, verbose=verbose)
class UnexpectedAttributeAccess(Exception):
pass
class ExplodingObject(object):
"""
Object that will raise an exception on any attribute access.
Useful for verifying that an object is never touched during a
function/method call.
"""
def __getattribute__(self, name):
raise UnexpectedAttributeAccess(name)
def write_minute_data(trading_calendar, tempdir, minutes, sids):
first_session = trading_calendar.minute_to_session_label(
minutes[0], direction="none"
)
last_session = trading_calendar.minute_to_session_label(
minutes[-1], direction="none"
)
sessions = trading_calendar.sessions_in_range(first_session, last_session)
write_bcolz_minute_data(
trading_calendar,
sessions,
tempdir.path,
create_minute_bar_data(minutes, sids),
)
return tempdir.path
def create_minute_bar_data(minutes, sids):
length = len(minutes)
for sid_idx, sid in enumerate(sids):
yield sid, pd.DataFrame(
{
'open': np.arange(length) + 10 + sid_idx,
'high': np.arange(length) + 15 + sid_idx,
'low': np.arange(length) + 8 + sid_idx,
'close': np.arange(length) + 10 + sid_idx,
'volume': 100 + sid_idx,
},
index=minutes,
)
def create_daily_bar_data(sessions, sids):
length = len(sessions)
for sid_idx, sid in enumerate(sids):
yield sid, pd.DataFrame(
{
"open": (np.array(range(10, 10 + length)) + sid_idx),
"high": (np.array(range(15, 15 + length)) + sid_idx),
"low": (np.array(range(8, 8 + length)) + sid_idx),
"close": (np.array(range(10, 10 + length)) + sid_idx),
"volume": np.array(range(100, 100 + length)) + sid_idx,
"day": [session.value for session in sessions]
},
index=sessions,
)
def write_daily_data(tempdir, sim_params, sids, trading_calendar):
path = os.path.join(tempdir.path, "testdaily.bcolz")
BcolzDailyBarWriter(path, trading_calendar,
sim_params.start_session,
sim_params.end_session).write(
create_daily_bar_data(sim_params.sessions, sids),
)
return path
def create_data_portal(asset_finder, tempdir, sim_params, sids,
trading_calendar, adjustment_reader=None):
if sim_params.data_frequency == "daily":
daily_path = write_daily_data(tempdir, sim_params, sids,
trading_calendar)
equity_daily_reader = BcolzDailyBarReader(daily_path)
return DataPortal(
asset_finder, trading_calendar,
first_trading_day=equity_daily_reader.first_trading_day,
equity_daily_reader=equity_daily_reader,
adjustment_reader=adjustment_reader
)
else:
minutes = trading_calendar.minutes_in_range(
sim_params.first_open,
sim_params.last_close
)
minute_path = write_minute_data(trading_calendar, tempdir, minutes,
sids)
equity_minute_reader = BcolzMinuteBarReader(minute_path)
return DataPortal(
asset_finder, trading_calendar,
first_trading_day=equity_minute_reader.first_trading_day,
equity_minute_reader=equity_minute_reader,
adjustment_reader=adjustment_reader
)
def write_bcolz_minute_data(trading_calendar, days, path, data):
BcolzMinuteBarWriter(
path,
trading_calendar,
days[0],
days[-1],
US_EQUITIES_MINUTES_PER_DAY
).write(data)
def create_minute_df_for_asset(trading_calendar,
start_dt,
end_dt,
interval=1,
start_val=1,
minute_blacklist=None):
asset_minutes = trading_calendar.minutes_for_sessions_in_range(
start_dt, end_dt
)
minutes_count = len(asset_minutes)
minutes_arr = np.array(range(start_val, start_val + minutes_count))
df = pd.DataFrame(
{
"open": minutes_arr + 1,
"high": minutes_arr + 2,
"low": minutes_arr - 1,
"close": minutes_arr,
"volume": 100 * minutes_arr,
},
index=asset_minutes,
)
if interval > 1:
counter = 0
while counter < len(minutes_arr):
df[counter:(counter + interval - 1)] = 0
counter += interval
if minute_blacklist is not None:
for minute in minute_blacklist:
df.loc[minute] = 0
return df
def create_daily_df_for_asset(trading_calendar, start_day, end_day,
interval=1):
days = trading_calendar.sessions_in_range(start_day, end_day)
days_count = len(days)
days_arr = np.arange(days_count) + 2
df = pd.DataFrame(
{
"open": days_arr + 1,
"high": days_arr + 2,
"low": days_arr - 1,
"close": days_arr,
"volume": days_arr * 100,
},
index=days,
)
if interval > 1:
# only keep every 'interval' rows
for idx, _ in enumerate(days_arr):
if (idx + 1) % interval != 0:
df["open"].iloc[idx] = 0
df["high"].iloc[idx] = 0
df["low"].iloc[idx] = 0
df["close"].iloc[idx] = 0
df["volume"].iloc[idx] = 0
return df
def trades_by_sid_to_dfs(trades_by_sid, index):
for sidint, trades in iteritems(trades_by_sid):
opens = []
highs = []
lows = []
closes = []
volumes = []
for trade in trades:
opens.append(trade.open_price)
highs.append(trade.high)
lows.append(trade.low)
closes.append(trade.close_price)
volumes.append(trade.volume)
yield sidint, pd.DataFrame(
{
"open": opens,
"high": highs,
"low": lows,
"close": closes,
"volume": volumes,
},
index=index,
)
def create_data_portal_from_trade_history(asset_finder, trading_calendar,
tempdir, sim_params, trades_by_sid):
if sim_params.data_frequency == "daily":
path = os.path.join(tempdir.path, "testdaily.bcolz")
writer = BcolzDailyBarWriter(
path, trading_calendar,
sim_params.start_session,
sim_params.end_session
)
writer.write(
trades_by_sid_to_dfs(trades_by_sid, sim_params.sessions),
)
equity_daily_reader = BcolzDailyBarReader(path)
return DataPortal(
asset_finder, trading_calendar,
first_trading_day=equity_daily_reader.first_trading_day,
equity_daily_reader=equity_daily_reader,
)
else:
minutes = trading_calendar.minutes_in_range(
sim_params.first_open,
sim_params.last_close
)
length = len(minutes)
assets = {}
for sidint, trades in iteritems(trades_by_sid):
opens = np.zeros(length)
highs = np.zeros(length)
lows = np.zeros(length)
closes = np.zeros(length)
volumes = np.zeros(length)
for trade in trades:
# put them in the right place
idx = minutes.searchsorted(trade.dt)
opens[idx] = trade.open_price * 1000
highs[idx] = trade.high * 1000
lows[idx] = trade.low * 1000
closes[idx] = trade.close_price * 1000
volumes[idx] = trade.volume
assets[sidint] = pd.DataFrame({
"open": opens,
"high": highs,
"low": lows,
"close": closes,
"volume": volumes,
"dt": minutes
}).set_index("dt")
write_bcolz_minute_data(
trading_calendar,
sim_params.sessions,
tempdir.path,
assets
)
equity_minute_reader = BcolzMinuteBarReader(tempdir.path)
return DataPortal(
asset_finder, trading_calendar,
first_trading_day=equity_minute_reader.first_trading_day,
equity_minute_reader=equity_minute_reader,
)
class FakeDataPortal(DataPortal):
def __init__(self, env, trading_calendar=None,
first_trading_day=None):
if trading_calendar is None:
trading_calendar = get_calendar("NYSE")
super(FakeDataPortal, self).__init__(env.asset_finder,
trading_calendar,
first_trading_day)
def get_spot_value(self, asset, field, dt, data_frequency):
if field == "volume":
return 100
else:
return 1.0
def get_history_window(self, assets, end_dt, bar_count, frequency, field,
data_frequency, ffill=True):
if frequency == "1d":
end_idx = \
self.trading_calendar.all_sessions.searchsorted(end_dt)
days = self.trading_calendar.all_sessions[
(end_idx - bar_count + 1):(end_idx + 1)
]
df = pd.DataFrame(
np.full((bar_count, len(assets)), 100.0),
index=days,
columns=assets
)
return df
class FetcherDataPortal(DataPortal):
"""
    Mock data portal that returns fake data for history and non-fetcher
    spot values.
"""
def __init__(self, asset_finder, trading_calendar, first_trading_day=None):
super(FetcherDataPortal, self).__init__(asset_finder, trading_calendar,
first_trading_day)
def get_spot_value(self, asset, field, dt, data_frequency):
# if this is a fetcher field, exercise the regular code path
if self._is_extra_source(asset, field, self._augmented_sources_map):
return super(FetcherDataPortal, self).get_spot_value(
asset, field, dt, data_frequency)
# otherwise just return a fixed value
return int(asset)
# XXX: These aren't actually the methods that are used by the superclasses,
# so these don't do anything, and this class will likely produce unexpected
# results for history().
def _get_daily_window_for_sid(self, asset, field, days_in_window,
extra_slot=True):
return np.arange(days_in_window, dtype=np.float64)
def _get_minute_window_for_asset(self, asset, field, minutes_for_window):
return np.arange(minutes_for_window, dtype=np.float64)
class tmp_assets_db(object):
"""Create a temporary assets sqlite database.
This is meant to be used as a context manager.
Parameters
----------
url : string
The URL for the database connection.
**frames
The frames to pass to the AssetDBWriter.
By default this maps equities:
('A', 'B', 'C') -> map(ord, 'ABC')
See Also
--------
empty_assets_db
tmp_asset_finder
"""
_default_equities = sentinel('_default_equities')
def __init__(self,
url='sqlite:///:memory:',
equities=_default_equities,
**frames):
self._url = url
self._eng = None
if equities is self._default_equities:
equities = make_simple_equity_info(
list(map(ord, 'ABC')),
pd.Timestamp(0),
pd.Timestamp('2015'),
)
frames['equities'] = equities
self._frames = frames
self._eng = None # set in enter and exit
def __enter__(self):
self._eng = eng = create_engine(self._url)
AssetDBWriter(eng).write(**self._frames)
return eng
def __exit__(self, *excinfo):
assert self._eng is not None, '_eng was not set in __enter__'
self._eng.dispose()
self._eng = None
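# Example (illustrative sketch): used as a context manager, ``tmp_assets_db``
# yields a SQLAlchemy engine bound to a freshly written assets db::
#
#     with tmp_assets_db() as engine:
#         finder = AssetFinder(engine)
#         assets = finder.retrieve_all(finder.sids)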
def empty_assets_db():
"""Context manager for creating an empty assets db.
See Also
--------
tmp_assets_db
"""
return tmp_assets_db(equities=None)
class tmp_asset_finder(tmp_assets_db):
"""Create a temporary asset finder using an in memory sqlite db.
Parameters
----------
url : string
The URL for the database connection.
finder_cls : type, optional
The type of asset finder to create from the assets db.
**frames
Forwarded to ``tmp_assets_db``.
See Also
--------
tmp_assets_db
"""
def __init__(self,
url='sqlite:///:memory:',
finder_cls=AssetFinder,
**frames):
self._finder_cls = finder_cls
super(tmp_asset_finder, self).__init__(url=url, **frames)
def __enter__(self):
return self._finder_cls(super(tmp_asset_finder, self).__enter__())
def empty_asset_finder():
"""Context manager for creating an empty asset finder.
See Also
--------
empty_assets_db
tmp_assets_db
tmp_asset_finder
"""
return tmp_asset_finder(equities=None)
class tmp_trading_env(tmp_asset_finder):
"""Create a temporary trading environment.
Parameters
----------
load : callable, optional
Function that returns benchmark returns and treasury curves.
finder_cls : type, optional
The type of asset finder to create from the assets db.
**frames
Forwarded to ``tmp_assets_db``.
See Also
--------
empty_trading_env
tmp_asset_finder
"""
def __init__(self, load=None, *args, **kwargs):
super(tmp_trading_env, self).__init__(*args, **kwargs)
self._load = load
def __enter__(self):
return TradingEnvironment(
load=self._load,
asset_db_path=super(tmp_trading_env, self).__enter__().engine,
)
def empty_trading_env():
return tmp_trading_env(equities=None)
class SubTestFailures(AssertionError):
def __init__(self, *failures):
self.failures = failures
def __str__(self):
return 'failures:\n %s' % '\n '.join(
'\n '.join((
', '.join('%s=%r' % item for item in scope.items()),
'%s: %s' % (type(exc).__name__, exc),
            )) for scope, exc in self.failures
)
@nottest
def subtest(iterator, *_names):
"""
Construct a subtest in a unittest.
Consider using ``zipline.testing.parameter_space`` when subtests
are constructed over a single input or over the cross-product of multiple
inputs.
``subtest`` works by decorating a function as a subtest. The decorated
    function will be run by iterating over the ``iterator`` and unpacking the
    values into the function. If any of the runs fail, the result will be put
into a set and the rest of the tests will be run. Finally, if any failed,
all of the results will be dumped as one failure.
Parameters
----------
iterator : iterable[iterable]
The iterator of arguments to pass to the function.
    *_names : iterable[str]
The names to use for each element of ``iterator``. These will be used
to print the scope when a test fails. If not provided, it will use the
integer index of the value as the name.
Examples
--------
::
class MyTest(TestCase):
def test_thing(self):
# Example usage inside another test.
@subtest(([n] for n in range(100000)), 'n')
def subtest(n):
self.assertEqual(n % 2, 0, 'n was not even')
subtest()
@subtest(([n] for n in range(100000)), 'n')
def test_decorated_function(self, n):
# Example usage to parameterize an entire function.
self.assertEqual(n % 2, 1, 'n was not odd')
Notes
-----
We use this when we:
* Will never want to run each parameter individually.
* Have a large parameter space we are testing
(see tests/utils/test_events.py).
``nose_parameterized.expand`` will create a test for each parameter
combination which bloats the test output and makes the travis pages slow.
We cannot use ``unittest2.TestCase.subTest`` because nose, pytest, and
nose2 do not support ``addSubTest``.
See Also
--------
zipline.testing.parameter_space
"""
def dec(f):
@wraps(f)
def wrapped(*args, **kwargs):
names = _names
failures = []
for scope in iterator:
scope = tuple(scope)
try:
f(*args + scope, **kwargs)
except Exception as e:
if not names:
names = count()
failures.append((dict(zip(names, scope)), e))
if failures:
raise SubTestFailures(*failures)
return wrapped
return dec
class MockDailyBarReader(object):
def get_value(self, col, sid, dt):
return 100
def create_mock_adjustment_data(splits=None, dividends=None, mergers=None):
if splits is None:
splits = create_empty_splits_mergers_frame()
elif not isinstance(splits, pd.DataFrame):
splits = pd.DataFrame(splits)
if mergers is None:
mergers = create_empty_splits_mergers_frame()
elif not isinstance(mergers, pd.DataFrame):
mergers = pd.DataFrame(mergers)
if dividends is None:
dividends = create_empty_dividends_frame()
elif not isinstance(dividends, pd.DataFrame):
dividends = pd.DataFrame(dividends)
return splits, mergers, dividends
def create_mock_adjustments(tempdir, days, splits=None, dividends=None,
mergers=None):
path = tempdir.getpath("test_adjustments.db")
SQLiteAdjustmentWriter(path, MockDailyBarReader(), days).write(
*create_mock_adjustment_data(splits, dividends, mergers)
)
return path
def assert_timestamp_equal(left, right, compare_nat_equal=True, msg=""):
"""
Assert that two pandas Timestamp objects are the same.
Parameters
----------
left, right : pd.Timestamp
The values to compare.
compare_nat_equal : bool, optional
Whether to consider `NaT` values equal. Defaults to True.
msg : str, optional
A message to forward to `pd.util.testing.assert_equal`.
"""
if compare_nat_equal and left is pd.NaT and right is pd.NaT:
return
return pd.util.testing.assert_equal(left, right, msg=msg)
def powerset(values):
"""
Return the power set (i.e., the set of all subsets) of entries in `values`.
"""
return concat(combinations(values, i) for i in range(len(values) + 1))
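# For example, list(powerset([1, 2])) == [(), (1,), (2,), (1, 2)].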
def to_series(knowledge_dates, earning_dates):
"""
Helper for converting a dict of strings to a Series of datetimes.
This is just for making the test cases more readable.
"""
return pd.Series(
index=pd.to_datetime(knowledge_dates),
data=pd.to_datetime(earning_dates),
)
def gen_calendars(start, stop, critical_dates):
"""
Generate calendars to use as inputs.
"""
all_dates = pd.date_range(start, stop, tz='utc')
for to_drop in map(list, powerset(critical_dates)):
# Have to yield tuples.
yield (all_dates.drop(to_drop),)
# Also test with the trading calendar.
trading_days = get_calendar("NYSE").all_days
yield (trading_days[trading_days.slice_indexer(start, stop)],)
@contextmanager
def temp_pipeline_engine(calendar, sids, random_seed, symbols=None):
"""
    A context manager that yields a SimplePipelineEngine holding a reference to
an AssetFinder generated via tmp_asset_finder.
Parameters
----------
calendar : pd.DatetimeIndex
Calendar to pass to the constructed PipelineEngine.
sids : iterable[int]
Sids to use for the temp asset finder.
random_seed : int
Integer used to seed instances of SeededRandomLoader.
symbols : iterable[str], optional
Symbols for constructed assets. Forwarded to make_simple_equity_info.
"""
equity_info = make_simple_equity_info(
sids=sids,
start_date=calendar[0],
end_date=calendar[-1],
symbols=symbols,
)
loader = make_seeded_random_loader(random_seed, calendar, sids)
def get_loader(column):
return loader
with tmp_asset_finder(equities=equity_info) as finder:
yield SimplePipelineEngine(get_loader, calendar, finder)
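# A minimal usage sketch (sids and seed are arbitrary): the engine and its
# temporary asset finder are cleaned up when the ``with`` block exits.
#
#     with temp_pipeline_engine(cal, [1, 2, 3], random_seed=42) as engine:
#         result = engine.run_pipeline(pipe, cal[0], cal[-1])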
def parameter_space(__fail_fast=False, **params):
"""
Wrapper around subtest that allows passing keywords mapping names to
iterables of values.
    The decorated test function will be called with the cross-product of all
    possible inputs.
Usage
-----
>>> from unittest import TestCase
>>> class SomeTestCase(TestCase):
... @parameter_space(x=[1, 2], y=[2, 3])
... def test_some_func(self, x, y):
... # Will be called with every possible combination of x and y.
... self.assertEqual(somefunc(x, y), expected_result(x, y))
See Also
--------
zipline.testing.subtest
"""
def decorator(f):
argspec = getargspec(f)
if argspec.varargs:
raise AssertionError("parameter_space() doesn't support *args")
if argspec.keywords:
raise AssertionError("parameter_space() doesn't support **kwargs")
if argspec.defaults:
raise AssertionError("parameter_space() doesn't support defaults.")
# Skip over implicit self.
argnames = argspec.args
if argnames[0] == 'self':
argnames = argnames[1:]
extra = set(params) - set(argnames)
if extra:
raise AssertionError(
"Keywords %s supplied to parameter_space() are "
"not in function signature." % extra
)
        unspecified = set(argnames) - set(params)
        if unspecified:
            raise AssertionError(
                "Function arguments %s were not "
                "supplied to parameter_space()." % unspecified
            )
def make_param_sets():
return product(*(params[name] for name in argnames))
if __fail_fast:
@wraps(f)
def wrapped(self):
for args in make_param_sets():
f(self, *args)
return wrapped
else:
@wraps(f)
def wrapped(*args, **kwargs):
subtest(make_param_sets(), *argnames)(f)(*args, **kwargs)
return wrapped
return decorator
def create_empty_dividends_frame():
return pd.DataFrame(
np.array(
[],
dtype=[
('ex_date', 'datetime64[ns]'),
('pay_date', 'datetime64[ns]'),
('record_date', 'datetime64[ns]'),
('declared_date', 'datetime64[ns]'),
('amount', 'float64'),
('sid', 'int32'),
],
),
index=pd.DatetimeIndex([], tz='UTC'),
)
def create_empty_splits_mergers_frame():
return pd.DataFrame(
np.array(
[],
dtype=[
('effective_date', 'int64'),
('ratio', 'float64'),
('sid', 'int64'),
],
),
index=pd.DatetimeIndex([]),
)
def make_alternating_boolean_array(shape, first_value=True):
"""
Create a 2D numpy array with the given shape containing alternating values
of False, True, False, True,... along each row and each column.
Examples
--------
>>> make_alternating_boolean_array((4,4))
array([[ True, False, True, False],
[False, True, False, True],
[ True, False, True, False],
[False, True, False, True]], dtype=bool)
>>> make_alternating_boolean_array((4,3), first_value=False)
array([[False, True, False],
[ True, False, True],
[False, True, False],
[ True, False, True]], dtype=bool)
"""
if len(shape) != 2:
raise ValueError(
'Shape must be 2-dimensional. Given shape was {}'.format(shape)
)
alternating = np.empty(shape, dtype=np.bool)
for row in alternating:
row[::2] = first_value
row[1::2] = not(first_value)
first_value = not(first_value)
return alternating
def make_cascading_boolean_array(shape, first_value=True):
"""
Create a numpy array with the given shape containing cascading boolean
values, with `first_value` being the top-left value.
Examples
--------
>>> make_cascading_boolean_array((4,4))
array([[ True, True, True, False],
[ True, True, False, False],
[ True, False, False, False],
[False, False, False, False]], dtype=bool)
>>> make_cascading_boolean_array((4,2))
array([[ True, False],
[False, False],
[False, False],
[False, False]], dtype=bool)
>>> make_cascading_boolean_array((2,4))
array([[ True, True, True, False],
[ True, True, False, False]], dtype=bool)
"""
if len(shape) != 2:
raise ValueError(
'Shape must be 2-dimensional. Given shape was {}'.format(shape)
)
cascading = np.full(shape, not(first_value), dtype=np.bool)
ending_col = shape[1] - 1
for row in cascading:
if ending_col > 0:
row[:ending_col] = first_value
ending_col -= 1
else:
break
return cascading
@expect_dimensions(array=2)
def permute_rows(seed, array):
"""
Shuffle each row in ``array`` based on permutations generated by ``seed``.
Parameters
----------
seed : int
Seed for numpy.RandomState
array : np.ndarray[ndim=2]
Array over which to apply permutations.
"""
rand = np.random.RandomState(seed)
return np.apply_along_axis(rand.permutation, 1, array)
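# Illustration (arbitrary values): rows are shuffled independently of one
# another, so each row keeps the same multiset of elements.
#
#     shuffled = permute_rows(0, np.array([[1, 2, 3], [4, 5, 6]]))
#     # e.g. [[2, 1, 3], [4, 6, 5]] -- the exact order depends on the seed.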
@nottest
def make_test_handler(testcase, *args, **kwargs):
"""
Returns a TestHandler which will be used by the given testcase. This
handler can be used to test log messages.
Parameters
----------
testcase: unittest.TestCase
The test class in which the log handler will be used.
*args, **kwargs
Forwarded to the new TestHandler object.
Returns
-------
handler: logbook.TestHandler
The handler to use for the test case.
"""
handler = TestHandler(*args, **kwargs)
testcase.addCleanup(handler.close)
return handler
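# A minimal usage sketch (assumes logbook's TestHandler API; the message is a
# placeholder): bind the handler while the code under test logs, then inspect
# what was captured.
#
#     def test_logs_warning(self):
#         handler = make_test_handler(self)
#         with handler.applicationbound():
#             run_code_that_logs()
#         self.assertTrue(handler.has_warning('some message'))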
def write_compressed(path, content):
"""
Write a compressed (gzipped) file to `path`.
"""
with gzip.open(path, 'wb') as f:
f.write(content)
def read_compressed(path):
"""
    Read a compressed (gzipped) file from `path`.
"""
with gzip.open(path, 'rb') as f:
return f.read()
zipline_git_root = abspath(
join(realpath(dirname(__file__)), '..', '..'),
)
@nottest
def test_resource_path(*path_parts):
return os.path.join(zipline_git_root, 'tests', 'resources', *path_parts)
@contextmanager
def patch_os_environment(remove=None, **values):
"""
Context manager for patching the operating system environment.
"""
old_values = {}
remove = remove or []
for key in remove:
old_values[key] = os.environ.pop(key)
    for key, value in values.items():
old_values[key] = os.getenv(key)
os.environ[key] = value
try:
yield
finally:
        for old_key, old_value in old_values.items():
            if old_value is None:
                # Value was not present when we entered, so delete it if it's
                # still present.
                try:
                    del os.environ[old_key]
except KeyError:
pass
else:
# Restore the old value.
os.environ[old_key] = old_value
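# A minimal usage sketch (the variable names are placeholders): ``FOO`` is set
# for the duration of the block and the previous environment is restored on
# exit, even on error.
#
#     with patch_os_environment(FOO='bar'):
#         assert os.environ['FOO'] == 'bar'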
class tmp_dir(TempDirectory, object):
"""New style class that wrapper for TempDirectory in python 2.
"""
pass
class _TmpBarReader(with_metaclass(ABCMeta, tmp_dir)):
"""A helper for tmp_bcolz_equity_minute_bar_reader and
tmp_bcolz_equity_daily_bar_reader.
Parameters
----------
env : TradingEnvironment
The trading env.
days : pd.DatetimeIndex
The days to write for.
data : dict[int -> pd.DataFrame]
The data to write.
path : str, optional
The path to the directory to write the data into. If not given, this
will be a unique name.
"""
@abstractproperty
def _reader_cls(self):
        raise NotImplementedError('_reader_cls')
@abstractmethod
def _write(self, env, days, path, data):
raise NotImplementedError('_write')
def __init__(self, env, days, data, path=None):
super(_TmpBarReader, self).__init__(path=path)
self._env = env
self._days = days
self._data = data
def __enter__(self):
tmpdir = super(_TmpBarReader, self).__enter__()
env = self._env
try:
self._write(
env,
self._days,
tmpdir.path,
self._data,
)
return self._reader_cls(tmpdir.path)
        except BaseException:
            # Clean up the temp directory on any failure, then re-raise.
self.__exit__(None, None, None)
raise
class tmp_bcolz_equity_minute_bar_reader(_TmpBarReader):
"""A temporary BcolzMinuteBarReader object.
Parameters
----------
env : TradingEnvironment
The trading env.
days : pd.DatetimeIndex
The days to write for.
data : iterable[(int, pd.DataFrame)]
The data to write.
path : str, optional
The path to the directory to write the data into. If not given, this
will be a unique name.
See Also
--------
tmp_bcolz_equity_daily_bar_reader
"""
_reader_cls = BcolzMinuteBarReader
_write = staticmethod(write_bcolz_minute_data)
class tmp_bcolz_equity_daily_bar_reader(_TmpBarReader):
"""A temporary BcolzDailyBarReader object.
Parameters
----------
env : TradingEnvironment
The trading env.
days : pd.DatetimeIndex
The days to write for.
data : dict[int -> pd.DataFrame]
The data to write.
path : str, optional
The path to the directory to write the data into. If not given, this
will be a unique name.
See Also
--------
    tmp_bcolz_equity_minute_bar_reader
"""
_reader_cls = BcolzDailyBarReader
@staticmethod
def _write(env, days, path, data):
BcolzDailyBarWriter(path, days).write(data)
@contextmanager
def patch_read_csv(url_map, module=pd, strict=False):
"""Patch pandas.read_csv to map lookups from url to another.
Parameters
----------
url_map : mapping[str or file-like object -> str or file-like object]
The mapping to use to redirect read_csv calls.
module : module, optional
The module to patch ``read_csv`` on. By default this is ``pandas``.
This should be set to another module if ``read_csv`` is early-bound
like ``from pandas import read_csv`` instead of late-bound like:
``import pandas as pd; pd.read_csv``.
strict : bool, optional
If true, then this will assert that ``read_csv`` is only called with
elements in the ``url_map``.
"""
read_csv = pd.read_csv
def patched_read_csv(filepath_or_buffer, *args, **kwargs):
if filepath_or_buffer in url_map:
return read_csv(url_map[filepath_or_buffer], *args, **kwargs)
elif not strict:
return read_csv(filepath_or_buffer, *args, **kwargs)
else:
raise AssertionError(
                'attempted to call read_csv on %r which is not in the url map' %
filepath_or_buffer,
)
with patch.object(module, 'read_csv', patched_read_csv):
yield
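# A minimal usage sketch (the URL and path are placeholders): reads of the
# remote URL are transparently served from a local fixture file instead.
#
#     with patch_read_csv({'http://example.com/data.csv': 'fixtures/data.csv'}):
#         df = pd.read_csv('http://example.com/data.csv')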
def copy_market_data(src_market_data_dir, dest_root_dir):
symbol = 'SPY'
filenames = (get_benchmark_filename(symbol), INDEX_MAPPING[symbol][1])
ensure_directory(os.path.join(dest_root_dir, 'data'))
for filename in filenames:
shutil.copyfile(
os.path.join(src_market_data_dir, filename),
os.path.join(dest_root_dir, 'data', filename)
)
@curry
def ensure_doctest(f, name=None):
"""Ensure that an object gets doctested. This is useful for instances
of objects like curry or partial which are not discovered by default.
Parameters
----------
f : any
The thing to doctest.
name : str, optional
        The name to use in the doctest function mapping. If this is None,
        then ``f.__name__`` will be used.
Returns
-------
f : any
``f`` unchanged.
"""
_getframe(2).f_globals.setdefault('__test__', {})[
f.__name__ if name is None else name
] = f
return f
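# A minimal usage sketch (hypothetical function): without ``ensure_doctest``
# the curried object would be skipped by doctest collection.
#
#     @ensure_doctest
#     @curry
#     def add(a, b):
#         """
#         >>> add(1, 2)
#         3
#         """
#         return a + b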
class RecordBatchBlotter(Blotter):
"""Blotter that tracks how its batch_order method was called.
"""
def __init__(self, data_frequency):
super(RecordBatchBlotter, self).__init__(data_frequency)
self.order_batch_called = []
def batch_order(self, *args, **kwargs):
self.order_batch_called.append((args, kwargs))
return super(RecordBatchBlotter, self).batch_order(*args, **kwargs)
####################################
# Shared factors for pipeline tests.
####################################
class AssetID(CustomFactor):
"""
CustomFactor that returns the AssetID of each asset.
Useful for providing a Factor that produces a different value for each
asset.
"""
window_length = 1
inputs = ()
def compute(self, today, assets, out):
out[:] = assets
class AssetIDPlusDay(CustomFactor):
window_length = 1
inputs = ()
def compute(self, today, assets, out):
out[:] = assets + today.day
class OpenPrice(CustomFactor):
window_length = 1
inputs = [USEquityPricing.open]
def compute(self, today, assets, out, open):
        out[:] = open
.. image:: ./images/zipline-live2.small.png
:target: https://github.com/shlomikushchi/zipline-live2
:width: 212px
:align: center
:alt: zipline-live
************************************************************************************
zipline-live2 is now continued under https://github.com/shlomikushchi/zipline-trader
************************************************************************************
zipline-live2
=============
Welcome to zipline-live2, the on-premise trading platform built on top of Quantopian's
`zipline <https://github.com/quantopian/zipline>`_.
zipline-live2 is based on the `zipline-live <http://www.zipline-live.io>`_ project.
zipline-live2 is designed to be an extensible, drop-in replacement for zipline with
multiple brokerage support to enable on-premise trading of zipline algorithms.
We recommend using Python 3.6+, but Python 2.7 is also supported.
Installation
============
Use a fresh virtual environment:

.. code-block:: batch

   pip install virtualenv
   virtualenv venv

Activate it on Mac OS / Linux:

.. code-block:: batch

   source venv/bin/activate

or on Windows:

.. code-block:: batch

   venv\Scripts\activate

Install the package:

.. code-block:: batch

   pip install zipline-live2

.. image:: ./images/youtube/installing.png
:target: https://www.youtube.com/watch?v=Zh9Vs_yanXY
:width: 212px
:align: center
:alt: zipline-live
For advanced capabilities, the recommended way to use this package is with Docker, using this command:

.. code-block:: docker

   docker build -t quantopian/zipline .

(If your algo requires more packages, you can extend dockerfile-dev and build with: ``docker build -f dockerfile-dev -t quantopian/zipline .``)

You can run everything on a local machine with whatever OS you want, but you may experience package installation issues. Docker is the best way to ensure that you are using the same versions as everyone else.

Ingest data
===========
The quantopian-quandl bundle is free and provides daily data.
When live trading, you should run this every day in order to get the most up-to-date data:

.. code-block:: batch

   zipline ingest -b quantopian-quandl

There is no free minute data. You can use a paid service and create a custom bundle for it.
If you do have the data, the package supports minute algo-trading.
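A minimal sketch of a custom-bundle ``extension.py`` (the bundle name, the ingest function, and ``fetch_minute_frames`` are placeholders for your own data source; the arguments follow zipline's standard ingest signature):

.. code-block:: python

   from zipline.data.bundles import register

   def my_minute_bundle(environ, asset_db_writer, minute_bar_writer,
                        daily_bar_writer, adjustment_writer, calendar,
                        start_session, end_session, cache, show_progress,
                        output_dir):
       # Write an iterable of (sid, OHLCV DataFrame) pairs of minute bars.
       minute_bar_writer.write(fetch_minute_frames(),
                               show_progress=show_progress)

   register('my-minute-bundle', my_minute_bundle)
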
Running Backtests
=================
You can run a backtest with this command:

.. code-block:: batch

   zipline run -f zipline_repo/zipline/examples/dual_moving_average.py --start 2015-1-1 --end 2018-1-1 --bundle quantopian-quandl -o out.pickle --capital-base 10000

.. image:: ./images/youtube/command_line_backtest.png
:target: https://youtu.be/jeuiCpx9k7Q
:width: 212px
:align: center
:alt: zipline-live
Run the cli tool
================
.. code-block:: batch

   zipline run -f ~/zipline-algos/demo.py --state-file ~/zipline-algos/demo.state --realtime-bar-target ~/zipline-algos/realtime-bars/ --broker ib --broker-uri localhost:7496:1232 --bundle quantopian-quandl --data-frequency minute
from __future__ import print_function
try:
import configparser
except ImportError:
import ConfigParser as configparser
import errno
import json
import os
import re
import subprocess
import sys
class VersioneerConfig:
pass
def get_root():
# we require that all commands are run from the project root, i.e. the
# directory that contains setup.py, setup.cfg, and versioneer.py .
root = os.path.realpath(os.path.abspath(os.getcwd()))
setup_py = os.path.join(root, "setup.py")
versioneer_py = os.path.join(root, "versioneer.py")
if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
# allow 'python path/to/setup.py COMMAND'
root = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[0])))
setup_py = os.path.join(root, "setup.py")
versioneer_py = os.path.join(root, "versioneer.py")
if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
err = ("Versioneer was unable to run the project root directory. "
"Versioneer requires setup.py to be executed from "
"its immediate directory (like 'python setup.py COMMAND'), "
"or in a way that lets it use sys.argv[0] to find the root "
"(like 'python path/to/setup.py COMMAND').")
raise VersioneerBadRootError(err)
try:
# Certain runtime workflows (setup.py install/develop in a setuptools
# tree) execute all dependencies in a single python process, so
# "versioneer" may be imported multiple times, and python's shared
# module-import table will cache the first one. So we can't use
# os.path.dirname(__file__), as that will find whichever
# versioneer.py was first imported, even in later projects.
me = os.path.realpath(os.path.abspath(__file__))
if os.path.splitext(me)[0] != os.path.splitext(versioneer_py)[0]:
print("Warning: build in %s is using versioneer.py from %s"
% (os.path.dirname(me), versioneer_py))
except NameError:
pass
return root
def get_config_from_root(root):
# This might raise EnvironmentError (if setup.cfg is missing), or
# configparser.NoSectionError (if it lacks a [versioneer] section), or
# configparser.NoOptionError (if it lacks "VCS="). See the docstring at
# the top of versioneer.py for instructions on writing your setup.cfg .
setup_cfg = os.path.join(root, "setup.cfg")
parser = configparser.SafeConfigParser()
with open(setup_cfg, "r") as f:
parser.readfp(f)
VCS = parser.get("versioneer", "VCS") # mandatory
def get(parser, name):
if parser.has_option("versioneer", name):
return parser.get("versioneer", name)
return None
cfg = VersioneerConfig()
cfg.VCS = VCS
cfg.style = get(parser, "style") or ""
cfg.versionfile_source = get(parser, "versionfile_source")
cfg.versionfile_build = get(parser, "versionfile_build")
cfg.tag_prefix = get(parser, "tag_prefix")
cfg.parentdir_prefix = get(parser, "parentdir_prefix")
cfg.verbose = get(parser, "verbose")
return cfg
class NotThisMethod(Exception):
pass
# these dictionaries contain VCS-specific tools
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
def decorate(f):
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
return None
return stdout
LONG_VERSION_PY['git'] = '''
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.15 (https://github.com/warner/python-versioneer)
import errno
import os
import re
import subprocess
import sys
def get_keywords():
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "%(DOLLAR)sFormat:%%d%(DOLLAR)s"
git_full = "%(DOLLAR)sFormat:%%H%(DOLLAR)s"
keywords = {"refnames": git_refnames, "full": git_full}
return keywords
class VersioneerConfig:
pass
def get_config():
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "%(STYLE)s"
cfg.tag_prefix = "%(TAG_PREFIX)s"
cfg.parentdir_prefix = "%(PARENTDIR_PREFIX)s"
cfg.versionfile_source = "%(VERSIONFILE_SOURCE)s"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
pass
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
def decorate(f):
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %%s" %% dispcmd)
print(e)
return None
else:
if verbose:
print("unable to find command, tried %%s" %% (commands,))
return None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %%s (error)" %% dispcmd)
return None
return stdout
def versions_from_parentdir(parentdir_prefix, root, verbose):
# Source tarballs conventionally unpack into a directory that includes
# both the project name and a version string.
dirname = os.path.basename(root)
if not dirname.startswith(parentdir_prefix):
if verbose:
print("guessing rootdir is '%%s', but '%%s' doesn't start with "
"prefix '%%s'" %% (root, dirname, parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None}
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
if not keywords:
raise NotThisMethod("no keywords at all, weird")
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %%d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%%s', no digits" %% ",".join(refs-tags))
if verbose:
print("likely tags: %%s" %% ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %%s" %% r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None
}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags"}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
# this runs 'git' from the root of the source tree. This only gets called
# if the git-archive 'subst' keywords were *not* expanded, and
# _version.py hasn't already been rewritten with a short version string,
# meaning we're inside a checked out source tree.
if not os.path.exists(os.path.join(root, ".git")):
if verbose:
print("no .git in %%s" %% root)
raise NotThisMethod("no .git directory")
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
# if there is a tag, this yields TAG-NUM-gHEX[-dirty]
# if there are no tags, this yields HEX[-dirty] (no NUM)
describe_out = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long"],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%%s'"
%% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%%s' doesn't start with prefix '%%s'"
print(fmt %% (full_tag, tag_prefix))
pieces["error"] = ("tag '%%s' doesn't start with prefix '%%s'"
%% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
return pieces
def plus_or_dot(pieces):
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
# now build up version string, with post-release "local version
# identifier". Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
# get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
# exceptions:
# 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%%d.g%%s" %% (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%%d.g%%s" %% (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_pre(pieces):
# TAG[.post.devDISTANCE] . No -dirty
# exceptions:
# 1: no tags. 0.post.devDISTANCE
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%%d" %% pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%%d" %% pieces["distance"]
return rendered
def render_pep440_post(pieces):
# TAG[.postDISTANCE[.dev0]+gHEX] . The ".dev0" means dirty. Note that
# .dev0 sorts backwards (a dirty tree will appear "older" than the
# corresponding clean one), but you shouldn't be releasing software with
# -dirty anyways.
# exceptions:
# 1: no tags. 0.postDISTANCE[.dev0]
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%%s" %% pieces["short"]
else:
# exception #1
rendered = "0.post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%%s" %% pieces["short"]
return rendered
def render_pep440_old(pieces):
# TAG[.postDISTANCE[.dev0]] . The ".dev0" means dirty.
# exceptions:
# 1: no tags. 0.postDISTANCE[.dev0]
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
# TAG[-DISTANCE-gHEX][-dirty], like 'git describe --tags --dirty
# --always'
# exceptions:
# 1: no tags. HEX[-dirty] (note: no 'g' prefix)
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
# TAG-DISTANCE-gHEX[-dirty], like 'git describe --tags --dirty
    # --always --long'. The distance/hash is unconditional.
# exceptions:
# 1: no tags. HEX[-dirty] (note: no 'g' prefix)
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"]}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%%s'" %% style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None}
def get_versions():
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in cfg.versionfile_source.split('/'):
root = os.path.dirname(root)
except NameError:
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree"}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to compute version"}
'''
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
if not keywords:
raise NotThisMethod("no keywords at all, weird")
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs-tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %s" % r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None
}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags"}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
# this runs 'git' from the root of the source tree. This only gets called
# if the git-archive 'subst' keywords were *not* expanded, and
# _version.py hasn't already been rewritten with a short version string,
# meaning we're inside a checked out source tree.
if not os.path.exists(os.path.join(root, ".git")):
if verbose:
print("no .git in %s" % root)
raise NotThisMethod("no .git directory")
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
# if there is a tag, this yields TAG-NUM-gHEX[-dirty]
# if there are no tags, this yields HEX[-dirty] (no NUM)
describe_out = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long"],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%s'"
% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
return pieces
def do_vcs_install(manifest_in, versionfile_source, ipy):
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
files = [manifest_in, versionfile_source]
if ipy:
files.append(ipy)
try:
me = __file__
if me.endswith(".pyc") or me.endswith(".pyo"):
me = os.path.splitext(me)[0] + ".py"
versioneer_file = os.path.relpath(me)
except NameError:
versioneer_file = "versioneer.py"
files.append(versioneer_file)
present = False
try:
f = open(".gitattributes", "r")
for line in f.readlines():
if line.strip().startswith(versionfile_source):
if "export-subst" in line.strip().split()[1:]:
present = True
f.close()
except EnvironmentError:
pass
if not present:
f = open(".gitattributes", "a+")
f.write("%s export-subst\n" % versionfile_source)
f.close()
files.append(".gitattributes")
run_command(GITS, ["add", "--"] + files)
def versions_from_parentdir(parentdir_prefix, root, verbose):
# Source tarballs conventionally unpack into a directory that includes
# both the project name and a version string.
dirname = os.path.basename(root)
if not dirname.startswith(parentdir_prefix):
if verbose:
print("guessing rootdir is '%s', but '%s' doesn't start with "
"prefix '%s'" % (root, dirname, parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None}
SHORT_VERSION_PY = """
# This file was generated by 'versioneer.py' (0.15) from
# revision-control system data, or from the parent directory name of an
# unpacked source archive. Distribution tarballs contain a pre-generated copy
# of this file.
import json
import sys
version_json = '''
%s
''' # END VERSION_JSON
def get_versions():
return json.loads(version_json)
"""
def versions_from_file(filename):
try:
with open(filename) as f:
contents = f.read()
except EnvironmentError:
raise NotThisMethod("unable to read _version.py")
mo = re.search(r"version_json = '''\n(.*)''' # END VERSION_JSON",
contents, re.M | re.S)
if not mo:
raise NotThisMethod("no version_json in _version.py")
return json.loads(mo.group(1))
def write_to_version_file(filename, versions):
os.unlink(filename)
contents = json.dumps(versions, sort_keys=True,
indent=1, separators=(",", ": "))
with open(filename, "w") as f:
f.write(SHORT_VERSION_PY % contents)
print("set %s to '%s'" % (filename, versions["version"]))
def plus_or_dot(pieces):
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
# now build up version string, with post-release "local version
# identifier". Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
# get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
# exceptions:
# 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
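# Worked example (illustrative only, not part of versioneer itself): a tree
# that is 3 commits past tag "1.2" at short hash 0123abc with local edits
# renders as a PEP 440 version with a local-version identifier:
#
#     pieces = {"closest-tag": "1.2", "distance": 3, "short": "0123abc",
#               "dirty": True}
#     render_pep440(pieces)  # -> "1.2+3.g0123abc.dirty"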
def render_pep440_pre(pieces):
# TAG[.post.devDISTANCE] . No -dirty
# exceptions:
# 1: no tags. 0.post.devDISTANCE
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%d" % pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
# TAG[.postDISTANCE[.dev0]+gHEX] . The ".dev0" means dirty. Note that
# .dev0 sorts backwards (a dirty tree will appear "older" than the
# corresponding clean one), but you shouldn't be releasing software with
# -dirty anyways.
# exceptions:
# 1: no tags. 0.postDISTANCE[.dev0]
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
return rendered
def render_pep440_old(pieces):
# TAG[.postDISTANCE[.dev0]] . The ".dev0" means dirty.
# exceptions:
# 1: no tags. 0.postDISTANCE[.dev0]
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
# TAG[-DISTANCE-gHEX][-dirty], like 'git describe --tags --dirty
# --always'
# exceptions:
# 1: no tags. HEX[-dirty] (note: no 'g' prefix)
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
# TAG-DISTANCE-gHEX[-dirty], like 'git describe --tags --dirty
    # --always --long'. The distance/hash is unconditional.
# exceptions:
# 1: no tags. HEX[-dirty] (note: no 'g' prefix)
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"]}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None}
class VersioneerBadRootError(Exception):
pass
def get_versions(verbose=False):
# returns dict with two keys: 'version' and 'full'
if "versioneer" in sys.modules:
# see the discussion in cmdclass.py:get_cmdclass()
del sys.modules["versioneer"]
root = get_root()
cfg = get_config_from_root(root)
assert cfg.VCS is not None, "please set [versioneer]VCS= in setup.cfg"
handlers = HANDLERS.get(cfg.VCS)
assert handlers, "unrecognized VCS '%s'" % cfg.VCS
verbose = verbose or cfg.verbose
assert cfg.versionfile_source is not None, \
"please set versioneer.versionfile_source"
assert cfg.tag_prefix is not None, "please set versioneer.tag_prefix"
versionfile_abs = os.path.join(root, cfg.versionfile_source)
# extract version from first of: _version.py, VCS command (e.g. 'git
# describe'), parentdir. This is meant to work for developers using a
# source checkout, for users of a tarball created by 'setup.py sdist',
# and for users of a tarball/zipball created by 'git archive' or github's
# download-from-tag feature or the equivalent in other VCSes.
get_keywords_f = handlers.get("get_keywords")
from_keywords_f = handlers.get("keywords")
if get_keywords_f and from_keywords_f:
try:
keywords = get_keywords_f(versionfile_abs)
ver = from_keywords_f(keywords, cfg.tag_prefix, verbose)
if verbose:
print("got version from expanded keyword %s" % ver)
return ver
except NotThisMethod:
pass
try:
ver = versions_from_file(versionfile_abs)
if verbose:
print("got version from file %s %s" % (versionfile_abs, ver))
return ver
except NotThisMethod:
pass
from_vcs_f = handlers.get("pieces_from_vcs")
if from_vcs_f:
try:
pieces = from_vcs_f(cfg.tag_prefix, root, verbose)
ver = render(pieces, cfg.style)
if verbose:
print("got version from VCS %s" % ver)
return ver
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
ver = versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
if verbose:
print("got version from parentdir %s" % ver)
return ver
except NotThisMethod:
pass
if verbose:
print("unable to compute version")
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None, "error": "unable to compute version"}
def get_version():
return get_versions()["version"]
def get_cmdclass():
if "versioneer" in sys.modules:
del sys.modules["versioneer"]
# this fixes the "python setup.py develop" case (also 'install' and
# 'easy_install .'), in which subdependencies of the main project are
# built (using setup.py bdist_egg) in the same python process. Assume
# a main project A and a dependency B, which use different versions
# of Versioneer. A's setup.py imports A's Versioneer, leaving it in
# sys.modules by the time B's setup.py is executed, causing B to run
# with the wrong versioneer. Setuptools wraps the sub-dep builds in a
    # sandbox that restores sys.modules to its pre-build state, so the
# parent is protected against the child's "import versioneer". By
# removing ourselves from sys.modules here, before the child build
# happens, we protect the child from the parent's versioneer too.
# Also see https://github.com/warner/python-versioneer/issues/52
cmds = {}
# we add "version" to both distutils and setuptools
from distutils.core import Command
class cmd_version(Command):
description = "report generated version string"
user_options = []
boolean_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
vers = get_versions(verbose=True)
print("Version: %s" % vers["version"])
print(" full-revisionid: %s" % vers.get("full-revisionid"))
print(" dirty: %s" % vers.get("dirty"))
if vers["error"]:
print(" error: %s" % vers["error"])
cmds["version"] = cmd_version
# we override "build_py" in both distutils and setuptools
#
# most invocation pathways end up running build_py:
# distutils/build -> build_py
# distutils/install -> distutils/build ->..
# setuptools/bdist_wheel -> distutils/install ->..
# setuptools/bdist_egg -> distutils/install_lib -> build_py
# setuptools/install -> bdist_egg ->..
# setuptools/develop -> ?
from distutils.command.build_py import build_py as _build_py
class cmd_build_py(_build_py):
def run(self):
root = get_root()
cfg = get_config_from_root(root)
versions = get_versions()
_build_py.run(self)
# now locate _version.py in the new build/ directory and replace
# it with an updated value
if cfg.versionfile_build:
target_versionfile = os.path.join(self.build_lib,
cfg.versionfile_build)
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile, versions)
cmds["build_py"] = cmd_build_py
if "cx_Freeze" in sys.modules: # cx_freeze enabled?
from cx_Freeze.dist import build_exe as _build_exe
class cmd_build_exe(_build_exe):
def run(self):
root = get_root()
cfg = get_config_from_root(root)
versions = get_versions()
target_versionfile = cfg.versionfile_source
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile, versions)
_build_exe.run(self)
os.unlink(target_versionfile)
with open(cfg.versionfile_source, "w") as f:
LONG = LONG_VERSION_PY[cfg.VCS]
f.write(LONG %
{"DOLLAR": "$",
"STYLE": cfg.style,
"TAG_PREFIX": cfg.tag_prefix,
"PARENTDIR_PREFIX": cfg.parentdir_prefix,
"VERSIONFILE_SOURCE": cfg.versionfile_source,
})
cmds["build_exe"] = cmd_build_exe
del cmds["build_py"]
# we override different "sdist" commands for both environments
if "setuptools" in sys.modules:
from setuptools.command.sdist import sdist as _sdist
else:
from distutils.command.sdist import sdist as _sdist
class cmd_sdist(_sdist):
def run(self):
versions = get_versions()
self._versioneer_generated_versions = versions
# unless we update this, the command will keep using the old
# version
self.distribution.metadata.version = versions["version"]
return _sdist.run(self)
def make_release_tree(self, base_dir, files):
root = get_root()
cfg = get_config_from_root(root)
_sdist.make_release_tree(self, base_dir, files)
# now locate _version.py in the new base_dir directory
# (remembering that it may be a hardlink) and replace it with an
# updated value
target_versionfile = os.path.join(base_dir, cfg.versionfile_source)
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile,
self._versioneer_generated_versions)
cmds["sdist"] = cmd_sdist
return cmds
CONFIG_ERROR = """
setup.cfg is missing the necessary Versioneer configuration. You need
a section like:
[versioneer]
VCS = git
style = pep440
versionfile_source = src/myproject/_version.py
versionfile_build = myproject/_version.py
tag_prefix = ""
parentdir_prefix = myproject-
You will also need to edit your setup.py to use the results:
import versioneer
setup(version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(), ...)
Please read the docstring in ./versioneer.py for configuration instructions,
edit setup.cfg, and re-run the installer or 'python versioneer.py setup'.
"""
SAMPLE_CONFIG = """
# See the docstring in versioneer.py for instructions. Note that you must
# re-run 'versioneer.py setup' after changing this section, and commit the
# resulting files.
[versioneer]
#VCS = git
#style = pep440
#versionfile_source =
#versionfile_build =
#tag_prefix =
#parentdir_prefix =
"""
INIT_PY_SNIPPET = """
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
"""
def do_setup():
root = get_root()
try:
cfg = get_config_from_root(root)
except (EnvironmentError, configparser.NoSectionError,
configparser.NoOptionError) as e:
if isinstance(e, (EnvironmentError, configparser.NoSectionError)):
print("Adding sample versioneer config to setup.cfg",
file=sys.stderr)
with open(os.path.join(root, "setup.cfg"), "a") as f:
f.write(SAMPLE_CONFIG)
print(CONFIG_ERROR, file=sys.stderr)
return 1
print(" creating %s" % cfg.versionfile_source)
with open(cfg.versionfile_source, "w") as f:
LONG = LONG_VERSION_PY[cfg.VCS]
f.write(LONG % {"DOLLAR": "$",
"STYLE": cfg.style,
"TAG_PREFIX": cfg.tag_prefix,
"PARENTDIR_PREFIX": cfg.parentdir_prefix,
"VERSIONFILE_SOURCE": cfg.versionfile_source,
})
ipy = os.path.join(os.path.dirname(cfg.versionfile_source),
"__init__.py")
if os.path.exists(ipy):
try:
with open(ipy, "r") as f:
old = f.read()
except EnvironmentError:
old = ""
if INIT_PY_SNIPPET not in old:
print(" appending to %s" % ipy)
with open(ipy, "a") as f:
f.write(INIT_PY_SNIPPET)
else:
print(" %s unmodified" % ipy)
else:
print(" %s doesn't exist, ok" % ipy)
ipy = None
# Make sure both the top-level "versioneer.py" and versionfile_source
# (PKG/_version.py, used by runtime code) are in MANIFEST.in, so
# they'll be copied into source distributions. Pip won't be able to
# install the package without this.
manifest_in = os.path.join(root, "MANIFEST.in")
simple_includes = set()
try:
with open(manifest_in, "r") as f:
for line in f:
if line.startswith("include "):
for include in line.split()[1:]:
simple_includes.add(include)
except EnvironmentError:
pass
# That doesn't cover everything MANIFEST.in can do
# (http://docs.python.org/2/distutils/sourcedist.html#commands), so
# it might give some false negatives. Appending redundant 'include'
# lines is safe, though.
if "versioneer.py" not in simple_includes:
print(" appending 'versioneer.py' to MANIFEST.in")
with open(manifest_in, "a") as f:
f.write("include versioneer.py\n")
else:
print(" 'versioneer.py' already in MANIFEST.in")
if cfg.versionfile_source not in simple_includes:
print(" appending versionfile_source ('%s') to MANIFEST.in" %
cfg.versionfile_source)
with open(manifest_in, "a") as f:
f.write("include %s\n" % cfg.versionfile_source)
else:
print(" versionfile_source already in MANIFEST.in")
# Make VCS-specific changes. For git, this means creating/changing
# .gitattributes to mark _version.py for export-time keyword
# substitution.
do_vcs_install(manifest_in, cfg.versionfile_source, ipy)
return 0
def scan_setup_py():
found = set()
setters = False
errors = 0
with open("setup.py", "r") as f:
for line in f.readlines():
if "import versioneer" in line:
found.add("import")
if "versioneer.get_cmdclass()" in line:
found.add("cmdclass")
if "versioneer.get_version()" in line:
found.add("get_version")
if "versioneer.VCS" in line:
setters = True
if "versioneer.versionfile_source" in line:
setters = True
if len(found) != 3:
print("")
print("Your setup.py appears to be missing some important items")
print("(but I might be wrong). Please make sure it has something")
print("roughly like the following:")
print("")
print(" import versioneer")
print(" setup( version=versioneer.get_version(),")
print(" cmdclass=versioneer.get_cmdclass(), ...)")
print("")
errors += 1
if setters:
print("You should remove lines like 'versioneer.VCS = ' and")
print("'versioneer.versionfile_source = ' . This configuration")
print("now lives in setup.cfg, and should be removed from setup.py")
print("")
errors += 1
return errors
if __name__ == "__main__":
cmd = sys.argv[1]
if cmd == "setup":
errors = do_setup()
errors += scan_setup_py()
if errors:
            sys.exit(1)
import errno
import os
from importlib import import_module
# from functools import wraps
import click
import logbook
import pandas as pd
from six import text_type
import zipline
import pkgutil
from zipline.data import bundles as bundles_module
from trading_calendars import get_calendar
from zipline.utils.compat import wraps
from zipline.utils.cli import Date, Timestamp
from zipline.utils.run_algo import _run, load_extensions
from zipline.extensions import create_args
from zipline.gens import brokers
try:
__IPYTHON__
except NameError:
__IPYTHON__ = False
@click.group()
@click.option(
'-e',
'--extension',
multiple=True,
help='File or module path to a zipline extension to load.',
)
@click.option(
'--strict-extensions/--non-strict-extensions',
is_flag=True,
help='If --strict-extensions is passed then zipline will not run if it'
' cannot load all of the specified extensions. If this is not passed or'
' --non-strict-extensions is passed then the failure will be logged but'
' execution will continue.',
)
@click.option(
'--default-extension/--no-default-extension',
is_flag=True,
default=True,
help="Don't load the default zipline extension.py file in $ZIPLINE_HOME.",
)
@click.option(
'-x',
multiple=True,
help='Any custom command line arguments to define, in key=value form.'
)
def main(extension, strict_extensions, default_extension, x):
"""Top level zipline entry point.
"""
# install a logbook handler before performing any other operations
logbook.StderrHandler().push_application()
create_args(x, zipline.extension_args)
load_extensions(
default_extension,
extension,
strict_extensions,
os.environ,
)
def extract_option_object(option):
"""Convert a click.option call into a click.Option object.
Parameters
----------
option : decorator
A click.option decorator.
Returns
-------
option_object : click.Option
The option object that this decorator will create.
"""
@option
def opt():
pass
return opt.__click_params__[0]
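# A quick illustration (hypothetical option): the extracted Option object
# exposes the parameter name click derives from the flag.
#
#     opt = extract_option_object(click.option('--foo', default=1))
#     opt.name  # -> 'foo'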
def ipython_only(option):
"""Mark that an option should only be exposed in IPython.
Parameters
----------
option : decorator
A click.option decorator.
Returns
-------
ipython_only_dec : decorator
A decorator that correctly applies the argument even when not
using IPython mode.
"""
if __IPYTHON__:
return option
argname = extract_option_object(option).name
def d(f):
@wraps(f)
def _(*args, **kwargs):
kwargs[argname] = None
return f(*args, **kwargs)
return _
return d
@main.command()
@click.option(
'-f',
'--algofile',
default=None,
type=click.File('r'),
help='The file that contains the algorithm to run.',
)
@click.option(
'-t',
'--algotext',
help='The algorithm script to run.',
)
@click.option(
'-D',
'--define',
multiple=True,
help="Define a name to be bound in the namespace before executing"
" the algotext. For example '-Dname=value'. The value may be any python"
" expression. These are evaluated in order so they may refer to previously"
" defined names.",
)
@click.option(
'--data-frequency',
type=click.Choice({'daily', 'minute'}),
default='daily',
show_default=True,
help='The data frequency of the simulation.',
)
@click.option(
'--capital-base',
type=float,
default=10e6,
show_default=True,
help='The starting capital for the simulation.',
)
@click.option(
'-b',
'--bundle',
default='quandl',
metavar='BUNDLE-NAME',
show_default=True,
help='The data bundle to use for the simulation.',
)
@click.option(
'--bundle-timestamp',
type=Timestamp(),
default=pd.Timestamp.utcnow(),
show_default=False,
    help='The date to look up data on or before.\n'
    '[default: <current-time>]'
)
@click.option(
'-s',
'--start',
type=Date(tz='utc', as_timestamp=True),
help='The start date of the simulation.',
)
@click.option(
'-e',
'--end',
type=Date(tz='utc', as_timestamp=True),
help='The end date of the simulation.',
)
@click.option(
'-o',
'--output',
default='-',
metavar='FILENAME',
show_default=True,
help="The location to write the perf data. If this is '-' the perf will"
" be written to stdout.",
)
@click.option(
'--trading-calendar',
metavar='TRADING-CALENDAR',
default='NYSE',
help="The calendar you want to use e.g. LSE. NYSE is the default."
)
@click.option(
'--print-algo/--no-print-algo',
is_flag=True,
default=False,
help='Print the algorithm to stdout.',
)
@click.option(
'--metrics-set',
default='default',
help='The metrics set to use. New metrics sets may be registered in your'
' extension.py.',
)
@click.option(
'--blotter',
default='default',
help="The blotter to use.",
show_default=True,
)
@ipython_only(click.option(
'--local-namespace/--no-local-namespace',
is_flag=True,
default=None,
help='Should the algorithm methods be resolved in the local namespace.'
))
@click.option(
'--broker',
default=None,
    help='The broker backend to use for live trading.'
)
@click.option(
'--broker-uri',
default=None,
metavar='BROKER-URI',
show_default=True,
    help='Connection URI for the broker.',
)
@click.option(
'--state-file',
default=None,
metavar='FILENAME',
help='Filename where the state will be stored'
)
@click.option(
'--realtime-bar-target',
default=None,
metavar='DIRNAME',
    help='Directory where minutely bars collected in real time are saved.'
)
@click.option(
'--list-brokers',
is_flag=True,
help='Get list of available brokers'
)
@click.pass_context
def run(ctx,
algofile,
algotext,
define,
data_frequency,
capital_base,
bundle,
bundle_timestamp,
start,
end,
output,
trading_calendar,
print_algo,
metrics_set,
local_namespace,
blotter,
broker,
broker_uri,
state_file,
realtime_bar_target,
list_brokers):
"""Run a backtest for the given algorithm.
"""
if list_brokers:
click.echo("Supported brokers:")
for _, name, _ in pkgutil.iter_modules(brokers.__path__):
if name != 'broker':
click.echo(name)
return
# check that the start and end dates are passed correctly
if not broker and start is None and end is None:
# check both at the same time to avoid the case where a user
# does not pass either of these and then passes the first only
# to be told they need to pass the second argument also
ctx.fail(
"must specify dates with '-s' / '--start' and '-e' / '--end'",
)
if not broker and start is None:
ctx.fail("must specify a start date with '-s' / '--start'")
if not broker and end is None:
ctx.fail("must specify an end date with '-e' / '--end'")
if broker and broker_uri is None:
ctx.fail("must specify broker-uri if broker is specified")
if broker and state_file is None:
ctx.fail("must specify state-file with live trading")
if broker and realtime_bar_target is None:
ctx.fail("must specify realtime-bar-target with live trading")
brokerobj = None
if broker:
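        # Brokers are resolved by naming convention: a hypothetical
        # ``--broker foo`` maps to the module
        # ``zipline.gens.brokers.foo_broker`` and the class ``FOOBroker``
        # within it.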
mod_name = 'zipline.gens.brokers.%s_broker' % broker.lower()
try:
bmod = import_module(mod_name)
except ImportError:
ctx.fail("unsupported broker: can't import module %s" % mod_name)
cl_name = '%sBroker' % broker.upper()
try:
bclass = getattr(bmod, cl_name)
except AttributeError:
ctx.fail("unsupported broker: can't import class %s from %s" %
(cl_name, mod_name))
brokerobj = bclass(broker_uri)
if end is None:
        end = pd.Timestamp.utcnow() + pd.Timedelta(days=1, seconds=1)  # add one extra second so end is strictly more than one day ahead
if (algotext is not None) == (algofile is not None):
ctx.fail(
"must specify exactly one of '-f' / '--algofile' or"
" '-t' / '--algotext'",
)
trading_calendar = get_calendar(trading_calendar)
perf = _run(
initialize=None,
handle_data=None,
before_trading_start=None,
analyze=None,
teardown=None,
algofile=algofile,
algotext=algotext,
defines=define,
data_frequency=data_frequency,
capital_base=capital_base,
bundle=bundle,
bundle_timestamp=bundle_timestamp,
start=start,
end=end,
output=output,
trading_calendar=trading_calendar,
print_algo=print_algo,
metrics_set=metrics_set,
local_namespace=local_namespace,
environ=os.environ,
blotter=blotter,
benchmark_returns=None,
broker=brokerobj,
state_filename=state_file,
realtime_bar_target=realtime_bar_target,
performance_callback=None,
stop_execution_callback=None,
execution_id=None
)
if output == '-':
click.echo(str(perf))
elif output != os.devnull: # make the zipline magic not write any data
perf.to_pickle(output)
return perf
def zipline_magic(line, cell=None):
"""The zipline IPython cell magic.
"""
load_extensions(
default=True,
extensions=[],
strict=True,
environ=os.environ,
)
try:
return run.main(
# put our overrides at the start of the parameter list so that
# users may pass values with higher precedence
[
'--algotext', cell,
'--output', os.devnull, # don't write the results by default
] + ([
# these options are set when running in line magic mode
# set a non None algo text to use the ipython user_ns
'--algotext', '',
'--local-namespace',
] if cell is None else []) + line.split(),
'%s%%zipline' % ((cell or '') and '%'),
            # don't use system exit and propagate errors to the caller
standalone_mode=False,
)
except SystemExit as e:
# https://github.com/mitsuhiko/click/pull/533
# even in standalone_mode=False `--help` really wants to kill us ;_;
if e.code:
raise ValueError('main returned non-zero status code: %d' % e.code)
@main.command()
@click.option(
'-b',
'--bundle',
default='quandl',
metavar='BUNDLE-NAME',
show_default=True,
help='The data bundle to ingest.',
)
@click.option(
'--assets-version',
type=int,
multiple=True,
help='Version of the assets db to which to downgrade.',
)
@click.option(
'--show-progress/--no-show-progress',
default=True,
help='Print progress information to the terminal.'
)
@click.option(
'--file-logging/--no-file-logging',
default=False,
help='Duplicate log to file.'
)
def ingest(bundle, assets_version, show_progress, file_logging):
"""Ingest the data for the given bundle.
"""
from logbook import FileHandler
import datetime
from zipline.utils.paths import zipline_root
if file_logging:
log_file = zipline_root() + f'/ingest{datetime.datetime.now().strftime("%d%m%Y%H%M")}.log'
FileHandler(log_file, bubble=True).push_application()
bundles_module.ingest(
bundle,
os.environ,
pd.Timestamp.utcnow(),
assets_version,
show_progress,
)
@main.command()
@click.option(
'-b',
'--bundle',
default='quandl',
metavar='BUNDLE-NAME',
show_default=True,
help='The data bundle to ingest.',
)
@click.option(
'--assets-version',
type=int,
multiple=True,
help='Version of the assets db to which to downgrade.',
)
@click.option(
'--show-progress/--no-show-progress',
default=True,
help='Print progress information to the terminal.'
)
@click.option(
'--file-logging/--no-file-logging',
default=False,
help='Duplicate log to file.'
)
def exclude_and_ingest(bundle, assets_version, show_progress, file_logging):
"""Ingest the data for the given bundle.
"""
from logbook import FileHandler
import datetime
from zipline.data.bundles.excluder import exclude_from_web
from zipline.utils.paths import zipline_root
if file_logging:
log_file = zipline_root() + f'/ingest_{bundle}_{datetime.datetime.now().strftime("%d%m%Y%H%M")}.log'
FileHandler(log_file, bubble=True).push_application()
exclude_from_web(bundle_module=bundle.replace('-', '_'),
look_for_file=True)
bundles_module.ingest(
bundle,
os.environ,
pd.Timestamp.utcnow(),
assets_version,
show_progress,
)
@main.command()
@click.option(
'-s',
'--start-date',
default='2013-01-01',
    help='Starting date for the fundamentals download.',
)
@click.option(
'--file-logging/--no-file-logging',
default=False,
help='Duplicate log to file.'
)
def ingest_fundamentals(start_date, file_logging):
"""Ingest the data for the given bundle.
"""
from logbook import FileHandler
import datetime
from zipline.utils.paths import zipline_root
if file_logging:
log_file = zipline_root() + f'/ingest_fundamentals_{datetime.datetime.now().strftime("%d%m%Y%H%M")}.log'
FileHandler(log_file, bubble=True).push_application()
bundles_module.quandl_fundamentals.download_all(start_date)
@main.command()
@click.option(
'-b',
'--bundle',
default='quandl',
metavar='BUNDLE-NAME',
show_default=True,
help='The data bundle to clean.',
)
@click.option(
'-e',
'--before',
type=Timestamp(),
help='Clear all data before TIMESTAMP.'
' This may not be passed with -k / --keep-last',
)
@click.option(
'-a',
'--after',
type=Timestamp(),
    help='Clear all data after TIMESTAMP.'
    ' This may not be passed with -k / --keep-last',
)
@click.option(
'-k',
'--keep-last',
type=int,
metavar='N',
help='Clear all but the last N downloads.'
' This may not be passed with -e / --before or -a / --after',
)
def clean(bundle, before, after, keep_last):
"""Clean up data downloaded with the ingest command.
"""
bundles_module.clean(
bundle,
before,
after,
keep_last,
)
@main.command()
def bundles():
"""List all of the available data bundles.
"""
for bundle in sorted(bundles_module.bundles.keys()):
if bundle.startswith('.'):
# hide the test data
continue
try:
ingestions = list(
map(text_type, bundles_module.ingestions_for_bundle(bundle))
)
except OSError as e:
if e.errno != errno.ENOENT:
raise
ingestions = []
# If we got no ingestions, either because the directory didn't exist or
# because there were no entries, print a single message indicating that
# no ingestions have yet been made.
for timestamp in ingestions or ["<no ingestions>"]:
click.echo("%s %s" % (bundle, timestamp))
if __name__ == '__main__':
main() | zipline-live2-vk | /zipline-live2-vk-1.3.0.7.0.18.tar.gz/zipline-live2-vk-1.3.0.7.0.18/zipline/__main__.py | __main__.py |
import os.path
from datetime import datetime, timedelta
import logbook
import pandas as pd
import numpy as np
import pytz
from dateutil.relativedelta import relativedelta
from zipline.finance.blotter.blotter_live import BlotterLive
from zipline.algorithm import TradingAlgorithm
from zipline.errors import ScheduleFunctionOutsideTradingStart
from zipline.gens.realtimeclock import RealtimeClock
from zipline.gens.tradesimulation import AlgorithmSimulator
from zipline.utils.api_support import ZiplineAPI, \
allowed_only_in_before_trading_start, api_method
from zipline.utils.pandas_utils import normalize_date
from zipline.utils.serialization_utils import load_context, store_context
from zipline.finance.metrics import MetricsTracker, load as load_metrics_set
log = logbook.Logger("Live Trading")
# How many minutes before trading starts the before_trading_start function
# needs to be launched.
_minutes_before_trading_starts = 60*4
class LiveAlgorithmExecutor(AlgorithmSimulator):
def __init__(self, *args, **kwargs):
super(self.__class__, self).__init__(*args, **kwargs)
def _cleanup_expired_assets(self, dt, position_assets):
        # In simulation this is used to close positions on the simulation end
        # date, which makes a lot of sense. In our case, "simulation end" is
        # set to one day from now (we might want to fix that in the future
        # too), but we don't really have a simulation end date, so we let the
        # algorithm decide when to close the assets.
pass
class LiveTradingAlgorithm(TradingAlgorithm):
def __init__(self, *args, **kwargs):
self.broker = kwargs.pop('broker', None)
self.orders = {}
self.algo_filename = kwargs.get('algo_filename', "<algorithm>")
self.state_filename = kwargs.pop('state_filename', None)
self.realtime_bar_target = kwargs.pop('realtime_bar_target', None)
        # The persistence blacklist/whitelist and excludes give a way to
        # include or exclude context variables from the serialization
        # functions that restore or save the context to its last state;
        # blacklisted or excluded variables are never persisted to disk.
        # The trading client can never be serialized, while the 'initialized'
        # flag and the perf tracker remember the context variables and past
        # performance, so they need to be whitelisted.
self._context_persistence_blacklist = ['trading_client']
self._context_persistence_whitelist = ['initialized', 'perf_tracker']
self._context_persistence_excludes = []
# blotter is always initialized to SimulationBlotter in run_algo.py.
# we override it here to use the LiveBlotter for live algos
blotter_live = BlotterLive(
data_frequency=kwargs['sim_params'].data_frequency,
broker=self.broker)
kwargs['blotter'] = blotter_live
super(self.__class__, self).__init__(*args, **kwargs)
log.info("initialization done")
def initialize(self, *args, **kwargs):
self._context_persistence_excludes = \
self._context_persistence_blacklist + \
[e for e in self.__dict__.keys()
if e not in self._context_persistence_whitelist]
if os.path.isfile(self.state_filename):
log.info("Loading state from {}".format(self.state_filename))
load_context(self.state_filename,
context=self,
checksum=self.algo_filename)
return
with ZiplineAPI(self):
super(self.__class__, self).initialize(*args, **kwargs)
store_context(self.state_filename,
context=self,
checksum=self.algo_filename,
exclude_list=self._context_persistence_excludes)
def handle_data(self, data):
super(self.__class__, self).handle_data(data)
store_context(self.state_filename,
context=self,
checksum=self.algo_filename,
exclude_list=self._context_persistence_excludes)
def teardown(self):
super(self.__class__, self).teardown()
store_context(self.state_filename,
context=self,
checksum=self.algo_filename,
exclude_list=self._context_persistence_excludes)
def _create_clock(self):
# This method is taken from TradingAlgorithm.
# The clock has been replaced to use RealtimeClock
trading_o_and_c = self.trading_calendar.schedule.ix[
self.sim_params.sessions]
assert self.sim_params.emission_rate == 'minute'
minutely_emission = True
market_opens = trading_o_and_c['market_open']
market_closes = trading_o_and_c['market_close']
# The calendar's execution times are the minutes over which we actually
# want to run the clock. Typically the execution times simply adhere to
# the market open and close times. In the case of the futures calendar,
# for example, we only want to simulate over a subset of the full 24
# hour calendar, so the execution times dictate a market open time of
# 6:31am US/Eastern and a close of 5:00pm US/Eastern.
execution_opens = \
self.trading_calendar.execution_time_from_open(market_opens)
execution_closes = \
self.trading_calendar.execution_time_from_close(market_closes)
before_trading_start_minutes = ((pd.to_datetime(execution_opens.values)
.tz_localize('UTC').tz_convert('US/Eastern') -
timedelta(minutes=_minutes_before_trading_starts))
.tz_convert('UTC'))
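        # For example, with _minutes_before_trading_starts = 60*4 (240
        # minutes) and a 9:30am US/Eastern market open, before_trading_start
        # fires at 5:30am US/Eastern.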
return RealtimeClock(
self.sim_params.sessions,
execution_opens,
execution_closes,
before_trading_start_minutes,
minute_emission=minutely_emission,
time_skew=self.broker.time_skew,
is_broker_alive=self.broker.is_alive,
execution_id=self.sim_params._execution_id if hasattr(self.sim_params, "_execution_id") else None,
stop_execution_callback=self._stop_execution_callback
)
def _create_generator(self, sim_params):
# Call the simulation trading algorithm for side-effects:
# it creates the perf tracker
TradingAlgorithm._create_generator(self, self.sim_params)
        # The capital base is the amount of money the algo can use. It must be
        # set with run_algorithm; in cli mode it is optional, with a default
        # value of 10 million. (Note that in Python, 10**7 and 10e6 both equal
        # 10 million, i.e. 10000000; the default is defined in
        # zipline/__main__.py under the `--capital-base` option.)
        # We need to support these scenarios:
        # 1. cli mode with the default param - we need to replace 10e6 with
        #    the value from the broker.
        # 2. run_algorithm or cli with a specified value - if more than one
        #    algo is running and a specific value is allocated to each algo,
        #    we cannot override it with the value from the broker, because
        #    that would set it to the maximum value. So we check whether it is
        #    the default value - assuming at this stage that the capital used
        #    by one algo will be less than 10e6 - and only then override it
        #    with the value from the broker; if it is set to anything else we
        #    do not change it.
if self.metrics_tracker._capital_base == 10e6: # should be changed in the future with a centralized value
            # The capital base is held in the metrics_tracker, then the ledger,
            # then the Portfolio. Since it's used in many spots, the best way
            # to handle this is to create a new metrics_tracker with the new
            # value and, of course, initialize the relevant parts. This is
            # copied from TradingAlgorithm._create_generator.
self.metrics_tracker = metrics_tracker = self._create_live_metrics_tracker()
benchmark_source = self._create_benchmark_source()
metrics_tracker.handle_start_of_simulation(benchmark_source)
# attach metrics_tracker to broker
self.broker.set_metrics_tracker(self.metrics_tracker)
self.trading_client = LiveAlgorithmExecutor(
self,
sim_params,
self.data_portal,
self.trading_client.clock,
self._create_benchmark_source(),
self.restrictions,
universe_func=self._calculate_universe
)
return self.trading_client.transform()
def _create_live_metrics_tracker(self):
"""
        Create the metrics_tracker, but set values from the broker rather
        than from the simulation params.
:return:
"""
account = self.broker.get_account_from_broker()
capital_base = float(account['NetLiquidation'])
return MetricsTracker(
trading_calendar=self.trading_calendar,
first_session=self.sim_params.start_session,
last_session=self.sim_params.end_session,
capital_base=capital_base,
emission_rate=self.sim_params.emission_rate,
data_frequency=self.sim_params.data_frequency,
asset_finder=self.asset_finder,
metrics=self._metrics_set,
)
def updated_portfolio(self):
return self.broker.portfolio
def updated_account(self):
return self.broker.account
@api_method
@allowed_only_in_before_trading_start(
ScheduleFunctionOutsideTradingStart())
def schedule_function(self,
func,
date_rule=None,
time_rule=None,
half_days=True,
calendar=None):
        # If schedule_function() is called from initialize(),
        # then the state persistence would need to take care of storing and
        # restoring the scheduled functions too (as initialize() is only
        # called once in the algorithm's life). Persisting scheduled functions
        # is difficult as they are not serializable by default.
# We enforce scheduled functions to be called only from
# before_trading_start() in live trading with a decorator.
super(self.__class__, self).schedule_function(func,
date_rule,
time_rule,
half_days,
calendar)
@api_method
def symbol(self, symbol_str):
        # This method works around the problem of not being able to trade
        # assets which do not have ingested data for the day of trade.
        # Normally historical data is loaded into the bundle and the asset's
        # end_date and auto_close_date are set based on the last entry from
# the bundle db. LiveTradingAlgorithm does not override order_value(),
# order_percent() & order_target(). Those higher level ordering
# functions provide a safety net to not to trade de-listed assets.
# If the asset is returned as it was ingested (end_date=yesterday)
# then CannotOrderDelistedAsset exception will be raised from the
# higher level order functions.
#
        # Hence, the original code increased the asset's end_date by 10 years.
        # VK: it is not clear why 10 years is used - doing that can easily pull
        # delisted assets into the pipeline. Thus we extend lifetimes only
        # until the current live trading date.
asset = super(self.__class__, self).symbol(symbol_str)
tradeable_asset = asset.to_dict()
live_today = pd.Timestamp(datetime.utcnow().date()).replace(tzinfo=pytz.UTC)
if tradeable_asset['end_date'] + pd.offsets.BDay(1) >= live_today:
tradeable_asset['end_date'] = live_today
tradeable_asset['auto_close_date'] = live_today
# end_date = pd.Timestamp((datetime.utcnow() + relativedelta(years=10)).date()).replace(tzinfo=pytz.UTC)
# tradeable_asset['end_date'] = end_date
# tradeable_asset['auto_close_date'] = end_date
log.info('Extended lifetime of asset {} to {}'.format(symbol_str,
tradeable_asset['end_date']))
# asset = asset.from_dict(tradeable_asset)
return asset.from_dict(tradeable_asset)
# return asset
def run(self, *args, **kwargs):
daily_stats = super(self.__class__, self).run(*args, **kwargs)
self.on_exit()
return daily_stats
def on_exit(self):
self.teardown()
if not self.realtime_bar_target:
return
log.info("Storing realtime bars to: {}".format(
self.realtime_bar_target))
today = str(pd.to_datetime('today').date())
subscribed_assets = self.broker.subscribed_assets
realtime_history = self.broker.get_realtime_bars(subscribed_assets,
'1m')
if not os.path.exists(self.realtime_bar_target):
os.mkdir(self.realtime_bar_target)
for asset in subscribed_assets:
filename = "ZL-%s-%s.csv" % (asset.symbol, today)
path = os.path.join(self.realtime_bar_target, filename)
realtime_history[asset].to_csv(path, mode='a',
index_label='datetime',
header=not os.path.exists(path))
def _pipeline_output(self, pipeline, chunks, name):
# This method is taken from TradingAlgorithm.
"""
Internal implementation of `pipeline_output`.
        For live algos we have to use the previous session, as the Pipeline
        won't work otherwise: it would extrapolate and try to get data for
        get_datetime(), which is today.
"""
today = normalize_date(self.get_datetime())
prev_session = normalize_date(self.trading_calendar.previous_open(today))
        log.info('previous session in _pipeline_output: {}'.format(prev_session))
live_today = pd.Timestamp(datetime.utcnow().date()).replace(tzinfo=pytz.UTC)
lifetimes_extended = False
try:
data = self._pipeline_cache.get(name, prev_session)
except KeyError:
# Calculate the next block.
data, valid_until = self.run_pipeline(
pipeline, prev_session, next(chunks),
)
# If live trading then shifting the end date to today.
# Doing that for assets with the up to date end_date only.
dates_kept = np.array(data.index.get_level_values(0))
assets_kept = np.array(data.index.get_level_values(1))
for i, asset in enumerate(assets_kept):
if asset.end_date + pd.offsets.BDay(1) >= live_today:
lifetimes_extended = True
asset_dict = asset.to_dict()
asset_dict['end_date'] = live_today
asset_dict['auto_close_date'] = live_today
assets_kept[i] = asset.from_dict(asset_dict)
index = pd.MultiIndex.from_arrays([dates_kept, assets_kept])
data.index = index
self._pipeline_cache.set(name, data, valid_until)
if lifetimes_extended:
log.info('Extended lifetime of the pipeline assets to {}'.format(live_today))
# Now that we have a cached result, try to return the data for today.
try:
return data.loc[prev_session]
except KeyError:
# This happens if no assets passed the pipeline screen on a given
# day.
return pd.DataFrame(index=[], columns=data.columns)
def _sync_last_sale_prices(self, dt=None):
"""
        We get the updates from the broker, so we don't need this method,
        which tries to get them from the ingested data.
:param dt:
:return:
"""
pass | zipline-live2-vk | /zipline-live2-vk-1.3.0.7.0.18.tar.gz/zipline-live2-vk-1.3.0.7.0.18/zipline/algorithm_live.py | algorithm_live.py |
import re
import six
from toolz import curry
def create_args(args, root):
"""
Encapsulates a set of custom command line arguments in key=value
or key.namespace=value form into a chain of Namespace objects,
where each next level is an attribute of the Namespace object on the
current level
Parameters
----------
args : list
A list of strings representing arguments in key=value form
root : Namespace
The top-level element of the argument tree
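    Examples
    --------
    An illustrative sketch of the resulting attribute chain:
    >>> root = Namespace()
    >>> create_args(['first=1', 'second.a=2'], root)
    >>> root.first
    '1'
    >>> root.second.a
    '2'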
"""
extension_args = {}
for arg in args:
parse_extension_arg(arg, extension_args)
for name in sorted(extension_args, key=len):
path = name.split('.')
update_namespace(root, path, extension_args[name])
def parse_extension_arg(arg, arg_dict):
"""
Converts argument strings in key=value or key.namespace=value form
to dictionary entries
Parameters
----------
arg : str
The argument string to parse, which must be in key=value or
key.namespace=value form.
arg_dict : dict
The dictionary into which the key/value pair will be added
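    Examples
    --------
    An illustrative sketch:
    >>> d = {}
    >>> parse_extension_arg('name.space=value', d)
    >>> d
    {'name.space': 'value'}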
"""
match = re.match(r'^(([^\d\W]\w*)(\.[^\d\W]\w*)*)=(.*)$', arg)
if match is None:
raise ValueError(
"invalid extension argument '%s', must be in key=value form" % arg
)
name = match.group(1)
value = match.group(4)
arg_dict[name] = value
def update_namespace(namespace, path, name):
"""
    A recursive function that takes a root element, a list of namespace
    names, and the value being stored, and assigns namespaces to the root
    object via a chain of Namespace objects, connected through attributes.
Parameters
----------
namespace : Namespace
The object onto which an attribute will be added
path : list
A list of strings representing namespaces
name : str
The value to be stored at the bottom level
"""
if len(path) == 1:
setattr(namespace, path[0], name)
else:
if hasattr(namespace, path[0]):
if isinstance(getattr(namespace, path[0]), six.string_types):
raise ValueError("Conflicting assignments at namespace"
" level '%s'" % path[0])
else:
a = Namespace()
setattr(namespace, path[0], a)
update_namespace(getattr(namespace, path[0]), path[1:], name)
class Namespace(object):
"""
A placeholder object representing a namespace level
"""
class Registry(object):
"""
Responsible for managing all instances of custom subclasses of a
given abstract base class - only one instance needs to be created
per abstract base class, and should be created through the
create_registry function/decorator. All management methods
for a given base class can be called through the global wrapper functions
rather than through the object instance itself.
Parameters
----------
interface : type
The abstract base class to manage.
"""
def __init__(self, interface):
self.interface = interface
self._factories = {}
def load(self, name):
"""Construct an object from a registered factory.
Parameters
----------
name : str
Name with which the factory was registered.
"""
try:
return self._factories[name]()
except KeyError:
raise ValueError(
"no %s factory registered under name %r, options are: %r" %
(self.interface.__name__, name, sorted(self._factories)),
)
def is_registered(self, name):
"""Check whether we have a factory registered under ``name``.
"""
return name in self._factories
@curry
def register(self, name, factory):
if self.is_registered(name):
raise ValueError(
"%s factory with name %r is already registered" %
(self.interface.__name__, name)
)
self._factories[name] = factory
return factory
def unregister(self, name):
try:
del self._factories[name]
except KeyError:
raise ValueError(
"%s factory %r was not already registered" %
(self.interface.__name__, name)
)
def clear(self):
self._factories.clear()
# Public wrapper methods for Registry:
def get_registry(interface):
"""
Getter method for retrieving the registry
instance for a given extendable type
Parameters
----------
interface : type
extendable type (base class)
Returns
-------
manager : Registry
The corresponding registry
"""
try:
return custom_types[interface]
except KeyError:
raise ValueError("class specified is not an extendable type")
def load(interface, name):
"""
Retrieves a custom class whose name is given.
Parameters
----------
interface : type
The base class for which to perform this operation
name : str
The name of the class to be retrieved.
Returns
-------
obj : object
An instance of the desired class.
"""
return get_registry(interface).load(name)
@curry
def register(interface, name, custom_class):
"""
Registers a class for retrieval by the load method
Parameters
----------
interface : type
The base class for which to perform this operation
name : str
The name of the subclass
custom_class : type
The class to register, which must be a subclass of the
abstract base class in self.dtype
"""
return get_registry(interface).register(name, custom_class)
def unregister(interface, name):
"""
If a class is registered with the given name,
it is unregistered.
Parameters
----------
interface : type
The base class for which to perform this operation
name : str
The name of the class to be unregistered.
"""
get_registry(interface).unregister(name)
def clear(interface):
"""
Unregisters all current registered classes
Parameters
----------
interface : type
The base class for which to perform this operation
"""
get_registry(interface).clear()
def create_registry(interface):
"""
Create a new registry for an extensible interface.
Parameters
----------
interface : type
The abstract data type for which to create a registry,
which will manage registration of factories for this type.
Returns
-------
interface : type
The data type specified/decorated, unaltered.
"""
if interface in custom_types:
raise ValueError('there is already a Registry instance '
'for the specified type')
custom_types[interface] = Registry(interface)
return interface
extensible = create_registry
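# An illustrative sketch of the registry workflow; the class names below are
# hypothetical and the calls are not executed here:
#
#     @extensible
#     class Parser(object):
#         pass
#
#     @register(Parser, 'fast')
#     class FastParser(Parser):
#         pass
#
#     parser = load(Parser, 'fast')  # constructs a FastParser instance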
# A global dictionary for storing instances of Registry:
custom_types = {} | zipline-live2-vk | /zipline-live2-vk-1.3.0.7.0.18.tar.gz/zipline-live2-vk-1.3.0.7.0.18/zipline/extensions.py | extensions.py |
from textwrap import dedent
from zipline.utils.memoize import lazyval
class ZiplineError(Exception):
msg = None
def __init__(self, **kwargs):
self.kwargs = kwargs
@lazyval
def message(self):
return str(self)
def __str__(self):
msg = self.msg.format(**self.kwargs)
return msg
__unicode__ = __str__
__repr__ = __str__
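# An illustrative sketch of the pattern used throughout this module: each
# subclass defines a ``msg`` template whose fields are filled from the kwargs
# passed to the constructor (the class below is hypothetical):
#
#     class ExampleError(ZiplineError):
#         msg = "Bad value: {value}."
#
#     str(ExampleError(value=42))  # -> 'Bad value: 42.'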
class NoTradeDataAvailable(ZiplineError):
pass
class NoTradeDataAvailableTooEarly(NoTradeDataAvailable):
msg = "{sid} does not exist on {dt}. It started trading on {start_dt}."
class NoTradeDataAvailableTooLate(NoTradeDataAvailable):
msg = "{sid} does not exist on {dt}. It stopped trading on {end_dt}."
class BenchmarkAssetNotAvailableTooEarly(NoTradeDataAvailableTooEarly):
pass
class BenchmarkAssetNotAvailableTooLate(NoTradeDataAvailableTooLate):
pass
class InvalidBenchmarkAsset(ZiplineError):
msg = """
{sid} cannot be used as the benchmark because it has a stock \
dividend on {dt}. Choose another asset to use as the benchmark.
""".strip()
class WrongDataForTransform(ZiplineError):
"""
Raised whenever a rolling transform is called on an event that
does not have the necessary properties.
"""
msg = "{transform} requires {fields}. Event cannot be processed."
class UnsupportedSlippageModel(ZiplineError):
"""
Raised if a user script calls the set_slippage magic
    with a slippage object that isn't a VolumeShareSlippage or
    FixedSlippage
"""
msg = """
You attempted to set slippage with an unsupported class. \
Please use VolumeShareSlippage or FixedSlippage.
""".strip()
class IncompatibleSlippageModel(ZiplineError):
"""
Raised if a user tries to set a futures slippage model for equities or vice
versa.
"""
msg = """
You attempted to set an incompatible slippage model for {asset_type}. \
The slippage model '{given_model}' only supports {supported_asset_types}.
""".strip()
class SetSlippagePostInit(ZiplineError):
# Raised if a users script calls set_slippage magic
# after the initialize method has returned.
msg = """
You attempted to set slippage outside of `initialize`. \
You may only call 'set_slippage' in your initialize method.
""".strip()
class SetCancelPolicyPostInit(ZiplineError):
# Raised if a users script calls set_cancel_policy
# after the initialize method has returned.
msg = """
You attempted to set the cancel policy outside of `initialize`. \
You may only call 'set_cancel_policy' in your initialize method.
""".strip()
class RegisterTradingControlPostInit(ZiplineError):
# Raised if a user's script register's a trading control after initialize
# has been run.
msg = """
You attempted to set a trading control outside of `initialize`. \
Trading controls may only be set in your initialize method.
""".strip()
class RegisterAccountControlPostInit(ZiplineError):
# Raised if a user's script register's a trading control after initialize
# has been run.
msg = """
You attempted to set an account control outside of `initialize`. \
Account controls may only be set in your initialize method.
""".strip()
class UnsupportedCommissionModel(ZiplineError):
"""
Raised if a user script calls the set_commission magic
with a commission object that isn't a PerShare, PerTrade or
PerDollar commission
"""
msg = """
You attempted to set commission with an unsupported class. \
Please use PerShare or PerTrade.
""".strip()
class IncompatibleCommissionModel(ZiplineError):
"""
Raised if a user tries to set a futures commission model for equities or
vice versa.
"""
msg = """
You attempted to set an incompatible commission model for {asset_type}. \
The commission model '{given_model}' only supports {supported_asset_types}.
""".strip()
class UnsupportedCancelPolicy(ZiplineError):
"""
Raised if a user script calls set_cancel_policy with an object that isn't
a CancelPolicy.
"""
msg = """
You attempted to set the cancel policy with an unsupported class. Please use
an instance of CancelPolicy.
""".strip()
class SetCommissionPostInit(ZiplineError):
"""
Raised if a users script calls set_commission magic
after the initialize method has returned.
"""
msg = """
You attempted to override commission outside of `initialize`. \
You may only call 'set_commission' in your initialize method.
""".strip()
class TransactionWithNoVolume(ZiplineError):
"""
Raised if a transact call returns a transaction with zero volume.
"""
msg = """
Transaction {txn} has a volume of zero.
""".strip()
class TransactionWithWrongDirection(ZiplineError):
"""
Raised if a transact call returns a transaction with a direction that
does not match the order.
"""
msg = """
Transaction {txn} not in same direction as corresponding order {order}.
""".strip()
class TransactionWithNoAmount(ZiplineError):
"""
Raised if a transact call returns a transaction with zero amount.
"""
msg = """
Transaction {txn} has an amount of zero.
""".strip()
class TransactionVolumeExceedsOrder(ZiplineError):
"""
Raised if a transact call returns a transaction with a volume greater than
the corresponding order.
"""
msg = """
Transaction volume of {txn} exceeds the order volume of {order}.
""".strip()
class UnsupportedOrderParameters(ZiplineError):
"""
Raised if a set of mutually exclusive parameters are passed to an order
call.
"""
msg = "{msg}"
class CannotOrderDelistedAsset(ZiplineError):
"""
Raised if an order is for a delisted asset.
"""
msg = "{msg}"
class BadOrderParameters(ZiplineError):
"""
Raised if any impossible parameters (nan, negative limit/stop)
are passed to an order call.
"""
msg = "{msg}"
class OrderDuringInitialize(ZiplineError):
"""
Raised if order is called during initialize()
"""
msg = "{msg}"
class SetBenchmarkOutsideInitialize(ZiplineError):
"""
Raised if set_benchmark is called outside initialize()
"""
msg = "'set_benchmark' can only be called within initialize function."
class ZeroCapitalError(ZiplineError):
"""
Raised if initial capital is set at or below zero
"""
msg = "initial capital base must be greater than zero"
class AccountControlViolation(ZiplineError):
"""
Raised if the account violates a constraint set by a AccountControl.
"""
msg = """
Account violates account constraint {constraint}.
""".strip()
class TradingControlViolation(ZiplineError):
"""
Raised if an order would violate a constraint set by a TradingControl.
"""
msg = """
Order for {amount} shares of {asset} at {datetime} violates trading constraint
{constraint}.
""".strip()
class IncompatibleHistoryFrequency(ZiplineError):
"""
Raised when a frequency is given to history which is not supported.
At least, not yet.
"""
msg = """
Requested history at frequency '{frequency}' cannot be created with data
at frequency '{data_frequency}'.
""".strip()
class HistoryInInitialize(ZiplineError):
"""
Raised when an algorithm calls history() in initialize.
"""
msg = "history() should only be called in handle_data()"
class OrderInBeforeTradingStart(ZiplineError):
"""
Raised when an algorithm calls an order method in before_trading_start.
"""
msg = "Cannot place orders inside before_trading_start."
class ScheduleFunctionOutsideTradingStart(ZiplineError):
"""
Raised when an algorithm schedules functions outside of
before_trading_start()
"""
msg = "schedule_function() should only be called in before_trading_start()"
class MultipleSymbolsFound(ZiplineError):
"""
Raised when a symbol() call contains a symbol that changed over
time and is thus not resolvable without additional information
provided via as_of_date.
"""
msg = """
Multiple symbols with the name '{symbol}' found. Use the
'as_of_date' argument to specify when the symbol lookup
should be valid.
Possible options: {options}
""".strip()
class SymbolNotFound(ZiplineError):
"""
    Raised when a symbol() call contains a non-existent symbol.
"""
msg = """
Symbol '{symbol}' was not found.
""".strip()
class RootSymbolNotFound(ZiplineError):
"""
    Raised when a lookup_future_chain() call contains a non-existent symbol.
"""
msg = """
Root symbol '{root_symbol}' was not found.
""".strip()
class ValueNotFoundForField(ZiplineError):
"""
Raised when a lookup_by_supplementary_mapping() call contains a
    value that does not exist for the specified mapping type.
"""
msg = """
Value '{value}' was not found for field '{field}'.
""".strip()
class MultipleValuesFoundForField(ZiplineError):
"""
Raised when a lookup_by_supplementary_mapping() call contains a
value that changed over time for the specified field and is
thus not resolvable without additional information provided via
as_of_date.
"""
msg = """
Multiple occurrences of the value '{value}' found for field '{field}'.
Use the 'as_of_date' or 'country_code' argument to specify when or where the
lookup should be valid.
Possible options: {options}
""".strip()
class NoValueForSid(ZiplineError):
"""
Raised when a get_supplementary_field() call contains a sid that
does not have a value for the specified mapping type.
"""
msg = """
No '{field}' value found for sid '{sid}'.
""".strip()
class MultipleValuesFoundForSid(ZiplineError):
"""
Raised when a get_supplementary_field() call contains a value that
changed over time for the specified field and is thus not resolvable
without additional information provided via as_of_date.
"""
msg = """
Multiple '{field}' values found for sid '{sid}'. Use the 'as_of_date' argument
to specify when the lookup should be valid.
Possible options: {options}
""".strip()
class SidsNotFound(ZiplineError):
"""
Raised when a retrieve_asset() or retrieve_all() call contains a
non-existent sid.
"""
@lazyval
def plural(self):
return len(self.sids) > 1
@lazyval
def sids(self):
return self.kwargs['sids']
@lazyval
def msg(self):
if self.plural:
return "No assets found for sids: {sids}."
return "No asset found for sid: {sids[0]}."
class EquitiesNotFound(SidsNotFound):
"""
Raised when a call to `retrieve_equities` fails to find an asset.
"""
@lazyval
def msg(self):
if self.plural:
return "No equities found for sids: {sids}."
return "No equity found for sid: {sids[0]}."
class FutureContractsNotFound(SidsNotFound):
"""
Raised when a call to `retrieve_futures_contracts` fails to find an asset.
"""
@lazyval
def msg(self):
if self.plural:
return "No future contracts found for sids: {sids}."
return "No future contract found for sid: {sids[0]}."
class ConsumeAssetMetaDataError(ZiplineError):
"""
Raised when AssetFinder.consume() is called on an invalid object.
"""
msg = """
AssetFinder can not consume metadata of type {obj}. Metadata must be a dict, a
DataFrame, or a tables.Table. If the provided metadata is a Table, the rows
must contain both or one of 'sid' or 'symbol'.
""".strip()
class MapAssetIdentifierIndexError(ZiplineError):
"""
Raised when AssetMetaData.map_identifier_index_to_sids() is called on an
index of invalid objects.
"""
msg = """
AssetFinder can not map an index with values of type {obj}. Asset indices of
DataFrames or Panels must be integer sids, string symbols, or Asset objects.
""".strip()
class SidAssignmentError(ZiplineError):
"""
Raised when an AssetFinder tries to build an Asset that does not have a sid
and that AssetFinder is not permitted to assign sids.
"""
msg = """
AssetFinder metadata is missing a SID for identifier '{identifier}'.
""".strip()
class NoSourceError(ZiplineError):
"""
Raised when no source is given to the pipeline
"""
msg = """
No data source given.
""".strip()
class PipelineDateError(ZiplineError):
"""
Raised when only one date is passed to the pipeline
"""
msg = """
Only one simulation date given. Please specify both the 'start' and 'end' for
the simulation, or neither. If neither is given, the start and end of the
DataSource will be used. Given start = '{start}', end = '{end}'
""".strip()
class WindowLengthTooLong(ZiplineError):
"""
Raised when a trailing window is instantiated with a lookback greater than
the length of the underlying array.
"""
msg = (
"Can't construct a rolling window of length "
"{window_length} on an array of length {nrows}."
).strip()
class WindowLengthNotPositive(ZiplineError):
"""
Raised when a trailing window would be instantiated with a length less than
1.
"""
msg = (
"Expected a window_length greater than 0, got {window_length}."
).strip()
class NonWindowSafeInput(ZiplineError):
"""
Raised when a Pipeline API term that is not deemed window safe is specified
as an input to another windowed term.
This is an error because it's generally not safe to compose windowed
functions on split/dividend adjusted data.
"""
msg = (
"Can't compute windowed expression {parent} with "
"windowed input {child}."
)
class TermInputsNotSpecified(ZiplineError):
"""
Raised if a user attempts to construct a term without specifying inputs and
that term does not have class-level default inputs.
"""
msg = "{termname} requires inputs, but no inputs list was passed."
class NonPipelineInputs(ZiplineError):
"""
Raised when a non-pipeline object is passed as input to a ComputableTerm
"""
def __init__(self, term, inputs):
self.term = term
self.inputs = inputs
def __str__(self):
return (
"Unexpected input types in {}. "
"Inputs to Pipeline expressions must be Filters, Factors, "
"Classifiers, or BoundColumns.\n"
"Got the following type(s) instead: {}".format(
type(self.term).__name__,
sorted(set(map(type, self.inputs)), key=lambda t: t.__name__),
)
)
class TermOutputsEmpty(ZiplineError):
"""
Raised if a user attempts to construct a term with an empty outputs list.
"""
msg = (
"{termname} requires at least one output when passed an outputs "
"argument."
)
class InvalidOutputName(ZiplineError):
"""
Raised if a term's output names conflict with any of its attributes.
"""
msg = (
"{output_name!r} cannot be used as an output name for {termname}. "
"Output names cannot start with an underscore or be contained in the "
"following list: {disallowed_names}."
)
class WindowLengthNotSpecified(ZiplineError):
"""
Raised if a user attempts to construct a term without specifying window
length and that term does not have a class-level default window length.
"""
msg = (
"{termname} requires a window_length, but no window_length was passed."
)
class InvalidTermParams(ZiplineError):
"""
Raised if a user attempts to construct a Term using ParameterizedTermMixin
without specifying a `params` list in the class body.
"""
msg = (
"Expected a list of strings as a class-level attribute for "
"{termname}.params, but got {value} instead."
)
class DTypeNotSpecified(ZiplineError):
"""
Raised if a user attempts to construct a term without specifying dtype and
that term does not have class-level default dtype.
"""
msg = (
"{termname} requires a dtype, but no dtype was passed."
)
class NotDType(ZiplineError):
"""
Raised when a pipeline Term is constructed with a dtype that isn't a numpy
dtype object.
"""
msg = (
"{termname} expected a numpy dtype "
"object for a dtype, but got {dtype} instead."
)
class UnsupportedDType(ZiplineError):
"""
Raised when a pipeline Term is constructed with a dtype that's not
supported.
"""
msg = (
"Failed to construct {termname}.\n"
"Pipeline terms of dtype {dtype} are not yet supported."
)
class BadPercentileBounds(ZiplineError):
"""
Raised by API functions accepting percentile bounds when the passed bounds
are invalid.
"""
msg = (
"Percentile bounds must fall between 0.0 and {upper_bound}, and min "
"must be less than max."
"\nInputs were min={min_percentile}, max={max_percentile}."
)
class UnknownRankMethod(ZiplineError):
"""
Raised during construction of a Rank factor when supplied a bad Rank
method.
"""
msg = (
"Unknown ranking method: '{method}'. "
"`method` must be one of {choices}"
)
class AttachPipelineAfterInitialize(ZiplineError):
"""
Raised when a user tries to call add_pipeline outside of initialize.
"""
msg = (
"Attempted to attach a pipeline after initialize()."
"attach_pipeline() can only be called during initialize."
)
class PipelineOutputDuringInitialize(ZiplineError):
"""
Raised when a user tries to call `pipeline_output` during initialize.
"""
msg = (
"Attempted to call pipeline_output() during initialize. "
"pipeline_output() can only be called once initialize has completed."
)
class NoSuchPipeline(ZiplineError, KeyError):
"""
Raised when a user tries to access a non-existent pipeline by name.
"""
msg = (
"No pipeline named '{name}' exists. Valid pipeline names are {valid}. "
"Did you forget to call attach_pipeline()?"
)
class DuplicatePipelineName(ZiplineError):
"""
Raised when a user tries to attach a pipeline with a name that already
exists for another attached pipeline.
"""
msg = (
"Attempted to attach pipeline named {name!r}, but the name already "
"exists for another pipeline. Please use a different name for this "
"pipeline."
)
class UnsupportedDataType(ZiplineError):
"""
Raised by CustomFactors with unsupported dtypes.
"""
def __init__(self, hint='', **kwargs):
if hint:
hint = ' ' + hint
kwargs['hint'] = hint
super(UnsupportedDataType, self).__init__(**kwargs)
msg = "{typename} instances with dtype {dtype} are not supported.{hint}"
class NoFurtherDataError(ZiplineError):
"""
Raised by calendar operations that would ask for dates beyond the extent of
our known data.
"""
# This accepts an arbitrary message string because it's used in more places
# that can be usefully templated.
msg = '{msg}'
@classmethod
def from_lookback_window(cls,
initial_message,
first_date,
lookback_start,
lookback_length):
return cls(
msg=dedent(
"""
{initial_message}
lookback window started at {lookback_start}
earliest known date was {first_date}
{lookback_length} extra rows of data were required
"""
).format(
initial_message=initial_message,
first_date=first_date,
lookback_start=lookback_start,
lookback_length=lookback_length,
)
)
class UnsupportedDatetimeFormat(ZiplineError):
"""
Raised when an unsupported datetime is passed to an API method.
"""
msg = ("The input '{input}' passed to '{method}' is not "
"coercible to a pandas.Timestamp object.")
class AssetDBVersionError(ZiplineError):
"""
Raised by an AssetDBWriter or AssetFinder if the version number in the
versions table does not match the ASSET_DB_VERSION in asset_writer.py.
"""
msg = (
"The existing Asset database has an incorrect version: {db_version}. "
"Expected version: {expected_version}. Try rebuilding your asset "
"database or updating your version of Zipline."
)
class AssetDBImpossibleDowngrade(ZiplineError):
msg = (
"The existing Asset database is version: {db_version} which is lower "
"than the desired downgrade version: {desired_version}."
)
class HistoryWindowStartsBeforeData(ZiplineError):
msg = (
"History window extends before {first_trading_day}. To use this "
"history window, start the backtest on or after {suggested_start_day}."
)
class NonExistentAssetInTimeFrame(ZiplineError):
msg = (
"The target asset '{asset}' does not exist for the entire timeframe "
"between {start_date} and {end_date}."
)
class InvalidCalendarName(ZiplineError):
"""
Raised when a calendar with an invalid name is requested.
"""
msg = (
"The requested TradingCalendar, {calendar_name}, does not exist."
)
class CalendarNameCollision(ZiplineError):
"""
Raised when the static calendar registry already has a calendar with a
given name.
"""
msg = (
"A calendar with the name {calendar_name} is already registered."
)
class CyclicCalendarAlias(ZiplineError):
"""
Raised when calendar aliases form a cycle.
"""
msg = "Cycle in calendar aliases: [{cycle}]"
class ScheduleFunctionWithoutCalendar(ZiplineError):
"""
Raised when schedule_function is called but there is not a calendar to be
used in the construction of an event rule.
"""
# TODO update message when new TradingSchedules are built
msg = (
"To use schedule_function, the TradingAlgorithm must be running on an "
"ExchangeTradingSchedule, rather than {schedule}."
)
class ScheduleFunctionInvalidCalendar(ZiplineError):
"""
Raised when schedule_function is called with an invalid calendar argument.
"""
msg = (
"Invalid calendar '{given_calendar}' passed to schedule_function. "
"Allowed options are {allowed_calendars}."
)
class UnsupportedPipelineOutput(ZiplineError):
"""
Raised when a 1D term is added as a column to a pipeline.
"""
msg = (
"Cannot add column {column_name!r} with term {term}. Adding slices or "
"single-column-output terms as pipeline columns is not currently "
"supported."
)
class NonSliceableTerm(ZiplineError):
"""
Raised when attempting to index into a non-sliceable term, e.g. instances
of `zipline.pipeline.term.LoadableTerm`.
"""
msg = "Taking slices of {term} is not currently supported."
class IncompatibleTerms(ZiplineError):
"""
Raised when trying to compute correlations/regressions between two 2D
factors with different masks.
"""
msg = (
"{term_1} and {term_2} must have the same mask in order to compute "
"correlations and regressions asset-wise."
) | zipline-live2-vk | /zipline-live2-vk-1.3.0.7.0.18.tar.gz/zipline-live2-vk-1.3.0.7.0.18/zipline/errors.py | errors.py |
from warnings import warn
import pandas as pd
from .assets import Asset
from .utils.enum import enum
from ._protocol import BarData, InnerPosition # noqa
class MutableView(object):
"""A mutable view over an "immutable" object.
Parameters
----------
ob : any
The object to take a view over.
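    An illustrative doctest of writing through the view:
    >>> class Ob(object):
    ...     pass
    >>> ob = Ob()
    >>> view = MutableView(ob)
    >>> view.x = 1  # assigns into vars(ob) directly
    >>> ob.x
    1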
"""
# add slots so we don't accidentally add attributes to the view instead of
# ``ob``
__slots__ = ('_mutable_view_ob',)
def __init__(self, ob):
object.__setattr__(self, '_mutable_view_ob', ob)
def __getattr__(self, attr):
return getattr(self._mutable_view_ob, attr)
def __setattr__(self, attr, value):
vars(self._mutable_view_ob)[attr] = value
def __repr__(self):
return '%s(%r)' % (type(self).__name__, self._mutable_view_ob)
# Datasource type should completely determine the other fields of a
# message with its type.
DATASOURCE_TYPE = enum(
'AS_TRADED_EQUITY',
'MERGER',
'SPLIT',
'DIVIDEND',
'TRADE',
'TRANSACTION',
'ORDER',
'EMPTY',
'DONE',
'CUSTOM',
'BENCHMARK',
'COMMISSION',
'CLOSE_POSITION'
)
# Expected fields/index values for a dividend Series.
DIVIDEND_FIELDS = [
'declared_date',
'ex_date',
'gross_amount',
'net_amount',
'pay_date',
'payment_sid',
'ratio',
'sid',
]
# Expected fields/index values for a dividend payment Series.
DIVIDEND_PAYMENT_FIELDS = [
'id',
'payment_sid',
'cash_amount',
'share_count',
]
class Event(object):
def __init__(self, initial_values=None):
if initial_values:
self.__dict__.update(initial_values)
def keys(self):
return self.__dict__.keys()
def __eq__(self, other):
return hasattr(other, '__dict__') and self.__dict__ == other.__dict__
def __contains__(self, name):
return name in self.__dict__
def __repr__(self):
return "Event({0})".format(self.__dict__)
def to_series(self, index=None):
return pd.Series(self.__dict__, index=index)
def _deprecated_getitem_method(name, attrs):
"""Create a deprecated ``__getitem__`` method that tells users to use
getattr instead.
Parameters
----------
name : str
The name of the object in the warning message.
attrs : iterable[str]
The set of allowed attributes.
Returns
-------
__getitem__ : callable[any, str]
The ``__getitem__`` method to put in the class dict.
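    Examples
    --------
    An illustrative sketch (the class below is hypothetical):
    >>> class Thing(object):
    ...     foo = 1
    ...     __getitem__ = _deprecated_getitem_method('thing', {'foo'})
    >>> Thing()['foo']  # emits a DeprecationWarning
    1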
"""
attrs = frozenset(attrs)
msg = (
"'{name}[{attr!r}]' is deprecated, please use"
" '{name}.{attr}' instead"
)
def __getitem__(self, key):
"""``__getitem__`` is deprecated, please use attribute access instead.
"""
warn(msg.format(name=name, attr=key), DeprecationWarning, stacklevel=2)
if key in attrs:
return getattr(self, key)
raise KeyError(key)
return __getitem__
class Order(Event):
# If you are adding new attributes, don't update this set. This method
# is deprecated to normal attribute access so we don't want to encourage
# new usages.
__getitem__ = _deprecated_getitem_method(
'order', {
'dt',
'sid',
'amount',
'stop',
'limit',
'id',
'filled',
'commission',
'stop_reached',
'limit_reached',
'created',
},
)
class Portfolio(object):
"""The portfolio at a given time.
Parameters
----------
start_date : pd.Timestamp
The start date for the period being recorded.
capital_base : float
The starting value for the portfolio. This will be used as the starting
cash, current cash, and portfolio value.
"""
def __init__(self, start_date=None, capital_base=0.0):
self_ = MutableView(self)
self_.cash_flow = 0.0
self_.starting_cash = capital_base
self_.portfolio_value = capital_base
self_.pnl = 0.0
self_.returns = 0.0
self_.cash = capital_base
self_.positions = Positions()
self_.start_date = start_date
self_.positions_value = 0.0
self_.positions_exposure = 0.0
@property
def capital_used(self):
return self.cash_flow
def __setattr__(self, attr, value):
raise AttributeError('cannot mutate Portfolio objects')
def __repr__(self):
return "Portfolio({0})".format(self.__dict__)
# If you are adding new attributes, don't update this set. This method
# is deprecated to normal attribute access so we don't want to encourage
# new usages.
__getitem__ = _deprecated_getitem_method(
'portfolio', {
'capital_used',
'starting_cash',
'portfolio_value',
'pnl',
'returns',
'cash',
'positions',
'start_date',
'positions_value',
},
)
@property
def current_portfolio_weights(self):
"""
Compute each asset's weight in the portfolio by calculating its held
value divided by the total value of all positions.
        Each equity's value is its price times the number of shares held. Each
        futures contract's value is its unit price times the number of
        contracts held times the multiplier.
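        For example, 10 shares of an equity priced at $50 in a portfolio
        worth $1,000 contribute a weight of 10 * 50 / 1000 = 0.5.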
"""
position_values = pd.Series({
asset: (
position.last_sale_price *
position.amount *
asset.price_multiplier
)
for asset, position in self.positions.items()
})
return position_values / self.portfolio_value
class Account(object):
"""
The account object tracks information about the trading account. The
values are updated as the algorithm runs and its keys remain unchanged.
If connected to a broker, one can update these values with the trading
account values as reported by the broker.
"""
def __init__(self):
self_ = MutableView(self)
self_.settled_cash = 0.0
self_.accrued_interest = 0.0
self_.buying_power = float('inf')
self_.equity_with_loan = 0.0
self_.total_positions_value = 0.0
self_.total_positions_exposure = 0.0
self_.regt_equity = 0.0
self_.regt_margin = float('inf')
self_.initial_margin_requirement = 0.0
self_.maintenance_margin_requirement = 0.0
self_.available_funds = 0.0
self_.excess_liquidity = 0.0
self_.cushion = 0.0
self_.day_trades_remaining = float('inf')
self_.leverage = 0.0
self_.net_leverage = 0.0
self_.net_liquidation = 0.0
def __setattr__(self, attr, value):
raise AttributeError('cannot mutate Account objects')
def __repr__(self):
return "Account({0})".format(self.__dict__)
# If you are adding new attributes, don't update this set. This method
# is deprecated to normal attribute access so we don't want to encourage
# new usages.
__getitem__ = _deprecated_getitem_method(
'account', {
'settled_cash',
'accrued_interest',
'buying_power',
'equity_with_loan',
'total_positions_value',
'total_positions_exposure',
'regt_equity',
'regt_margin',
'initial_margin_requirement',
'maintenance_margin_requirement',
'available_funds',
'excess_liquidity',
'cushion',
'day_trades_remaining',
'leverage',
'net_leverage',
'net_liquidation',
},
)
class Position(object):
__slots__ = ('_underlying_position',)
def __init__(self, underlying_position):
object.__setattr__(self, '_underlying_position', underlying_position)
def __getattr__(self, attr):
return getattr(self._underlying_position, attr)
def __setattr__(self, attr, value):
raise AttributeError('cannot mutate Position objects')
@property
def sid(self):
# for backwards compatibility
return self.asset
def __repr__(self):
return 'Position(%r)' % {
k: getattr(self, k)
for k in (
'asset',
'amount',
'cost_basis',
'last_sale_price',
'last_sale_date',
)
}
# If you are adding new attributes, don't update this set. This method
# is deprecated to normal attribute access so we don't want to encourage
# new usages.
__getitem__ = _deprecated_getitem_method(
'position', {
'sid',
'amount',
'cost_basis',
'last_sale_price',
'last_sale_date',
},
)
# Copied from Position and renamed. This is used to handle cases where a user
# does something like `context.portfolio.positions[100]` instead of
# `context.portfolio.positions[sid(100)]`.
class _DeprecatedSidLookupPosition(object):
def __init__(self, sid):
self.sid = sid
self.amount = 0
self.cost_basis = 0.0 # per share
self.last_sale_price = 0.0
self.last_sale_date = None
def __repr__(self):
return "_DeprecatedSidLookupPosition({0})".format(self.__dict__)
    # If you are adding new attributes, don't update this set. This method
    # is deprecated in favor of normal attribute access, so we don't want
    # to encourage new usages.
__getitem__ = _deprecated_getitem_method(
'position', {
'sid',
'amount',
'cost_basis',
'last_sale_price',
'last_sale_date',
},
)
class Positions(dict):
def __missing__(self, key):
if isinstance(key, Asset):
return Position(InnerPosition(key))
elif isinstance(key, int):
warn("Referencing positions by integer is deprecated."
" Use an asset instead.")
else:
warn("Position lookup expected a value of type Asset but got {0}"
" instead.".format(type(key).__name__))
        return _DeprecatedSidLookupPosition(key)
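# Illustrative lookup behavior (``aapl`` is a hypothetical Asset and
# ``positions`` a Positions instance):
#
#     positions[aapl]  # returns an empty Position wrapping InnerPosition
#     positions[24]    # warns that integer lookup is deprecated and
#                      # returns a _DeprecatedSidLookupPosition placeholder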

# ---- zipline/__init__.py ----
from distutils.version import StrictVersion
import os
import numpy as np
# This is *not* a place to dump arbitrary classes/modules for convenience,
# it is a place to expose the public interfaces.
from trading_calendars import get_calendar
from . import data
from . import finance
from . import gens
from . import utils
from .utils.numpy_utils import numpy_version
from .utils.pandas_utils import new_pandas
from .utils.run_algo import run_algorithm
from ._version import get_versions
# These need to happen after the other imports.
from . algorithm import TradingAlgorithm
from . import api
from zipline import extensions as ext
from zipline.finance.blotter import Blotter
# PERF: Fire a warning if calendars were instantiated during zipline import.
# Having calendars doesn't break anything per se, but it makes zipline
# imports noticeably slower, which is particularly annoying in the Zipline
# CLI.
from trading_calendars.calendar_utils import global_calendar_dispatcher
if global_calendar_dispatcher._calendars:
import warnings
warnings.warn(
"Found TradingCalendar instances after zipline import.\n"
"Zipline startup will be much slower until this is fixed!",
)
del warnings
del global_calendar_dispatcher
__version__ = get_versions()['version']
del get_versions
extension_args = ext.Namespace()
def load_ipython_extension(ipython):
from .__main__ import zipline_magic
ipython.register_magic_function(zipline_magic, 'line_cell', 'zipline')
if os.name == 'nt':
    # we need to be able to write to our temp directory on windows so we
# create a subdir in %TMP% that has write access and use that as %TMP%
def _():
import atexit
import tempfile
tempfile.tempdir = tempdir = tempfile.mkdtemp()
@atexit.register
def cleanup_tempdir():
import shutil
shutil.rmtree(tempdir)
_()
del _
__all__ = [
'Blotter',
'TradingAlgorithm',
'api',
'data',
'finance',
'get_calendar',
'gens',
'run_algorithm',
'utils',
'extension_args'
]
def setup(self,
np=np,
numpy_version=numpy_version,
StrictVersion=StrictVersion,
new_pandas=new_pandas):
"""Lives in zipline.__init__ for doctests."""
legacy_version = '1.13'
if numpy_version > StrictVersion(legacy_version):
self.old_opts = np.get_printoptions()
np.set_printoptions(legacy=legacy_version)
else:
self.old_opts = None
if new_pandas:
self.old_err = np.geterr()
# old pandas has numpy compat that sets this
np.seterr(all='ignore')
else:
self.old_err = None
def teardown(self, np=np):
"""Lives in zipline.__init__ for doctests."""
if self.old_err is not None:
np.seterr(**self.old_err)
if self.old_opts is not None:
np.set_printoptions(**self.old_opts)
del os
del np
del numpy_version
del StrictVersion
del new_pandas

# ---- zipline/algorithm.py ----
from collections import Iterable, namedtuple
from copy import copy
import warnings
from datetime import tzinfo, time, timedelta
import logbook
import pytz
import pandas as pd
from contextlib2 import ExitStack
import numpy as np
from itertools import chain, repeat
from six import (
exec_,
iteritems,
itervalues,
string_types,
)
from trading_calendars.utils.pandas_utils import days_at_time
from trading_calendars import get_calendar
from zipline._protocol import handle_non_market_minutes
from zipline.errors import (
AttachPipelineAfterInitialize,
CannotOrderDelistedAsset,
DuplicatePipelineName,
HistoryInInitialize,
IncompatibleCommissionModel,
IncompatibleSlippageModel,
NoSuchPipeline,
OrderDuringInitialize,
OrderInBeforeTradingStart,
PipelineOutputDuringInitialize,
RegisterAccountControlPostInit,
RegisterTradingControlPostInit,
ScheduleFunctionInvalidCalendar,
SetBenchmarkOutsideInitialize,
SetCancelPolicyPostInit,
SetCommissionPostInit,
SetSlippagePostInit,
UnsupportedCancelPolicy,
UnsupportedDatetimeFormat,
UnsupportedOrderParameters,
ZeroCapitalError
)
from zipline.finance.blotter import SimulationBlotter
from zipline.finance.controls import (
LongOnly,
MaxOrderCount,
MaxOrderSize,
MaxPositionSize,
MaxLeverage,
MinLeverage,
RestrictedListOrder
)
from zipline.finance.execution import (
LimitOrder,
MarketOrder,
StopLimitOrder,
StopOrder,
)
from zipline.finance.asset_restrictions import Restrictions
from zipline.finance.cancel_policy import NeverCancel, CancelPolicy
from zipline.finance.asset_restrictions import (
NoRestrictions,
StaticRestrictions,
SecurityListRestrictions,
)
from zipline.assets import Asset, Equity, Future
from zipline.gens.tradesimulation import AlgorithmSimulator
from zipline.finance.metrics import MetricsTracker, load as load_metrics_set
from zipline.pipeline import Pipeline
from zipline.pipeline.engine import (
ExplodingPipelineEngine,
SimplePipelineEngine,
)
from zipline.utils.api_support import (
api_method,
require_initialized,
require_not_initialized,
ZiplineAPI,
disallowed_in_before_trading_start)
from zipline.utils.input_validation import (
coerce_string,
ensure_upper_case,
error_keywords,
expect_dtypes,
expect_types,
optional,
optionally,
)
from zipline.utils.numpy_utils import int64_dtype
from zipline.utils.pandas_utils import normalize_date
from zipline.utils.cache import ExpiringCache
from zipline.utils.pandas_utils import clear_dataframe_indexer_caches
import zipline.utils.events
from zipline.utils.events import (
EventManager,
make_eventrule,
date_rules,
time_rules,
calendars,
AfterOpen,
BeforeClose
)
from zipline.utils.math_utils import (
tolerant_equals,
round_if_near_integer,
)
from zipline.utils.preprocess import preprocess
from zipline.utils.security_list import SecurityList
import zipline.protocol
from zipline.sources.requests_csv import PandasRequestsCSV
from zipline.gens.sim_engine import MinuteSimulationClock
from zipline.sources.benchmark_source import BenchmarkSource
from zipline.zipline_warnings import ZiplineDeprecationWarning
log = logbook.Logger("ZiplineLog")
# For creating and storing pipeline instances
AttachedPipeline = namedtuple('AttachedPipeline', 'pipe chunks eager')
class TradingAlgorithm(object):
"""A class that represents a trading strategy and parameters to execute
the strategy.
Parameters
----------
*args, **kwargs
Forwarded to ``initialize`` unless listed below.
initialize : callable[context -> None], optional
Function that is called at the start of the simulation to
setup the initial context.
handle_data : callable[(context, data) -> None], optional
Function called on every bar. This is where most logic should be
implemented.
before_trading_start : callable[(context, data) -> None], optional
Function that is called before any bars have been processed each
day.
analyze : callable[(context, DataFrame) -> None], optional
Function that is called at the end of the backtest. This is passed
the context and the performance results for the backtest.
    teardown : callable[context -> None], optional
        Function that is called when the algorithm execution stops,
        allowing the developer to cleanly shut down the algorithm.
script : str, optional
Algoscript that contains the definitions for the four algorithm
lifecycle functions and any supporting code.
namespace : dict, optional
The namespace to execute the algoscript in. By default this is an
empty namespace that will include only python built ins.
algo_filename : str, optional
The filename for the algoscript. This will be used in exception
tracebacks. default: '<string>'.
data_frequency : {'daily', 'minute'}, optional
The duration of the bars.
    performance_callback : callable[(perf) -> None], optional
        A callback that receives performance results every day, not only
        at the end of the backtest. This makes it possible to run live and
        monitor the performance of the algorithm daily.
    stop_execution_callback : callable[() -> bool], optional
        A callback to check whether execution should be stopped. It is
        used to stop live trading (a simulation can also be stopped this
        way). If the callback returns True, algorithm execution is aborted.
equities_metadata : dict or DataFrame or file-like object, optional
If dict is provided, it must have the following structure:
* keys are the identifiers
* values are dicts containing the metadata, with the metadata
field name as the key
If pandas.DataFrame is provided, it must have the
following structure:
* column names must be the metadata fields
* index must be the different asset identifiers
* array contents should be the metadata value
If an object with a ``read`` method is provided, ``read`` must
return rows containing at least one of 'sid' or 'symbol' along
with the other metadata fields.
futures_metadata : dict or DataFrame or file-like object, optional
The same layout as ``equities_metadata`` except that it is used
for futures information.
identifiers : list, optional
Any asset identifiers that are not provided in the
equities_metadata, but will be traded by this TradingAlgorithm.
get_pipeline_loader : callable[BoundColumn -> PipelineLoader], optional
The function that maps pipeline columns to their loaders.
create_event_context : callable[BarData -> context manager], optional
A function used to create a context mananger that wraps the
execution of all events that are scheduled for a bar.
This function will be passed the data for the bar and should
return the actual context manager that will be entered.
history_container_class : type, optional
The type of history container to use. default: HistoryContainer
platform : str, optional
The platform the simulation is running on. This can be queried for
in the simulation with ``get_environment``. This allows algorithms
to conditionally execute code based on platform it is running on.
default: 'zipline'
adjustment_reader : AdjustmentReader
The interface to the adjustments.
"""
def __init__(self,
sim_params,
data_portal=None,
asset_finder=None,
# Algorithm API
namespace=None,
script=None,
algo_filename=None,
initialize=None,
handle_data=None,
before_trading_start=None,
analyze=None,
teardown=None,
#
trading_calendar=None,
metrics_set=None,
blotter=None,
blotter_class=None,
cancel_policy=None,
benchmark_sid=None,
benchmark_returns=None,
platform='zipline',
capital_changes=None,
get_pipeline_loader=None,
create_event_context=None,
performance_callback=None,
stop_execution_callback=None,
**initialize_kwargs):
# List of trading controls to be used to validate orders.
self.trading_controls = []
# List of account controls to be checked on each bar.
self.account_controls = []
self._recorded_vars = {}
self.namespace = namespace or {}
self._platform = platform
self.logger = None
# XXX: This is kind of a mess.
# We support passing a data_portal in `run`, but we need an asset
# finder earlier than that to look up assets for things like
# set_benchmark.
self.data_portal = data_portal
if self.data_portal is None:
if asset_finder is None:
raise ValueError(
"Must pass either data_portal or asset_finder "
"to TradingAlgorithm()"
)
self.asset_finder = asset_finder
else:
# Raise an error if we were passed two different asset finders.
# There's no world where that's a good idea.
if asset_finder is not None \
and asset_finder is not data_portal.asset_finder:
raise ValueError(
"Inconsistent asset_finders in TradingAlgorithm()"
)
self.asset_finder = data_portal.asset_finder
self.benchmark_returns = benchmark_returns
# XXX: This is also a mess. We should remove all of this and only allow
# one way to pass a calendar.
#
# We have a required sim_params argument as well as an optional
# trading_calendar argument, but sim_params has a trading_calendar
# attribute. If the user passed trading_calendar explicitly, make sure
# it matches their sim_params. Otherwise, just use what's in their
# sim_params.
self.sim_params = sim_params
if trading_calendar is None:
self.trading_calendar = sim_params.trading_calendar
elif trading_calendar.name == sim_params.trading_calendar.name:
self.trading_calendar = sim_params.trading_calendar
else:
raise ValueError(
"Conflicting calendars: trading_calendar={}, but "
"sim_params.trading_calendar={}".format(
trading_calendar.name,
self.sim_params.trading_calendar.name,
)
)
self.metrics_tracker = None
self._last_sync_time = pd.NaT
self._metrics_set = metrics_set
if self._metrics_set is None:
self._metrics_set = load_metrics_set('default')
# Initialize Pipeline API data.
self.init_engine(get_pipeline_loader)
self._pipelines = {}
# Create an already-expired cache so that we compute the first time
# data is requested.
self._pipeline_cache = ExpiringCache(
cleanup=clear_dataframe_indexer_caches
)
if blotter is not None:
self.blotter = blotter
else:
cancel_policy = cancel_policy or NeverCancel()
blotter_class = blotter_class or SimulationBlotter
self.blotter = blotter_class(cancel_policy=cancel_policy)
# The symbol lookup date specifies the date to use when resolving
# symbols to sids, and can be set using set_symbol_lookup_date()
self._symbol_lookup_date = None
# If string is passed in, execute and get reference to
# functions.
self.algoscript = script
self._initialize = None
self._before_trading_start = None
self._analyze = None
self._performance_callback = None
self._stop_execution_callback = None
self._in_before_trading_start = False
self.event_manager = EventManager(create_event_context)
self._handle_data = None
def noop(*args, **kwargs):
pass
if self.algoscript is not None:
unexpected_api_methods = set()
if initialize is not None:
unexpected_api_methods.add('initialize')
if handle_data is not None:
unexpected_api_methods.add('handle_data')
if before_trading_start is not None:
unexpected_api_methods.add('before_trading_start')
if analyze is not None:
unexpected_api_methods.add('analyze')
if teardown is not None:
unexpected_api_methods.add('teardown')
if unexpected_api_methods:
raise ValueError(
"TradingAlgorithm received a script and the following API"
" methods as functions:\n{funcs}".format(
funcs=unexpected_api_methods,
)
)
if algo_filename is None:
algo_filename = '<string>'
code = compile(self.algoscript, algo_filename, 'exec')
exec_(code, self.namespace)
self._initialize = self.namespace.get('initialize', noop)
self._handle_data = self.namespace.get('handle_data', noop)
self._before_trading_start = self.namespace.get(
'before_trading_start',
)
# Optional analyze function, gets called after run
self._analyze = self.namespace.get('analyze')
self._teardown = self.namespace.get('teardown')
else:
self._initialize = initialize or (lambda self: None)
self._handle_data = handle_data
self._before_trading_start = before_trading_start
self._analyze = analyze
self._teardown = teardown
self._performance_callback = performance_callback
self._stop_execution_callback = stop_execution_callback
self.event_manager.add_event(
zipline.utils.events.Event(
zipline.utils.events.Always(),
# We pass handle_data.__func__ to get the unbound method.
# We will explicitly pass the algorithm to bind it again.
self.handle_data.__func__,
),
prepend=True,
)
if self.sim_params.capital_base <= 0:
raise ZeroCapitalError()
# Prepare the algo for initialization
self.initialized = False
self.initialize_kwargs = initialize_kwargs or {}
self.benchmark_sid = benchmark_sid
# A dictionary of capital changes, keyed by timestamp, indicating the
# target/delta of the capital changes, along with values
self.capital_changes = capital_changes or {}
# A dictionary of the actual capital change deltas, keyed by timestamp
self.capital_change_deltas = {}
self.restrictions = NoRestrictions()
self._backwards_compat_universe = None
def init_engine(self, get_loader):
"""
Construct and store a PipelineEngine from loader.
If get_loader is None, constructs an ExplodingPipelineEngine
"""
if get_loader is not None:
self.engine = SimplePipelineEngine(
get_loader,
self.trading_calendar.all_sessions,
self.asset_finder,
)
else:
self.engine = ExplodingPipelineEngine()
def initialize(self, *args, **kwargs):
"""
Call self._initialize with `self` made available to Zipline API
functions.
"""
with ZiplineAPI(self):
self._initialize(self, *args, **kwargs)
def before_trading_start(self, data):
self.compute_eager_pipelines()
if hasattr(self, "broker"):
            # we are live, so we need to update our portfolio from the
            # broker before we start
self.broker._get_positions_from_broker()
if self._before_trading_start is None:
return
self._in_before_trading_start = True
with handle_non_market_minutes(data) if \
self.data_frequency == "minute" else ExitStack():
self._before_trading_start(self, data)
self._in_before_trading_start = False
def handle_data(self, data):
if self._handle_data:
self._handle_data(self, data)
def teardown(self):
if self._teardown:
self._teardown(self)
def analyze(self, perf):
if self._analyze is None:
return
with ZiplineAPI(self):
self._analyze(self, perf)
def __repr__(self):
"""
N.B. this does not yet represent a string that can be used
to instantiate an exact copy of an algorithm.
However, it is getting close, and provides some value as something
that can be inspected interactively.
"""
return """
{class_name}(
capital_base={capital_base}
sim_params={sim_params},
initialized={initialized},
slippage_models={slippage_models},
commission_models={commission_models},
blotter={blotter},
recorded_vars={recorded_vars})
""".strip().format(class_name=self.__class__.__name__,
capital_base=self.sim_params.capital_base,
sim_params=repr(self.sim_params),
initialized=self.initialized,
slippage_models=repr(self.blotter.slippage_models),
commission_models=repr(self.blotter.commission_models),
blotter=repr(self.blotter),
recorded_vars=repr(self.recorded_vars))
def _create_clock(self):
"""
If the clock property is not set, then create one based on frequency.
"""
        trading_o_and_c = self.trading_calendar.schedule.loc[
            self.sim_params.sessions]
market_closes = trading_o_and_c['market_close']
minutely_emission = False
if self.sim_params.data_frequency == 'minute':
market_opens = trading_o_and_c['market_open']
minutely_emission = self.sim_params.emission_rate == "minute"
# The calendar's execution times are the minutes over which we
# actually want to run the clock. Typically the execution times
# simply adhere to the market open and close times. In the case of
# the futures calendar, for example, we only want to simulate over
# a subset of the full 24 hour calendar, so the execution times
# dictate a market open time of 6:31am US/Eastern and a close of
# 5:00pm US/Eastern.
execution_opens = \
self.trading_calendar.execution_time_from_open(market_opens)
execution_closes = \
self.trading_calendar.execution_time_from_close(market_closes)
else:
# in daily mode, we want to have one bar per session, timestamped
# as the last minute of the session.
execution_closes = \
self.trading_calendar.execution_time_from_close(market_closes)
execution_opens = execution_closes
# FIXME generalize these values
before_trading_start_minutes = days_at_time(
self.sim_params.sessions,
time(8, 45),
"US/Eastern"
)
return MinuteSimulationClock(
self.sim_params.sessions,
execution_opens,
execution_closes,
before_trading_start_minutes,
minute_emission=minutely_emission,
)
def _create_benchmark_source(self):
if self.benchmark_sid is not None:
benchmark_asset = self.asset_finder.retrieve_asset(
self.benchmark_sid
)
benchmark_returns = None
else:
if self.benchmark_returns is None:
raise ValueError("Must specify either benchmark_sid "
"or benchmark_returns.")
benchmark_asset = None
# get benchmark info from trading environment, which defaults to
# downloading data from IEX Trading.
benchmark_returns = self.benchmark_returns
return BenchmarkSource(
benchmark_asset=benchmark_asset,
benchmark_returns=benchmark_returns,
trading_calendar=self.trading_calendar,
sessions=self.sim_params.sessions,
data_portal=self.data_portal,
emission_rate=self.sim_params.emission_rate,
)
def _create_metrics_tracker(self):
return MetricsTracker(
trading_calendar=self.trading_calendar,
first_session=self.sim_params.start_session,
last_session=self.sim_params.end_session,
capital_base=self.sim_params.capital_base,
emission_rate=self.sim_params.emission_rate,
data_frequency=self.sim_params.data_frequency,
asset_finder=self.asset_finder,
metrics=self._metrics_set,
)
def _create_generator(self, sim_params):
if sim_params is not None:
self.sim_params = sim_params
self.metrics_tracker = metrics_tracker = self._create_metrics_tracker()
# Set the dt initially to the period start by forcing it to change.
self.on_dt_changed(self.sim_params.start_session)
if not self.initialized:
self.initialize(**self.initialize_kwargs)
self.initialized = True
benchmark_source = self._create_benchmark_source()
self.trading_client = AlgorithmSimulator(
self,
sim_params,
self.data_portal,
self._create_clock(),
benchmark_source,
self.restrictions,
universe_func=self._calculate_universe
)
metrics_tracker.handle_start_of_simulation(benchmark_source)
return self.trading_client.transform()
def _calculate_universe(self):
# this exists to provide backwards compatibility for older,
# deprecated APIs, particularly around the iterability of
# BarData (ie, 'for sid in data`).
if self._backwards_compat_universe is None:
self._backwards_compat_universe = (
self.asset_finder.retrieve_all(self.asset_finder.sids)
)
return self._backwards_compat_universe
def compute_eager_pipelines(self):
"""
Compute any pipelines attached with eager=True.
"""
for name, pipe in self._pipelines.items():
if pipe.eager:
self.pipeline_output(name)
def get_generator(self):
"""
Override this method to add new logic to the construction
of the generator. Overrides can use the _create_generator
method to get a standard construction generator.
"""
return self._create_generator(self.sim_params)
def run(self, data_portal=None):
"""Run the algorithm.
"""
# HACK: I don't think we really want to support passing a data portal
# this late in the long term, but this is needed for now for backwards
# compat downstream.
if data_portal is not None:
self.data_portal = data_portal
self.asset_finder = data_portal.asset_finder
elif self.data_portal is None:
raise RuntimeError(
"No data portal in TradingAlgorithm.run().\n"
"Either pass a DataPortal to TradingAlgorithm() or to run()."
)
else:
assert self.asset_finder is not None, \
"Have data portal without asset_finder."
# Create zipline and loop through simulated_trading.
# Each iteration returns a perf dictionary
try:
perfs = []
for perf in self.get_generator():
perfs.append(perf)
if self._performance_callback:
# this is called daily
self._performance_callback(perf)
# convert perf dict to pandas dataframe
daily_stats = self._create_daily_stats(perfs)
self.analyze(daily_stats)
finally:
self.data_portal = None
self.metrics_tracker = None
return daily_stats
def _create_daily_stats(self, perfs):
# create daily and cumulative stats dataframe
daily_perfs = []
# TODO: the loop here could overwrite expected properties
# of daily_perf. Could potentially raise or log a
# warning.
for perf in perfs:
if perf and 'daily_perf' in perf:
perf['daily_perf'].update(
perf['daily_perf'].pop('recorded_vars')
)
perf['daily_perf'].update(perf['cumulative_risk_metrics'])
daily_perfs.append(perf['daily_perf'])
else:
self.risk_report = perf
daily_dts = pd.DatetimeIndex(
[p['period_close'] for p in daily_perfs], tz='UTC'
)
daily_stats = pd.DataFrame(daily_perfs, index=daily_dts)
return daily_stats
def calculate_capital_changes(self, dt, emission_rate, is_interday,
portfolio_value_adjustment=0.0):
"""
        If there is a capital change for a given dt, this means the change
        occurs before `handle_data` on the given dt. In the case of the
change being a target value, the change will be computed on the
portfolio value according to prices at the given dt
`portfolio_value_adjustment`, if specified, will be removed from the
portfolio_value of the cumulative performance when calculating deltas
from target capital changes.
"""
try:
capital_change = self.capital_changes[dt]
except KeyError:
return
self._sync_last_sale_prices()
if capital_change['type'] == 'target':
target = capital_change['value']
capital_change_amount = (
target -
(
self.portfolio.portfolio_value -
portfolio_value_adjustment
)
)
log.info('Processing capital change to target %s at %s. Capital '
'change delta is %s' % (target, dt,
capital_change_amount))
elif capital_change['type'] == 'delta':
target = None
capital_change_amount = capital_change['value']
log.info('Processing capital change of delta %s at %s'
% (capital_change_amount, dt))
else:
log.error("Capital change %s does not indicate a valid type "
"('target' or 'delta')" % capital_change)
return
self.capital_change_deltas.update({dt: capital_change_amount})
self.metrics_tracker.capital_change(capital_change_amount)
yield {
'capital_change':
{'date': dt,
'type': 'cash',
'target': target,
'delta': capital_change_amount}
}
@api_method
def get_environment(self, field='platform'):
"""Query the execution environment.
Parameters
----------
field : {'platform', 'arena', 'data_frequency',
'start', 'end', 'capital_base', 'platform', '*'}
The field to query. The options have the following meanings:
arena : str
                The arena from the simulation parameters. This will normally
                be ``'backtest'`` but some systems may use this to
                distinguish live trading from backtesting.
data_frequency : {'daily', 'minute'}
data_frequency tells the algorithm if it is running with
daily data or minute data.
start : datetime
The start date for the simulation.
end : datetime
The end date for the simulation.
capital_base : float
The starting capital for the simulation.
platform : str
The platform that the code is running on. By default this
will be the string 'zipline'. This can allow algorithms to
know if they are running on the Quantopian platform instead.
* : dict[str -> any]
Returns all of the fields in a dictionary.
Returns
-------
val : any
The value for the field queried. See above for more information.
Raises
------
ValueError
Raised when ``field`` is not a valid option.
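        Examples
        --------
        .. code-block:: python

            # Illustrative: adapt behavior to the data frequency the
            # algorithm is running with.
            if get_environment('data_frequency') == 'minute':
                context.bars_per_day = 390
            else:
                context.bars_per_day = 1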
"""
env = {
'arena': self.sim_params.arena,
'data_frequency': self.sim_params.data_frequency,
'start': self.sim_params.first_open,
'end': self.sim_params.last_close,
'capital_base': self.sim_params.capital_base,
'platform': self._platform
}
if field == '*':
return env
else:
try:
return env[field]
except KeyError:
raise ValueError(
'%r is not a valid field for get_environment' % field,
)
@api_method
def fetch_csv(self,
url,
pre_func=None,
post_func=None,
date_column='date',
date_format=None,
timezone=pytz.utc.zone,
symbol=None,
mask=True,
symbol_column=None,
special_params_checker=None,
**kwargs):
"""Fetch a csv from a remote url and register the data so that it is
queryable from the ``data`` object.
Parameters
----------
url : str
The url of the csv file to load.
pre_func : callable[pd.DataFrame -> pd.DataFrame], optional
            A callback to allow preprocessing the raw data returned from
            fetch_csv before dates are parsed or symbols are mapped.
post_func : callable[pd.DataFrame -> pd.DataFrame], optional
A callback to allow postprocessing of the data after dates and
symbols have been mapped.
date_column : str, optional
The name of the column in the preprocessed dataframe containing
datetime information to map the data.
date_format : str, optional
The format of the dates in the ``date_column``. If not provided
``fetch_csv`` will attempt to infer the format. For information
about the format of this string, see :func:`pandas.read_csv`.
timezone : tzinfo or str, optional
The timezone for the datetime in the ``date_column``.
symbol : str, optional
If the data is about a new asset or index then this string will
be the name used to identify the values in ``data``. For example,
one may use ``fetch_csv`` to load data for VIX, then this field
could be the string ``'VIX'``.
mask : bool, optional
Drop any rows which cannot be symbol mapped.
symbol_column : str
If the data is attaching some new attribute to each asset then this
argument is the name of the column in the preprocessed dataframe
containing the symbols. This will be used along with the date
information to map the sids in the asset finder.
**kwargs
Forwarded to :func:`pandas.read_csv`.
Returns
-------
csv_data_source : zipline.sources.requests_csv.PandasRequestsCSV
A requests source that will pull data from the url specified.
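        Examples
        --------
        .. code-block:: python

            # A minimal sketch; the URL and column names are hypothetical.
            fetch_csv(
                'https://example.com/vix.csv',
                date_column='Date',
                symbol='VIX',
            )
            # Columns of the csv then become queryable fields on ``data``.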
"""
# Show all the logs every time fetcher is used.
csv_data_source = PandasRequestsCSV(
url,
pre_func,
post_func,
self.asset_finder,
self.trading_calendar.day,
self.sim_params.start_session,
self.sim_params.end_session,
date_column,
date_format,
timezone,
symbol,
mask,
symbol_column,
data_frequency=self.data_frequency,
special_params_checker=special_params_checker,
**kwargs
)
# ingest this into dataportal
self.data_portal.handle_extra_source(csv_data_source.df,
self.sim_params)
return csv_data_source
def add_event(self, rule, callback):
"""Adds an event to the algorithm's EventManager.
Parameters
----------
rule : EventRule
The rule for when the callback should be triggered.
callback : callable[(context, data) -> None]
The function to execute when the rule is triggered.
"""
self.event_manager.add_event(
zipline.utils.events.Event(rule, callback),
)
@api_method
def schedule_function(self,
func,
date_rule=None,
time_rule=None,
half_days=True,
calendar=None):
"""Schedules a function to be called according to some timed rules.
Parameters
----------
func : callable[(context, data) -> None]
The function to execute when the rule is triggered.
date_rule : EventRule, optional
The rule for the dates to execute this function.
time_rule : EventRule, optional
The rule for the times to execute this function.
half_days : bool, optional
Should this rule fire on half days?
calendar : Sentinel, optional
Calendar used to reconcile date and time rules.
See Also
--------
:class:`zipline.api.date_rules`
:class:`zipline.api.time_rules`
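        Examples
        --------
        .. code-block:: python

            # Illustrative: run a user-defined ``rebalance`` function 30
            # minutes after the open on the first trading day of each week.
            schedule_function(
                rebalance,
                date_rule=date_rules.week_start(),
                time_rule=time_rules.market_open(minutes=30),
            )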
"""
# When the user calls schedule_function(func, <time_rule>), assume that
# the user meant to specify a time rule but no date rule, instead of
# a date rule and no time rule as the signature suggests
if isinstance(date_rule, (AfterOpen, BeforeClose)) and not time_rule:
warnings.warn('Got a time rule for the second positional argument '
'date_rule. You should use keyword argument '
'time_rule= when calling schedule_function without '
'specifying a date_rule', stacklevel=3)
date_rule = date_rule or date_rules.every_day()
time_rule = ((time_rule or time_rules.every_minute())
if self.sim_params.data_frequency == 'minute' else
# If we are in daily mode the time_rule is ignored.
time_rules.every_minute())
# Check the type of the algorithm's schedule before pulling calendar
# Note that the ExchangeTradingSchedule is currently the only
# TradingSchedule class, so this is unlikely to be hit
if calendar is None:
cal = self.trading_calendar
elif calendar is calendars.US_EQUITIES:
cal = get_calendar('NYSE')
elif calendar is calendars.US_FUTURES:
cal = get_calendar('us_futures')
else:
raise ScheduleFunctionInvalidCalendar(
given_calendar=calendar,
allowed_calendars=(
'[calendars.US_EQUITIES, calendars.US_FUTURES]'
),
)
self.add_event(
make_eventrule(date_rule, time_rule, cal, half_days),
func,
)
@api_method
def record(self, *args, **kwargs):
"""Track and record values each day.
Parameters
----------
**kwargs
The names and values to record.
Notes
-----
These values will appear in the performance packets and the performance
dataframe passed to ``analyze`` and returned from
:func:`~zipline.run_algorithm`.
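        Examples
        --------
        .. code-block:: python

            # The two calls below are equivalent; names and values may be
            # passed positionally in pairs or as keywords.
            record('leverage', context.account.leverage)
            record(leverage=context.account.leverage)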
"""
# Make 2 objects both referencing the same iterator
args = [iter(args)] * 2
# Zip generates list entries by calling `next` on each iterator it
# receives. In this case the two iterators are the same object, so the
# call to next on args[0] will also advance args[1], resulting in zip
# returning (a,b) (c,d) (e,f) rather than (a,a) (b,b) (c,c) etc.
positionals = zip(*args)
for name, value in chain(positionals, iteritems(kwargs)):
self._recorded_vars[name] = value
@api_method
def set_benchmark(self, benchmark):
"""Set the benchmark asset.
Parameters
----------
benchmark : Asset
The asset to set as the new benchmark.
Notes
-----
        Any dividends paid out for that new benchmark asset will be
        automatically reinvested.
"""
if self.initialized:
raise SetBenchmarkOutsideInitialize()
self.benchmark_sid = benchmark
@api_method
@preprocess(root_symbol_str=ensure_upper_case)
def continuous_future(self,
root_symbol_str,
offset=0,
roll='volume',
adjustment='mul'):
"""Create a specifier for a continuous contract.
Parameters
----------
root_symbol_str : str
The root symbol for the future chain.
offset : int, optional
The distance from the primary contract. Default is 0.
        roll : str, optional
How rolls are determined. Default is 'volume'.
adjustment : str, optional
Method for adjusting lookback prices between rolls. Options are
'mul', 'add', and None. Default is 'mul'.
Returns
-------
continuous_future : ContinuousFuture
The continuous future specifier.
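        Examples
        --------
        .. code-block:: python

            # Illustrative: the front contract of a hypothetical 'CL'
            # chain, rolled by volume with multiplicative adjustment.
            cl = continuous_future('CL', offset=0, roll='volume',
                                   adjustment='mul')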
"""
return self.asset_finder.create_continuous_future(
root_symbol_str,
offset,
roll,
adjustment,
)
@api_method
@preprocess(
symbol_str=ensure_upper_case,
country_code=optionally(ensure_upper_case),
)
def symbol(self, symbol_str, country_code=None):
"""Lookup an Equity by its ticker symbol.
Parameters
----------
symbol_str : str
The ticker symbol for the equity to lookup.
country_code : str or None, optional
A country to limit symbol searches to.
Returns
-------
equity : Equity
The equity that held the ticker symbol on the current
symbol lookup date.
Raises
------
SymbolNotFound
            Raised when the symbol was not held on the current lookup date.
See Also
--------
:func:`zipline.api.set_symbol_lookup_date`
"""
# If the user has not set the symbol lookup date,
# use the end_session as the date for symbol->sid resolution.
_lookup_date = self._symbol_lookup_date \
if self._symbol_lookup_date is not None \
else self.sim_params.end_session
return self.asset_finder.lookup_symbol(
symbol_str,
as_of_date=_lookup_date,
country_code=country_code,
)
@api_method
def symbols(self, *args, **kwargs):
"""Lookup multuple Equities as a list.
Parameters
----------
*args : iterable[str]
The ticker symbols to lookup.
country_code : str or None, optional
A country to limit symbol searches to.
Returns
-------
equities : list[Equity]
The equities that held the given ticker symbols on the current
symbol lookup date.
Raises
------
SymbolNotFound
Raised when one of the symbols was not held on the current
lookup date.
See Also
--------
:func:`zipline.api.set_symbol_lookup_date`
"""
return [self.symbol(identifier, **kwargs) for identifier in args]
@api_method
def sid(self, sid):
"""Lookup an Asset by its unique asset identifier.
Parameters
----------
sid : int
The unique integer that identifies an asset.
Returns
-------
asset : Asset
The asset with the given ``sid``.
Raises
------
SidsNotFound
When a requested ``sid`` does not map to any asset.
"""
return self.asset_finder.retrieve_asset(sid)
@api_method
@preprocess(symbol=ensure_upper_case)
def future_symbol(self, symbol):
"""Lookup a futures contract with a given symbol.
Parameters
----------
symbol : str
The symbol of the desired contract.
Returns
-------
future : Future
The future that trades with the name ``symbol``.
Raises
------
SymbolNotFound
Raised when no contract named 'symbol' is found.
"""
return self.asset_finder.lookup_future_symbol(symbol)
def _calculate_order_value_amount(self, asset, value):
"""
Calculates how many shares/contracts to order based on the type of
asset being ordered.
"""
# Make sure the asset exists, and that there is a last price for it.
# FIXME: we should use BarData's can_trade logic here, but I haven't
# yet found a good way to do that.
normalized_date = normalize_date(self.datetime)
if normalized_date < asset.start_date:
raise CannotOrderDelistedAsset(
msg="Cannot order {0}, as it started trading on"
" {1}.".format(asset.symbol, asset.start_date)
)
elif normalized_date > asset.end_date:
raise CannotOrderDelistedAsset(
msg="Cannot order {0}, as it stopped trading on"
" {1}.".format(asset.symbol, asset.end_date)
)
else:
last_price = \
self.trading_client.current_data.current(asset, "price")
if np.isnan(last_price):
raise CannotOrderDelistedAsset(
msg="Cannot order {0} on {1} as there is no last "
"price for the security.".format(asset.symbol,
self.datetime)
)
if tolerant_equals(last_price, 0):
zero_message = "Price of 0 for {psid}; can't infer value".format(
psid=asset
)
if self.logger:
self.logger.debug(zero_message)
# Don't place any order
return 0
value_multiplier = asset.price_multiplier
return value / (last_price * value_multiplier)
def _can_order_asset(self, asset):
if not isinstance(asset, Asset):
raise UnsupportedOrderParameters(
msg="Passing non-Asset argument to 'order()' is not supported."
" Use 'sid()' or 'symbol()' methods to look up an Asset."
)
if asset.auto_close_date:
day = normalize_date(self.get_datetime())
end_date = min(asset.end_date, asset.auto_close_date)
if isinstance(end_date, str):
from dateutil import parser
end_date = parser.parse(end_date).replace(tzinfo=pytz.UTC)
            # When running pipeline live, the end date is always yesterday,
            # so we add 5 days to keep this check while still supporting
            # live pipelines; 5 days comfortably covers weekends and
            # holidays.
if day > end_date + timedelta(days=5):
# If we are after the asset's end date or auto close date, warn
# the user that they can't place an order for this asset, and
# return None.
log.warn("Cannot place order for {0}, as it has de-listed. "
"Any existing positions for this asset will be "
"liquidated on "
"{1}.".format(asset.symbol, asset.auto_close_date))
return False
return True
@api_method
@disallowed_in_before_trading_start(OrderInBeforeTradingStart())
def order(self,
asset,
amount,
limit_price=None,
stop_price=None,
style=None):
"""Place an order.
Parameters
----------
asset : Asset
The asset that this order is for.
amount : int
The amount of shares to order. If ``amount`` is positive, this is
the number of shares to buy or cover. If ``amount`` is negative,
this is the number of shares to sell or short.
limit_price : float, optional
The limit price for the order.
stop_price : float, optional
The stop price for the order.
style : ExecutionStyle, optional
The execution style for the order.
Returns
-------
order_id : str or None
The unique identifier for this order, or None if no order was
placed.
Notes
-----
The ``limit_price`` and ``stop_price`` arguments provide shorthands for
passing common execution styles. Passing ``limit_price=N`` is
equivalent to ``style=LimitOrder(N)``. Similarly, passing
``stop_price=M`` is equivalent to ``style=StopOrder(M)``, and passing
``limit_price=N`` and ``stop_price=M`` is equivalent to
``style=StopLimitOrder(N, M)``. It is an error to pass both a ``style``
and ``limit_price`` or ``stop_price``.
See Also
--------
:class:`zipline.finance.execution.ExecutionStyle`
:func:`zipline.api.order_value`
:func:`zipline.api.order_percent`
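        Examples
        --------
        .. code-block:: python

            # Illustrative; assumes an asset looked up via symbol().
            asset = symbol('XYZ')
            order(asset, 100)                     # market order
            order(asset, 100, limit_price=10.0)   # LimitOrder(10.0)
            order(asset, -100, stop_price=9.0)    # StopOrder(9.0)
            # Equivalent to style=StopLimitOrder(10.0, 9.5):
            order(asset, 100, limit_price=10.0, stop_price=9.5)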
"""
if not self._can_order_asset(asset):
return None
amount, style = self._calculate_order(asset, amount,
limit_price, stop_price, style)
return self.blotter.order(asset, amount, style)
def _calculate_order(self, asset, amount,
limit_price=None, stop_price=None, style=None):
amount = self.round_order(amount)
# Raises a ZiplineError if invalid parameters are detected.
self.validate_order_params(asset,
amount,
limit_price,
stop_price,
style)
# Convert deprecated limit_price and stop_price parameters to use
# ExecutionStyle objects.
style = self.__convert_order_params_for_blotter(asset,
limit_price,
stop_price,
style)
return amount, style
@staticmethod
def round_order(amount):
"""
Convert number of shares to an integer.
By default, truncates to the integer share count that's either within
.0001 of amount or closer to zero.
E.g. 3.9999 -> 4.0; 5.5 -> 5.0; -5.5 -> -5.0
"""
return int(round_if_near_integer(amount))
def validate_order_params(self,
asset,
amount,
limit_price,
stop_price,
style):
"""
Helper method for validating parameters to the order API function.
Raises an UnsupportedOrderParameters if invalid arguments are found.
"""
if not self.initialized:
raise OrderDuringInitialize(
msg="order() can only be called from within handle_data()"
)
if style:
if limit_price:
raise UnsupportedOrderParameters(
msg="Passing both limit_price and style is not supported."
)
if stop_price:
raise UnsupportedOrderParameters(
msg="Passing both stop_price and style is not supported."
)
for control in self.trading_controls:
control.validate(asset,
amount,
self.portfolio,
self.get_datetime(),
self.trading_client.current_data)
@staticmethod
def __convert_order_params_for_blotter(asset,
limit_price,
stop_price,
style):
"""
Helper method for converting deprecated limit_price and stop_price
arguments into ExecutionStyle instances.
This function assumes that either style == None or (limit_price,
stop_price) == (None, None).
"""
if style:
assert (limit_price, stop_price) == (None, None)
return style
if limit_price and stop_price:
return StopLimitOrder(limit_price, stop_price, asset=asset)
if limit_price:
return LimitOrder(limit_price, asset=asset)
if stop_price:
return StopOrder(stop_price, asset=asset)
else:
return MarketOrder()
@api_method
@disallowed_in_before_trading_start(OrderInBeforeTradingStart())
def order_value(self,
asset,
value,
limit_price=None,
stop_price=None,
style=None):
"""Place an order by desired value rather than desired number of
shares.
Parameters
----------
asset : Asset
The asset that this order is for.
value : float
If the requested asset exists, the requested value is
divided by its price to imply the number of shares to transact.
If the Asset being ordered is a Future, the 'value' calculated
is actually the exposure, as Futures have no 'value'.
value > 0 :: Buy/Cover
value < 0 :: Sell/Short
limit_price : float, optional
The limit price for the order.
stop_price : float, optional
The stop price for the order.
style : ExecutionStyle
The execution style for the order.
Returns
-------
order_id : str
The unique identifier for this order.
Notes
-----
See :func:`zipline.api.order` for more information about
``limit_price``, ``stop_price``, and ``style``
See Also
--------
:class:`zipline.finance.execution.ExecutionStyle`
:func:`zipline.api.order`
:func:`zipline.api.order_percent`
"""
if not self._can_order_asset(asset):
return None
amount = self._calculate_order_value_amount(asset, value)
return self.order(asset, amount,
limit_price=limit_price,
stop_price=stop_price,
style=style)
@property
def recorded_vars(self):
return copy(self._recorded_vars)
def _sync_last_sale_prices(self, dt=None):
"""Sync the last sale prices on the metrics tracker to a given
datetime.
Parameters
----------
dt : datetime
The time to sync the prices to.
Notes
-----
This call is cached by the datetime. Repeated calls in the same bar
are cheap.
"""
if dt is None:
dt = self.datetime
if dt != self._last_sync_time:
self.metrics_tracker.sync_last_sale_prices(
dt,
self.data_portal,
)
self._last_sync_time = dt
@property
def portfolio(self):
self._sync_last_sale_prices()
return self.metrics_tracker.portfolio
@property
def account(self):
self._sync_last_sale_prices()
return self.metrics_tracker.account
def set_logger(self, logger):
self.logger = logger
def on_dt_changed(self, dt):
"""
Callback triggered by the simulation loop whenever the current dt
changes.
Any logic that should happen exactly once at the start of each datetime
group should happen here.
"""
self.datetime = dt
self.blotter.set_date(dt)
@api_method
@preprocess(tz=coerce_string(pytz.timezone))
@expect_types(tz=optional(tzinfo))
def get_datetime(self, tz=None):
"""
Returns the current simulation datetime.
Parameters
----------
tz : tzinfo or str, optional
The timezone to return the datetime in. This defaults to utc.
Returns
-------
dt : datetime
The current simulation datetime converted to ``tz``.
"""
dt = self.datetime
assert dt.tzinfo == pytz.utc, "Algorithm should have a utc datetime"
if tz is not None:
dt = dt.astimezone(tz)
return dt
@api_method
def set_slippage(self, us_equities=None, us_futures=None):
"""Set the slippage models for the simulation.
Parameters
----------
us_equities : EquitySlippageModel
The slippage model to use for trading US equities.
us_futures : FutureSlippageModel
The slippage model to use for trading US futures.
See Also
--------
:class:`zipline.finance.slippage.SlippageModel`
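        Examples
        --------
        .. code-block:: python

            # A minimal sketch, assuming ``zipline.finance.slippage`` is
            # imported as ``slippage``; call from initialize().
            set_slippage(us_equities=slippage.VolumeShareSlippage(
                volume_limit=0.025,
                price_impact=0.1,
            ))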
"""
if self.initialized:
raise SetSlippagePostInit()
if us_equities is not None:
if Equity not in us_equities.allowed_asset_types:
raise IncompatibleSlippageModel(
asset_type='equities',
given_model=us_equities,
supported_asset_types=us_equities.allowed_asset_types,
)
self.blotter.slippage_models[Equity] = us_equities
if us_futures is not None:
if Future not in us_futures.allowed_asset_types:
raise IncompatibleSlippageModel(
asset_type='futures',
given_model=us_futures,
supported_asset_types=us_futures.allowed_asset_types,
)
self.blotter.slippage_models[Future] = us_futures
@api_method
def set_commission(self, us_equities=None, us_futures=None):
"""Sets the commission models for the simulation.
Parameters
----------
us_equities : EquityCommissionModel
The commission model to use for trading US equities.
us_futures : FutureCommissionModel
The commission model to use for trading US futures.
See Also
--------
:class:`zipline.finance.commission.PerShare`
:class:`zipline.finance.commission.PerTrade`
:class:`zipline.finance.commission.PerDollar`
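        Examples
        --------
        .. code-block:: python

            # A minimal sketch, assuming ``zipline.finance.commission`` is
            # imported as ``commission``; call from initialize().
            set_commission(us_equities=commission.PerShare(
                cost=0.001,
                min_trade_cost=1.0,
            ))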
"""
if self.initialized:
raise SetCommissionPostInit()
if us_equities is not None:
if Equity not in us_equities.allowed_asset_types:
raise IncompatibleCommissionModel(
asset_type='equities',
given_model=us_equities,
supported_asset_types=us_equities.allowed_asset_types,
)
self.blotter.commission_models[Equity] = us_equities
if us_futures is not None:
if Future not in us_futures.allowed_asset_types:
raise IncompatibleCommissionModel(
asset_type='futures',
given_model=us_futures,
supported_asset_types=us_futures.allowed_asset_types,
)
self.blotter.commission_models[Future] = us_futures
@api_method
def set_cancel_policy(self, cancel_policy):
"""Sets the order cancellation policy for the simulation.
Parameters
----------
cancel_policy : CancelPolicy
The cancellation policy to use.
See Also
--------
:class:`zipline.api.EODCancel`
:class:`zipline.api.NeverCancel`
"""
if not isinstance(cancel_policy, CancelPolicy):
raise UnsupportedCancelPolicy()
if self.initialized:
raise SetCancelPolicyPostInit()
self.blotter.cancel_policy = cancel_policy
@api_method
def set_symbol_lookup_date(self, dt):
"""Set the date for which symbols will be resolved to their assets
(symbols may map to different firms or underlying assets at
different times)
Parameters
----------
dt : datetime
The new symbol lookup date.
"""
try:
self._symbol_lookup_date = pd.Timestamp(dt, tz='UTC')
except ValueError:
raise UnsupportedDatetimeFormat(input=dt,
method='set_symbol_lookup_date')
    # Retained for backwards compatibility
@property
def data_frequency(self):
return self.sim_params.data_frequency
@data_frequency.setter
def data_frequency(self, value):
assert value in ('daily', 'minute')
self.sim_params.data_frequency = value
@api_method
@disallowed_in_before_trading_start(OrderInBeforeTradingStart())
def order_percent(self,
asset,
percent,
limit_price=None,
stop_price=None,
style=None):
"""Place an order in the specified asset corresponding to the given
percent of the current portfolio value.
Parameters
----------
asset : Asset
The asset that this order is for.
percent : float
The percentage of the portfolio value to allocate to ``asset``.
This is specified as a decimal, for example: 0.50 means 50%.
limit_price : float, optional
The limit price for the order.
stop_price : float, optional
The stop price for the order.
style : ExecutionStyle
The execution style for the order.
Returns
-------
order_id : str
The unique identifier for this order.
Notes
-----
See :func:`zipline.api.order` for more information about
``limit_price``, ``stop_price``, and ``style``
See Also
--------
:class:`zipline.finance.execution.ExecutionStyle`
:func:`zipline.api.order`
:func:`zipline.api.order_value`
"""
if not self._can_order_asset(asset):
return None
amount = self._calculate_order_percent_amount(asset, percent)
return self.order(asset, amount,
limit_price=limit_price,
stop_price=stop_price,
style=style)
def _calculate_order_percent_amount(self, asset, percent):
value = self.portfolio.portfolio_value * percent
return self._calculate_order_value_amount(asset, value)
@api_method
@disallowed_in_before_trading_start(OrderInBeforeTradingStart())
def order_target(self,
asset,
target,
limit_price=None,
stop_price=None,
style=None):
"""Place an order to adjust a position to a target number of shares. If
the position doesn't already exist, this is equivalent to placing a new
order. If the position does exist, this is equivalent to placing an
order for the difference between the target number of shares and the
current number of shares.
Parameters
----------
asset : Asset
The asset that this order is for.
target : int
The desired number of shares of ``asset``.
limit_price : float, optional
The limit price for the order.
stop_price : float, optional
The stop price for the order.
style : ExecutionStyle
The execution style for the order.
Returns
-------
order_id : str
The unique identifier for this order.
Notes
-----
``order_target`` does not take into account any open orders. For
example:
.. code-block:: python
order_target(sid(0), 10)
order_target(sid(0), 10)
This code will result in 20 shares of ``sid(0)`` because the first
call to ``order_target`` will not have been filled when the second
``order_target`` call is made.
See :func:`zipline.api.order` for more information about
``limit_price``, ``stop_price``, and ``style``
See Also
--------
:class:`zipline.finance.execution.ExecutionStyle`
:func:`zipline.api.order`
:func:`zipline.api.order_target_percent`
:func:`zipline.api.order_target_value`
"""
if not self._can_order_asset(asset):
return None
amount = self._calculate_order_target_amount(asset, target)
return self.order(asset, amount,
limit_price=limit_price,
stop_price=stop_price,
style=style)
def _calculate_order_target_amount(self, asset, target):
if asset in self.portfolio.positions:
current_position = self.portfolio.positions[asset].amount
target -= current_position
return target
@api_method
@disallowed_in_before_trading_start(OrderInBeforeTradingStart())
def order_target_value(self,
asset,
target,
limit_price=None,
stop_price=None,
style=None):
"""Place an order to adjust a position to a target value. If
the position doesn't already exist, this is equivalent to placing a new
order. If the position does exist, this is equivalent to placing an
order for the difference between the target value and the
current value.
If the Asset being ordered is a Future, the 'target value' calculated
is actually the target exposure, as Futures have no 'value'.
Parameters
----------
asset : Asset
The asset that this order is for.
target : float
The desired total value of ``asset``.
limit_price : float, optional
The limit price for the order.
stop_price : float, optional
The stop price for the order.
style : ExecutionStyle
The execution style for the order.
Returns
-------
order_id : str
The unique identifier for this order.
Notes
-----
``order_target_value`` does not take into account any open orders. For
example:
.. code-block:: python
order_target_value(sid(0), 10)
order_target_value(sid(0), 10)
This code will result in 20 dollars of ``sid(0)`` because the first
call to ``order_target_value`` will not have been filled when the
second ``order_target_value`` call is made.
See :func:`zipline.api.order` for more information about
``limit_price``, ``stop_price``, and ``style``
See Also
--------
:class:`zipline.finance.execution.ExecutionStyle`
:func:`zipline.api.order`
:func:`zipline.api.order_target`
:func:`zipline.api.order_target_percent`
"""
if not self._can_order_asset(asset):
return None
target_amount = self._calculate_order_value_amount(asset, target)
amount = self._calculate_order_target_amount(asset, target_amount)
return self.order(asset, amount,
limit_price=limit_price,
stop_price=stop_price,
style=style)
@api_method
@disallowed_in_before_trading_start(OrderInBeforeTradingStart())
def order_target_percent(self, asset, target,
limit_price=None, stop_price=None, style=None):
"""Place an order to adjust a position to a target percent of the
current portfolio value. If the position doesn't already exist, this is
equivalent to placing a new order. If the position does exist, this is
equivalent to placing an order for the difference between the target
percent and the current percent.
Parameters
----------
asset : Asset
The asset that this order is for.
target : float
The desired percentage of the portfolio value to allocate to
``asset``. This is specified as a decimal, for example:
0.50 means 50%.
limit_price : float, optional
The limit price for the order.
stop_price : float, optional
The stop price for the order.
style : ExecutionStyle
The execution style for the order.
Returns
-------
order_id : str
The unique identifier for this order.
Notes
-----
        ``order_target_percent`` does not take into account any open orders.
        For example:
.. code-block:: python
            order_target_percent(sid(0), 0.1)
            order_target_percent(sid(0), 0.1)
This code will result in 20% of the portfolio being allocated to sid(0)
because the first call to ``order_target_percent`` will not have been
filled when the second ``order_target_percent`` call is made.
See :func:`zipline.api.order` for more information about
``limit_price``, ``stop_price``, and ``style``
See Also
--------
:class:`zipline.finance.execution.ExecutionStyle`
:func:`zipline.api.order`
:func:`zipline.api.order_target`
:func:`zipline.api.order_target_value`
"""
if not self._can_order_asset(asset):
return None
amount = self._calculate_order_target_percent_amount(asset, target)
return self.order(asset, amount,
limit_price=limit_price,
stop_price=stop_price,
style=style)
def _calculate_order_target_percent_amount(self, asset, target):
target_amount = self._calculate_order_percent_amount(asset, target)
return self._calculate_order_target_amount(asset, target_amount)
@api_method
@expect_types(share_counts=pd.Series)
@expect_dtypes(share_counts=int64_dtype)
def batch_market_order(self, share_counts):
"""Place a batch market order for multiple assets.
Parameters
----------
share_counts : pd.Series[Asset -> int]
Map from asset to number of shares to order for that asset.
Returns
-------
order_ids : pd.Index[str]
Index of ids for newly-created orders.
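        Examples
        --------
        .. code-block:: python

            # Illustrative; tickers are hypothetical and the series must
            # hold int64 share counts keyed by Asset.
            batch_market_order(pd.Series({
                symbol('XYZ'): 100,   # buy 100 shares
                symbol('ABC'): -50,   # sell 50 shares
            }))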
"""
style = MarketOrder()
order_args = [
(asset, amount, style)
for (asset, amount) in iteritems(share_counts)
if amount
]
return self.blotter.batch_order(order_args)
@error_keywords(sid='Keyword argument `sid` is no longer supported for '
'get_open_orders. Use `asset` instead.')
@api_method
def get_open_orders(self, asset=None):
"""Retrieve all of the current open orders.
Parameters
----------
asset : Asset
If passed and not None, return only the open orders for the given
asset instead of all open orders.
Returns
-------
open_orders : dict[list[Order]] or list[Order]
If no asset is passed this will return a dict mapping Assets
to a list containing all the open orders for the asset.
If an asset is passed then this will return a list of the open
orders for this asset.
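        Examples
        --------
        .. code-block:: python

            # Illustrative: cancel every open order for a single asset.
            for order in get_open_orders(asset):
                cancel_order(order)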
"""
if asset is None:
return {
key: [order.to_api_obj() for order in orders]
for key, orders in iteritems(self.blotter.open_orders)
if orders
}
if asset in self.blotter.open_orders:
orders = self.blotter.open_orders[asset]
return [order.to_api_obj() for order in orders]
return []
@api_method
def get_order(self, order_id):
"""Lookup an order based on the order id returned from one of the
order functions.
Parameters
----------
order_id : str
The unique identifier for the order.
Returns
-------
order : Order
The order object.
"""
if order_id in self.blotter.orders:
return self.blotter.orders[order_id].to_api_obj()
@api_method
def cancel_order(self, order_param):
"""Cancel an open order.
Parameters
----------
order_param : str or Order
The order_id or order object to cancel.
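Notes
-----
Either an order id or an Order object may be passed; a sketch
canceling everything open for a single asset:
.. code-block:: python
    for order in get_open_orders(asset):
        cancel_order(order)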
"""
order_id = order_param
if isinstance(order_param, zipline.protocol.Order):
order_id = order_param.id
self.blotter.cancel(order_id)
@api_method
@require_initialized(HistoryInInitialize())
def history(self, bar_count, frequency, field, ffill=True):
"""DEPRECATED: use ``data.history`` instead.
"""
warnings.warn(
"The `history` method is deprecated. Use `data.history` instead.",
category=ZiplineDeprecationWarning,
stacklevel=4
)
return self.get_history_window(
bar_count,
frequency,
self._calculate_universe(),
field,
ffill
)
def get_history_window(self, bar_count, frequency, assets, field, ffill):
if not self._in_before_trading_start:
return self.data_portal.get_history_window(
assets,
self.datetime,
bar_count,
frequency,
field,
self.data_frequency,
ffill,
)
else:
# If we are in before_trading_start, we need to get the window
# as of the previous market minute
adjusted_dt = \
self.trading_calendar.previous_minute(
self.datetime
)
window = self.data_portal.get_history_window(
assets,
adjusted_dt,
bar_count,
frequency,
field,
self.data_frequency,
ffill,
)
# Get the adjustments between the last market minute and the
# current before_trading_start dt and apply to the window
adjs = self.data_portal.get_adjustments(
assets,
field,
adjusted_dt,
self.datetime
)
window = window * adjs
return window
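# A sketch of the adjustment step above, assuming `get_adjustments`
# returns one multiplicative ratio per asset (e.g. 0.5 for a 2:1 split
# that lands between `adjusted_dt` and the current
# before_trading_start dt):
#
#   window          # prices as of the last market minute
#   adjs            # e.g. [0.5], a hypothetical split ratio
#   window * adjs   # the same history restated in post-split terms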
####################
# Account Controls #
####################
def register_account_control(self, control):
"""
Register a new AccountControl to be checked on each bar.
"""
if self.initialized:
raise RegisterAccountControlPostInit()
self.account_controls.append(control)
def validate_account_controls(self):
for control in self.account_controls:
control.validate(self.portfolio,
self.account,
self.get_datetime(),
self.trading_client.current_data)
@api_method
def set_max_leverage(self, max_leverage):
"""Set a limit on the maximum leverage of the algorithm.
Parameters
----------
max_leverage : float
The maximum leverage for the algorithm. If not provided there will
be no maximum.
"""
control = MaxLeverage(max_leverage)
self.register_account_control(control)
@api_method
def set_min_leverage(self, min_leverage, grace_period):
"""Set a limit on the minimum leverage of the algorithm.
Parameters
----------
min_leverage : float
The minimum leverage for the algorithm.
grace_period : pd.Timedelta
The offset from the start date used to enforce a minimum leverage.
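Notes
-----
A sketch requiring at least 1.1x leverage starting one week into the
simulation:
.. code-block:: python
    import pandas as pd
    set_min_leverage(1.1, grace_period=pd.Timedelta('7 days'))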
"""
deadline = self.sim_params.start_session + grace_period
control = MinLeverage(min_leverage, deadline)
self.register_account_control(control)
####################
# Trading Controls #
####################
def register_trading_control(self, control):
"""
Register a new TradingControl to be checked prior to order calls.
"""
if self.initialized:
raise RegisterTradingControlPostInit()
self.trading_controls.append(control)
@api_method
def set_max_position_size(self,
asset=None,
max_shares=None,
max_notional=None,
on_error='fail'):
"""Set a limit on the number of shares and/or dollar value held for the
given sid. Limits are treated as absolute values and are enforced at
the time that the algo attempts to place an order for sid. This means
that it's possible to end up with more than the max number of shares
due to splits/dividends, and more than the max notional due to price
improvement.
If an algorithm attempts to place an order that would result in
increasing the absolute value of shares/dollar value exceeding one of
these limits, raise a TradingControlException.
Parameters
----------
asset : Asset, optional
If provided, this sets the guard only on positions in the given
asset.
max_shares : int, optional
The maximum number of shares to hold for an asset.
max_notional : float, optional
The maximum value to hold for an asset.
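Notes
-----
A sketch capping a single name at 1000 shares or $100,000 of notional,
whichever the next order would breach first (``symbol`` lookup assumed
available, e.g. inside ``initialize``):
.. code-block:: python
    set_max_position_size(asset=symbol('AAPL'),
                          max_shares=1000,
                          max_notional=100000.0)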
"""
control = MaxPositionSize(asset=asset,
max_shares=max_shares,
max_notional=max_notional,
on_error=on_error)
self.register_trading_control(control)
@api_method
def set_max_order_size(self,
asset=None,
max_shares=None,
max_notional=None,
on_error='fail'):
"""Set a limit on the number of shares and/or dollar value of any single
order placed for sid. Limits are treated as absolute values and are
enforced at the time that the algo attempts to place an order for sid.
If an algorithm attempts to place an order that would result in
exceeding one of these limits, raise a TradingControlException.
Parameters
----------
asset : Asset, optional
If provided, this sets the guard only on positions in the given
asset.
max_shares : int, optional
The maximum number of shares that can be ordered at one time.
max_notional : float, optional
The maximum value that can be ordered at one time.
"""
control = MaxOrderSize(asset=asset,
max_shares=max_shares,
max_notional=max_notional,
on_error=on_error)
self.register_trading_control(control)
@api_method
def set_max_order_count(self, max_count, on_error='fail'):
"""Set a limit on the number of orders that can be placed in a single
day.
Parameters
----------
max_count : int
The maximum number of orders that can be placed on any single day.
"""
control = MaxOrderCount(on_error, max_count)
self.register_trading_control(control)
@api_method
def set_do_not_order_list(self, restricted_list, on_error='fail'):
"""Set a restriction on which assets can be ordered.
Parameters
----------
restricted_list : container[Asset], SecurityList
The assets that cannot be ordered.
"""
if isinstance(restricted_list, SecurityList):
warnings.warn(
"`set_do_not_order_list(security_lists.leveraged_etf_list)` "
"is deprecated. Use `set_asset_restrictions("
"security_lists.restrict_leveraged_etfs)` instead.",
category=ZiplineDeprecationWarning,
stacklevel=2
)
restrictions = SecurityListRestrictions(restricted_list)
else:
warnings.warn(
"`set_do_not_order_list(container_of_assets)` is deprecated. "
"Create a zipline.finance.asset_restrictions."
"StaticRestrictions object with a container of assets and use "
"`set_asset_restrictions(StaticRestrictions("
"container_of_assets))` instead.",
category=ZiplineDeprecationWarning,
stacklevel=2
)
restrictions = StaticRestrictions(restricted_list)
self.set_asset_restrictions(restrictions, on_error)
@api_method
@expect_types(
restrictions=Restrictions,
on_error=str,
)
def set_asset_restrictions(self, restrictions, on_error='fail'):
"""Set a restriction on which assets can be ordered.
Parameters
----------
restrictions : zipline.finance.asset_restrictions.Restrictions
An object providing information about restricted assets.
See Also
--------
zipline.finance.asset_restrictions.Restrictions
"""
control = RestrictedListOrder(on_error, restrictions)
self.register_trading_control(control)
self.restrictions |= restrictions
@api_method
def set_long_only(self, on_error='fail'):
"""Set a rule specifying that this algorithm cannot take short
positions.
"""
self.register_trading_control(LongOnly(on_error))
##############
# Pipeline API
##############
@api_method
@expect_types(
pipeline=Pipeline,
name=string_types,
chunks=(int, Iterable, type(None)),
)
def attach_pipeline(self, pipeline, name, chunks=None, eager=True):
"""Register a pipeline to be computed at the start of each day.
Parameters
----------
pipeline : Pipeline
The pipeline to have computed.
name : str
The name of the pipeline.
chunks : int or iterator, optional
The number of days to compute pipeline results for. Increasing
this number will make it take longer to get the first results, but
may improve the total runtime of the simulation. If an iterator
is passed, we will run in chunks based on values of the iterator.
eager : bool, optional
Whether or not to compute this pipeline prior to
``before_trading_start``. Default is True.
Returns
-------
pipeline : Pipeline
Returns the pipeline that was attached unchanged.
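Notes
-----
A minimal pairing sketch; the pipeline name 'my_pipe' is illustrative:
.. code-block:: python
    def initialize(context):
        attach_pipeline(Pipeline(), 'my_pipe')
    def before_trading_start(context, data):
        context.pipe_results = pipeline_output('my_pipe')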
See Also
--------
:func:`zipline.api.pipeline_output`
"""
if chunks is None:
# Make the first chunk smaller to get more immediate results:
# (one week, then every half year)
chunks = chain([5], repeat(126))
elif isinstance(chunks, int):
chunks = repeat(chunks)
if name in self._pipelines:
raise DuplicatePipelineName(name=name)
self._pipelines[name] = AttachedPipeline(pipeline, iter(chunks), eager)
log.info('Pipeline {} attached'.format(name))
# Return the pipeline to allow expressions like
# p = attach_pipeline(Pipeline(), 'name')
return pipeline
@api_method
@require_initialized(PipelineOutputDuringInitialize())
def pipeline_output(self, name):
"""Get the results of the pipeline that was attached with the name:
``name``.
Parameters
----------
name : str
Name of the pipeline for which results are requested.
Returns
-------
results : pd.DataFrame
DataFrame containing the results of the requested pipeline for
the current simulation date.
Raises
------
NoSuchPipeline
Raised when no pipeline with the name `name` has been registered.
See Also
--------
:func:`zipline.api.attach_pipeline`
:meth:`zipline.pipeline.engine.PipelineEngine.run_pipeline`
"""
try:
pipe, chunks, _ = self._pipelines[name]
except KeyError:
raise NoSuchPipeline(
name=name,
valid=list(self._pipelines.keys()),
)
return self._pipeline_output(pipe, chunks, name)
def _pipeline_output(self, pipeline, chunks, name):
"""
Internal implementation of `pipeline_output`.
"""
today = normalize_date(self.get_datetime())
try:
data = self._pipeline_cache.get(name, today)
except KeyError:
# Calculate the next block.
data, valid_until = self.run_pipeline(
pipeline, today, next(chunks),
)
self._pipeline_cache.set(name, data, valid_until)
# Now that we have a cached result, try to return the data for today.
try:
return data.loc[today]
except KeyError:
# This happens if no assets passed the pipeline screen on a given
# day.
return pd.DataFrame(index=[], columns=data.columns)
def run_pipeline(self, pipeline, start_session, chunksize):
"""
Compute `pipeline`, providing values for at least `start_session`.
Produces a DataFrame containing data for days between `start_session`
and `end_session`, where `end_session` is defined by:
`end_session = min(start_session + chunksize trading days,
simulation_end)`
Returns
-------
(data, valid_until) : tuple (pd.DataFrame, pd.Timestamp)
See Also
--------
PipelineEngine.run_pipeline
"""
sessions = self.trading_calendar.all_sessions
# Load data starting from the requested session...
start_date_loc = sessions.get_loc(start_session)
# ...continuing until either the simulation end, or until chunksize
# sessions of data have been loaded.
sim_end_session = self.sim_params.end_session
end_loc = min(
start_date_loc + chunksize,
sessions.get_loc(sim_end_session)
)
end_session = sessions[end_loc]
return \
self.engine.run_pipeline(pipeline, start_session, end_session), \
end_session
##################
# End Pipeline API
##################
@classmethod
def all_api_methods(cls):
"""
Return a list of all the TradingAlgorithm API methods.
"""
return [
fn for fn in itervalues(vars(cls))
if getattr(fn, 'is_api_method', False)
] | zipline-live2-vk | /zipline-live2-vk-1.3.0.7.0.18.tar.gz/zipline-live2-vk-1.3.0.7.0.18/zipline/algorithm.py | algorithm.py |
from contextlib2 import ExitStack
from copy import copy
from logbook import Logger, Processor
from zipline.finance.order import ORDER_STATUS
from zipline.protocol import BarData
from zipline.utils.api_support import ZiplineAPI
from six import viewkeys
from zipline.gens.sim_engine import (
BAR,
SESSION_START,
SESSION_END,
MINUTE_END,
BEFORE_TRADING_START_BAR,
MARKETS_CLOSED,
)
log = Logger('Trade Simulation')
class AlgorithmSimulator(object):
EMISSION_TO_PERF_KEY_MAP = {
'minute': 'minute_perf',
'daily': 'daily_perf'
}
def __init__(self, algo, sim_params, data_portal, clock, benchmark_source,
restrictions, universe_func):
# ==============
# Simulation
# Param Setup
# ==============
self.sim_params = sim_params
self.data_portal = data_portal
self.restrictions = restrictions
# ==============
# Algo Setup
# ==============
self.algo = algo
# ==============
# Snapshot Setup
# ==============
# This object is the way that user algorithms interact with OHLCV data,
# fetcher data, and some API methods like `data.can_trade`.
self.current_data = self._create_bar_data(universe_func)
# We don't have a datetime for the current snapshot until we
# receive a message.
self.simulation_dt = None
self.clock = clock
self.benchmark_source = benchmark_source
# =============
# Logging Setup
# =============
# Processor function for injecting the algo_dt into
# user prints/logs.
def inject_algo_dt(record):
if 'algo_dt' not in record.extra:
record.extra['algo_dt'] = self.simulation_dt
self.processor = Processor(inject_algo_dt)
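# A sketch of what this enables, assuming a logbook handler whose
# format string references the injected field:
#
#   from logbook import StderrHandler
#   handler = StderrHandler(
#       format_string='{record.extra[algo_dt]}: {record.message}')
#   with handler.applicationbound(), self.processor:
#       log.info('handling bar')  # prefixed with the simulation dt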
def get_simulation_dt(self):
return self.simulation_dt
def _create_bar_data(self, universe_func):
return BarData(
data_portal=self.data_portal,
simulation_dt_func=self.get_simulation_dt,
data_frequency=self.sim_params.data_frequency,
trading_calendar=self.algo.trading_calendar,
restrictions=self.restrictions,
universe_func=universe_func
)
def transform(self):
"""
Main generator work loop.
"""
algo = self.algo
metrics_tracker = algo.metrics_tracker
emission_rate = metrics_tracker.emission_rate
def every_bar(dt_to_use, current_data=self.current_data,
handle_data=algo.event_manager.handle_data):
for capital_change in calculate_minute_capital_changes(dt_to_use):
yield capital_change
self.simulation_dt = dt_to_use
# called every tick (minute or day).
algo.on_dt_changed(dt_to_use)
blotter = algo.blotter
# handle any transactions and commissions coming out of new orders
# placed in the last bar
new_transactions, new_commissions, closed_orders = \
blotter.get_transactions(current_data)
blotter.prune_orders(closed_orders)
for transaction in new_transactions:
metrics_tracker.process_transaction(transaction)
# since this order was modified, record it
order = blotter.orders[transaction.order_id]
metrics_tracker.process_order(order)
for commission in new_commissions:
metrics_tracker.process_commission(commission)
handle_data(algo, current_data, dt_to_use)
# grab any new orders from the blotter, then clear the list.
# this includes cancelled orders.
new_orders = blotter.new_orders
blotter.new_orders = []
# if we have any new orders, record them so that we know
# in what perf period they were placed.
for new_order in new_orders:
metrics_tracker.process_order(new_order)
def once_a_day(midnight_dt, current_data=self.current_data,
data_portal=self.data_portal):
# process any capital changes that came overnight
for capital_change in algo.calculate_capital_changes(
midnight_dt, emission_rate=emission_rate,
is_interday=True):
yield capital_change
# set all the timestamps
self.simulation_dt = midnight_dt
algo.on_dt_changed(midnight_dt)
metrics_tracker.handle_market_open(
midnight_dt,
algo.data_portal,
)
# handle any splits that impact any positions or any open orders.
assets_we_care_about = (
viewkeys(metrics_tracker.positions) |
viewkeys(algo.blotter.open_orders)
)
if assets_we_care_about:
splits = data_portal.get_splits(assets_we_care_about,
midnight_dt)
if splits:
algo.blotter.process_splits(splits)
metrics_tracker.handle_splits(splits)
def on_exit():
# Remove references to algo, data portal, et al to break cycles
# and ensure deterministic cleanup of these objects when the
# simulation finishes.
self.algo = None
self.benchmark_source = self.current_data = self.data_portal = None
with ExitStack() as stack:
stack.callback(on_exit)
stack.enter_context(self.processor)
stack.enter_context(ZiplineAPI(self.algo))
if algo.data_frequency == 'minute':
def execute_order_cancellation_policy():
algo.blotter.execute_cancel_policy(SESSION_END)
def calculate_minute_capital_changes(dt):
# process any capital changes that came between the last
# and current minutes
return algo.calculate_capital_changes(
dt, emission_rate=emission_rate, is_interday=False)
else:
def execute_order_cancellation_policy():
pass
def calculate_minute_capital_changes(dt):
return []
for dt, action in self.clock:
if action == BAR:
self.algo.markets_open = True
for capital_change_packet in every_bar(dt):
yield capital_change_packet
elif action == SESSION_START:
for capital_change_packet in once_a_day(dt):
yield capital_change_packet
elif action == SESSION_END:
# End of the session.
positions = metrics_tracker.positions
position_assets = algo.asset_finder.retrieve_all(positions)
self._cleanup_expired_assets(dt, position_assets)
execute_order_cancellation_policy()
algo.validate_account_controls()
yield self._get_daily_message(dt, algo, metrics_tracker)
elif action == BEFORE_TRADING_START_BAR:
self.simulation_dt = dt
algo.on_dt_changed(dt)
algo.before_trading_start(self.current_data)
elif action == MINUTE_END:
minute_msg = self._get_minute_message(
dt,
algo,
metrics_tracker,
)
yield minute_msg
elif action == MARKETS_CLOSED:
self.algo.markets_open = False
for capital_change_packet in every_bar(dt):
    yield capital_change_packet
risk_message = metrics_tracker.handle_simulation_end(
self.data_portal,
)
yield risk_message
def _cleanup_expired_assets(self, dt, position_assets):
"""
Clear out any assets that have expired before starting a new sim day.
Performs two functions:
1. Finds all assets for which we have open orders and clears any
orders whose assets are on or after their auto_close_date.
2. Finds all assets for which we have positions and generates
close_position events for any assets that have reached their
auto_close_date.
"""
algo = self.algo
def past_auto_close_date(asset):
acd = asset.auto_close_date
return acd is not None and acd <= dt
# Remove positions in any sids that have reached their auto_close date.
assets_to_clear = \
[asset for asset in position_assets if past_auto_close_date(asset)]
metrics_tracker = algo.metrics_tracker
data_portal = self.data_portal
for asset in assets_to_clear:
metrics_tracker.process_close_position(asset, dt, data_portal)
# Remove open orders for any sids that have reached their auto close
# date. These orders get processed immediately because otherwise they
# would not be processed until the first bar of the next day.
blotter = algo.blotter
assets_to_cancel = [
asset for asset in blotter.open_orders
if past_auto_close_date(asset)
]
for asset in assets_to_cancel:
blotter.cancel_all_orders_for_asset(asset)
# Make a copy here so that we are not modifying the list that is being
# iterated over.
for order in copy(blotter.new_orders):
if order.status == ORDER_STATUS.CANCELLED:
metrics_tracker.process_order(order)
blotter.new_orders.remove(order)
def _get_daily_message(self, dt, algo, metrics_tracker):
"""
Get a perf message for the given datetime.
"""
perf_message = metrics_tracker.handle_market_close(
dt,
self.data_portal,
)
perf_message['daily_perf']['recorded_vars'] = algo.recorded_vars
return perf_message
def _get_minute_message(self, dt, algo, metrics_tracker):
"""
Get a perf message for the given datetime.
"""
rvars = algo.recorded_vars
minute_message = metrics_tracker.handle_minute_close(
dt,
self.data_portal,
)
minute_message['minute_perf']['recorded_vars'] = rvars
return minute_message | zipline-live2-vk | /zipline-live2-vk-1.3.0.7.0.18.tar.gz/zipline-live2-vk-1.3.0.7.0.18/zipline/gens/tradesimulation.py | tradesimulation.py |
from time import sleep
from logbook import Logger
import pandas as pd
from zipline.gens.sim_engine import (
BAR,
SESSION_START,
SESSION_END,
MINUTE_END,
BEFORE_TRADING_START_BAR,
MARKETS_CLOSED
)
log = Logger('Realtime Clock')
class RealtimeClock(object):
"""
Realtime clock for live trading.
This class is a drop-in replacement for
:class:`zipline.gens.sim_engine.MinuteSimulationClock`.
The key difference between the two is that the RealtimeClock's event
emission is synchronized to the (broker's) wall time clock, while
MinuteSimulationClock yields a new event on every iteration (regardless of
wall clock).
The ``time_skew`` parameter represents the time difference between
the Broker and the live trading machine's clock.
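A construction sketch; the session/open/close/bts inputs ordinarily
come from the trading calendar, and the names here are illustrative:
.. code-block:: python
    import pandas as pd
    clock = RealtimeClock(sessions, opens, closes, bts_minutes,
                          minute_emission=True,
                          time_skew=pd.Timedelta('0s'))
    for dt, event in clock:
        ...  # dispatch on BAR, SESSION_START, SESSION_END, etc.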
"""
def __init__(self,
sessions,
execution_opens,
execution_closes,
before_trading_start_minutes,
minute_emission,
time_skew=pd.Timedelta("0s"),
is_broker_alive=None,
execution_id=None,
stop_execution_callback=None):
today = pd.to_datetime('now', utc=True).date()
beginning_of_today = pd.to_datetime(today, utc=True)
self.sessions = sessions[(beginning_of_today <= sessions)]
self.execution_opens = execution_opens[(beginning_of_today <= execution_opens)]
self.execution_closes = execution_closes[(beginning_of_today <= execution_closes)]
self.before_trading_start_minutes = before_trading_start_minutes[
(beginning_of_today <= before_trading_start_minutes)]
self.minute_emission = minute_emission
self.time_skew = time_skew
self.is_broker_alive = is_broker_alive or (lambda: True)
self._last_emit = None
self._before_trading_start_bar_yielded = False
self._execution_id = execution_id
self._stop_execution_callback = stop_execution_callback
def __iter__(self):
# yield from self.work_when_out_of_trading_hours()
# return
if not len(self.sessions):
return
for index, session in enumerate(self.sessions):
self._before_trading_start_bar_yielded = False
yield session, SESSION_START
if self._stop_execution_callback:
if self._stop_execution_callback(self._execution_id):
break
while self.is_broker_alive():
if self._stop_execution_callback: # put it here too, to break inner loop as well
if self._stop_execution_callback(self._execution_id):
break
current_time = pd.to_datetime('now', utc=True)
server_time = (current_time + self.time_skew).floor('1 min')
if (server_time >= self.before_trading_start_minutes[index] and
not self._before_trading_start_bar_yielded):
self._last_emit = server_time
self._before_trading_start_bar_yielded = True
yield server_time, BEFORE_TRADING_START_BAR
elif (server_time < self.execution_opens[index].tz_localize('UTC') and index == 0) or \
(self.execution_closes[index - 1].tz_localize('UTC') <= server_time <
self.execution_opens[index].tz_localize('UTC')):
# sleep anywhere between yesterday's close and today's open
sleep(1)
# self._last_emit = server_time
# yield server_time, MARKETS_CLOSED
if (self._last_emit is None or
server_time - self._last_emit >=
pd.Timedelta('1 minute')):
self._last_emit = server_time
yield server_time, MARKETS_CLOSED
# if self.minute_emission:
# yield server_time, MINUTE_END
else:
sleep(1)
elif (self.execution_opens[index].tz_localize('UTC') <= server_time <
self.execution_closes[index].tz_localize('UTC')):
if (self._last_emit is None or
server_time - self._last_emit >=
pd.Timedelta('1 minute')):
self._last_emit = server_time
yield server_time, BAR
if self.minute_emission:
yield server_time, MINUTE_END
else:
sleep(1)
elif server_time == self.execution_closes[index].tz_localize('UTC'):
self._last_emit = server_time
yield server_time, BAR
if self.minute_emission:
yield server_time, MINUTE_END
yield server_time, SESSION_END
break
elif server_time > self.execution_closes[index].tz_localize('UTC'):
break
else:
# We should never end up in this branch
raise RuntimeError("Invalid state in RealtimeClock")
def work_when_out_of_trading_hours(self):
"""
a debugging method to work while outside trading hours, so we are still able to make the engine work
:return:
"""
from datetime import timedelta
num_days = 5
from trading_calendars import get_calendar
self.sessions = get_calendar("NYSE").sessions_in_range(
str(pd.to_datetime('now', utc=True).date() - timedelta(days=num_days * 2)),
str(pd.to_datetime('now', utc=True).date() + timedelta(days=num_days * 2))
)
# for day in range(num_days, 0, -1):
for day in range(0, 1):
# current_time = pd.to_datetime('now', utc=True)
current_time = pd.to_datetime('2018/08/25', utc=True)
# server_time = (current_time + self.time_skew).floor('1 min') - timedelta(days=day)
server_time = (current_time + self.time_skew).floor('1 min') + timedelta(days=day)
# yield self.sessions[-1 - day], SESSION_START
yield self.sessions[day], SESSION_START
yield server_time, BEFORE_TRADING_START_BAR
should_end_day = True
counter = 0
num_minutes = 6 * 60
minute_list = []
for i in range(num_minutes + 1):
minute_list.append(pd.to_datetime("13:31", utc=True) + timedelta(minutes=i))
while self.is_broker_alive():
# current_time = pd.to_datetime('now', utc=True)
# server_time = (current_time + self.time_skew).floor('1 min')
# server_time = minute_list[counter] - timedelta(days=day)
server_time = minute_list[counter] + timedelta(days=day)
if counter >= num_minutes and should_end_day:
if self.minute_emission:
yield server_time, MINUTE_END
yield server_time, SESSION_END
break
if self._stop_execution_callback:
if self._stop_execution_callback(self._execution_id):
break
if (self._last_emit is None or
server_time - self._last_emit >=
pd.Timedelta('1 minute')):
self._last_emit = server_time
yield server_time, BAR
counter += 1
if self.minute_emission:
yield server_time, MINUTE_END
sleep(0.5) | zipline-live2-vk | /zipline-live2-vk-1.3.0.7.0.18.tar.gz/zipline-live2-vk-1.3.0.7.0.18/zipline/gens/realtimeclock.py | realtimeclock.py |
import alpaca_trade_api as tradeapi
from zipline.gens.brokers.broker import Broker
import zipline.protocol as zp
from zipline.finance.order import (Order as ZPOrder,
ORDER_STATUS as ZP_ORDER_STATUS)
from zipline.finance.execution import (MarketOrder,
LimitOrder,
StopOrder,
StopLimitOrder)
from zipline.finance.transaction import Transaction
from zipline.api import symbol as symbol_lookup
from zipline.errors import SymbolNotFound
import pandas as pd
import numpy as np
import uuid
from logbook import Logger
import sys
if sys.version_info > (3,):
long = int
log = Logger('Alpaca Broker')
NY = 'America/New_York'
class ALPACABroker(Broker):
'''
Broker class for Alpaca.
The uri parameter is not used. Instead, the API key must be
set via environment variables (APCA_API_KEY_ID and APCA_API_SECRET_KEY).
Orders are identified by the UUID (v4) generated here and
associated in the broker side using client_order_id attribute.
Currently this class makes use of the REST API only, but websocket
streaming could be used as well.
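Credentials sketch (values are placeholders; the alpaca_trade_api
client reads them from the environment):
.. code-block:: python
    import os
    os.environ['APCA_API_KEY_ID'] = '<key-id>'
    os.environ['APCA_API_SECRET_KEY'] = '<secret-key>'
    broker = ALPACABroker(uri=None)  # uri is accepted but ignored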
'''
def __init__(self, uri):
self._api = tradeapi.REST()
def subscribe_to_market_data(self, asset):
'''Do nothing, to comply with the interface'''
pass
def subscribed_assets(self):
'''Do nothing, to comply with the interface'''
return []
@property
def positions(self):
z_positions = zp.Positions()
positions = self._api.list_positions()
position_map = {}
symbols = []
for pos in positions:
symbol = pos.symbol
try:
z_position = zp.Position(symbol_lookup(symbol))
except SymbolNotFound:
continue
z_position.amount = int(pos.qty)  # qty may arrive as a string
z_position.cost_basis = float(pos.cost_basis)
z_position.last_sale_price = None
z_position.last_sale_date = None
z_positions[symbol_lookup(symbol)] = z_position
symbols.append(symbol)
position_map[symbol] = z_position
quotes = self._api.list_quotes(symbols)
for quote in quotes:
price = quote.last
dt = quote.last_timestamp
z_position = position_map[quote.symbol]
z_position.last_sale_price = float(price)
z_position.last_sale_date = dt
return z_positions
@property
def portfolio(self):
account = self._api.get_account()
z_portfolio = zp.Portfolio()
z_portfolio.cash = float(account.cash)
z_portfolio.positions = self.positions
z_portfolio.positions_value = float(
account.portfolio_value) - float(account.cash)
z_portfolio.portfolio_value = float(account.portfolio_value)
return z_portfolio
@property
def account(self):
account = self._api.get_account()
z_account = zp.Account()
z_account.buying_power = float(account.cash)
z_account.total_position_value = float(
account.portfolio_value) - float(account.cash)
z_account.net_liquidation = account.portfolio_value
return z_account
@property
def time_skew(self):
return pd.Timedelta('0 sec') # TODO: use clock API
def is_alive(self):
try:
self._api.get_account()
return True
except BaseException:
return False
def _order2zp(self, order):
zp_order = ZPOrder(
id=order.client_order_id,
asset=symbol_lookup(order.symbol),
amount=int(order.qty) if order.side == 'buy' else -int(order.qty),
stop=float(order.stop_price) if order.stop_price else None,
limit=float(order.limit_price) if order.limit_price else None,
dt=order.submitted_at,
commission=0,
)
zp_order.status = ZP_ORDER_STATUS.OPEN
if order.canceled_at:
zp_order.status = ZP_ORDER_STATUS.CANCELLED
if order.failed_at:
zp_order.status = ZP_ORDER_STATUS.REJECTED
if order.filled_at:
zp_order.status = ZP_ORDER_STATUS.FILLED
zp_order.filled = int(order.filled_qty)
return zp_order
def _new_order_id(self):
return uuid.uuid4().hex
def order(self, asset, amount, style):
symbol = asset.symbol
qty = amount if amount > 0 else -amount
side = 'buy' if amount > 0 else 'sell'
order_type = 'market'
if isinstance(style, MarketOrder):
order_type = 'market'
elif isinstance(style, LimitOrder):
order_type = 'limit'
elif isinstance(style, StopOrder):
order_type = 'stop'
elif isinstance(style, StopLimitOrder):
order_type = 'stop_limit'
limit_price = style.get_limit_price(side == 'buy') or None
stop_price = style.get_stop_price(side == 'buy') or None
zp_order_id = self._new_order_id()
dt = pd.to_datetime('now', utc=True)
zp_order = ZPOrder(
dt=dt,
asset=asset,
amount=amount,
stop=stop_price,
limit=limit_price,
id=zp_order_id,
)
order = self._api.submit_order(
symbol=symbol,
qty=qty,
side=side,
type=order_type,
time_in_force='day',
limit_price=limit_price,
stop_price=stop_price,
client_order_id=zp_order.id,
)
zp_order = self._order2zp(order)
return zp_order
@property
def orders(self):
return {
o.client_order_id: self._order2zp(o)
for o in self._api.list_orders('all')
}
@property
def transactions(self):
orders = self._api.list_orders(status='closed')
results = {}
for order in orders:
if order.filled_at is None:
continue
tx = Transaction(
asset=symbol_lookup(order.symbol),
amount=int(order.filled_qty),
dt=order.filled_at,
price=float(order.filled_avg_price),
order_id=order.client_order_id)
results[order.client_order_id] = tx
return results
def cancel_order(self, zp_order_id):
try:
order = self._api.get_order_by_client_order_id(zp_order_id)
self._api.cancel_order(order.id)
except Exception as e:
log.error(e)
return
def get_last_traded_dt(self, asset):
quote = self._api.get_quote(asset.symbol)
return pd.Timestamp(quote.last_timestamp)
def get_spot_value(self, assets, field, dt, data_frequency):
assert(field in (
'open', 'high', 'low', 'close', 'volume', 'price', 'last_traded'))
assets_is_scalar = not isinstance(assets, (list, set, tuple))
if assets_is_scalar:
symbols = [assets.symbol]
else:
symbols = [asset.symbol for asset in assets]
if field in ('price', 'last_traded'):
quotes = self._api.list_quotes(symbols)
if assets_is_scalar:
if field == 'price':
if len(quotes) == 0:
return np.nan
return quotes[-1].last
else:
if len(quotes) == 0:
return pd.NaT
return quotes[-1].last_timestamp
else:
return [
quote.last if field == 'price' else quote.last_timestamp
for quote in quotes
]
bars_list = self._api.list_bars(symbols, '1Min', limit=1)
if assets_is_scalar:
if len(bars_list) == 0:
return np.nan
return bars_list[0].bars[-1]._raw[field]
bars_map = {a.symbol: a for a in bars_list}
return [
bars_map[symbol].bars[-1]._raw[field]
for symbol in symbols
]
def get_realtime_bars(self, assets, data_frequency):
# TODO: cache the result. The caller
# (DataPortalLive#get_history_window) makes use of only one
# column at a time.
assets_is_scalar = not isinstance(assets, (list, set, tuple))
is_daily = 'd' in data_frequency # 'daily' or '1d'
if assets_is_scalar:
symbols = [assets.symbol]
else:
symbols = [asset.symbol for asset in assets]
timeframe = '1D' if is_daily else '1Min'
bars_list = self._api.list_bars(symbols, timeframe, limit=500)
bars_map = {a.symbol: a for a in bars_list}
dfs = []
for asset in assets if not assets_is_scalar else [assets]:
symbol = asset.symbol
df = bars_map[symbol].df.copy()
if df.index.tz is None:
df.index = df.index.tz_localize(
'utc').tz_convert('America/New_York')
df.columns = pd.MultiIndex.from_product([[asset, ], df.columns])
dfs.append(df)
return pd.concat(dfs, axis=1) | zipline-live2-vk | /zipline-live2-vk-1.3.0.7.0.18.tar.gz/zipline-live2-vk-1.3.0.7.0.18/zipline/gens/brokers/alpaca_broker.py | alpaca_broker.py |
from logbook import Logger
from collections import namedtuple, defaultdict, OrderedDict
from ib_insync import *
import pandas as pd
import numpy as np
import pytz
from zipline.finance.order import (Order as ZPOrder,
ORDER_STATUS as ZP_ORDER_STATUS)
from zipline.finance.execution import (MarketOrder,
LimitOrder,
StopOrder,
StopLimitOrder)
from zipline.api import symbol as symbol_lookup
from zipline.errors import SymbolNotFound
from six import iteritems, itervalues
from zipline.finance.transaction import Transaction
import zipline.protocol as zp
from zipline.protocol import MutableView
from math import fabs
import sys
log = Logger('IB Broker (ib_insync)')
Position = namedtuple('Position', ['contract', 'position', 'market_price',
'market_value', 'average_cost',
'unrealized_pnl', 'realized_pnl',
'account_name'])
symbol_to_exchange = defaultdict(lambda: 'SMART')
symbol_to_exchange['VIX'] = 'CBOE'
symbol_to_exchange['SPX'] = 'CBOE'
symbol_to_exchange['VIX3M'] = 'CBOE'
symbol_to_exchange['VXST'] = 'CBOE'
symbol_to_exchange['VXMT'] = 'CBOE'
symbol_to_exchange['GVZ'] = 'CBOE'
symbol_to_exchange['GLD'] = 'ARCA'
symbol_to_exchange['GDX'] = 'ARCA'
symbol_to_exchange['GPRO'] = 'SMART/NASDAQ'
symbol_to_exchange['MSFT'] = 'SMART/NASDAQ'
symbol_to_exchange['CSCO'] = 'SMART/NASDAQ'
symbol_to_sec_type = defaultdict(lambda: 'STK')
symbol_to_sec_type['VIX'] = 'IND'
symbol_to_sec_type['VIX3M'] = 'IND'
symbol_to_sec_type['VXST'] = 'IND'
symbol_to_sec_type['VXMT'] = 'IND'
symbol_to_sec_type['GVZ'] = 'IND'
symbol_to_sec_type['SPX'] = 'IND'
wait_step = 0.1
max_wait_cycles = 100
class IBBroker(IB):
def __init__(self, tws_uri, account_id=None, marketDataType=3):
# watch out - market data type is set to 'delayed' by default!
super(self.__class__, self).__init__()
self._tws_uri = tws_uri
self.host, self.port, self.client_id = self._tws_uri.split(':')
self._orders = {}
self._transactions = {}
self._next_ticker_id = 0
self._next_order_id = None
self.symbol_to_ticker_id = {}
self.ticker_id_to_symbol = {}
self.metrics_tracker = None
self.currency = 'USD'
self.time_skew = None
self.account_id = None
self.open_orders = {}
self.order_statuses = {}
self.executions = defaultdict(OrderedDict)
self.commissions = defaultdict(OrderedDict)
self._execution_to_order_id = {}
self._subscribed_assets = []
self.openOrderEvent += self.openOrder
self.orderStatusEvent += self.orderStatus
self.execDetailsEvent += self.execDetails
self.commissionReportEvent += self.commissionReport
self.errorEvent += self.error
try:
self.connect(self.host, self.port, self.client_id)
except Exception as e:
log.error(f"Can't connect to TWS")
return
self.managed_accounts = self.managedAccounts()
log.info("Managed accounts: {}".format(self.managed_accounts))
time = self.reqCurrentTime()
self.time_skew = (pd.to_datetime('now', utc=True) -
time.replace(tzinfo=pytz.utc))
log.info("Local-Broker Time Skew: {}".format(self.time_skew))
self.account_id = (self.client.getAccounts()[0] if account_id is None
else account_id)
self.reqMarketDataType(marketDataType)
def error(self, reqId, errorCode, errorString, contract):
# this is just the error-handler
pass
# log.info(f'{reqId}: {errorCode}: {errorString}' + '' if not contract else f'{contract}')
def execDetails(self, trade, fill):
order_id, exec_id = fill.execution.orderId, fill.execution.execId
self.executions[order_id][exec_id] = dict (req_id = order_id,
contract = fill.contract,
exec_detail = fill.execution)
self._execution_to_order_id[exec_id] = order_id
log.info(
"Order-{order_id} executed @ {exec_time}: "
"{symbol} current: {shares} @ ${price} "
"total: {cum_qty} @ ${avg_price} "
"exec_id: {exec_id} by client-{client_id}".format(
order_id=order_id, exec_id=exec_id,
exec_time=pd.to_datetime(fill.execution.time),
symbol=fill.contract.symbol,
shares=fill.execution.shares,
price=fill.execution.price,
cum_qty=fill.execution.cumQty,
avg_price=fill.execution.avgPrice,
client_id=fill.execution.clientId))
def commissionReport(self, trade, fill, commission_report):
exec_id = commission_report.execId
# we need this check for the case when IB is sending report for the
# order which was placed by another session out of market hours
# in this case current session does not have info on the exec_id
self.execDetails(trade, fill)
order_id = self._execution_to_order_id[commission_report.execId]
self.commissions[order_id][exec_id] = commission_report
log.debug(
"Order-{order_id} report: "
"realized_pnl: ${realized_pnl} "
"commission: ${commission} yield: {yield_} "
"exec_id: {exec_id}".format(
order_id=order_id,
exec_id=commission_report.execId,
realized_pnl=commission_report.realizedPNL
if commission_report.realizedPNL != sys.float_info.max
else 0,
commission=commission_report.commission,
yield_=commission_report.yield_
if commission_report.yield_ != sys.float_info.max
else 0)
)
def openOrder(self, trade):
self.open_orders[trade.order.orderId] = dict (state = trade.orderStatus,
order = trade.order,
contract = trade.contract,
order_id = trade.order.orderId,
)
log.debug(
"Order-{order_id} {status}: "
"{order_action} {order_count} {symbol} with {order_type} order. "
"limit_price={limit_price} stop_price={stop_price}".format(
order_id=trade.order.orderId,
status=trade.orderStatus.status,
order_action=trade.order.action,
order_count=trade.order.totalQuantity,
symbol=trade.contract.symbol,
order_type=trade.order.orderType,
limit_price=trade.order.lmtPrice,
stop_price=trade.order.auxPrice))
def orderStatus(self, trade):
self.order_statuses[trade.order.orderId] = dict (why_held = trade.orderStatus.whyHeld,
client_id=trade.orderStatus.clientId,
last_fill_price=trade.orderStatus.lastFillPrice,
parent_id=trade.orderStatus.parentId,
perm_id=trade.order.permId,
avg_fill_price=trade.orderStatus.avgFillPrice,
remaining=trade.orderStatus.remaining,
filled=trade.orderStatus.filled,
status=trade.orderStatus.status,
order_id=trade.order.orderId,
)
log.debug(
"Order-{order_id} {status}: "
"filled={filled} remaining={remaining} "
"avg_fill_price={avg_fill_price} "
"last_fill_price={last_fill_price} ".format(
order_id=trade.order.orderId,
status=self.order_statuses[trade.order.orderId]['status'],
filled=self.order_statuses[trade.order.orderId]['filled'],
remaining=self.order_statuses[trade.order.orderId]['remaining'],
avg_fill_price=self.order_statuses[trade.order.orderId]['avg_fill_price'],
last_fill_price=self.order_statuses[trade.order.orderId]['last_fill_price']))
@property
def next_order_id(self):
order_id = self.client.getReqId()
return order_id
def is_alive(self):
return self.isConnected()
@property
def next_ticker_id(self):
ticker_id = self._next_ticker_id
self._next_ticker_id += 1
return ticker_id
def subscribe_to_market_data(self, symbol, tick_list='232'):
contract = Contract()
contract.symbol = symbol
contract.secType = symbol_to_sec_type[symbol]
contract.exchange = symbol_to_exchange[symbol]
contract.currency = self.currency
ticker = self.reqMktData(contract, tick_list)
for i in range(0, max_wait_cycles):
self.sleep(wait_step)
if not pd.isna(ticker.close) or not pd.isna(ticker.last):
break
return ticker
def get_spot_value(self, assets, field, dt, data_frequency):
symbol = str(assets.symbol)
ticker_id = self.next_ticker_id
self.symbol_to_ticker_id[symbol] = ticker_id
self.ticker_id_to_symbol[ticker_id] = symbol
ticker = self.subscribe_to_market_data(symbol)
self._subscribed_assets.append (assets)
if not ticker.time:
log.error(f"No data retrieved on symbol {symbol}!")
return pd.NaT if field == 'last_traded' else np.NaN
if field == 'price':
return ticker.last if not pd.isna(ticker.last) and ticker.last > 0 else ticker.close
if field == 'last_traded':
return pd.NaT
if field == 'open':
return ticker.open
if field == 'close':
return ticker.close
if field == 'high':
return ticker.high
if field == 'low':
return ticker.low
if field == 'volume':
return ticker.volume
def set_metrics_tracker(self, metrics_tracker):
self.metrics_tracker = metrics_tracker
def _update_orders(self):
def _update_from_order_status(zp_order, ib_order_id):
if ib_order_id in self.open_orders:
open_order_state = self.open_orders[ib_order_id]['state']
zp_status = self._ib_to_zp_status(open_order_state.status)
if zp_status is None:
log.warning(
"Order-{order_id}: "
"unknown order status: {order_status}.".format(
order_id=ib_order_id,
order_status=open_order_state.status))
else:
zp_order.status = zp_status
if ib_order_id in self.order_statuses:
order_status = self.order_statuses[ib_order_id]
zp_order.filled = order_status['filled']
zp_status = self._ib_to_zp_status(order_status['status'])
if zp_status:
zp_order.status = zp_status
else:
log.warning("Order-{order_id}: "
"unknown order status: {order_status}."
.format(order_id=ib_order_id,
order_status=order_status['status']))
def _update_from_execution(zp_order, ib_order_id):
if ib_order_id in self.executions and \
ib_order_id not in self.open_orders:
zp_order.status = ZP_ORDER_STATUS.FILLED
executions = self.executions[ib_order_id]
last_exec_detail = \
list(executions.values())[-1]['exec_detail']
zp_order.filled = last_exec_detail.cumQty
all_ib_order_ids = (set([e.broker_order_id
for e in self._orders.values()]) |
set(self.open_orders.keys()) |
set(self.order_statuses.keys()) |
set(self.executions.keys()) |
set(self.commissions.keys()))
for ib_order_id in all_ib_order_ids:
zp_order = self._get_or_create_zp_order(ib_order_id)
if zp_order:
_update_from_execution(zp_order, ib_order_id)
_update_from_order_status(zp_order, ib_order_id)
@property
def orders(self):
self._update_orders()
return self._orders
@staticmethod
def _safe_symbol_lookup(symbol):
try:
return symbol_lookup(symbol)
except SymbolNotFound:
return None
def _ib_to_zp_order_id(self, ib_order_id):
return "IB-{date}-{account_id}-{client_id}-{order_id}".format(
date=str(pd.to_datetime('today').date()),
account_id=self.account_id,
client_id=self.client_id,
order_id=ib_order_id)
@staticmethod
def _action_qty_to_amount(action, qty):
return qty if action == 'BUY' else -1 * qty
_zl_order_ref_magic = '!ZL'
@classmethod
def _create_order_ref(cls, ib_order: Order, dt=None):
# evaluate the default at call time; a pd.to_datetime('now') default
# argument would be frozen once, at class-definition time
if dt is None:
    dt = pd.to_datetime('now', utc=True)
order_type = ib_order.orderType.replace(' ', '_')
return \
"A:{action} Q:{qty} T:{order_type} " \
"L:{limit_price} S:{stop_price} D:{date} {magic}".format(
action=ib_order.action,
qty=ib_order.totalQuantity,
order_type=order_type,
limit_price=ib_order.lmtPrice,
stop_price=ib_order.auxPrice,
date=int(dt.value / 1e9),
magic=cls._zl_order_ref_magic)
@classmethod
def _parse_order_ref(cls, ib_order_ref):
if not ib_order_ref or \
not ib_order_ref.endswith(cls._zl_order_ref_magic):
return None
try:
action, qty, order_type, limit_price, stop_price, dt, _ = \
ib_order_ref.split(' ')
if not all(
[action.startswith('A:'),
qty.startswith('Q:'),
order_type.startswith('T:'),
limit_price.startswith('L:'),
stop_price.startswith('S:'),
dt.startswith('D:')]):
return None
return {
'action': action[2:],
'qty': int(qty[2:]),
'order_type': order_type[2:].replace('_', ' '),
'limit_price': float(limit_price[2:]),
'stop_price': float(stop_price[2:]),
'dt': pd.to_datetime(dt[2:], unit='s', utc=True)}
except ValueError:
log.warning("Error parsing order metadata: {}".format(
ib_order_ref))
return None
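# Round-trip sketch of the order-ref encoding above (values are
# illustrative):
#
#   ref = "A:BUY Q:100 T:LMT L:42.5 S:0 D:1546300800 !ZL"
#   IBBroker._parse_order_ref(ref)
#   # -> {'action': 'BUY', 'qty': 100, 'order_type': 'LMT',
#   #     'limit_price': 42.5, 'stop_price': 0.0,
#   #     'dt': Timestamp('2019-01-01 00:00:00+00:00', tz='UTC')}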
@staticmethod
def _ib_to_zp_status(ib_status):
ib_status = ib_status.lower()
if ib_status == 'submitted':
return ZP_ORDER_STATUS.OPEN
elif ib_status in ('pendingsubmit',
'pendingcancel',
'presubmitted'):
return ZP_ORDER_STATUS.HELD
elif ib_status == 'cancelled':
return ZP_ORDER_STATUS.CANCELLED
elif ib_status == 'filled':
return ZP_ORDER_STATUS.FILLED
elif ib_status == 'inactive':
return ZP_ORDER_STATUS.REJECTED
else:
return None
def _get_or_create_zp_order(self, ib_order_id,
ib_order=None, ib_contract=None):
zp_order_id = self._ib_to_zp_order_id(ib_order_id)
if zp_order_id in self._orders:
return self._orders[zp_order_id]
# Try to reconstruct the order from the given information:
# open order state and execution state
symbol, order_details = None, None
if ib_order and ib_contract:
symbol = ib_contract.symbol
order_details = self._parse_order_ref(ib_order.orderRef)
if not order_details and ib_order_id in self.open_orders:
open_order = self.open_orders[ib_order_id]
symbol = open_order['contract'].symbol
order_details = self._parse_order_ref(
open_order['order'].orderRef)
if not order_details and ib_order_id in self.executions:
executions = self.executions[ib_order_id]
last_exec_detail = list(executions.values())[-1]['exec_detail']
last_exec_contract = list(executions.values())[-1]['contract']
symbol = last_exec_contract.symbol
order_details = self._parse_order_ref(last_exec_detail.orderRef)
asset = self._safe_symbol_lookup(symbol)
if not asset:
log.warning(
"Ignoring symbol {symbol} which has associated "
"order but it is not registered in bundle".format(
symbol=symbol))
return None
if order_details:
amount = self._action_qty_to_amount(order_details['action'],
order_details['qty'])
stop_price = order_details['stop_price']
limit_price = order_details['limit_price']
dt = order_details['dt']
else:
dt = pd.to_datetime('now', utc=True)
amount, stop_price, limit_price = 0, None, None
if ib_order_id in self.open_orders:
open_order = self.open_orders[ib_order_id]['order']
amount = self._action_qty_to_amount(
open_order.action, open_order.totalQuantity)
stop_price = open_order.auxPrice
limit_price = open_order.lmtPrice
stop_price = None if stop_price == 0 else stop_price
limit_price = None if limit_price == 0 else limit_price
self._orders[zp_order_id] = ZPOrder(
dt=dt,
asset=asset,
amount=amount,
stop=stop_price,
limit=limit_price,
id=zp_order_id)
self._orders[zp_order_id].broker_order_id = ib_order_id
return self._orders[zp_order_id]
@property
def transactions(self):
self._update_transactions()
return self._transactions
def _update_transactions(self):
all_orders = list(self.orders.values())
for ib_order_id, executions in iteritems(self.executions):
orders = [order
for order in all_orders
if order.broker_order_id == ib_order_id]
if not orders:
log.warning("No order found for executions: {}".format(
executions))
continue
assert len(orders) == 1
order = orders[0]
for exec_id, execution in iteritems(executions):
if exec_id in self._transactions:
continue
try:
commission = self.commissions[ib_order_id][exec_id] \
.commission
except KeyError:
log.warning(
"Commission not found for execution: {}".format(
exec_id))
commission = 0
exec_detail = execution['exec_detail']
is_buy = order.amount > 0
amount = (exec_detail.shares if is_buy
else -1 * exec_detail.shares)
tx = Transaction(
asset=order.asset,
amount=amount,
dt=pd.to_datetime(exec_detail.time, utc=True),
price=exec_detail.price,
order_id=order.id
)
self._transactions[exec_id] = tx
@property
def positions(self):
self._get_positions_from_broker()
return self.metrics_tracker.positions
@property
def subscribed_assets(self):
return self._subscribed_assets
def _get_positions_from_broker(self):
"""
get the positions from the broker and update zipline objects ( the ledger )
should be used once at startup and once every time we want to refresh the positions array
"""
cur_pos_in_tracker = self.metrics_tracker.positions
ib_positions = IB.positions(self, self.account_id)
for ib_position in ib_positions:
try:
z_position = zp.Position(zp.InnerPosition(symbol_lookup(ib_position.contract.symbol)))
editable_position = MutableView(z_position)
except SymbolNotFound:
# The symbol might not have been ingested to the db therefore
# it needs to be skipped.
log.warning('Wanted to subscribe to %s, but this asset is probably not ingested' % ib_position.contract.symbol)
continue
editable_position._underlying_position.amount = int(ib_position.position)
editable_position._underlying_position.cost_basis = float(ib_position.avgCost)
ticker = self.subscribe_to_market_data(ib_position.contract.symbol)
editable_position._underlying_position.last_sale_price = \
ticker.last if not pd.isna(ticker.last) and ticker.last > 0 else ticker.close
editable_position._underlying_position.last_sale_date = \
ticker.time if len(ticker.ticks)==0 else ticker.ticks[-1].time
self.metrics_tracker.update_position(z_position.asset,
amount=z_position.amount,
last_sale_price=z_position.last_sale_price,
last_sale_date=z_position.last_sale_date,
cost_basis=z_position.cost_basis)
for asset in cur_pos_in_tracker:
if asset.symbol not in [p.contract.symbol for p in ib_positions]:
# deleting the object from the metrics_tracker as it's not in the portfolio
self.metrics_tracker.update_position(asset,
amount=0)
# For some reason the metrics tracker has self.positions AND
# self.portfolio.positions; let's make sure these objects stay consistent:
# (self.portfolio.positions is self.metrics_tracker._ledger._portfolio.positions)
# (self.metrics_tracker.positions is self.metrics_tracker._ledger.position_tracker.positions)
self.metrics_tracker._ledger._portfolio.positions = self.metrics_tracker.positions
@property
def portfolio(self):
positions = self.positions
return self.metrics_tracker.portfolio
def order(self, asset, amount, style):
contract = Contract()
contract.symbol = str(asset.symbol)
contract.currency = self.currency
contract.exchange = symbol_to_exchange[str(asset.symbol)]
contract.secType = symbol_to_sec_type[str(asset.symbol)]
order = Order()
order.totalQuantity = int(fabs(amount))
order.action = "BUY" if amount > 0 else "SELL"
is_buy = (amount > 0)
order.lmtPrice = style.get_limit_price(is_buy) or 0
order.auxPrice = style.get_stop_price(is_buy) or 0
if isinstance(style, MarketOrder):
order.orderType = "MKT"
elif isinstance(style, LimitOrder):
order.orderType = "LMT"
elif isinstance(style, StopOrder):
order.orderType = "STP"
elif isinstance(style, StopLimitOrder):
order.orderType = "STP LMT"
# TODO: Support GTC orders both here and at blotter_live
order.tif = "DAY"
order.orderRef = self._create_order_ref(order)
ib_order_id = self.next_order_id
order.orderId = ib_order_id
zp_order = self._get_or_create_zp_order(ib_order_id, order, contract)
log.info(
"Placing order-{order_id}: "
"{action} {qty} {symbol} with {order_type} order. "
"limit_price={limit_price} stop_price={stop_price} {tif}".format(
order_id=ib_order_id,
action=order.action,
qty=order.totalQuantity,
symbol=contract.symbol,
order_type=order.orderType,
limit_price=order.lmtPrice,
stop_price=order.auxPrice,
tif=order.tif
))
self.placeOrder(contract, order)
return zp_order
def get_last_traded_dt(self, asset):
ticker = self.subscribe_to_market_data(asset)
return ticker.time if len(ticker.ticks)==0 else ticker.ticks[-1].time
def get_realtime_bars(self, assets, frequency):
if frequency == '1m':
resample_freq = '1 Min'
elif frequency == '1d':
resample_freq = '24 H'
else:
raise ValueError("Invalid frequency specified: %s" % frequency)
df = pd.DataFrame()
for asset in assets:
    symbol = str(asset.symbol)
    ticker = self.subscribe_to_market_data(symbol)
    # resample() needs time-indexed Series; ticker.last/lastSize are
    # scalars, so build the series from the ticker's tick buffer. Note
    # that ib_insync only keeps the ticks from the latest update, so a
    # fuller implementation would accumulate them between calls.
    trade_prices = pd.Series(
        [t.price for t in ticker.ticks],
        index=pd.DatetimeIndex([t.time for t in ticker.ticks]))
    trade_sizes = pd.Series(
        [t.size for t in ticker.ticks],
        index=pd.DatetimeIndex([t.time for t in ticker.ticks]))
    ohlcv = trade_prices.resample(resample_freq).ohlc()
    ohlcv['volume'] = trade_sizes.resample(resample_freq).sum()
# Add asset as level 0 column; ohlcv will be used as level 1 cols
ohlcv.columns = pd.MultiIndex.from_product([[asset, ],
ohlcv.columns])
df = pd.concat([df, ohlcv], axis=1)
return df | zipline-live2-vk | /zipline-live2-vk-1.3.0.7.0.18.tar.gz/zipline-live2-vk-1.3.0.7.0.18/zipline/gens/brokers/ib_broker2.py | ib_broker2.py |
import sys
from collections import namedtuple, defaultdict, OrderedDict
from time import sleep
from math import fabs
from six import iteritems, itervalues
import polling
import pandas as pd
import numpy as np
from zipline.gens.brokers.broker import Broker
from zipline.finance.order import (Order as ZPOrder,
ORDER_STATUS as ZP_ORDER_STATUS)
from zipline.finance.execution import (MarketOrder,
LimitOrder,
StopOrder,
StopLimitOrder)
from zipline.finance.transaction import Transaction
import zipline.protocol as zp
from zipline.protocol import MutableView
from zipline.api import symbol as symbol_lookup
from zipline.errors import SymbolNotFound
from ib.ext.EClientSocket import EClientSocket
from ib.ext.EWrapper import EWrapper
from ib.ext.Contract import Contract
from ib.ext.Order import Order
from ib.ext.ExecutionFilter import ExecutionFilter
from ib.ext.EClientErrors import EClientErrors
from logbook import Logger
if sys.version_info > (3,):
long = int
log = Logger('IB Broker')
Position = namedtuple('Position', ['contract', 'position', 'market_price',
'market_value', 'average_cost',
'unrealized_pnl', 'realized_pnl',
'account_name'])
_max_wait_subscribe = 10 # how many cycles to wait
_connection_timeout = 15 # Seconds
_poll_frequency = 0.1
symbol_to_exchange = defaultdict(lambda: 'SMART')
symbol_to_exchange['VIX'] = 'CBOE'
symbol_to_exchange['SPX'] = 'CBOE'
symbol_to_exchange['VIX3M'] = 'CBOE'
symbol_to_exchange['VXST'] = 'CBOE'
symbol_to_exchange['VXMT'] = 'CBOE'
symbol_to_exchange['GVZ'] = 'CBOE'
symbol_to_exchange['GLD'] = 'ARCA'
symbol_to_exchange['GDX'] = 'ARCA'
symbol_to_exchange['GPRO'] = 'SMART/NASDAQ'
symbol_to_exchange['MSFT'] = 'SMART/NASDAQ'
symbol_to_exchange['CSCO'] = 'SMART/NASDAQ'
symbol_to_sec_type = defaultdict(lambda: 'STK')
symbol_to_sec_type['VIX'] = 'IND'
symbol_to_sec_type['VIX3M'] = 'IND'
symbol_to_sec_type['VXST'] = 'IND'
symbol_to_sec_type['VXMT'] = 'IND'
symbol_to_sec_type['GVZ'] = 'IND'
symbol_to_sec_type['SPX'] = 'IND'
def log_message(message, mapping):
try:
del (mapping['self'])
except (KeyError,):
pass
items = list(mapping.items())
items.sort()
log.debug(('### %s' % (message,)))
for k, v in items:
log.debug((' %s:%s' % (k, v)))
def _method_params_to_dict(args):
return {k: v
for k, v in iteritems(args)
if k != 'self'}
class TWSConnection(EClientSocket, EWrapper):
def __init__(self, tws_uri):
"""
:param tws_uri: host:listening_port:client_id
- host ip of running tws or ibgw
- port, default for tws 7496 and for ibgw 4002
- your client id, could be any number as long as it's not already used
"""
EWrapper.__init__(self)
EClientSocket.__init__(self, anyWrapper=self)
self.tws_uri = tws_uri
host, port, client_id = self.tws_uri.split(':')
self._host = host
self._port = int(port)
self.client_id = int(client_id)
self._next_ticker_id = 0
self._next_request_id = 0
self._next_order_id = None
self.managed_accounts = None
self.symbol_to_ticker_id = {}
self.ticker_id_to_symbol = {}
self.last_tick = defaultdict(dict)
self.bars = {}
# accounts structure: accounts[account_id][currency][value]
self.accounts = defaultdict(
lambda: defaultdict(lambda: defaultdict(lambda: np.NaN)))
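# e.g. (account id illustrative; values arrive as strings via
# updateAccountValue and default to NaN until the first update):
#   self.accounts['DU123456']['USD']['NetLiquidation'] -> '1000000.00'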
self.accounts_download_complete = False
self.positions = {}
self.portfolio = {}
self.open_orders = {}
self.order_statuses = {}
self.executions = defaultdict(OrderedDict)
self.commissions = defaultdict(OrderedDict)
self._execution_to_order_id = {}
self.time_skew = None
self.unrecoverable_error = False
self.connect()
def connect(self):
log.info("Connecting: {}:{}:{}".format(self._host, self._port,
self.client_id))
self.eConnect(self._host, self._port, self.client_id)
timeout = _connection_timeout
# compare against zero explicitly: float drift could leave `timeout`
# slightly negative (and truthy), which would loop forever
while timeout > 0 and not self.isConnected():
    sleep(_poll_frequency)
    timeout -= _poll_frequency
if not self.isConnected():
    raise SystemError("Connection timeout during TWS connection!")
self._download_account_details()
log.info("Managed accounts: {}".format(self.managed_accounts))
self.reqCurrentTime()
self.reqIds(1)
while self.time_skew is None or self._next_order_id is None:
sleep(_poll_frequency)
log.info("Local-Broker Time Skew: {}".format(self.time_skew))
def _download_account_details(self):
exec_filter = ExecutionFilter()
exec_filter.m_clientId = self.client_id
self.reqExecutions(self.next_request_id, exec_filter)
self.reqManagedAccts()
while self.managed_accounts is None:
sleep(_poll_frequency)
for account in self.managed_accounts:
self.reqAccountUpdates(subscribe=True, acctCode=account)
while self.accounts_download_complete is False:
sleep(_poll_frequency)
@property
def next_ticker_id(self):
ticker_id = self._next_ticker_id
self._next_ticker_id += 1
return ticker_id
@property
def next_request_id(self):
request_id = self._next_request_id
self._next_request_id += 1
return request_id
@property
def next_order_id(self):
order_id = self._next_order_id
self._next_order_id += 1
return order_id
def subscribe_to_market_data(self,
symbol,
sec_type='STK',
exchange='SMART',
currency='USD'):
if symbol in self.symbol_to_ticker_id:
# Already subscribed to market data
return
contract = Contract()
contract.m_symbol = symbol
contract.m_secType = symbol_to_sec_type[symbol]
contract.m_exchange = symbol_to_exchange[symbol]
contract.m_currency = currency
ticker_id = self.next_ticker_id
self.symbol_to_ticker_id[symbol] = ticker_id
self.ticker_id_to_symbol[ticker_id] = symbol
        # Market data cannot be requested for INDEX tickers; it can,
        # however, be requested via realtimeBars. This makes sure we can
        # stream data for INDEX tickers like SPX, VIX, etc.
if contract.m_secType == 'IND':
self.reqRealTimeBars(ticker_id, contract, 60, 'TRADES', True)
else:
tick_list = "233" # RTVolume, return tick_type == 48
self.reqMktData(ticker_id, contract, tick_list, False)
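        # Give the subscription time to start streaming; presumably this
        # pause also keeps us under IB's pacing limits for market-data
        # requests.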
sleep(11)
def _process_tick(self, ticker_id, tick_type, value):
try:
symbol = self.ticker_id_to_symbol[ticker_id]
except KeyError:
log.error("Tick {} for id={} is not registered".format(tick_type,
ticker_id))
return
if tick_type == 48:
            # RT Volume Bar. Format:
            # Last trade price; Last trade size; Last trade time;
            # Total volume; VWAP; Single trade flag
            # e.g.: 701.28;1;1348075471534;67854;701.46918464;true
(last_trade_price, last_trade_size, last_trade_time, total_volume,
vwap, single_trade_flag) = value.split(';')
# Ignore this update if last_trade_price is empty:
# tickString: tickerId=0 tickType=48/RTVolume ;0;1469805548873;\
# 240304;216.648653;true
if len(last_trade_price) == 0:
return
last_trade_dt = pd.to_datetime(float(last_trade_time), unit='ms',
utc=True)
self._add_bar(symbol, float(last_trade_price),
int(last_trade_size), last_trade_dt,
int(total_volume), float(vwap),
single_trade_flag)
def _add_bar(self, symbol, last_trade_price, last_trade_size,
last_trade_time, total_volume, vwap, single_trade_flag):
bar = pd.DataFrame(index=pd.DatetimeIndex([last_trade_time]),
data={'last_trade_price': last_trade_price,
'last_trade_size': last_trade_size,
'total_volume': total_volume,
'vwap': vwap,
'single_trade_flag': single_trade_flag})
if symbol not in self.bars:
self.bars[symbol] = bar
else:
self.bars[symbol] = self.bars[symbol].append(bar)
def tickPrice(self, ticker_id, field, price, can_auto_execute):
self._process_tick(ticker_id, tick_type=field, value=price)
def tickSize(self, ticker_id, field, size):
self._process_tick(ticker_id, tick_type=field, value=size)
def tickOptionComputation(self,
ticker_id, field, implied_vol, delta, opt_price,
pv_dividend, gamma, vega, theta, und_price):
log_message('tickOptionComputation', vars())
def tickGeneric(self, ticker_id, tick_type, value):
self._process_tick(ticker_id, tick_type=tick_type, value=value)
def tickString(self, ticker_id, tick_type, value):
self._process_tick(ticker_id, tick_type=tick_type, value=value)
def tickEFP(self, ticker_id, tick_type, basis_points,
formatted_basis_points, implied_future, hold_days,
future_expiry, dividend_impact, dividends_to_expiry):
log_message('tickEFP', vars())
def updateAccountValue(self, key, value, currency, account_name):
self.accounts[account_name][currency][key] = value
def updatePortfolio(self,
contract,
position,
market_price,
market_value,
average_cost,
unrealized_pnl,
realized_pnl,
account_name):
symbol = contract.m_symbol
position = Position(contract=contract,
position=position,
market_price=market_price,
market_value=market_value,
average_cost=average_cost,
unrealized_pnl=unrealized_pnl,
realized_pnl=realized_pnl,
account_name=account_name)
self.positions[symbol] = position
def updateAccountTime(self, time_stamp):
pass
def accountDownloadEnd(self, account_name):
self.accounts_download_complete = True
def nextValidId(self, order_id):
self._next_order_id = order_id
def contractDetails(self, req_id, contract_details):
log_message('contractDetails', vars())
def contractDetailsEnd(self, req_id):
log_message('contractDetailsEnd', vars())
def bondContractDetails(self, req_id, contract_details):
log_message('bondContractDetails', vars())
def orderStatus(self, order_id, status, filled, remaining, avg_fill_price,
perm_id, parent_id, last_fill_price, client_id, why_held):
self.order_statuses[order_id] = _method_params_to_dict(vars())
log.debug(
"Order-{order_id} {status}: "
"filled={filled} remaining={remaining} "
"avg_fill_price={avg_fill_price} "
"last_fill_price={last_fill_price} ".format(
order_id=order_id,
status=self.order_statuses[order_id]['status'],
filled=self.order_statuses[order_id]['filled'],
remaining=self.order_statuses[order_id]['remaining'],
avg_fill_price=self.order_statuses[order_id]['avg_fill_price'],
last_fill_price=self.order_statuses[order_id]['last_fill_price']))
def openOrder(self, order_id, contract, order, state):
self.open_orders[order_id] = _method_params_to_dict(vars())
log.debug(
"Order-{order_id} {status}: "
"{order_action} {order_count} {symbol} with {order_type} order. "
"limit_price={limit_price} stop_price={stop_price}".format(
order_id=order_id,
status=state.m_status,
order_action=order.m_action,
order_count=order.m_totalQuantity,
symbol=contract.m_symbol,
order_type=order.m_orderType,
limit_price=order.m_lmtPrice,
stop_price=order.m_auxPrice))
def openOrderEnd(self):
pass
def execDetails(self, req_id, contract, exec_detail):
order_id, exec_id = exec_detail.m_orderId, exec_detail.m_execId
self.executions[order_id][exec_id] = _method_params_to_dict(vars())
self._execution_to_order_id[exec_id] = order_id
log.info(
"Order-{order_id} executed @ {exec_time}: "
"{symbol} current: {shares} @ ${price} "
"total: {cum_qty} @ ${avg_price} "
"exec_id: {exec_id} by client-{client_id}".format(
order_id=order_id, exec_id=exec_id,
exec_time=pd.to_datetime(exec_detail.m_time),
symbol=contract.m_symbol,
shares=exec_detail.m_shares,
price=exec_detail.m_price,
cum_qty=exec_detail.m_cumQty,
avg_price=exec_detail.m_avgPrice,
client_id=exec_detail.m_clientId))
def execDetailsEnd(self, req_id):
log.debug(
"Execution details completed for request {req_id}".format(
req_id=req_id))
def commissionReport(self, commission_report):
exec_id = commission_report.m_execId
        # This check handles the case where IB sends a report for an order
        # placed by another session (e.g. out of market hours); the current
        # session then has no record of that exec_id.
if commission_report.m_execId in self._execution_to_order_id:
# we do have info on the report within session
order_id = self._execution_to_order_id[commission_report.m_execId]
self.commissions[order_id][exec_id] = commission_report
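            # IB reports sys.float_info.max (DBL_MAX) for fields it did not
            # populate; the formatting below treats that sentinel as 0.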
log.debug(
"Order-{order_id} report: "
"realized_pnl: ${realized_pnl} "
"commission: ${commission} yield: {yield_} "
"exec_id: {exec_id}".format(
order_id=order_id,
exec_id=commission_report.m_execId,
realized_pnl=commission_report.m_realizedPNL
if commission_report.m_realizedPNL != sys.float_info.max
else 0,
commission=commission_report.m_commission,
yield_=commission_report.m_yield
if commission_report.m_yield != sys.float_info.max
else 0)
)
        else:
            # We have no info on this exec_id within the current session.
            log.debug(
                "Commission report received from TWS, but the exec_id was "
                "not found within the current session. "
                "exec_id: {exec_id}".format(
                    exec_id=commission_report.m_execId,
                )
            )
def connectionClosed(self):
self.unrecoverable_error = True
log.error("IB Connection closed")
def error(self, id_=None, error_code=None, error_msg=None):
if isinstance(id_, Exception):
# XXX: for an unknown reason 'log' is None in this branch,
# therefore it needs to be instantiated before use
global log
if not log:
log = Logger('IB Broker')
log.exception(id_)
if isinstance(error_code, EClientErrors.CodeMsgPair):
error_msg = error_code.msg()
error_code = error_code.code()
if isinstance(error_code, int):
if error_code in (502, 503, 326):
# 502: Couldn't connect to TWS.
# 503: The TWS is out of date and must be upgraded.
# 326: Unable connect as the client id is already in use.
self.unrecoverable_error = True
if error_code < 1000:
log.error("[{}] {} ({})".format(error_code, error_msg, id_))
else:
log.info("[{}] {} ({})".format(error_code, error_msg, id_))
else:
log.error("[{}] {} ({})".format(error_code, error_msg, id_))
def updateMktDepth(self, ticker_id, position, operation, side, price,
size):
log_message('updateMktDepth', vars())
def updateMktDepthL2(self, ticker_id, position, market_maker, operation,
side, price, size):
log_message('updateMktDepthL2', vars())
def updateNewsBulletin(self, msg_id, msg_type, message, orig_exchange):
log_message('updateNewsBulletin', vars())
def managedAccounts(self, accounts_list):
self.managed_accounts = accounts_list.split(',')
def receiveFA(self, fa_data_type, xml):
log_message('receiveFA', vars())
def historicalData(self, req_id, date, open_, high, low, close, volume,
count, wap, has_gaps):
log_message('historicalData', vars())
def scannerParameters(self, xml):
log_message('scannerParameters', vars())
def scannerData(self, req_id, rank, contract_details, distance, benchmark,
projection, legs_str):
log_message('scannerData', vars())
def currentTime(self, time):
        # int() rather than the py2-only long(): this keeps the module
        # importable on python 3 as well.
        self.time_skew = (pd.to_datetime('now', utc=True) -
                          pd.to_datetime(int(time), unit='s', utc=True))
def deltaNeutralValidation(self, req_id, under_comp):
log_message('deltaNeutralValidation', vars())
def fundamentalData(self, req_id, data):
log_message('fundamentalData', vars())
def marketDataType(self, req_id, market_data_type):
log_message('marketDataType', vars())
def realtimeBar(self, req_id, time, open_, high, low, close, volume, wap,
count):
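        # Reassemble the bar into the semicolon-separated RTVolume string
        # format so it can be routed through the same tick_type=48 code
        # path that _process_tick uses for streamed ticks.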
value = (";".join([str(close), str(count), str(time), str(volume),
str(wap), "true"]))
self._process_tick(req_id, tick_type=48, value=value)
def scannerDataEnd(self, req_id):
log_message('scannerDataEnd', vars())
def tickSnapshotEnd(self, req_id):
log_message('tickSnapshotEnd', vars())
def position(self, account, contract, pos, avg_cost):
log_message('position', vars())
def positionEnd(self):
log_message('positionEnd', vars())
def accountSummary(self, req_id, account, tag, value, currency):
log_message('accountSummary', vars())
def accountSummaryEnd(self, req_id):
log_message('accountSummaryEnd', vars())
class IBBroker(Broker):
def __init__(self, tws_uri, account_id=None):
"""
        :param tws_uri: host:listening_port:client_id
        - host: ip of the running tws or ibgw
        - port: default is 7496 for tws and 4002 for ibgw
        - client id: any number, as long as it's not already in use
"""
self._tws_uri = tws_uri
self._orders = {}
self._transactions = {}
self._tws = TWSConnection(tws_uri)
self.account_id = (self._tws.managed_accounts[0] if account_id is None
else account_id)
self.currency = 'USD'
self._subscribed_assets = []
        super(IBBroker, self).__init__()
@property
def subscribed_assets(self):
return self._subscribed_assets
def subscribe_to_market_data(self, asset):
if asset not in self.subscribed_assets:
log.info("Subscribing to market data for {}".format(
asset))
            # The str() cast is required: asset.symbol may not be a plain
            # str, and passing it uncast leads to hard-to-debug failures.
self._tws.subscribe_to_market_data(str(asset.symbol))
self._subscribed_assets.append(asset)
try:
polling.poll(
lambda: asset.symbol in self._tws.bars,
timeout=_max_wait_subscribe,
step=_poll_frequency)
            except polling.TimeoutException:
                log.warning('Failed to subscribe to market data for %s '
                            'within the timeout' % str(asset.symbol))
else:
log.debug("Subscription completed")
@property
def positions(self):
self._get_positions_from_broker()
return self.metrics_tracker.positions
def _get_positions_from_broker(self):
"""
get the positions from the broker and update zipline objects ( the ledger )
should be used once at startup and once every time we want to refresh the positions array
"""
cur_pos_in_tracker = self.metrics_tracker.positions
for symbol in self._tws.positions:
ib_position = self._tws.positions[symbol]
try:
z_position = zp.Position(zp.InnerPosition(symbol_lookup(symbol)))
editable_position = MutableView(z_position)
            except SymbolNotFound:
                # The symbol might not have been ingested into the db,
                # so it needs to be skipped.
                log.warning('Wanted to subscribe to %s, but this asset is '
                            'probably not ingested' % symbol)
continue
editable_position._underlying_position.amount = int(ib_position.position)
editable_position._underlying_position.cost_basis = float(ib_position.average_cost)
# Check if symbol exists in bars df
if symbol in self._tws.bars:
editable_position._underlying_position.last_sale_price = \
float(self._tws.bars[symbol].last_trade_price.iloc[-1])
editable_position._underlying_position.last_sale_date = \
self._tws.bars[symbol].index.values[-1]
else:
                # last_sale_price cannot be set to None (it must be a
                # number), so it is left unchanged here.
editable_position._underlying_position.last_sale_date = None
self.metrics_tracker.update_position(z_position.asset,
amount=z_position.amount,
last_sale_price=z_position.last_sale_price,
last_sale_date=z_position.last_sale_date,
cost_basis=z_position.cost_basis)
for asset in cur_pos_in_tracker:
if asset.symbol not in self._tws.positions:
                # deleting the object from the metrics_tracker as it's not
                # in the portfolio
self.metrics_tracker.update_position(asset,
amount=0)
        # For some reason, the metrics tracker has both self.positions AND
        # self.portfolio.positions; make sure these objects stay consistent:
        # (self.portfolio.positions is
        #  self.metrics_tracker._ledger._portfolio.positions)
        # (self.metrics_tracker.positions is
        #  self.metrics_tracker._ledger.position_tracker.positions)
self.metrics_tracker._ledger._portfolio.positions = self.metrics_tracker.positions
@property
def portfolio(self):
        # Accessing self.positions has the side effect of refreshing
        # positions from the broker before the portfolio is read.
        self.positions
return self.metrics_tracker.portfolio
def get_account_from_broker(self):
ib_account = self._tws.accounts[self.account_id][self.currency]
return ib_account
def set_metrics_tracker(self, metrics_tracker):
self.metrics_tracker = metrics_tracker
@property
def account(self):
ib_account = self._tws.accounts[self.account_id][self.currency]
self.metrics_tracker.override_account_fields(
settled_cash=float(ib_account['CashBalance']),
accrued_interest=float(ib_account['AccruedCash']),
buying_power=float(ib_account['BuyingPower']),
equity_with_loan=float(ib_account['EquityWithLoanValue']),
total_positions_value=float(ib_account['StockMarketValue']),
total_positions_exposure=float(
(float(ib_account['StockMarketValue']) /
(float(ib_account['StockMarketValue']) +
float(ib_account['TotalCashValue'])))),
regt_equity=float(ib_account['RegTEquity']),
regt_margin=float(ib_account['RegTMargin']),
initial_margin_requirement=float(
ib_account['FullInitMarginReq']),
maintenance_margin_requirement=float(
ib_account['FullMaintMarginReq']),
available_funds=float(ib_account['AvailableFunds']),
excess_liquidity=float(ib_account['ExcessLiquidity']),
cushion=float(
self._tws.accounts[self.account_id]['']['Cushion']),
day_trades_remaining=float(
self._tws.accounts[self.account_id]['']['DayTradesRemaining']),
leverage=float(
self._tws.accounts[self.account_id]['']['Leverage-S']),
net_leverage=(
float(ib_account['StockMarketValue']) /
(float(ib_account['TotalCashValue']) +
float(ib_account['StockMarketValue']))),
net_liquidation=float(ib_account['NetLiquidation'])
)
return self.metrics_tracker.account
@property
def time_skew(self):
return self._tws.time_skew
def is_alive(self):
return not self._tws.unrecoverable_error
@staticmethod
def _safe_symbol_lookup(symbol):
try:
return symbol_lookup(symbol)
except SymbolNotFound:
return None
_zl_order_ref_magic = '!ZL'
@classmethod
    def _create_order_ref(cls, ib_order, dt=None):
        # Evaluate the timestamp default at call time; a default argument
        # of pd.to_datetime('now') would be frozen at import time.
        if dt is None:
            dt = pd.to_datetime('now', utc=True)
        order_type = ib_order.m_orderType.replace(' ', '_')
return \
"A:{action} Q:{qty} T:{order_type} " \
"L:{limit_price} S:{stop_price} D:{date} {magic}".format(
action=ib_order.m_action,
qty=ib_order.m_totalQuantity,
order_type=order_type,
limit_price=ib_order.m_lmtPrice,
stop_price=ib_order.m_auxPrice,
date=int(dt.value / 1e9),
magic=cls._zl_order_ref_magic)
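    # An order ref encoded by _create_order_ref looks like this
    # (values hypothetical):
    #   "A:BUY Q:100 T:LMT L:10.5 S:0.0 D:1498053117 !ZL"
    # _parse_order_ref below recovers the order details from that string.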
@classmethod
def _parse_order_ref(cls, ib_order_ref):
if not ib_order_ref or \
not ib_order_ref.endswith(cls._zl_order_ref_magic):
return None
try:
action, qty, order_type, limit_price, stop_price, dt, _ = \
ib_order_ref.split(' ')
if not all(
[action.startswith('A:'),
qty.startswith('Q:'),
order_type.startswith('T:'),
limit_price.startswith('L:'),
stop_price.startswith('S:'),
dt.startswith('D:')]):
return None
return {
'action': action[2:],
'qty': int(qty[2:]),
'order_type': order_type[2:].replace('_', ' '),
'limit_price': float(limit_price[2:]),
'stop_price': float(stop_price[2:]),
'dt': pd.to_datetime(dt[2:], unit='s', utc=True)}
except ValueError:
log.warning("Error parsing order metadata: {}".format(
ib_order_ref))
return None
def order(self, asset, amount, style):
contract = Contract()
contract.m_symbol = str(asset.symbol)
contract.m_currency = self.currency
contract.m_exchange = symbol_to_exchange[str(asset.symbol)]
contract.m_secType = symbol_to_sec_type[str(asset.symbol)]
order = Order()
order.m_totalQuantity = int(fabs(amount))
order.m_action = "BUY" if amount > 0 else "SELL"
is_buy = (amount > 0)
order.m_lmtPrice = style.get_limit_price(is_buy) or 0
order.m_auxPrice = style.get_stop_price(is_buy) or 0
if isinstance(style, MarketOrder):
order.m_orderType = "MKT"
elif isinstance(style, LimitOrder):
order.m_orderType = "LMT"
elif isinstance(style, StopOrder):
order.m_orderType = "STP"
elif isinstance(style, StopLimitOrder):
order.m_orderType = "STP LMT"
# TODO: Support GTC orders both here and at blotter_live
order.m_tif = "DAY"
order.m_orderRef = self._create_order_ref(order)
ib_order_id = self._tws.next_order_id
zp_order = self._get_or_create_zp_order(ib_order_id, order, contract)
log.info(
"Placing order-{order_id}: "
"{action} {qty} {symbol} with {order_type} order. "
"limit_price={limit_price} stop_price={stop_price} {tif}".format(
order_id=ib_order_id,
action=order.m_action,
qty=order.m_totalQuantity,
symbol=contract.m_symbol,
order_type=order.m_orderType,
limit_price=order.m_lmtPrice,
stop_price=order.m_auxPrice,
tif=order.m_tif
))
self._tws.placeOrder(ib_order_id, contract, order)
return zp_order
@property
def orders(self):
self._update_orders()
return self._orders
def _ib_to_zp_order_id(self, ib_order_id):
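        # e.g. (hypothetical values): "IB-2017-06-21-DU000000-7-12"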
return "IB-{date}-{account_id}-{client_id}-{order_id}".format(
date=str(pd.to_datetime('today').date()),
account_id=self.account_id,
client_id=self._tws.client_id,
order_id=ib_order_id)
@staticmethod
def _action_qty_to_amount(action, qty):
return qty if action == 'BUY' else -1 * qty
def _get_or_create_zp_order(self, ib_order_id,
ib_order=None, ib_contract=None):
zp_order_id = self._ib_to_zp_order_id(ib_order_id)
if zp_order_id in self._orders:
return self._orders[zp_order_id]
# Try to reconstruct the order from the given information:
# open order state and execution state
symbol, order_details = None, None
if ib_order and ib_contract:
symbol = ib_contract.m_symbol
order_details = self._parse_order_ref(ib_order.m_orderRef)
if not order_details and ib_order_id in self._tws.open_orders:
open_order = self._tws.open_orders[ib_order_id]
symbol = open_order['contract'].m_symbol
order_details = self._parse_order_ref(
open_order['order'].m_orderRef)
if not order_details and ib_order_id in self._tws.executions:
executions = self._tws.executions[ib_order_id]
last_exec_detail = list(executions.values())[-1]['exec_detail']
last_exec_contract = list(executions.values())[-1]['contract']
symbol = last_exec_contract.m_symbol
order_details = self._parse_order_ref(last_exec_detail.m_orderRef)
asset = self._safe_symbol_lookup(symbol)
if not asset:
            log.warning(
                "Ignoring symbol {symbol}, which has an associated "
                "order but is not registered in the bundle".format(
                    symbol=symbol))
return None
if order_details:
amount = self._action_qty_to_amount(order_details['action'],
order_details['qty'])
stop_price = order_details['stop_price']
limit_price = order_details['limit_price']
dt = order_details['dt']
else:
dt = pd.to_datetime('now', utc=True)
amount, stop_price, limit_price = 0, None, None
if ib_order_id in self._tws.open_orders:
open_order = self._tws.open_orders[ib_order_id]['order']
amount = self._action_qty_to_amount(
open_order.m_action, open_order.m_totalQuantity)
stop_price = open_order.m_auxPrice
limit_price = open_order.m_lmtPrice
stop_price = None if stop_price == 0 else stop_price
limit_price = None if limit_price == 0 else limit_price
self._orders[zp_order_id] = ZPOrder(
dt=dt,
asset=asset,
amount=amount,
stop=stop_price,
limit=limit_price,
id=zp_order_id)
self._orders[zp_order_id].broker_order_id = ib_order_id
return self._orders[zp_order_id]
@staticmethod
def _ib_to_zp_status(ib_status):
ib_status = ib_status.lower()
if ib_status == 'submitted':
return ZP_ORDER_STATUS.OPEN
elif ib_status in ('pendingsubmit',
'pendingcancel',
'presubmitted'):
return ZP_ORDER_STATUS.HELD
elif ib_status == 'cancelled':
return ZP_ORDER_STATUS.CANCELLED
elif ib_status == 'filled':
return ZP_ORDER_STATUS.FILLED
elif ib_status == 'inactive':
return ZP_ORDER_STATUS.REJECTED
else:
return None
def _update_orders(self):
def _update_from_order_status(zp_order, ib_order_id):
if ib_order_id in self._tws.open_orders:
open_order_state = self._tws.open_orders[ib_order_id]['state']
zp_status = self._ib_to_zp_status(open_order_state.m_status)
if zp_status is None:
log.warning(
"Order-{order_id}: "
"unknown order status: {order_status}.".format(
order_id=ib_order_id,
order_status=open_order_state.m_status))
else:
zp_order.status = zp_status
if ib_order_id in self._tws.order_statuses:
order_status = self._tws.order_statuses[ib_order_id]
zp_order.filled = order_status['filled']
zp_status = self._ib_to_zp_status(order_status['status'])
if zp_status:
zp_order.status = zp_status
else:
log.warning("Order-{order_id}: "
"unknown order status: {order_status}."
.format(order_id=ib_order_id,
order_status=order_status['status']))
def _update_from_execution(zp_order, ib_order_id):
if ib_order_id in self._tws.executions and \
ib_order_id not in self._tws.open_orders:
zp_order.status = ZP_ORDER_STATUS.FILLED
executions = self._tws.executions[ib_order_id]
last_exec_detail = \
list(executions.values())[-1]['exec_detail']
zp_order.filled = last_exec_detail.m_cumQty
all_ib_order_ids = (set([e.broker_order_id
for e in self._orders.values()]) |
set(self._tws.open_orders.keys()) |
set(self._tws.order_statuses.keys()) |
set(self._tws.executions.keys()) |
set(self._tws.commissions.keys()))
for ib_order_id in all_ib_order_ids:
zp_order = self._get_or_create_zp_order(ib_order_id)
if zp_order:
_update_from_execution(zp_order, ib_order_id)
_update_from_order_status(zp_order, ib_order_id)
@property
def transactions(self):
self._update_transactions()
return self._transactions
def _update_transactions(self):
all_orders = list(self.orders.values())
for ib_order_id, executions in iteritems(self._tws.executions):
orders = [order
for order in all_orders
if order.broker_order_id == ib_order_id]
if not orders:
log.warning("No order found for executions: {}".format(
executions))
continue
assert len(orders) == 1
order = orders[0]
for exec_id, execution in iteritems(executions):
if exec_id in self._transactions:
continue
try:
commission = self._tws.commissions[ib_order_id][exec_id] \
.m_commission
except KeyError:
log.warning(
"Commission not found for execution: {}".format(
exec_id))
commission = 0
exec_detail = execution['exec_detail']
is_buy = order.amount > 0
amount = (exec_detail.m_shares if is_buy
else -1 * exec_detail.m_shares)
tx = Transaction(
asset=order.asset,
amount=amount,
dt=pd.to_datetime(exec_detail.m_time, utc=True),
price=exec_detail.m_price,
order_id=order.id
)
self._transactions[exec_id] = tx
def cancel_order(self, zp_order_id):
ib_order_id = self.orders[zp_order_id].broker_order_id
self._tws.cancelOrder(ib_order_id)
    def get_spot_value(self, assets, field, dt, data_frequency):
        symbol = str(assets.symbol)
        self.subscribe_to_market_data(assets)
        bars = self._tws.bars[symbol]
        # Check for empty bars before touching the index; otherwise
        # bars.index[-1] below would raise an IndexError.
        if bars.empty:
            return pd.NaT if field == 'last_traded' else np.NaN
        last_event_time = bars.index[-1]
        minute_start = (last_event_time - pd.Timedelta('1 min')).time()
        minute_end = last_event_time.time()
        if field == 'price':
            return bars.last_trade_price.iloc[-1]
        elif field == 'last_traded':
            return last_event_time or pd.NaT
        minute_df = bars.between_time(minute_start, minute_end,
                                      include_start=True, include_end=True)
        if minute_df.empty:
            return np.NaN
        if field == 'open':
            return minute_df.last_trade_price.iloc[0]
        elif field == 'close':
            return minute_df.last_trade_price.iloc[-1]
        elif field == 'high':
            return minute_df.last_trade_price.max()
        elif field == 'low':
            return minute_df.last_trade_price.min()
        elif field == 'volume':
            return minute_df.last_trade_size.sum()
def get_last_traded_dt(self, asset):
self.subscribe_to_market_data(asset)
return self._tws.bars[asset.symbol].index[-1]
def get_realtime_bars(self, assets, frequency):
if frequency == '1m':
resample_freq = '1 Min'
elif frequency == '1d':
resample_freq = '24 H'
else:
raise ValueError("Invalid frequency specified: %s" % frequency)
df = pd.DataFrame()
for asset in assets:
symbol = str(asset.symbol)
self.subscribe_to_market_data(asset)
trade_prices = self._tws.bars[symbol]['last_trade_price']
trade_sizes = self._tws.bars[symbol]['last_trade_size']
ohlcv = trade_prices.resample(resample_freq).ohlc()
ohlcv['volume'] = trade_sizes.resample(resample_freq).sum()
# Add asset as level 0 column; ohlcv will be used as level 1 cols
ohlcv.columns = pd.MultiIndex.from_product([[asset, ],
ohlcv.columns])
df = pd.concat([df, ohlcv], axis=1)
        return df

# === end of file: zipline/gens/brokers/ib_broker.py ===
from textwrap import dedent
from numpy import (
array,
full,
recarray,
vstack,
)
from pandas import NaT as pd_NaT
from zipline.errors import (
WindowLengthNotPositive,
UnsupportedDataType,
NoFurtherDataError,
)
from zipline.utils.context_tricks import nop_context
from zipline.utils.input_validation import expect_types
from zipline.utils.sharedoc import (
format_docstring,
PIPELINE_ALIAS_NAME_DOC,
PIPELINE_DOWNSAMPLING_FREQUENCY_DOC,
)
from zipline.utils.pandas_utils import nearest_unequal_elements
from .downsample_helpers import (
select_sampling_indices,
expect_downsample_frequency,
)
from .sentinels import NotSpecified
from .term import Term
class PositiveWindowLengthMixin(object):
"""
Validation mixin enforcing that a Term gets a positive WindowLength
"""
def _validate(self):
super(PositiveWindowLengthMixin, self)._validate()
if not self.windowed:
raise WindowLengthNotPositive(window_length=self.window_length)
class SingleInputMixin(object):
"""
Validation mixin enforcing that a Term gets a length-1 inputs list.
"""
def _validate(self):
super(SingleInputMixin, self)._validate()
num_inputs = len(self.inputs)
if num_inputs != 1:
raise ValueError(
"{typename} expects only one input, "
"but received {num_inputs} instead.".format(
typename=type(self).__name__,
num_inputs=num_inputs
)
)
class StandardOutputs(object):
"""
Validation mixin enforcing that a Term cannot produce non-standard outputs.
"""
def _validate(self):
super(StandardOutputs, self)._validate()
if self.outputs is not NotSpecified:
raise ValueError(
"{typename} does not support custom outputs,"
" but received custom outputs={outputs}.".format(
typename=type(self).__name__,
outputs=self.outputs,
)
)
class RestrictedDTypeMixin(object):
"""
Validation mixin enforcing that a term has a specific dtype.
"""
ALLOWED_DTYPES = NotSpecified
def _validate(self):
super(RestrictedDTypeMixin, self)._validate()
assert self.ALLOWED_DTYPES is not NotSpecified, (
"ALLOWED_DTYPES not supplied on subclass "
"of RestrictedDTypeMixin: %s." % type(self).__name__
)
if self.dtype not in self.ALLOWED_DTYPES:
raise UnsupportedDataType(
typename=type(self).__name__,
dtype=self.dtype,
)
class CustomTermMixin(object):
"""
Mixin for user-defined rolling-window Terms.
Implements `_compute` in terms of a user-defined `compute` function, which
is mapped over the input windows.
Used by CustomFactor, CustomFilter, CustomClassifier, etc.
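    Example
    -------
    An illustrative user-defined factor (a sketch, assuming zipline's
    public ``CustomFactor`` API and the ``USEquityPricing`` dataset)::

        class MeanClose(CustomFactor):
            inputs = [USEquityPricing.close]
            window_length = 5

            def compute(self, today, assets, out, close):
                # `close` is a (window_length x num_assets) ndarray.
                out[:] = close.mean(axis=0)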
"""
ctx = nop_context
def __new__(cls,
inputs=NotSpecified,
outputs=NotSpecified,
window_length=NotSpecified,
mask=NotSpecified,
dtype=NotSpecified,
missing_value=NotSpecified,
ndim=NotSpecified,
**kwargs):
unexpected_keys = set(kwargs) - set(cls.params)
if unexpected_keys:
raise TypeError(
"{termname} received unexpected keyword "
"arguments {unexpected}".format(
termname=cls.__name__,
unexpected={k: kwargs[k] for k in unexpected_keys},
)
)
return super(CustomTermMixin, cls).__new__(
cls,
inputs=inputs,
outputs=outputs,
window_length=window_length,
mask=mask,
dtype=dtype,
missing_value=missing_value,
ndim=ndim,
**kwargs
)
def compute(self, today, assets, out, *arrays):
"""
Override this method with a function that writes a value into `out`.
"""
raise NotImplementedError(
"{name} must define a compute method".format(
name=type(self).__name__
)
)
def _allocate_output(self, windows, shape):
"""
Allocate an output array whose rows should be passed to `self.compute`.
The resulting array must have a shape of ``shape``.
If we have standard outputs (i.e. self.outputs is NotSpecified), the
default is an empty ndarray whose dtype is ``self.dtype``.
If we have an outputs tuple, the default is an empty recarray with
``self.outputs`` as field names. Each field will have dtype
``self.dtype``.
This can be overridden to control the kind of array constructed
(e.g. to produce a LabelArray instead of an ndarray).
"""
missing_value = self.missing_value
outputs = self.outputs
if outputs is not NotSpecified:
out = recarray(
shape,
formats=[self.dtype.str] * len(outputs),
names=outputs,
)
out[:] = missing_value
else:
out = full(shape, missing_value, dtype=self.dtype)
return out
def _format_inputs(self, windows, column_mask):
inputs = []
for input_ in windows:
window = next(input_)
if window.shape[1] == 1:
# Do not mask single-column inputs.
inputs.append(window)
else:
inputs.append(window[:, column_mask])
return inputs
def _compute(self, windows, dates, assets, mask):
"""
Call the user's `compute` function on each window with a pre-built
output array.
"""
format_inputs = self._format_inputs
compute = self.compute
params = self.params
ndim = self.ndim
shape = (len(mask), 1) if ndim == 1 else mask.shape
out = self._allocate_output(windows, shape)
with self.ctx:
for idx, date in enumerate(dates):
# Never apply a mask to 1D outputs.
out_mask = array([True]) if ndim == 1 else mask[idx]
# Mask our inputs as usual.
inputs_mask = mask[idx]
masked_assets = assets[inputs_mask]
out_row = out[idx][out_mask]
inputs = format_inputs(windows, inputs_mask)
compute(date, masked_assets, out_row, *inputs, **params)
out[idx][out_mask] = out_row
return out
def graph_repr(self):
"""Short repr to use when rendering Pipeline graphs."""
return type(self).__name__ + ':\l window_length: %d\l' % \
self.window_length
class LatestMixin(SingleInputMixin):
"""
Mixin for behavior shared by Custom{Factor,Filter,Classifier}.
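    ``window_length`` is fixed at 1, so ``compute`` receives only the most
    recent row of data for each input.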
"""
window_length = 1
def compute(self, today, assets, out, data):
out[:] = data[-1]
def _validate(self):
super(LatestMixin, self)._validate()
if self.inputs[0].dtype != self.dtype:
raise TypeError(
"{name} expected an input of dtype {expected}, "
"but got {actual} instead.".format(
name=type(self).__name__,
expected=self.dtype,
actual=self.inputs[0].dtype,
)
)
def graph_repr(self):
return "Latest"
class AliasedMixin(SingleInputMixin):
"""
Mixin for aliased terms.
"""
def __new__(cls, term, name):
return super(AliasedMixin, cls).__new__(
cls,
inputs=(term,),
outputs=term.outputs,
window_length=0,
name=name,
dtype=term.dtype,
missing_value=term.missing_value,
ndim=term.ndim,
window_safe=term.window_safe,
)
def _init(self, name, *args, **kwargs):
self.name = name
return super(AliasedMixin, self)._init(*args, **kwargs)
@classmethod
def _static_identity(cls, name, *args, **kwargs):
return (
super(AliasedMixin, cls)._static_identity(*args, **kwargs),
name,
)
def _compute(self, inputs, dates, assets, mask):
return inputs[0]
def __repr__(self):
return '{type}({inner_type}(...), name={name!r})'.format(
type=type(self).__name__,
inner_type=type(self.inputs[0]).__name__,
name=self.name,
)
def graph_repr(self):
"""Short repr to use when rendering Pipeline graphs."""
return self.name
@classmethod
def make_aliased_type(cls, other_base):
"""
Factory for making Aliased{Filter,Factor,Classifier}.
"""
docstring = dedent(
"""
A {t} that names another {t}.
Parameters
----------
term : {t}
{{name}}
"""
).format(t=other_base.__name__)
doc = format_docstring(
owner_name=other_base.__name__,
docstring=docstring,
formatters={'name': PIPELINE_ALIAS_NAME_DOC},
)
return type(
'Aliased' + other_base.__name__,
(cls, other_base),
{'__doc__': doc,
'__module__': other_base.__module__},
)
class DownsampledMixin(StandardOutputs):
"""
Mixin for behavior shared by Downsampled{Factor,Filter,Classifier}
    A downsampled term is a wrapper around the "real" term, which performs
    the actual computation. The downsampler is responsible for calling the
    real term's `compute` method at selected intervals and forward-filling
    the computed values.
Downsampling is not currently supported for terms with multiple outputs.
"""
# There's no reason to take a window of a downsampled term. The whole
# point is that you're re-using the same result multiple times.
window_safe = False
@expect_types(term=Term)
@expect_downsample_frequency
def __new__(cls, term, frequency):
return super(DownsampledMixin, cls).__new__(
cls,
inputs=term.inputs,
outputs=term.outputs,
window_length=term.window_length,
mask=term.mask,
frequency=frequency,
wrapped_term=term,
dtype=term.dtype,
missing_value=term.missing_value,
ndim=term.ndim,
)
def _init(self, frequency, wrapped_term, *args, **kwargs):
self._frequency = frequency
self._wrapped_term = wrapped_term
return super(DownsampledMixin, self)._init(*args, **kwargs)
@classmethod
def _static_identity(cls, frequency, wrapped_term, *args, **kwargs):
return (
super(DownsampledMixin, cls)._static_identity(*args, **kwargs),
frequency,
wrapped_term,
)
def compute_extra_rows(self,
all_dates,
start_date,
end_date,
min_extra_rows):
"""
Ensure that min_extra_rows pushes us back to a computation date.
Parameters
----------
all_dates : pd.DatetimeIndex
The trading sessions against which ``self`` will be computed.
start_date : pd.Timestamp
The first date for which final output is requested.
end_date : pd.Timestamp
The last date for which final output is requested.
min_extra_rows : int
The minimum number of extra rows required of ``self``, as
determined by other terms that depend on ``self``.
Returns
-------
extra_rows : int
The number of extra rows to compute. This will be the minimum
number of rows required to make our computed start_date fall on a
recomputation date.
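        Example
        -------
        Suppose (hypothetically) ``self`` is sampled at month_start
        frequency and ``min_extra_rows`` would place the computed start on
        the 10th session of a month. We then add enough extra rows to pull
        the start back to the first session of that month, so that the
        first output row falls on a recomputation date.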
"""
try:
current_start_pos = all_dates.get_loc(start_date) - min_extra_rows
if current_start_pos < 0:
raise NoFurtherDataError.from_lookback_window(
initial_message="Insufficient data to compute Pipeline:",
first_date=all_dates[0],
lookback_start=start_date,
lookback_length=min_extra_rows,
)
except KeyError:
before, after = nearest_unequal_elements(all_dates, start_date)
raise ValueError(
"Pipeline start_date {start_date} is not in calendar.\n"
"Latest date before start_date is {before}.\n"
"Earliest date after start_date is {after}.".format(
start_date=start_date,
before=before,
after=after,
)
)
# Our possible target dates are all the dates on or before the current
# starting position.
# TODO: Consider bounding this below by self.window_length
candidates = all_dates[:current_start_pos + 1]
# Choose the latest date in the candidates that is the start of a new
# period at our frequency.
choices = select_sampling_indices(candidates, self._frequency)
        # If we have choices, the last choice is the first date of the
        # period containing current_start_date. Choose it.
new_start_date = candidates[choices[-1]]
# Add the difference between the new and old start dates to get the
# number of rows for the new start_date.
new_start_pos = all_dates.get_loc(new_start_date)
assert new_start_pos <= current_start_pos, \
"Computed negative extra rows!"
return min_extra_rows + (current_start_pos - new_start_pos)
def _compute(self, inputs, dates, assets, mask):
"""
Compute by delegating to self._wrapped_term._compute on sample dates.
On non-sample dates, forward-fill from previously-computed samples.
"""
to_sample = dates[select_sampling_indices(dates, self._frequency)]
assert to_sample[0] == dates[0], \
"Misaligned sampling dates in %s." % type(self).__name__
real_compute = self._wrapped_term._compute
# Inputs will contain different kinds of values depending on whether or
# not we're a windowed computation.
# If we're windowed, then `inputs` is a list of iterators of ndarrays.
# If we're not windowed, then `inputs` is just a list of ndarrays.
# There are two things we care about doing with the input:
# 1. Preparing an input to be passed to our wrapped term.
# 2. Skipping an input if we're going to use an already-computed row.
# We perform these actions differently based on the expected kind of
# input, and we encapsulate these actions with closures so that we
# don't clutter the code below with lots of branching.
if self.windowed:
# If we're windowed, inputs are stateful AdjustedArrays. We don't
# need to do any preparation before forwarding to real_compute, but
# we need to call `next` on them if we want to skip an iteration.
def prepare_inputs():
return inputs
def skip_this_input():
for w in inputs:
next(w)
else:
# If we're not windowed, inputs are just ndarrays. We need to
# slice out a single row when forwarding to real_compute, but we
# don't need to do anything to skip an input.
def prepare_inputs():
# i is the loop iteration variable below.
return [a[[i]] for a in inputs]
def skip_this_input():
pass
results = []
samples = iter(to_sample)
next_sample = next(samples)
for i, compute_date in enumerate(dates):
if next_sample == compute_date:
results.append(
real_compute(
prepare_inputs(),
dates[i:i + 1],
assets,
mask[i:i + 1],
)
)
try:
next_sample = next(samples)
except StopIteration:
                    # No more samples to take. Set next_sample to NaT,
                    # which compares False with any other datetime.
next_sample = pd_NaT
else:
skip_this_input()
# Copy results from previous sample period.
results.append(results[-1])
# We should have exhausted our sample dates.
try:
next_sample = next(samples)
except StopIteration:
pass
else:
raise AssertionError("Unconsumed sample date: %s" % next_sample)
# Concatenate stored results.
return vstack(results)
@classmethod
def make_downsampled_type(cls, other_base):
"""
Factory for making Downsampled{Filter,Factor,Classifier}.
"""
docstring = dedent(
"""
A {t} that defers to another {t} at lower-than-daily frequency.
Parameters
----------
term : {t}
{{frequency}}
"""
).format(t=other_base.__name__)
doc = format_docstring(
owner_name=other_base.__name__,
docstring=docstring,
formatters={'frequency': PIPELINE_DOWNSAMPLING_FREQUENCY_DOC},
)
return type(
'Downsampled' + other_base.__name__,
(cls, other_base,),
{'__doc__': doc,
'__module__': other_base.__module__},
        )

# === end of file: zipline/pipeline/mixins.py ===
from __future__ import unicode_literals
from contextlib import contextmanager
import errno
from functools import partial
from io import BytesIO
from subprocess import Popen, PIPE
from networkx import topological_sort
from six import iteritems
from zipline.pipeline.data import BoundColumn
from zipline.pipeline import Filter, Factor, Classifier, Term
from zipline.pipeline.term import AssetExists
class NoIPython(Exception):
pass
def delimit(delimiters, content):
"""
Surround `content` with the first and last characters of `delimiters`.
>>> delimit('[]', "foo") # doctest: +SKIP
'[foo]'
>>> delimit('""', "foo") # doctest: +SKIP
'"foo"'
"""
if len(delimiters) != 2:
raise ValueError(
"`delimiters` must be of length 2. Got %r" % delimiters
)
return ''.join([delimiters[0], content, delimiters[1]])
quote = partial(delimit, '""')
bracket = partial(delimit, '[]')
def begin_graph(f, name, **attrs):
writeln(f, "strict digraph %s {" % name)
writeln(f, "graph {}".format(format_attrs(attrs)))
def begin_cluster(f, name, **attrs):
attrs.setdefault("label", quote(name))
writeln(f, "subgraph cluster_%s {" % name)
writeln(f, "graph {}".format(format_attrs(attrs)))
def end_graph(f):
writeln(f, '}')
@contextmanager
def graph(f, name, **attrs):
begin_graph(f, name, **attrs)
yield
end_graph(f)
@contextmanager
def cluster(f, name, **attrs):
begin_cluster(f, name, **attrs)
yield
end_graph(f)
def roots(g):
"Get nodes from graph G with indegree 0"
return set(n for n, d in iteritems(g.in_degree()) if d == 0)
def filter_nodes(include_asset_exists, nodes):
if include_asset_exists:
return nodes
return filter(lambda n: n is not AssetExists(), nodes)
def _render(g, out, format_, include_asset_exists=False):
"""
Draw `g` as a graph to `out`, in format `format`.
Parameters
----------
g : zipline.pipeline.graph.TermGraph
Graph to render.
out : file-like object
format_ : str {'png', 'svg'}
Output format.
    include_asset_exists : bool
        Whether to include `AssetExists()` nodes in the rendered graph.
"""
graph_attrs = {'rankdir': 'TB', 'splines': 'ortho'}
cluster_attrs = {'style': 'filled', 'color': 'lightgoldenrod1'}
in_nodes = g.loadable_terms
out_nodes = list(g.outputs.values())
f = BytesIO()
with graph(f, "G", **graph_attrs):
# Write outputs cluster.
with cluster(f, 'Output', labelloc='b', **cluster_attrs):
for term in filter_nodes(include_asset_exists, out_nodes):
add_term_node(f, term)
# Write inputs cluster.
with cluster(f, 'Input', **cluster_attrs):
for term in filter_nodes(include_asset_exists, in_nodes):
add_term_node(f, term)
# Write intermediate results.
for term in filter_nodes(include_asset_exists,
topological_sort(g.graph)):
if term in in_nodes or term in out_nodes:
continue
add_term_node(f, term)
# Write edges
for source, dest in g.graph.edges():
if source is AssetExists() and not include_asset_exists:
continue
add_edge(f, id(source), id(dest))
cmd = ['dot', '-T', format_]
try:
proc = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
except OSError as e:
if e.errno == errno.ENOENT:
raise RuntimeError(
"Couldn't find `dot` graph layout program. "
"Make sure Graphviz is installed and `dot` is on your path."
)
else:
raise
f.seek(0)
proc_stdout, proc_stderr = proc.communicate(f.read())
if proc_stderr:
raise RuntimeError(
"Error(s) while rendering graph: %s" % proc_stderr.decode('utf-8')
)
out.write(proc_stdout)
def display_graph(g, format='svg', include_asset_exists=False):
"""
Display a TermGraph interactively from within IPython.
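    Example (a sketch, inside an IPython session; ``my_pipeline`` is
    hypothetical)::

        display_graph(my_pipeline.to_simple_graph('', AssetExists()), 'png')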
"""
try:
import IPython.display as display
except ImportError:
raise NoIPython("IPython is not installed. Can't display graph.")
if format == 'svg':
display_cls = display.SVG
elif format in ("jpeg", "png"):
display_cls = partial(display.Image, format=format, embed=True)
out = BytesIO()
_render(g, out, format, include_asset_exists=include_asset_exists)
return display_cls(data=out.getvalue())
def writeln(f, s):
f.write((s + '\n').encode('utf-8'))
def fmt(obj):
if isinstance(obj, Term):
r = obj.graph_repr()
else:
r = obj
return '"%s"' % r
def add_term_node(f, term):
declare_node(f, id(term), attrs_for_node(term))
def declare_node(f, name, attributes):
writeln(f, "{0} {1};".format(name, format_attrs(attributes)))
def add_edge(f, source, dest):
writeln(f, "{0} -> {1};".format(source, dest))
def attrs_for_node(term, **overrides):
attrs = {
'shape': 'box',
'colorscheme': 'pastel19',
'style': 'filled',
'label': fmt(term),
}
if isinstance(term, BoundColumn):
attrs['fillcolor'] = '1'
if isinstance(term, Factor):
attrs['fillcolor'] = '2'
elif isinstance(term, Filter):
attrs['fillcolor'] = '3'
elif isinstance(term, Classifier):
attrs['fillcolor'] = '4'
attrs.update(**overrides or {})
return attrs
def format_attrs(attrs):
"""
Format key, value pairs from attrs into graphviz attrs format
Examples
--------
>>> format_attrs({'key1': 'value1', 'key2': 'value2'}) # doctest: +SKIP
'[key1=value1, key2=value2]'
"""
if not attrs:
return ''
entries = ['='.join((key, value)) for key, value in iteritems(attrs)]
    return '[' + ', '.join(entries) + ']'

# === end of file: zipline/pipeline/visualize.py ===
import re
from itertools import chain
from numbers import Number
import numexpr
from numexpr.necompiler import getExprNames
from numpy import (
full,
inf,
)
from zipline.pipeline.term import Term, ComputableTerm
from zipline.utils.numpy_utils import bool_dtype
_VARIABLE_NAME_RE = re.compile("^(x_)([0-9]+)$")
# Map from op symbol to equivalent Python magic method name.
ops_to_methods = {
'+': '__add__',
'-': '__sub__',
'*': '__mul__',
'/': '__div__',
'%': '__mod__',
'**': '__pow__',
'&': '__and__',
'|': '__or__',
'^': '__xor__',
'<': '__lt__',
'<=': '__le__',
'==': '__eq__',
'!=': '__ne__',
'>=': '__ge__',
'>': '__gt__',
}
# Map from method name to op symbol.
methods_to_ops = {v: k for k, v in ops_to_methods.items()}
# Map from op symbol to equivalent Python magic method name after flipping
# arguments.
ops_to_commuted_methods = {
'+': '__radd__',
'-': '__rsub__',
'*': '__rmul__',
'/': '__rdiv__',
'%': '__rmod__',
'**': '__rpow__',
'&': '__rand__',
'|': '__ror__',
'^': '__rxor__',
'<': '__gt__',
'<=': '__ge__',
'==': '__eq__',
'!=': '__ne__',
'>=': '__le__',
'>': '__lt__',
}
unary_ops_to_methods = {
'-': '__neg__',
'~': '__invert__',
}
UNARY_OPS = {'-'}
MATH_BINOPS = {'+', '-', '*', '/', '**', '%'}
FILTER_BINOPS = {'&', '|'} # NumExpr doesn't support xor.
COMPARISONS = {'<', '<=', '!=', '>=', '>', '=='}
NUMEXPR_MATH_FUNCS = {
'sin',
'cos',
'tan',
'arcsin',
'arccos',
'arctan',
'sinh',
'cosh',
'tanh',
'arcsinh',
'arccosh',
'arctanh',
'log',
'log10',
'log1p',
'exp',
'expm1',
'sqrt',
'abs',
}
def _ensure_element(tup, elem):
"""
Create a tuple containing all elements of tup, plus elem.
Returns the new tuple and the index of elem in the new tuple.
"""
try:
return tup, tup.index(elem)
except ValueError:
return tuple(chain(tup, (elem,))), len(tup)
class BadBinaryOperator(TypeError):
"""
    Raised when a bad binary operation is encountered.
Parameters
----------
op : str
The attempted operation
left : zipline.computable.Term
The left hand side of the operation.
right : zipline.computable.Term
The right hand side of the operation.
"""
def __init__(self, op, left, right):
super(BadBinaryOperator, self).__init__(
"Can't compute {left} {op} {right}".format(
op=op,
left=type(left).__name__,
right=type(right).__name__,
)
)
def method_name_for_op(op, commute=False):
"""
Get the name of the Python magic method corresponding to `op`.
Parameters
----------
op : str {'+','-','*', '/','**','&','|','^','<','<=','==','!=','>=','>'}
The requested operation.
commute : bool
Whether to return the name of an equivalent method after flipping args.
Returns
-------
method_name : str
The name of the Python magic method corresponding to `op`.
If `commute` is True, returns the name of a method equivalent to `op`
with inputs flipped.
Examples
--------
>>> method_name_for_op('+')
'__add__'
>>> method_name_for_op('+', commute=True)
'__radd__'
>>> method_name_for_op('>')
'__gt__'
>>> method_name_for_op('>', commute=True)
'__lt__'
"""
if commute:
return ops_to_commuted_methods[op]
return ops_to_methods[op]
def unary_op_name(op):
return unary_ops_to_methods[op]
def is_comparison(op):
return op in COMPARISONS
class NumericalExpression(ComputableTerm):
"""
Term binding to a numexpr expression.
Parameters
----------
expr : string
A string suitable for passing to numexpr. All variables in 'expr'
should be of the form "x_i", where i is the index of the corresponding
factor input in 'binds'.
binds : tuple
A tuple of factors to use as inputs.
dtype : np.dtype
The dtype for the expression.
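    Example
    -------
    A sketch (``f`` and ``g`` stand for previously-constructed terms, and
    ``float64_dtype`` comes from ``zipline.utils.numpy_utils``)::

        NumericalExpression("x_0 + x_1", binds=(f, g), dtype=float64_dtype)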
"""
window_length = 0
def __new__(cls, expr, binds, dtype):
# We always allow filters to be used in windowed computations.
# Otherwise, an expression is window_safe if all its constituents are
# window_safe.
window_safe = (
(dtype == bool_dtype) or all(t.window_safe for t in binds)
)
return super(NumericalExpression, cls).__new__(
cls,
inputs=binds,
expr=expr,
dtype=dtype,
window_safe=window_safe,
)
def _init(self, expr, *args, **kwargs):
self._expr = expr
return super(NumericalExpression, self)._init(*args, **kwargs)
@classmethod
def _static_identity(cls, expr, *args, **kwargs):
return (
super(NumericalExpression, cls)._static_identity(*args, **kwargs),
expr,
)
def _validate(self):
"""
Ensure that our expression string has variables of the form x_0, x_1,
... x_(N - 1), where N is the length of our inputs.
"""
variable_names, _unused = getExprNames(self._expr, {})
expr_indices = []
for name in variable_names:
if name == 'inf':
continue
match = _VARIABLE_NAME_RE.match(name)
if not match:
raise ValueError("%r is not a valid variable name" % name)
expr_indices.append(int(match.group(2)))
expr_indices.sort()
expected_indices = list(range(len(self.inputs)))
if expr_indices != expected_indices:
raise ValueError(
"Expected %s for variable indices, but got %s" % (
expected_indices, expr_indices,
)
)
super(NumericalExpression, self)._validate()
def _compute(self, arrays, dates, assets, mask):
"""
Compute our stored expression string with numexpr.
"""
out = full(mask.shape, self.missing_value, dtype=self.dtype)
# This writes directly into our output buffer.
numexpr.evaluate(
self._expr,
local_dict={
"x_%d" % idx: array
for idx, array in enumerate(arrays)
},
global_dict={'inf': inf},
out=out,
)
return out
def _rebind_variables(self, new_inputs):
"""
Return self._expr with all variables rebound to the indices implied by
new_inputs.
"""
expr = self._expr
# If we have 11+ variables, some of our variable names may be
# substrings of other variable names. For example, we might have x_1,
# x_10, and x_100. By enumerating in reverse order, we ensure that
# every variable name which is a substring of another variable name is
# processed after the variable of which it is a substring. This
# guarantees that the substitution of any given variable index only
# ever affects exactly its own index. For example, if we have variables
# with indices going up to 100, we will process all of the x_1xx names
# before x_1x, which will be before x_1, so the substitution of x_1
# will not affect x_1x, which will not affect x_1xx.
for idx, input_ in reversed(list(enumerate(self.inputs))):
old_varname = "x_%d" % idx
# Temporarily rebind to x_temp_N so that we don't overwrite the
# same value multiple times.
temp_new_varname = "x_temp_%d" % new_inputs.index(input_)
expr = expr.replace(old_varname, temp_new_varname)
# Clear out the temp variables now that we've finished iteration.
return expr.replace("_temp_", "_")
def _merge_expressions(self, other):
"""
Merge the inputs of two NumericalExpressions into a single input tuple,
rewriting their respective string expressions to make input names
resolve correctly.
Returns a tuple of (new_self_expr, new_other_expr, new_inputs)
"""
new_inputs = tuple(set(self.inputs).union(other.inputs))
new_self_expr = self._rebind_variables(new_inputs)
new_other_expr = other._rebind_variables(new_inputs)
return new_self_expr, new_other_expr, new_inputs
def build_binary_op(self, op, other):
"""
Compute new expression strings and a new inputs tuple for combining
self and other with a binary operator.
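        For example (hypothetical), combining an expression over two inputs
        with a third, previously-unseen term under ``+`` yields
        ``("x_0 + x_1", "x_2", new_inputs)``, where ``new_inputs`` now
        includes the third term at index 2.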
"""
if isinstance(other, NumericalExpression):
self_expr, other_expr, new_inputs = self._merge_expressions(other)
elif isinstance(other, Term):
self_expr = self._expr
new_inputs, other_idx = _ensure_element(self.inputs, other)
other_expr = "x_%d" % other_idx
elif isinstance(other, Number):
self_expr = self._expr
other_expr = str(other)
new_inputs = self.inputs
else:
            raise BadBinaryOperator(op, self, other)
return self_expr, other_expr, new_inputs
@property
def bindings(self):
return {
"x_%d" % i: input_
for i, input_ in enumerate(self.inputs)
}
def __repr__(self):
return "{typename}(expr='{expr}', bindings={bindings})".format(
typename=type(self).__name__,
expr=self._expr,
bindings=self.bindings,
)
def graph_repr(self):
"""Short repr to use when rendering Pipeline graphs."""
# Replace any floating point numbers in the expression
# with their scientific notation
final = re.sub(r"[-+]?\d*\.\d+",
lambda x: format(float(x.group(0)), '.2E'),
self._expr)
return "Expression:\l {}\l".format(
final,
        )

# === end of file: zipline/pipeline/expression.py ===
from zipline.errors import UnsupportedPipelineOutput
from zipline.utils.input_validation import (
expect_element,
expect_types,
optional,
)
from .graph import ExecutionPlan, TermGraph
from .filters import Filter
from .term import AssetExists, ComputableTerm, Term
class Pipeline(object):
"""
A Pipeline object represents a collection of named expressions to be
compiled and executed by a PipelineEngine.
A Pipeline has two important attributes: 'columns', a dictionary of named
`Term` instances, and 'screen', a Filter representing criteria for
including an asset in the results of a Pipeline.
To compute a pipeline in the context of a TradingAlgorithm, users must call
``attach_pipeline`` in their ``initialize`` function to register that the
pipeline should be computed each trading day. The outputs of a pipeline on
a given day can be accessed by calling ``pipeline_output`` in
``handle_data`` or ``before_trading_start``.
Parameters
----------
columns : dict, optional
Initial columns.
screen : zipline.pipeline.term.Filter, optional
Initial screen.
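    Example
    -------
    A minimal pipeline with one column (a sketch; assumes the standard
    ``USEquityPricing`` dataset is available)::

        from zipline.pipeline import Pipeline
        from zipline.pipeline.data import USEquityPricing

        pipe = Pipeline(
            columns={'latest_close': USEquityPricing.close.latest},
        )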
"""
__slots__ = ('_columns', '_screen', '__weakref__')
@expect_types(
columns=optional(dict),
screen=optional(Filter),
)
def __init__(self, columns=None, screen=None):
if columns is None:
columns = {}
validate_column = self.validate_column
for column_name, term in columns.items():
validate_column(column_name, term)
if not isinstance(term, ComputableTerm):
raise TypeError(
"Column {column_name!r} contains an invalid pipeline term "
"({term}). Did you mean to append '.latest'?".format(
column_name=column_name, term=term,
)
)
self._columns = columns
self._screen = screen
@property
def columns(self):
"""
The columns registered with this pipeline.
"""
return self._columns
@property
def screen(self):
"""
The screen applied to the rows of this pipeline.
"""
return self._screen
@expect_types(term=Term, name=str)
def add(self, term, name, overwrite=False):
"""
Add a column.
The results of computing `term` will show up as a column in the
DataFrame produced by running this pipeline.
Parameters
----------
        term : zipline.pipeline.Term
A Filter, Factor, or Classifier to add to the pipeline.
name : str
Name of the column to add.
overwrite : bool
Whether to overwrite the existing entry if we already have a column
named `name`.
"""
self.validate_column(name, term)
columns = self.columns
if name in columns:
if overwrite:
self.remove(name)
else:
raise KeyError("Column '{}' already exists.".format(name))
if not isinstance(term, ComputableTerm):
raise TypeError(
"{term} is not a valid pipeline column. Did you mean to "
"append '.latest'?".format(term=term)
)
self._columns[name] = term
@expect_types(name=str)
def remove(self, name):
"""
Remove a column.
Parameters
----------
name : str
The name of the column to remove.
Raises
------
KeyError
If `name` is not in self.columns.
Returns
-------
removed : zipline.pipeline.term.Term
The removed term.
"""
return self.columns.pop(name)
@expect_types(screen=Filter, overwrite=(bool, int))
def set_screen(self, screen, overwrite=False):
"""
Set a screen on this Pipeline.
Parameters
----------
        screen : zipline.pipeline.Filter
The filter to apply as a screen.
overwrite : bool
Whether to overwrite any existing screen. If overwrite is False
and self.screen is not None, we raise an error.
"""
if self._screen is not None and not overwrite:
raise ValueError(
"set_screen() called with overwrite=False and screen already "
"set.\n"
"If you want to apply multiple filters as a screen use "
"set_screen(filter1 & filter2 & ...).\n"
"If you want to replace the previous screen with a new one, "
"use set_screen(new_filter, overwrite=True)."
)
self._screen = screen
def to_execution_plan(self,
screen_name,
default_screen,
all_dates,
start_date,
end_date):
"""
Compile into an ExecutionPlan.
Parameters
----------
screen_name : str
Name to supply for self.screen.
default_screen : zipline.pipeline.term.Term
Term to use as a screen if self.screen is None.
all_dates : pd.DatetimeIndex
A calendar of dates to use to calculate starts and ends for each
term.
start_date : pd.Timestamp
The first date of requested output.
end_date : pd.Timestamp
The last date of requested output.
"""
return ExecutionPlan(
self._prepare_graph_terms(screen_name, default_screen),
all_dates,
start_date,
end_date,
)
def to_simple_graph(self, screen_name, default_screen):
"""
Compile into a simple TermGraph with no extra row metadata.
Parameters
----------
screen_name : str
Name to supply for self.screen.
default_screen : zipline.pipeline.term.Term
Term to use as a screen if self.screen is None.
"""
return TermGraph(
self._prepare_graph_terms(screen_name, default_screen)
)
def _prepare_graph_terms(self, screen_name, default_screen):
"""Helper for to_graph and to_execution_plan."""
columns = self.columns.copy()
screen = self.screen
if screen is None:
screen = default_screen
columns[screen_name] = screen
return columns
@expect_element(format=('svg', 'png', 'jpeg'))
def show_graph(self, format='svg'):
"""
Render this Pipeline as a DAG.
Parameters
----------
format : {'svg', 'png', 'jpeg'}
Image format to render with. Default is 'svg'.
"""
g = self.to_simple_graph('', AssetExists())
if format == 'svg':
return g.svg
elif format == 'png':
return g.png
elif format == 'jpeg':
return g.jpeg
else:
# We should never get here because of the expect_element decorator
# above.
raise AssertionError("Unknown graph format %r." % format)
@staticmethod
@expect_types(term=Term, column_name=str)
def validate_column(column_name, term):
if term.ndim == 1:
raise UnsupportedPipelineOutput(column_name=column_name, term=term) | zipline-live2-vk | /zipline-live2-vk-1.3.0.7.0.18.tar.gz/zipline-live2-vk-1.3.0.7.0.18/zipline/pipeline/pipeline.py | pipeline.py |
from abc import (
ABCMeta,
abstractmethod,
)
from uuid import uuid4
from six import (
iteritems,
with_metaclass,
)
from numpy import array
from pandas import DataFrame, MultiIndex
from toolz import groupby, juxt
from toolz.curried.operator import getitem
from zipline.lib.adjusted_array import ensure_adjusted_array, ensure_ndarray
from zipline.errors import NoFurtherDataError
from zipline.utils.numpy_utils import (
as_column,
repeat_first_axis,
repeat_last_axis,
)
from zipline.utils.pandas_utils import explode
from .term import AssetExists, InputDates, LoadableTerm
from zipline.utils.date_utils import compute_date_range_chunks
from zipline.utils.pandas_utils import categorical_df_concat
from zipline.utils.sharedoc import copydoc
class PipelineEngine(with_metaclass(ABCMeta)):
@abstractmethod
def run_pipeline(self, pipeline, start_date, end_date):
"""
Compute values for ``pipeline`` between ``start_date`` and
``end_date``.
Returns a DataFrame with a MultiIndex of (date, asset) pairs.
Parameters
----------
pipeline : zipline.pipeline.Pipeline
The pipeline to run.
start_date : pd.Timestamp
Start date of the computed matrix.
end_date : pd.Timestamp
End date of the computed matrix.
Returns
-------
result : pd.DataFrame
A frame of computed results.
The ``result`` columns correspond to the entries of
`pipeline.columns`, which should be a dictionary mapping strings to
instances of :class:`zipline.pipeline.term.Term`.
For each date between ``start_date`` and ``end_date``, ``result``
will contain a row for each asset that passed `pipeline.screen`.
A screen of ``None`` indicates that a row should be returned for
each asset that existed each day.
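Examples
--------
A sketch, assuming ``engine`` is a concrete PipelineEngine and the
dates are aligned to its trading calendar::
    result = engine.run_pipeline(pipe, start_date, end_date)
    # result is indexed by (date, asset) pairs.
    result.loc[start_date]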
"""
raise NotImplementedError("run_pipeline")
@abstractmethod
def run_chunked_pipeline(self, pipeline, start_date, end_date, chunksize):
"""
Compute values for `pipeline` in number of days equal to `chunksize`
and return stitched up result. Computing in chunks is useful for
pipelines computed over a long period of time.
Parameters
----------
pipeline : Pipeline
The pipeline to run.
start_date : pd.Timestamp
The start date to run the pipeline for.
end_date : pd.Timestamp
The end date to run the pipeline for.
chunksize : int
The number of days to execute at a time.
Returns
-------
result : pd.DataFrame
A frame of computed results.
The ``result`` columns correspond to the entries of
`pipeline.columns`, which should be a dictionary mapping strings to
instances of :class:`zipline.pipeline.term.Term`.
For each date between ``start_date`` and ``end_date``, ``result``
will contain a row for each asset that passed `pipeline.screen`.
A screen of ``None`` indicates that a row should be returned for
each asset that existed each day.
See Also
--------
:meth:`zipline.pipeline.engine.PipelineEngine.run_pipeline`
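Examples
--------
A sketch computing a long date range in roughly quarterly pieces,
assuming ``engine`` is a concrete PipelineEngine::
    result = engine.run_chunked_pipeline(pipe, start, end, chunksize=63)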
"""
raise NotImplementedError("run_chunked_pipeline")
class NoEngineRegistered(Exception):
"""
Raised if a user tries to call pipeline_output in an algorithm that hasn't
set up a pipeline engine.
"""
class ExplodingPipelineEngine(PipelineEngine):
"""
A PipelineEngine that doesn't do anything.
"""
def run_pipeline(self, pipeline, start_date, end_date):
raise NoEngineRegistered(
"Attempted to run a pipeline but no pipeline "
"resources were registered."
)
def run_chunked_pipeline(self, pipeline, start_date, end_date, chunksize):
raise NoEngineRegistered(
"Attempted to run a chunked pipeline but no pipeline "
"resources were registered."
)
def default_populate_initial_workspace(initial_workspace,
root_mask_term,
execution_plan,
dates,
assets):
"""The default implementation for ``populate_initial_workspace``. This
function returns the ``initial_workspace`` argument without making any
modifications.
Parameters
----------
initial_workspace : dict[array-like]
The initial workspace before we have populated it with any cached
terms.
root_mask_term : Term
The root mask term, normally ``AssetExists()``. This is needed to
compute the dates for individual terms.
execution_plan : ExecutionPlan
The execution plan for the pipeline being run.
dates : pd.DatetimeIndex
All of the dates being requested in this pipeline run including
the extra dates for look back windows.
assets : pd.Int64Index
All of the assets that exist for the window being computed.
Returns
-------
populated_initial_workspace : dict[term, array-like]
The workspace to begin computations with.
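Examples
--------
A sketch of a custom implementation with the same signature that
pre-seeds a single term (``my_term`` and ``my_values`` are
hypothetical)::
    def populate_with_cache(initial_workspace, root_mask_term,
                            execution_plan, dates, assets):
        workspace = dict(initial_workspace)
        workspace[my_term] = my_values
        return workspace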
"""
return initial_workspace
class SimplePipelineEngine(PipelineEngine):
"""
PipelineEngine class that computes each term independently.
Parameters
----------
get_loader : callable
A function that is given a loadable term and returns a PipelineLoader
to use to retrieve raw data for that term.
calendar : DatetimeIndex
Array of dates to consider as trading days when computing a range
between a fixed start and end.
asset_finder : zipline.assets.AssetFinder
An AssetFinder instance. We depend on the AssetFinder to determine
which assets are in the top-level universe at any point in time.
populate_initial_workspace : callable, optional
A function which will be used to populate the initial workspace when
computing a pipeline. See
:func:`zipline.pipeline.engine.default_populate_initial_workspace`
for more info.
See Also
--------
:func:`zipline.pipeline.engine.default_populate_initial_workspace`
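Examples
--------
A construction sketch, assuming ``loader`` is a PipelineLoader and
``calendar`` and ``asset_finder`` are already built::
    engine = SimplePipelineEngine(
        get_loader=lambda column: loader,
        calendar=calendar,
        asset_finder=asset_finder,
    )
    result = engine.run_pipeline(pipe, start_date, end_date)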
"""
__slots__ = (
'_get_loader',
'_calendar',
'_finder',
'_root_mask_term',
'_root_mask_dates_term',
'_populate_initial_workspace',
)
def __init__(self,
get_loader,
calendar,
asset_finder,
populate_initial_workspace=None):
self._get_loader = get_loader
self._calendar = calendar
self._finder = asset_finder
self._root_mask_term = AssetExists()
self._root_mask_dates_term = InputDates()
self._populate_initial_workspace = (
populate_initial_workspace or default_populate_initial_workspace
)
def run_pipeline(self, pipeline, start_date, end_date):
"""
Compute a pipeline.
The algorithm implemented here can be broken down into the following
stages:
0. Build a dependency graph of all terms in `pipeline`. Topologically
sort the graph to determine an order in which we can compute the
terms.
1. Ask our AssetFinder for a "lifetimes matrix", which should contain,
for each date between start_date and end_date, a boolean value for
each known asset indicating whether the asset existed on that date.
2. Compute each term in the dependency order determined in (0), caching
the results in a dictionary so that they can be fed into future
terms.
3. For each date, determine the number of assets passing
pipeline.screen. The sum, N, of all these values is the total
number of rows in our output frame, so we pre-allocate an output
array of length N for each factor in `terms`.
4. Fill in the arrays allocated in (3) by copying computed values from
our output cache into the corresponding rows.
5. Stick the values computed in (4) into a DataFrame and return it.
Step 0 is performed by ``Pipeline.to_execution_plan``.
Step 1 is performed in ``SimplePipelineEngine._compute_root_mask``.
Step 2 is performed in ``SimplePipelineEngine.compute_chunk``.
Steps 3, 4, and 5 are performed in ``SimplePipelineEngine._to_narrow``.
Parameters
----------
pipeline : zipline.pipeline.Pipeline
The pipeline to run.
start_date : pd.Timestamp
Start date of the computed matrix.
end_date : pd.Timestamp
End date of the computed matrix.
Returns
-------
result : pd.DataFrame
A frame of computed results.
The ``result`` columns correspond to the entries of
`pipeline.columns`, which should be a dictionary mapping strings to
instances of :class:`zipline.pipeline.term.Term`.
For each date between ``start_date`` and ``end_date``, ``result``
will contain a row for each asset that passed `pipeline.screen`.
A screen of ``None`` indicates that a row should be returned for
each asset that existed each day.
See Also
--------
:meth:`zipline.pipeline.engine.PipelineEngine.run_pipeline`
:meth:`zipline.pipeline.engine.PipelineEngine.run_chunked_pipeline`
"""
if end_date < start_date:
raise ValueError(
"start_date must be before or equal to end_date \n"
"start_date=%s, end_date=%s" % (start_date, end_date)
)
screen_name = uuid4().hex
graph = pipeline.to_execution_plan(
screen_name,
self._root_mask_term,
self._calendar,
start_date,
end_date,
)
extra_rows = graph.extra_rows[self._root_mask_term]
root_mask = self._compute_root_mask(start_date, end_date, extra_rows)
dates, assets, root_mask_values = explode(root_mask)
initial_workspace = self._populate_initial_workspace(
{
self._root_mask_term: root_mask_values,
self._root_mask_dates_term: as_column(dates.values)
},
self._root_mask_term,
graph,
dates,
assets,
)
results = self.compute_chunk(
graph,
dates,
assets,
initial_workspace,
)
return self._to_narrow(
graph.outputs,
results,
results.pop(screen_name),
dates[extra_rows:],
assets,
)
@copydoc(PipelineEngine.run_chunked_pipeline)
def run_chunked_pipeline(self, pipeline, start_date, end_date, chunksize):
ranges = compute_date_range_chunks(
self._calendar,
start_date,
end_date,
chunksize,
)
chunks = [self.run_pipeline(pipeline, s, e) for s, e in ranges]
if len(chunks) == 1:
# OPTIMIZATION: Don't make an extra copy in `categorical_df_concat`
# if we don't have to.
return chunks[0]
return categorical_df_concat(chunks, inplace=True)
def _compute_root_mask(self, start_date, end_date, extra_rows):
"""
Compute a lifetimes matrix from our AssetFinder, then drop columns that
didn't exist at all during the query dates.
Parameters
----------
start_date : pd.Timestamp
Base start date for the matrix.
end_date : pd.Timestamp
End date for the matrix.
extra_rows : int
Number of extra rows to compute before `start_date`.
Extra rows are needed by terms like moving averages that require a
trailing window of data.
Returns
-------
lifetimes : pd.DataFrame
Frame of dtype `bool` containing dates from `extra_rows` days
before `start_date`, continuing through to `end_date`. The
returned frame contains as columns all assets in our AssetFinder
that existed for at least one day between `start_date` and
`end_date`.
"""
calendar = self._calendar
finder = self._finder
start_idx, end_idx = self._calendar.slice_locs(start_date, end_date)
if start_idx < extra_rows:
raise NoFurtherDataError.from_lookback_window(
initial_message="Insufficient data to compute Pipeline:",
first_date=calendar[0],
lookback_start=start_date,
lookback_length=extra_rows,
)
# Build lifetimes matrix reaching back to `extra_rows` days before
# `start_date.`
lifetimes = finder.lifetimes(
calendar[start_idx - extra_rows:end_idx],
include_start_date=False,
# TODO: update this when we add domains.
country_codes={'??', 'US'},
)
if lifetimes.index[extra_rows] != start_date:
raise ValueError(
'The first date of the lifetimes matrix does not match the'
' start date of the pipeline. Did you forget to align the'
' start_date to the trading calendar?'
)
if lifetimes.index[-1] != end_date:
raise ValueError(
'The last date of the lifetimes matrix does not match the'
' end date of the pipeline. Did you forget to align the'
' end_date to the trading calendar?'
)
if not lifetimes.columns.is_unique:
    columns = lifetimes.columns
    duplicated = columns[columns.duplicated()].unique()
    raise AssertionError("Duplicated sids: %s" % duplicated)
# Filter out columns that didn't exist from the farthest look back
# window through the end of the requested dates.
existed = lifetimes.any()
ret = lifetimes.loc[:, existed]
shape = ret.shape
if shape[0] * shape[1] == 0:
raise ValueError(
"Found only empty asset-days between {} and {}.\n"
"This probably means that either your asset db is out of date"
" or that you're trying to run a Pipeline during a period with"
" no market days.".format(start_date, end_date),
)
return ret
@staticmethod
def _inputs_for_term(term, workspace, graph):
"""
Compute inputs for the given term.
This is mostly complicated by the fact that for each input we store as
many rows as will be necessary to serve **any** computation requiring
that input.
"""
offsets = graph.offset
out = []
if term.windowed:
# If term is windowed, then all input data should be instances of
# AdjustedArray.
for input_ in term.inputs:
adjusted_array = ensure_adjusted_array(
workspace[input_], input_.missing_value,
)
out.append(
adjusted_array.traverse(
window_length=term.window_length,
offset=offsets[term, input_],
)
)
else:
# If term is not windowed, input_data may be an AdjustedArray or
# np.ndarray. Coerce the former to the latter.
for input_ in term.inputs:
input_data = ensure_ndarray(workspace[input_])
offset = offsets[term, input_]
# OPTIMIZATION: Don't make a copy by doing input_data[0:] if
# offset is zero.
if offset:
input_data = input_data[offset:]
out.append(input_data)
return out
def get_loader(self, term):
return self._get_loader(term)
def compute_chunk(self, graph, dates, assets, initial_workspace):
"""
Compute the Pipeline terms in the graph for the requested start and end
dates.
Parameters
----------
graph : zipline.pipeline.graph.TermGraph
dates : pd.DatetimeIndex
Row labels for our root mask.
assets : pd.Int64Index
Column labels for our root mask.
initial_workspace : dict
Map from term -> output.
Must contain at least entry for `self._root_mask_term` whose shape
is `(len(dates), len(assets))`, but may contain additional
pre-computed terms for testing or optimization purposes.
Returns
-------
results : dict
Dictionary mapping requested results to outputs.
"""
self._validate_compute_chunk_params(dates, assets, initial_workspace)
get_loader = self.get_loader
# Copy the supplied initial workspace so we don't mutate it in place.
workspace = initial_workspace.copy()
refcounts = graph.initial_refcounts(workspace)
execution_order = graph.execution_order(refcounts)
# If loadable terms share the same loader and extra_rows, load them all
# together.
loadable_terms = graph.loadable_terms
loader_group_key = juxt(get_loader, getitem(graph.extra_rows))
loader_groups = groupby(
loader_group_key,
# Only produce loader groups for the terms we expect to load. This
# ensures that we can run pipelines for graphs where we don't have
# a loader registered for an atomic term if all the dependencies of
# that term were supplied in the initial workspace.
(t for t in execution_order if t in loadable_terms),
)
for term in graph.execution_order(refcounts):
# `term` may have been supplied in `initial_workspace`, and in the
# future we may pre-compute loadable terms coming from the same
# dataset. In either case, we will already have an entry for this
# term, which we shouldn't re-compute.
if term in workspace:
continue
# Asset labels are always the same, but date labels vary by how
# many extra rows are needed.
mask, mask_dates = graph.mask_and_dates_for_term(
term,
self._root_mask_term,
workspace,
dates,
)
if isinstance(term, LoadableTerm):
to_load = sorted(
loader_groups[loader_group_key(term)],
key=lambda t: t.dataset
)
loader = get_loader(term)
loaded = loader.load_adjusted_array(
to_load, mask_dates, assets, mask,
)
assert set(loaded) == set(to_load), (
'loader did not return an AdjustedArray for each column\n'
'expected: %r\n'
'got: %r' % (sorted(to_load), sorted(loaded))
)
workspace.update(loaded)
else:
workspace[term] = term._compute(
self._inputs_for_term(term, workspace, graph),
mask_dates,
assets,
mask,
)
if term.ndim == 2:
assert workspace[term].shape == mask.shape
else:
assert workspace[term].shape == (mask.shape[0], 1)
# Decref dependencies of ``term``, and clear any terms whose
# refcounts hit 0.
for garbage_term in graph.decref_dependencies(term, refcounts):
del workspace[garbage_term]
out = {}
graph_extra_rows = graph.extra_rows
for name, term in iteritems(graph.outputs):
# Truncate off extra rows from outputs.
out[name] = workspace[term][graph_extra_rows[term]:]
return out
def _to_narrow(self, terms, data, mask, dates, assets):
"""
Convert raw computed pipeline results into a DataFrame for public APIs.
Parameters
----------
terms : dict[str -> Term]
Dict mapping column names to terms.
data : dict[str -> ndarray[ndim=2]]
Dict mapping column names to computed results for those names.
mask : ndarray[bool, ndim=2]
Mask array of values to keep.
dates : ndarray[datetime64, ndim=1]
Row index for arrays `data` and `mask`.
assets : ndarray[int64, ndim=1]
Column index for arrays `data` and `mask`.
Returns
-------
results : pd.DataFrame
The indices of `results` are as follows:
index : two-tiered MultiIndex of (date, asset).
Contains an entry for each (date, asset) pair corresponding to
a `True` value in `mask`.
columns : Index of str
One column per entry in `data`.
If mask[date, asset] is True, then result.loc[(date, asset), colname]
will contain the value of data[colname][date, asset].
"""
if not mask.any():
# Manually handle the empty DataFrame case. This is a workaround
# to pandas failing to tz_localize an empty dataframe with a
# MultiIndex. It also saves us the work of applying a known-empty
# mask to each array.
#
# Slicing `dates` here to preserve pandas metadata.
empty_dates = dates[:0]
empty_assets = array([], dtype=object)
return DataFrame(
data={
name: array([], dtype=arr.dtype)
for name, arr in iteritems(data)
},
index=MultiIndex.from_arrays([empty_dates, empty_assets]),
)
resolved_assets = array(self._finder.retrieve_all(assets))
dates_kept = repeat_last_axis(dates.values, len(assets))[mask]
assets_kept = repeat_first_axis(resolved_assets, len(dates))[mask]
final_columns = {}
for name in data:
# Each term that computed an output has its postprocess method
# called on the filtered result.
#
# As of Mon May 2 15:38:47 2016, we only use this to convert
# LabelArrays into categoricals.
final_columns[name] = terms[name].postprocess(data[name][mask])
return DataFrame(
data=final_columns,
index=MultiIndex.from_arrays([dates_kept, assets_kept]),
).tz_localize('UTC', level=0)
def _validate_compute_chunk_params(self, dates, assets, initial_workspace):
"""
Verify that the values passed to compute_chunk are well-formed.
"""
root = self._root_mask_term
clsname = type(self).__name__
# Writing this out explicitly so this errors in testing if we change
# the name without updating this line.
compute_chunk_name = self.compute_chunk.__name__
if root not in initial_workspace:
raise AssertionError(
"root_mask values not supplied to {cls}.{method}".format(
cls=clsname,
method=compute_chunk_name,
)
)
shape = initial_workspace[root].shape
implied_shape = len(dates), len(assets)
if shape != implied_shape:
raise AssertionError(
"root_mask shape is {shape}, but received dates/assets "
"imply that shape should be {implied}".format(
shape=shape,
implied=implied_shape,
)
) | zipline-live2-vk | /zipline-live2-vk-1.3.0.7.0.18.tar.gz/zipline-live2-vk-1.3.0.7.0.18/zipline/pipeline/engine.py | engine.py |
from networkx import (
DiGraph,
topological_sort,
)
from six import iteritems, itervalues
from zipline.utils.memoize import lazyval
from zipline.pipeline.visualize import display_graph
from .term import LoadableTerm
class CyclicDependency(Exception):
pass
class TermGraph(object):
"""
An abstract representation of Pipeline Term dependencies.
This class does not keep any additional metadata about any term relations
other than dependency ordering. As such it is only useful in contexts
where you care exclusively about order properties (for example, when
drawing visualizations of execution order).
Parameters
----------
terms : dict
A dict mapping names to final output terms.
Attributes
----------
outputs
Methods
-------
ordered()
Return a topologically-sorted iterator over the terms in self.
See Also
--------
ExecutionPlan
"""
def __init__(self, terms):
self.graph = DiGraph()
self._frozen = False
parents = set()
for term in itervalues(terms):
self._add_to_graph(term, parents)
# No parents should be left between top-level terms.
assert not parents
self._outputs = terms
# Mark that no more terms should be added to the graph.
self._frozen = True
def _add_to_graph(self, term, parents):
"""
Add a term and all its children to ``graph``.
``parents`` is the set of all the parents of ``term`` that we've added
so far. It is only used to detect dependency cycles.
"""
if self._frozen:
raise ValueError(
"Can't mutate %s after construction." % type(self).__name__
)
# If we've seen this node already as a parent of the current traversal,
# it means we have an unsatisfiable dependency. This should only be
# possible if the term's inputs are mutated after construction.
if term in parents:
raise CyclicDependency(term)
parents.add(term)
self.graph.add_node(term)
for dependency in term.dependencies:
self._add_to_graph(dependency, parents)
self.graph.add_edge(dependency, term)
parents.remove(term)
@property
def outputs(self):
"""
Dict mapping names to designated output terms.
"""
return self._outputs
def execution_order(self, refcounts):
"""
Return a topologically-sorted iterator over the terms in ``self`` which
need to be computed.
"""
return iter(topological_sort(
self.graph.subgraph(
{term for term, refcount in refcounts.items() if refcount > 0},
),
))
def ordered(self):
return iter(topological_sort(self.graph))
@lazyval
def loadable_terms(self):
return {term for term in self.graph if isinstance(term, LoadableTerm)}
@lazyval
def jpeg(self):
return display_graph(self, 'jpeg')
@lazyval
def png(self):
return display_graph(self, 'png')
@lazyval
def svg(self):
return display_graph(self, 'svg')
def _repr_png_(self):
return self.png.data
def initial_refcounts(self, initial_terms):
"""
Calculate initial refcounts for execution of this graph.
Parameters
----------
initial_terms : iterable[Term]
An iterable of terms that were pre-computed before graph execution.
Notes
-----
Each node starts with a refcount equal to its outdegree, and output
nodes get one extra reference to ensure that they're still in the graph
at the end of execution.
"""
refcounts = self.graph.out_degree()
for t in self.outputs.values():
refcounts[t] += 1
for t in initial_terms:
self._decref_dependencies_recursive(t, refcounts, set())
return refcounts
def _decref_dependencies_recursive(self, term, refcounts, garbage):
"""
Decrement terms recursively.
Notes
-----
This should only be used to build the initial workspace, after that we
should use:
:meth:`~zipline.pipeline.graph.TermGraph.decref_dependencies`
"""
# Edges are tuple of (from, to).
for parent, _ in self.graph.in_edges([term]):
refcounts[parent] -= 1
# No one else depends on this term. Remove it from the
# workspace to conserve memory.
if refcounts[parent] == 0:
garbage.add(parent)
self._decref_dependencies_recursive(parent, refcounts, garbage)
def decref_dependencies(self, term, refcounts):
"""
Decrement in-edges for ``term`` after computation.
Parameters
----------
term : zipline.pipeline.Term
The term whose parents should be decref'ed.
refcounts : dict[Term -> int]
Dictionary of refcounts.
Returns
-------
garbage : set[Term]
Terms whose refcounts hit zero after decrefing.
"""
garbage = set()
# Edges are tuple of (from, to).
for parent, _ in self.graph.in_edges([term]):
refcounts[parent] -= 1
# No one else depends on this term. Remove it from the
# workspace to conserve memory.
if refcounts[parent] == 0:
garbage.add(parent)
return garbage
class ExecutionPlan(TermGraph):
"""
Graph representation of Pipeline Term dependencies that includes metadata
about extra rows required to perform computations.
Each node in the graph has an `extra_rows` attribute, indicating how many,
if any, extra rows we should compute for the node. Extra rows are most
often needed when a term is an input to a rolling window computation. For
example, if we compute a 30 day moving average of price from day X to day
Y, we need to load price data for the range from day (X - 29) to day Y.
Parameters
----------
terms : dict
A dict mapping names to final output terms.
all_dates : pd.DatetimeIndex
An index of all known trading days for which ``terms`` will be
computed.
start_date : pd.Timestamp
The first date for which output is requested for ``terms``.
end_date : pd.Timestamp
The last date for which output is requested for ``terms``.
Attributes
----------
outputs
offset
extra_rows
Methods
-------
ordered()
Return a topologically-sorted iterator over the terms in self.
"""
def __init__(self,
terms,
all_dates,
start_date,
end_date,
min_extra_rows=0):
super(ExecutionPlan, self).__init__(terms)
for term in terms.values():
self.set_extra_rows(
term,
all_dates,
start_date,
end_date,
min_extra_rows=min_extra_rows,
)
def set_extra_rows(self,
term,
all_dates,
start_date,
end_date,
min_extra_rows):
"""
Compute ``extra_rows`` for transitive dependencies of ``root_terms``
"""
# A term can require that additional extra rows beyond the minimum be
# computed. This is most often used with downsampled terms, which need
# to ensure that the first date is a computation date.
extra_rows_for_term = term.compute_extra_rows(
all_dates,
start_date,
end_date,
min_extra_rows,
)
if extra_rows_for_term < min_extra_rows:
raise ValueError(
"term %s requested fewer rows than the minimum of %d" % (
term, min_extra_rows,
)
)
self._ensure_extra_rows(term, extra_rows_for_term)
for dependency, additional_extra_rows in term.dependencies.items():
self.set_extra_rows(
dependency,
all_dates,
start_date,
end_date,
min_extra_rows=extra_rows_for_term + additional_extra_rows,
)
@lazyval
def offset(self):
"""
For all pairs (term, input) such that `input` is an input to `term`,
compute a mapping::
(term, input) -> offset(term, input)
where ``offset(term, input)`` is the number of rows that ``term``
should truncate off the raw array produced for ``input`` before using
it. We compute this value as follows::
offset(term, input) = (extra_rows_computed(input)
- extra_rows_computed(term)
- requested_extra_rows(term, input))
Examples
--------
Case 1
~~~~~~
Factor A needs 5 extra rows of USEquityPricing.close, and Factor B
needs 3 extra rows of the same. Factor A also requires 5 extra rows of
USEquityPricing.high, which no other Factor uses. We don't require any
extra rows of Factor A or Factor B
We load 5 extra rows of both `close` and `high` to ensure we can
service Factor A, and the following offsets get computed::
offset[Factor A, USEquityPricing.close] == (5 - 0) - 5 == 0
offset[Factor A, USEquityPricing.high] == (5 - 0) - 5 == 0
offset[Factor B, USEquityPricing.close] == (5 - 0) - 3 == 2
offset[Factor B, USEquityPricing.high] raises KeyError.
Case 2
~~~~~~
Factor A needs 5 extra rows of USEquityPricing.close, and Factor B
needs 3 extra rows of Factor A, and Factor B needs 2 extra rows of
USEquityPricing.close.
We load 8 extra rows of USEquityPricing.close (enough to compute 5 extra
rows of Factor A), and the following offsets get computed::
offset[Factor A, USEquityPricing.close] == (8 - 3) - 5 == 0
offset[Factor B, USEquityPricing.close] == (8 - 0) - 2 == 6
offset[Factor B, Factor A] == (3 - 0) - 3 == 0
Notes
-----
`offset(term, input) >= 0` for all valid pairs, since `input` must be
an input to `term` if the pair appears in the mapping.
This value is useful because we load enough rows of each input to serve
all possible dependencies. However, for any given dependency, we only
want to compute using the actual number of required extra rows for that
dependency. We can do so by truncating off the first `offset` rows of
the loaded data for `input`.
See Also
--------
zipline.pipeline.graph.TermGraph.offset
zipline.pipeline.engine.SimplePipelineEngine._inputs_for_term
zipline.pipeline.engine.SimplePipelineEngine._mask_and_dates_for_term
"""
extra = self.extra_rows
return {
# Another way of thinking about this is:
# How much bigger is the array for ``dep`` compared to ``term``?
# How much of that difference did I ask for?
(term, dep): (extra[dep] - extra[term]) - requested_extra_rows
for term in self.graph
for dep, requested_extra_rows in term.dependencies.items()
}
@lazyval
def extra_rows(self):
"""
A dict mapping `term` -> `# of extra rows to load/compute of `term`.
Notes
-----
This value depends on the other terms in the graph that require `term`
**as an input**. This is not to be confused with `term.dependencies`,
which describes how many additional rows of `term`'s inputs we need to
load, and which is determined entirely by `Term` itself.
Examples
--------
Our graph contains the following terms:
A = SimpleMovingAverage([USEquityPricing.high], window_length=5)
B = SimpleMovingAverage([USEquityPricing.high], window_length=10)
C = SimpleMovingAverage([USEquityPricing.low], window_length=8)
To compute N rows of A, we need N + 4 extra rows of `high`.
To compute N rows of B, we need N + 9 extra rows of `high`.
To compute N rows of C, we need N + 7 extra rows of `low`.
We store the following extra_row requirements:
self.extra_rows[high] = 9 # Ensures that we can service B.
self.extra_rows[low] = 7
See Also
--------
zipline.pipeline.graph.TermGraph.offset
zipline.pipeline.term.Term.dependencies
"""
return {
term: attrs['extra_rows']
for term, attrs in iteritems(self.graph.node)
}
def _ensure_extra_rows(self, term, N):
"""
Ensure that we're going to compute at least N extra rows of `term`.
"""
attrs = self.graph.node[term]
attrs['extra_rows'] = max(N, attrs.get('extra_rows', 0))
def mask_and_dates_for_term(self,
term,
root_mask_term,
workspace,
all_dates):
"""
Load mask and mask row labels for term.
Parameters
----------
term : Term
The term to load the mask and labels for.
root_mask_term : Term
The term that represents the root asset exists mask.
workspace : dict[Term, any]
The values that have been computed for each term.
all_dates : pd.DatetimeIndex
All of the dates that are being computed for in the pipeline.
Returns
-------
mask : np.ndarray
The correct mask for this term.
dates : np.ndarray
The slice of dates for this term.
"""
mask = term.mask
mask_offset = self.extra_rows[mask] - self.extra_rows[term]
# This offset is computed against root_mask_term because that is what
# determines the shape of the top-level dates array.
dates_offset = (
self.extra_rows[root_mask_term] - self.extra_rows[term]
)
return workspace[mask][mask_offset:], all_dates[dates_offset:] | zipline-live2-vk | /zipline-live2-vk-1.3.0.7.0.18.tar.gz/zipline-live2-vk-1.3.0.7.0.18/zipline/pipeline/graph.py | graph.py |
from abc import ABCMeta, abstractproperty
from bisect import insort
try:
    from collections.abc import Mapping
except ImportError:  # Python 2 fallback.
    from collections import Mapping
from weakref import WeakValueDictionary
from numpy import (
array,
dtype as dtype_class,
ndarray,
searchsorted,
)
from six import with_metaclass
from zipline.assets import Asset
from zipline.errors import (
DTypeNotSpecified,
InvalidOutputName,
NonExistentAssetInTimeFrame,
NonSliceableTerm,
NonWindowSafeInput,
NotDType,
NonPipelineInputs,
TermInputsNotSpecified,
TermOutputsEmpty,
UnsupportedDType,
WindowLengthNotSpecified,
)
from zipline.lib.adjusted_array import can_represent_dtype
from zipline.lib.labelarray import LabelArray
from zipline.utils.input_validation import expect_types
from zipline.utils.memoize import lazyval
from zipline.utils.numpy_utils import (
bool_dtype,
categorical_dtype,
datetime64ns_dtype,
default_missing_value_for_dtype,
)
from zipline.utils.sharedoc import (
templated_docstring,
PIPELINE_ALIAS_NAME_DOC,
PIPELINE_DOWNSAMPLING_FREQUENCY_DOC,
)
from .downsample_helpers import expect_downsample_frequency
from .sentinels import NotSpecified
class Term(with_metaclass(ABCMeta, object)):
"""
Base class for terms in a Pipeline API compute graph.
"""
# These are NotSpecified because a subclass is required to provide them.
dtype = NotSpecified
domain = NotSpecified
missing_value = NotSpecified
# Subclasses aren't required to provide `params`. The default behavior is
# no params.
params = ()
# Determines if a term is safe to be used as a windowed input.
window_safe = False
# The dimensions of the term's output (1D or 2D).
ndim = 2
_term_cache = WeakValueDictionary()
def __new__(cls,
domain=domain,
dtype=dtype,
missing_value=missing_value,
window_safe=NotSpecified,
ndim=NotSpecified,
# params is explicitly not allowed to be passed to an instance.
*args,
**kwargs):
"""
Memoized constructor for Terms.
Caching previously-constructed Terms is useful because it allows us to
only compute equivalent sub-expressions once when traversing a Pipeline
dependency graph.
Caching previously-constructed Terms is **sane** because terms and
their inputs are both conceptually immutable.
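Examples
--------
Because construction is memoized, equivalent terms are the same object.
A sketch, using the SimpleMovingAverage factor and the USEquityPricing
data set::
    f1 = SimpleMovingAverage(inputs=[USEquityPricing.close],
                             window_length=5)
    f2 = SimpleMovingAverage(inputs=[USEquityPricing.close],
                             window_length=5)
    assert f1 is f2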
"""
# Subclasses can override these class-level attributes to provide
# default values.
if domain is NotSpecified:
domain = cls.domain
if dtype is NotSpecified:
dtype = cls.dtype
if missing_value is NotSpecified:
missing_value = cls.missing_value
if ndim is NotSpecified:
ndim = cls.ndim
if window_safe is NotSpecified:
window_safe = cls.window_safe
dtype, missing_value = validate_dtype(
cls.__name__,
dtype,
missing_value,
)
params = cls._pop_params(kwargs)
identity = cls._static_identity(
domain=domain,
dtype=dtype,
missing_value=missing_value,
window_safe=window_safe,
ndim=ndim,
params=params,
*args, **kwargs
)
try:
return cls._term_cache[identity]
except KeyError:
new_instance = cls._term_cache[identity] = \
super(Term, cls).__new__(cls)._init(
domain=domain,
dtype=dtype,
missing_value=missing_value,
window_safe=window_safe,
ndim=ndim,
params=params,
*args, **kwargs
)
return new_instance
@classmethod
def _pop_params(cls, kwargs):
"""
Pop entries from the `kwargs` passed to cls.__new__ based on the values
in `cls.params`.
Parameters
----------
kwargs : dict
The kwargs passed to cls.__new__.
Returns
-------
params : tuple[(str, object)]
A tuple of (key, value) pairs containing the entries in cls.params.
Raises
------
TypeError
Raised if any parameter values are not passed or not hashable.
"""
params = cls.params
if not isinstance(params, Mapping):
params = {k: NotSpecified for k in params}
param_values = []
for key, default_value in params.items():
try:
value = kwargs.pop(key, default_value)
if value is NotSpecified:
raise KeyError(key)
# Check here that the value is hashable so that we fail here
# instead of trying to hash the param values tuple later.
hash(value)
except KeyError:
raise TypeError(
"{typename} expected a keyword parameter {name!r}.".format(
typename=cls.__name__,
name=key
)
)
except TypeError:
# Value wasn't hashable.
raise TypeError(
"{typename} expected a hashable value for parameter "
"{name!r}, but got {value!r} instead.".format(
typename=cls.__name__,
name=key,
value=value,
)
)
param_values.append((key, value))
return tuple(param_values)
def __init__(self, *args, **kwargs):
"""
Noop constructor to play nicely with our caching __new__. Subclasses
should implement _init instead of this method.
When a class' __new__ returns an instance of that class, Python will
automatically call __init__ on the object, even if a new object wasn't
actually constructed. Because we memoize instances, we often return an
object that was already initialized from __new__, in which case we
don't want to call __init__ again.
Subclasses that need to initialize new instances should override _init,
which is guaranteed to be called only once.
"""
pass
@expect_types(key=Asset)
def __getitem__(self, key):
if isinstance(self, LoadableTerm):
raise NonSliceableTerm(term=self)
return Slice(self, key)
@classmethod
def _static_identity(cls,
domain,
dtype,
missing_value,
window_safe,
ndim,
params):
"""
Return the identity of the Term that would be constructed from the
given arguments.
Identities that compare equal will cause us to return a cached instance
rather than constructing a new one. We do this primarily because it
makes dependency resolution easier.
This is a classmethod so that it can be called from Term.__new__ to
determine whether to produce a new instance.
"""
return (cls, domain, dtype, missing_value, window_safe, ndim, params)
def _init(self, domain, dtype, missing_value, window_safe, ndim, params):
"""
Parameters
----------
domain : object
Unused placeholder.
dtype : np.dtype
Dtype of this term's output.
params : tuple[(str, hashable)]
Tuple of key/value pairs of additional parameters.
"""
self.domain = domain
self.dtype = dtype
self.missing_value = missing_value
self.window_safe = window_safe
self.ndim = ndim
for name, value in params:
if hasattr(self, name):
raise TypeError(
"Parameter {name!r} conflicts with already-present"
" attribute with value {value!r}.".format(
name=name,
value=getattr(self, name),
)
)
# TODO: Consider setting these values as attributes and replacing
# the boilerplate in NumericalExpression, Rank, and
# PercentileFilter.
self.params = dict(params)
# Make sure that subclasses call super() in their _validate() methods
# by setting this flag. The base class implementation of _validate
# should set this flag to True.
self._subclass_called_super_validate = False
self._validate()
assert self._subclass_called_super_validate, (
"Term._validate() was not called.\n"
"This probably means that you overrode _validate"
" without calling super()."
)
del self._subclass_called_super_validate
return self
def _validate(self):
"""
Assert that this term is well-formed. This should be called exactly
once, at the end of Term._init().
"""
# mark that we got here to enforce that subclasses overriding _validate
# call super().
self._subclass_called_super_validate = True
def compute_extra_rows(self,
all_dates,
start_date,
end_date,
min_extra_rows):
"""
Calculate the number of extra rows needed to compute ``self``.
Must return at least ``min_extra_rows``, and the default implementation
is to just return ``min_extra_rows``. This is overridden by
downsampled terms to ensure that the first date computed is a
recomputation date.
Parameters
----------
all_dates : pd.DatetimeIndex
The trading sessions against which ``self`` will be computed.
start_date : pd.Timestamp
The first date for which final output is requested.
end_date : pd.Timestamp
The last date for which final output is requested.
min_extra_rows : int
The minimum number of extra rows required of ``self``, as
determined by other terms that depend on ``self``.
Returns
-------
extra_rows : int
The number of extra rows to compute. Must be at least
``min_extra_rows``.
"""
return min_extra_rows
@abstractproperty
def inputs(self):
"""
A tuple of other Terms needed as direct inputs for this Term.
"""
raise NotImplementedError('inputs')
@abstractproperty
def windowed(self):
"""
Boolean indicating whether this term is a trailing-window computation.
"""
raise NotImplementedError('windowed')
@abstractproperty
def mask(self):
"""
A Filter representing asset/date pairs to include while
computing this Term. (True means include; False means exclude.)
"""
raise NotImplementedError('mask')
@abstractproperty
def dependencies(self):
"""
A dictionary mapping terms that must be computed before `self` to the
number of extra rows needed for those terms.
"""
raise NotImplementedError('dependencies')
def graph_repr(self):
"""A short repr to use when rendering GraphViz graphs.
"""
# Default graph_repr is just the name of the type.
return type(self).__name__
def recursive_repr(self):
"""A short repr to use when recursively rendering terms with inputs.
"""
# Default recursive_repr is just the name of the type.
return type(self).__name__
class AssetExists(Term):
"""
Pseudo-filter describing whether or not an asset existed on a given day.
This is the default mask for all terms that haven't been passed a mask
explicitly.
This is morally a Filter, in the sense that it produces a boolean value for
every asset on every date. We don't subclass Filter, however, because
`AssetExists` is computed directly by the PipelineEngine.
This term is guaranteed to be available as an input for any term computed
by SimplePipelineEngine.run_pipeline().
See Also
--------
zipline.assets.AssetFinder.lifetimes
"""
dtype = bool_dtype
dataset = None
inputs = ()
dependencies = {}
mask = None
windowed = False
def __repr__(self):
return "AssetExists()"
graph_repr = __repr__
def _compute(self, today, assets, out):
raise NotImplementedError(
"AssetExists cannot be computed directly."
" Check your PipelineEngine configuration."
)
class InputDates(Term):
"""
1-Dimensional term providing date labels for other term inputs.
This term is guaranteed to be available as an input for any term computed
by SimplePipelineEngine.run_pipeline().
"""
ndim = 1
dataset = None
dtype = datetime64ns_dtype
inputs = ()
dependencies = {}
mask = None
windowed = False
window_safe = True
def __repr__(self):
return "InputDates()"
graph_repr = __repr__
def _compute(self, today, assets, out):
raise NotImplementedError(
"InputDates cannot be computed directly."
" Check your PipelineEngine configuration."
)
class LoadableTerm(Term):
"""
A Term that should be loaded from an external resource by a PipelineLoader.
This is the base class for :class:`zipline.pipeline.data.BoundColumn`.
"""
windowed = False
inputs = ()
@lazyval
def dependencies(self):
return {self.mask: 0}
class ComputableTerm(Term):
"""
A Term that should be computed from a tuple of inputs.
This is the base class for :class:`zipline.pipeline.Factor`,
:class:`zipline.pipeline.Filter`, and :class:`zipline.pipeline.Classifier`.
"""
inputs = NotSpecified
outputs = NotSpecified
window_length = NotSpecified
mask = NotSpecified
def __new__(cls,
inputs=inputs,
outputs=outputs,
window_length=window_length,
mask=mask,
*args, **kwargs):
if inputs is NotSpecified:
inputs = cls.inputs
# Having inputs = NotSpecified is an error, but we handle it later
# in self._validate rather than here.
if inputs is not NotSpecified:
# Allow users to specify lists as class-level defaults, but
# normalize to a tuple so that inputs is hashable.
inputs = tuple(inputs)
# Make sure all our inputs are valid pipeline objects before trying
# to infer a domain.
non_terms = [t for t in inputs if not isinstance(t, Term)]
if non_terms:
    raise NonPipelineInputs(cls.__name__, non_terms)
if outputs is NotSpecified:
outputs = cls.outputs
if outputs is not NotSpecified:
outputs = tuple(outputs)
if mask is NotSpecified:
mask = cls.mask
if mask is NotSpecified:
mask = AssetExists()
if window_length is NotSpecified:
window_length = cls.window_length
return super(ComputableTerm, cls).__new__(
cls,
inputs=inputs,
outputs=outputs,
mask=mask,
window_length=window_length,
*args, **kwargs
)
def _init(self, inputs, outputs, window_length, mask, *args, **kwargs):
self.inputs = inputs
self.outputs = outputs
self.window_length = window_length
self.mask = mask
return super(ComputableTerm, self)._init(*args, **kwargs)
@classmethod
def _static_identity(cls,
inputs,
outputs,
window_length,
mask,
*args,
**kwargs):
return (
super(ComputableTerm, cls)._static_identity(*args, **kwargs),
inputs,
outputs,
window_length,
mask,
)
def _validate(self):
super(ComputableTerm, self)._validate()
if self.inputs is NotSpecified:
raise TermInputsNotSpecified(termname=type(self).__name__)
if self.outputs is NotSpecified:
pass
elif not self.outputs:
raise TermOutputsEmpty(termname=type(self).__name__)
else:
# Raise an exception if there are any naming conflicts between the
# term's output names and certain attributes.
disallowed_names = [
attr for attr in dir(ComputableTerm)
if not attr.startswith('_')
]
# The name 'compute' is an added special case that is disallowed.
# Use insort to add it to the list in alphabetical order.
insort(disallowed_names, 'compute')
for output in self.outputs:
if output.startswith('_') or output in disallowed_names:
raise InvalidOutputName(
output_name=output,
termname=type(self).__name__,
disallowed_names=disallowed_names,
)
if self.window_length is NotSpecified:
raise WindowLengthNotSpecified(termname=type(self).__name__)
if self.mask is NotSpecified:
# This isn't user error, this is a bug in our code.
raise AssertionError("{term} has no mask".format(term=self))
if self.window_length:
for child in self.inputs:
if not child.window_safe:
raise NonWindowSafeInput(parent=self, child=child)
def _compute(self, inputs, dates, assets, mask):
"""
Subclasses should implement this to perform actual computation.
This is named ``_compute`` rather than just ``compute`` because
``compute`` is reserved for user-supplied functions in
CustomFilter/CustomFactor/CustomClassifier.
"""
raise NotImplementedError()
@lazyval
def windowed(self):
"""
Whether or not this term represents a trailing window computation.
If term.windowed is truthy, its compute_from_windows method will be
called with instances of AdjustedArray as inputs.
If term.windowed is falsey, its compute_from_baseline will be called
with instances of np.ndarray as inputs.
"""
return (
self.window_length is not NotSpecified
and self.window_length > 0
)
@lazyval
def dependencies(self):
"""
The number of extra rows needed for each of our inputs to compute this
term.
"""
extra_input_rows = max(0, self.window_length - 1)
out = {}
for term in self.inputs:
out[term] = extra_input_rows
out[self.mask] = 0
return out
@expect_types(data=ndarray)
def postprocess(self, data):
"""
Called with a result of ``self``, unravelled (i.e. 1-dimensional)
after any user-defined screens have been applied.
This is mostly useful for transforming the dtype of an output, e.g., to
convert a LabelArray into a pandas Categorical.
The default implementation is to just return data unchanged.
"""
return data
def to_workspace_value(self, result, assets):
"""
Called with a column of the result of a pipeline. This needs to put
the data into a format that can be used in a workspace to continue
doing computations.
Parameters
----------
result : pd.Series
A multiindexed series with (dates, assets) whose values are the
results of running this pipeline term over the dates.
assets : pd.Index
All of the assets being requested. This allows us to correctly
shape the workspace value.
Returns
-------
workspace_value : array-like
An array like value that the engine can consume.
"""
return result.unstack().fillna(self.missing_value).reindex(
columns=assets,
fill_value=self.missing_value,
).values
def _downsampled_type(self, *args, **kwargs):
"""
The expression type to return from self.downsample().
"""
raise NotImplementedError(
"downsampling is not yet implemented "
"for instances of %s." % type(self).__name__
)
@expect_downsample_frequency
@templated_docstring(frequency=PIPELINE_DOWNSAMPLING_FREQUENCY_DOC)
def downsample(self, frequency):
"""
Make a term that computes from ``self`` at lower-than-daily frequency.
Parameters
----------
{frequency}
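Examples
--------
A sketch, assuming ``factor`` is an existing Factor; supported
frequencies include 'month_start'::
    monthly_factor = factor.downsample('month_start')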
"""
return self._downsampled_type(term=self, frequency=frequency)
def _aliased_type(self, *args, **kwargs):
"""
The expression type to return from self.alias().
"""
raise NotImplementedError(
"alias is not yet implemented "
"for instances of %s." % type(self).__name__
)
@templated_docstring(name=PIPELINE_ALIAS_NAME_DOC)
def alias(self, name):
"""
Make a term from ``self`` that names the expression.
Parameters
----------
{name}
Returns
-------
aliased : Aliased
``self`` with a name.
Notes
-----
This is useful for giving a name to a numerical or boolean expression.
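Examples
--------
A sketch, assuming ``fast`` and ``slow`` are existing Factors::
    spread = (fast - slow).alias('momentum_spread')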
"""
return self._aliased_type(term=self, name=name)
def __repr__(self):
return (
"{type}([{inputs}], {window_length})"
).format(
type=type(self).__name__,
inputs=', '.join(i.recursive_repr() for i in self.inputs),
window_length=self.window_length,
)
def recursive_repr(self):
return type(self).__name__ + '(...)'
class Slice(ComputableTerm):
"""
Term for extracting a single column of another term's output.
Parameters
----------
term : zipline.pipeline.term.Term
The term from which to extract a column of data.
asset : zipline.assets.Asset
The asset corresponding to the column of `term` to be extracted.
Notes
-----
Users should rarely construct instances of `Slice` directly. Instead, they
should construct instances via indexing, e.g. `MyFactor()[Asset(24)]`.
"""
def __new__(cls, term, asset):
return super(Slice, cls).__new__(
cls,
asset=asset,
inputs=[term],
window_length=0,
mask=term.mask,
dtype=term.dtype,
missing_value=term.missing_value,
window_safe=term.window_safe,
ndim=1,
)
def __repr__(self):
    return "{parent_term}[{asset}]".format(
        parent_term=self.inputs[0].recursive_repr(),
        asset=self._asset,
    )
def _init(self, asset, *args, **kwargs):
self._asset = asset
return super(Slice, self)._init(*args, **kwargs)
@classmethod
def _static_identity(cls, asset, *args, **kwargs):
return (super(Slice, cls)._static_identity(*args, **kwargs), asset)
def _compute(self, windows, dates, assets, mask):
asset = self._asset
asset_column = searchsorted(assets.values, asset.sid)
if assets[asset_column] != asset.sid:
raise NonExistentAssetInTimeFrame(
asset=asset, start_date=dates[0], end_date=dates[-1],
)
# Return a 2D array with one column rather than a 1D array of the
# column.
return windows[0][:, [asset_column]]
@property
def asset(self):
"""Get the asset whose data is selected by this slice.
"""
return self._asset
@property
def _downsampled_type(self):
raise NotImplementedError(
'downsampling of slices is not yet supported'
)
def validate_dtype(termname, dtype, missing_value):
"""
Validate a `dtype` and `missing_value` passed to Term.__new__.
Ensures that we know how to represent ``dtype``, and that missing_value
is specified for types without default missing values.
Returns
-------
validated_dtype, validated_missing_value : np.dtype, any
The dtype and missing_value to use for the new term.
Raises
------
DTypeNotSpecified
When no dtype was passed to the instance, and the class doesn't
provide a default.
NotDType
When either the class or the instance provides a value not
coercible to a numpy dtype.
NoDefaultMissingValue
When dtype requires an explicit missing_value, but
``missing_value`` is NotSpecified.
"""
if dtype is NotSpecified:
raise DTypeNotSpecified(termname=termname)
try:
dtype = dtype_class(dtype)
except TypeError:
raise NotDType(dtype=dtype, termname=termname)
if not can_represent_dtype(dtype):
raise UnsupportedDType(dtype=dtype, termname=termname)
if missing_value is NotSpecified:
missing_value = default_missing_value_for_dtype(dtype)
try:
if dtype == categorical_dtype:
# This check is necessary because we use object dtype for
# categoricals, and numpy will allow us to promote numerical
# values to object even though we don't support them.
_assert_valid_categorical_missing_value(missing_value)
# For any other type, we can check if the missing_value is safe by
# making an array of that value and trying to safely convert it to
# the desired type.
# 'same_kind' allows casting between things like float32 and
# float64, but not str and int.
array([missing_value]).astype(dtype=dtype, casting='same_kind')
except TypeError as e:
raise TypeError(
"Missing value {value!r} is not a valid choice "
"for term {termname} with dtype {dtype}.\n\n"
"Coercion attempt failed with: {error}".format(
termname=termname,
value=missing_value,
dtype=dtype,
error=e,
)
)
return dtype, missing_value
def _assert_valid_categorical_missing_value(value):
"""
Check that value is a valid categorical missing_value.
Raises a TypeError if the value cannot be used as the missing_value for
a categorical_dtype Term.
"""
label_types = LabelArray.SUPPORTED_SCALAR_TYPES
if not isinstance(value, label_types):
raise TypeError(
"Categorical terms must have missing values of type "
"{types}.".format(
types=' or '.join([t.__name__ for t in label_types]),
)
) | zipline-live2-vk | /zipline-live2-vk-1.3.0.7.0.18.tar.gz/zipline-live2-vk-1.3.0.7.0.18/zipline/pipeline/term.py | term.py |
from __future__ import division
from numpy import (
abs,
average,
clip,
diff,
dstack,
inf,
)
from numexpr import evaluate
from zipline.pipeline.data import USEquityPricing
from zipline.pipeline.factors import CustomFactor
from zipline.pipeline.mixins import SingleInputMixin
from zipline.utils.input_validation import expect_bounded
from zipline.utils.math_utils import (
nanargmax,
nanargmin,
nanmax,
nanmean,
nanstd,
nanmin,
)
from zipline.utils.numpy_utils import rolling_window
from .basic import exponential_weights
from .basic import ( # noqa reexport
# These are re-exported here for backwards compatibility with the old
# definition site.
LinearWeightedMovingAverage,
MaxDrawdown,
SimpleMovingAverage,
VWAP,
WeightedAverageValue
)
class RSI(CustomFactor, SingleInputMixin):
"""
Relative Strength Index
**Default Inputs**: [USEquityPricing.close]
**Default Window Length**: 15
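Examples
--------
A sketch using RSI to build an overbought filter::
    rsi = RSI()
    overbought = rsi > 70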
"""
window_length = 15
inputs = (USEquityPricing.close,)
window_safe = True
def compute(self, today, assets, out, closes):
diffs = diff(closes, axis=0)
ups = nanmean(clip(diffs, 0, inf), axis=0)
downs = abs(nanmean(clip(diffs, -inf, 0), axis=0))
return evaluate(
"100 - (100 / (1 + (ups / downs)))",
local_dict={'ups': ups, 'downs': downs},
global_dict={},
out=out,
)
class BollingerBands(CustomFactor):
"""
Bollinger Bands technical indicator.
https://en.wikipedia.org/wiki/Bollinger_Bands
**Default Inputs:** :data:`zipline.pipeline.data.USEquityPricing.close`
Parameters
----------
inputs : length-1 iterable[BoundColumn]
The expression over which to compute bollinger bands.
window_length : int > 0
Length of the lookback window over which to compute the bollinger
bands.
k : float
The number of standard deviations to add or subtract to create the
upper and lower bands.
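Examples
--------
A sketch; multi-output factors expose their outputs as attributes::
    bbands = BollingerBands(window_length=20, k=2)
    lower, middle, upper = bbands.lower, bbands.middle, bbands.upper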
"""
params = ('k',)
inputs = (USEquityPricing.close,)
outputs = 'lower', 'middle', 'upper'
def compute(self, today, assets, out, close, k):
difference = k * nanstd(close, axis=0)
out.middle = middle = nanmean(close, axis=0)
out.upper = middle + difference
out.lower = middle - difference
class Aroon(CustomFactor):
"""
Aroon technical indicator.
https://www.fidelity.com/learning-center/trading-investing/technical-analysis/technical-indicator-guide/aroon-indicator # noqa
**Defaults Inputs:** USEquityPricing.low, USEquityPricing.high
Parameters
----------
window_length : int > 0
Length of the lookback window over which to compute the Aroon
indicator.
"""
inputs = (USEquityPricing.low, USEquityPricing.high)
outputs = ('down', 'up')
def compute(self, today, assets, out, lows, highs):
wl = self.window_length
high_date_index = nanargmax(highs, axis=0)
low_date_index = nanargmin(lows, axis=0)
evaluate(
'(100 * high_date_index) / (wl - 1)',
local_dict={
'high_date_index': high_date_index,
'wl': wl,
},
out=out.up,
)
evaluate(
'(100 * low_date_index) / (wl - 1)',
local_dict={
'low_date_index': low_date_index,
'wl': wl,
},
out=out.down,
)
class FastStochasticOscillator(CustomFactor):
"""
Fast Stochastic Oscillator Indicator [%K, Momentum Indicator]
https://wiki.timetotrade.eu/Stochastic
This stochastic is considered volatile and varies widely when used in
market analysis. The slow stochastic oscillator, or a moving average of
%K (the %D line), is generally preferred.
**Default Inputs:** :data: `zipline.pipeline.data.USEquityPricing.close`
:data: `zipline.pipeline.data.USEquityPricing.low`
:data: `zipline.pipeline.data.USEquityPricing.high`
**Default Window Length:** 14
Returns
-------
out: %K oscillator
"""
inputs = (USEquityPricing.close, USEquityPricing.low, USEquityPricing.high)
window_safe = True
window_length = 14
def compute(self, today, assets, out, closes, lows, highs):
highest_highs = nanmax(highs, axis=0)
lowest_lows = nanmin(lows, axis=0)
today_closes = closes[-1]
evaluate(
'((tc - ll) / (hh - ll)) * 100',
local_dict={
'tc': today_closes,
'll': lowest_lows,
'hh': highest_highs,
},
global_dict={},
out=out,
)
class IchimokuKinkoHyo(CustomFactor):
"""Compute the various metrics for the Ichimoku Kinko Hyo (Ichimoku Cloud).
http://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:ichimoku_cloud # noqa
**Default Inputs:** :data:`zipline.pipeline.data.USEquityPricing.high`
:data:`zipline.pipeline.data.USEquityPricing.low`
:data:`zipline.pipeline.data.USEquityPricing.close`
**Default Window Length:** 52
Parameters
----------
window_length : int > 0
The length of the window for the senkou span b.
tenkan_sen_length : int >= 0, <= window_length
The length of the window for the tenkan-sen.
kijun_sen_length : int >= 0, <= window_length
The length of the window for the kijun-sen.
chikou_span_length : int >= 0, <= window_length
The lag for the chikou span.
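Examples
--------
A sketch with the default parameters::
    ichimoku = IchimokuKinkoHyo()
    tenkan_sen = ichimoku.tenkan_sen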
"""
params = {
'tenkan_sen_length': 9,
'kijun_sen_length': 26,
'chikou_span_length': 26,
}
inputs = (USEquityPricing.high, USEquityPricing.low, USEquityPricing.close)
outputs = (
'tenkan_sen',
'kijun_sen',
'senkou_span_a',
'senkou_span_b',
'chikou_span',
)
window_length = 52
def _validate(self):
super(IchimokuKinkoHyo, self)._validate()
for k, v in self.params.items():
if v > self.window_length:
raise ValueError(
'%s must be <= the window_length: %s > %s' % (
k, v, self.window_length,
),
)
def compute(self,
today,
assets,
out,
high,
low,
close,
tenkan_sen_length,
kijun_sen_length,
chikou_span_length):
out.tenkan_sen = tenkan_sen = (
high[-tenkan_sen_length:].max(axis=0) +
low[-tenkan_sen_length:].min(axis=0)
) / 2
out.kijun_sen = kijun_sen = (
high[-kijun_sen_length:].max(axis=0) +
low[-kijun_sen_length:].min(axis=0)
) / 2
out.senkou_span_a = (tenkan_sen + kijun_sen) / 2
out.senkou_span_b = (high.max(axis=0) + low.min(axis=0)) / 2
# Use a negative index so the lag is measured back from the most recent
# row regardless of window_length.
out.chikou_span = close[-chikou_span_length]
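# Illustrative sketch (not part of the original module): each Ichimoku
# midpoint line above reduces to (trailing max + trailing min) / 2 over a
# slice of the window. Hypothetical helper for a single line.
def _example_ichimoku_midpoint(high, low, length):
    """Midpoint line over the trailing ``length`` rows (e.g. the tenkan-sen)."""
    import numpy as np
    high = np.asarray(high, dtype=float)
    low = np.asarray(low, dtype=float)
    return (high[-length:].max(axis=0) + low[-length:].min(axis=0)) / 2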
class RateOfChangePercentage(CustomFactor):
"""
Rate of change Percentage
ROC measures the percentage change in price from one period to the next.
The ROC calculation compares the current price with the price `n`
periods ago.
Formula for calculation: ((price - prevPrice) / prevPrice) * 100
price - the current price
prevPrice - the price n days ago, where n equals the window length
"""
def compute(self, today, assets, out, close):
today_close = close[-1]
prev_close = close[0]
evaluate('((tc - pc) / pc) * 100',
local_dict={
'tc': today_close,
'pc': prev_close
},
global_dict={},
out=out,
)
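# Illustrative sketch (not part of the original module): the ROC formula with
# plain numpy; ``close`` is a window whose first row is the price n days ago
# and whose last row is the current price. Hypothetical helper.
def _example_rate_of_change(close):
    """Return ((price - prevPrice) / prevPrice) * 100 per column."""
    import numpy as np
    close = np.asarray(close, dtype=float)
    return (close[-1] - close[0]) / close[0] * 100.0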
class TrueRange(CustomFactor):
"""
True Range
A technical indicator originally developed by J. Welles Wilder, Jr.
Indicates the true degree of daily price change in an underlying.
**Default Inputs:** :data:`zipline.pipeline.data.USEquityPricing.high`
:data:`zipline.pipeline.data.USEquityPricing.low`
:data:`zipline.pipeline.data.USEquityPricing.close`
**Default Window Length:** 2
"""
inputs = (
USEquityPricing.high,
USEquityPricing.low,
USEquityPricing.close,
)
window_length = 2
def compute(self, today, assets, out, highs, lows, closes):
high_to_low = highs[1:] - lows[1:]
high_to_prev_close = abs(highs[1:] - closes[:-1])
low_to_prev_close = abs(lows[1:] - closes[:-1])
out[:] = nanmax(
dstack((
high_to_low,
high_to_prev_close,
low_to_prev_close,
)),
2
)
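# Illustrative sketch (not part of the original module): true range for the
# default two-row window is the largest of three candidate ranges.
# Hypothetical helper.
def _example_true_range(highs, lows, closes):
    """Return the true range per column; shape (rows - 1, columns)."""
    import numpy as np
    highs = np.asarray(highs, dtype=float)
    lows = np.asarray(lows, dtype=float)
    closes = np.asarray(closes, dtype=float)
    return np.nanmax(
        np.dstack((
            highs[1:] - lows[1:],
            np.abs(highs[1:] - closes[:-1]),
            np.abs(lows[1:] - closes[:-1]),
        )),
        axis=2,
    )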
class MovingAverageConvergenceDivergenceSignal(CustomFactor):
"""
Moving Average Convergence/Divergence (MACD) Signal line
https://en.wikipedia.org/wiki/MACD
A technical indicator originally developed by Gerald Appel in the late
1970s. MACD shows the relationship between two moving averages and
reveals changes in the strength, direction, momentum, and duration of a
trend in a stock's price.
**Default Inputs:** :data:`zipline.pipeline.data.USEquityPricing.close`
Parameters
----------
fast_period : int > 0, optional
The window length for the "fast" EWMA. Default is 12.
slow_period : int > 0, > fast_period, optional
The window length for the "slow" EWMA. Default is 26.
signal_period : int > 0, < fast_period, optional
The window length for the signal line. Default is 9.
Notes
-----
Unlike most pipeline expressions, this factor does not accept a
``window_length`` parameter. ``window_length`` is inferred from
``slow_period`` and ``signal_period``.
"""
inputs = (USEquityPricing.close,)
# We don't use the default form of `params` here because we want to
# dynamically calculate `window_length` from the period lengths in our
# __new__.
params = ('fast_period', 'slow_period', 'signal_period')
@expect_bounded(
__funcname='MACDSignal',
fast_period=(1, None), # These must all be >= 1.
slow_period=(1, None),
signal_period=(1, None),
)
def __new__(cls,
fast_period=12,
slow_period=26,
signal_period=9,
*args,
**kwargs):
if slow_period <= fast_period:
raise ValueError(
"'slow_period' must be greater than 'fast_period', but got\n"
"slow_period={slow}, fast_period={fast}".format(
slow=slow_period,
fast=fast_period,
)
)
return super(MovingAverageConvergenceDivergenceSignal, cls).__new__(
cls,
fast_period=fast_period,
slow_period=slow_period,
signal_period=signal_period,
window_length=slow_period + signal_period - 1,
*args, **kwargs
)
def _ewma(self, data, length):
decay_rate = 1.0 - (2.0 / (1.0 + length))
return average(
data,
axis=1,
weights=exponential_weights(length, decay_rate)
)
def compute(self, today, assets, out, close, fast_period, slow_period,
signal_period):
slow_EWMA = self._ewma(
rolling_window(close, slow_period),
slow_period
)
fast_EWMA = self._ewma(
rolling_window(close, fast_period)[-signal_period:],
fast_period
)
macd = fast_EWMA - slow_EWMA
out[:] = self._ewma(macd.T, signal_period)
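# Illustrative sketch (not part of the original module): the decay rate used
# by ``_ewma`` above, 1 - 2/(1 + length), matches the ``span``
# parameterization of an EWMA (alpha = 2/(span + 1)). A hypothetical
# construction of such (unnormalized) weights:
def _example_ewma_weights(length):
    """Exponentially decaying weights for a span-``length`` EWMA, newest last."""
    import numpy as np
    decay_rate = 1.0 - (2.0 / (1.0 + length))
    # The oldest observation receives the smallest weight; ``np.average``
    # normalizes the weights, so no explicit normalization is needed here.
    return decay_rate ** np.arange(length - 1, -1, -1)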
# Convenience aliases.
MACDSignal = MovingAverageConvergenceDivergenceSignal | zipline-live2-vk | /zipline-live2-vk-1.3.0.7.0.18.tar.gz/zipline-live2-vk-1.3.0.7.0.18/zipline/pipeline/factors/technical.py | technical.py |
import numpy as np
from numpy import broadcast_arrays
from scipy.stats import (
linregress,
pearsonr,
spearmanr,
)
from zipline.assets import Asset
from zipline.errors import IncompatibleTerms
from zipline.pipeline.factors import CustomFactor
from zipline.pipeline.filters import SingleAsset
from zipline.pipeline.mixins import SingleInputMixin, StandardOutputs
from zipline.pipeline.sentinels import NotSpecified
from zipline.pipeline.term import AssetExists
from zipline.utils.input_validation import (
expect_bounded,
expect_dtypes,
expect_types,
)
from zipline.utils.math_utils import nanmean
from zipline.utils.numpy_utils import (
float64_dtype,
int64_dtype,
)
from .basic import Returns
ALLOWED_DTYPES = (float64_dtype, int64_dtype)
class _RollingCorrelation(CustomFactor, SingleInputMixin):
@expect_dtypes(base_factor=ALLOWED_DTYPES, target=ALLOWED_DTYPES)
@expect_bounded(correlation_length=(2, None))
def __new__(cls,
base_factor,
target,
correlation_length,
mask=NotSpecified):
if target.ndim == 2 and base_factor.mask is not target.mask:
raise IncompatibleTerms(term_1=base_factor, term_2=target)
return super(_RollingCorrelation, cls).__new__(
cls,
inputs=[base_factor, target],
window_length=correlation_length,
mask=mask,
)
class RollingPearson(_RollingCorrelation):
"""
A Factor that computes Pearson correlation coefficients between the columns
of a given Factor and either the columns of another Factor/BoundColumn or a
slice/single column of data.
Parameters
----------
base_factor : zipline.pipeline.factors.Factor
The factor for which to compute correlations of each of its columns
with `target`.
target : zipline.pipeline.Term with a numeric dtype
The term with which to compute correlations against each column of data
produced by `base_factor`. This term may be a Factor, a BoundColumn or
a Slice. If `target` is two-dimensional, correlations are computed
asset-wise.
correlation_length : int
Length of the lookback window over which to compute each correlation
coefficient.
mask : zipline.pipeline.Filter, optional
A Filter describing which assets (columns) of `base_factor` should have
their correlation with `target` computed each day.
See Also
--------
:func:`scipy.stats.pearsonr`
:meth:`Factor.pearsonr`
:class:`zipline.pipeline.factors.RollingPearsonOfReturns`
Notes
-----
Most users should call Factor.pearsonr rather than directly construct an
instance of this class.
"""
window_safe = True
def compute(self, today, assets, out, base_data, target_data):
# If `target_data` is a Slice or single column of data, broadcast it
# out to the same shape as `base_data`, then compute column-wise. This
# is efficient because each column of the broadcasted array only refers
# to a single memory location.
target_data = broadcast_arrays(target_data, base_data)[0]
for i in range(len(out)):
out[i] = pearsonr(base_data[:, i], target_data[:, i])[0]
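# Illustrative sketch (not part of the original module): the broadcast plus
# column-wise loop above, demonstrated standalone with scipy. Hypothetical
# helper.
def _example_columnwise_pearson(base_data, target_data):
    """Pearson r between ``target_data`` and each column of ``base_data``."""
    import numpy as np
    from scipy.stats import pearsonr
    base_data = np.asarray(base_data, dtype=float)
    # A 1-column target is expanded to base_data's shape without copying.
    target_data = np.broadcast_arrays(target_data, base_data)[0]
    return np.array([
        pearsonr(base_data[:, i], target_data[:, i])[0]
        for i in range(base_data.shape[1])
    ])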
class RollingSpearman(_RollingCorrelation):
"""
A Factor that computes Spearman rank correlation coefficients between the
columns of a given Factor and either the columns of another
Factor/BoundColumn or a slice/single column of data.
Parameters
----------
base_factor : zipline.pipeline.factors.Factor
The factor for which to compute correlations of each of its columns
with `target`.
target : zipline.pipeline.Term with a numeric dtype
The term with which to compute correlations against each column of data
produced by `base_factor`. This term may be a Factor, a BoundColumn or
a Slice. If `target` is two-dimensional, correlations are computed
asset-wise.
correlation_length : int
Length of the lookback window over which to compute each correlation
coefficient.
mask : zipline.pipeline.Filter, optional
A Filter describing which assets (columns) of `base_factor` should have
their correlation with `target` computed each day.
See Also
--------
:func:`scipy.stats.spearmanr`
:meth:`Factor.spearmanr`
:class:`zipline.pipeline.factors.RollingSpearmanOfReturns`
Notes
-----
Most users should call Factor.spearmanr rather than directly construct an
instance of this class.
"""
window_safe = True
def compute(self, today, assets, out, base_data, target_data):
# If `target_data` is a Slice or single column of data, broadcast it
# out to the same shape as `base_data`, then compute column-wise. This
# is efficient because each column of the broadcasted array only refers
# to a single memory location.
target_data = broadcast_arrays(target_data, base_data)[0]
for i in range(len(out)):
out[i] = spearmanr(base_data[:, i], target_data[:, i])[0]
class RollingLinearRegression(CustomFactor, SingleInputMixin):
"""
A Factor that performs an ordinary least-squares regression predicting the
columns of a given Factor from either the columns of another
Factor/BoundColumn or a slice/single column of data.
Parameters
----------
dependent : zipline.pipeline.factors.Factor
The factor whose columns are the predicted/dependent variable of each
regression with `independent`.
independent : zipline.pipeline.slice.Slice or zipline.pipeline.Factor
The factor/slice whose columns are the predictor/independent variable
of each regression with `dependent`. If `independent` is a Factor,
regressions are computed asset-wise.
regression_length : int
Length of the lookback window over which to compute each regression.
mask : zipline.pipeline.Filter, optional
A Filter describing which assets (columns) of `dependent` should be
regressed against `independent` each day.
See Also
--------
:func:`scipy.stats.linregress`
:meth:`Factor.linear_regression`
:class:`zipline.pipeline.factors.RollingLinearRegressionOfReturns`
Notes
-----
Most users should call Factor.linear_regression rather than directly
construct an instance of this class.
"""
outputs = ['alpha', 'beta', 'r_value', 'p_value', 'stderr']
@expect_dtypes(dependent=ALLOWED_DTYPES, independent=ALLOWED_DTYPES)
@expect_bounded(regression_length=(2, None))
def __new__(cls,
dependent,
independent,
regression_length,
mask=NotSpecified):
if independent.ndim == 2 and dependent.mask is not independent.mask:
raise IncompatibleTerms(term_1=dependent, term_2=independent)
return super(RollingLinearRegression, cls).__new__(
cls,
inputs=[dependent, independent],
window_length=regression_length,
mask=mask,
)
def compute(self, today, assets, out, dependent, independent):
alpha = out.alpha
beta = out.beta
r_value = out.r_value
p_value = out.p_value
stderr = out.stderr
def regress(y, x):
regr_results = linregress(y=y, x=x)
# `linregress` returns its results in the following order:
# slope, intercept, r-value, p-value, stderr
alpha[i] = regr_results[1]
beta[i] = regr_results[0]
r_value[i] = regr_results[2]
p_value[i] = regr_results[3]
stderr[i] = regr_results[4]
# If `independent` is a Slice or single column of data, broadcast it
# out to the same shape as `dependent`, then compute column-wise. This
# is efficient because each column of the broadcasted array only refers
# to a single memory location.
independent = broadcast_arrays(independent, dependent)[0]
for i in range(len(out)):
regress(y=dependent[:, i], x=independent[:, i])
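# Illustrative sketch (not part of the original module): unpacking linregress
# results in the same order compute() relies on, for a single column.
# Hypothetical helper.
def _example_single_regression(y, x):
    """Return (alpha, beta, r_value, p_value, stderr) for one regression."""
    from scipy.stats import linregress
    # linregress returns: slope, intercept, r-value, p-value, stderr.
    slope, intercept, r_value, p_value, stderr = linregress(y=y, x=x)
    return intercept, slope, r_value, p_value, stderr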
class RollingPearsonOfReturns(RollingPearson):
"""
Calculates the Pearson product-moment correlation coefficient of the
returns of the given asset with the returns of all other assets.
Pearson correlation is what most people mean when they say "correlation
coefficient" or "R-value".
Parameters
----------
target : zipline.assets.Asset
The asset to correlate with all other assets.
returns_length : int >= 2
Length of the lookback window over which to compute returns. Daily
returns require a window length of 2.
correlation_length : int >= 1
Length of the lookback window over which to compute each correlation
coefficient.
mask : zipline.pipeline.Filter, optional
A Filter describing which assets should have their correlation with the
target asset computed each day.
Notes
-----
Computing this factor over many assets can be time consuming. It is
recommended that a mask be used in order to limit the number of assets over
which correlations are computed.
Examples
--------
Let the following be example 10-day returns for three different assets::
SPY MSFT FB
2017-03-13 -.03 .03 .04
2017-03-14 -.02 -.03 .02
2017-03-15 -.01 .02 .01
2017-03-16 0 -.02 .01
2017-03-17 .01 .04 -.01
2017-03-20 .02 -.03 -.02
2017-03-21 .03 .01 -.02
2017-03-22 .04 -.02 -.02
Suppose we are interested in SPY's rolling returns correlation with each
stock from 2017-03-17 to 2017-03-22, using a 5-day look back window (that
is, we calculate each correlation coefficient over 5 days of data). We can
achieve this by doing::
rolling_correlations = RollingPearsonOfReturns(
target=sid(8554),
returns_length=10,
correlation_length=5,
)
The result of computing ``rolling_correlations`` from 2017-03-17 to
2017-03-22 gives::
SPY MSFT FB
2017-03-17 1 .15 -.96
2017-03-20 1 .10 -.96
2017-03-21 1 -.16 -.94
2017-03-22 1 -.16 -.85
Note that the column for SPY is all 1's, as the correlation of any data
series with itself is always 1. To understand how each of the other values
were calculated, take for example the .15 in MSFT's column. This is the
correlation coefficient between SPY's returns looking back from 2017-03-17
(-.03, -.02, -.01, 0, .01) and MSFT's returns (.03, -.03, .02, -.02, .04).
See Also
--------
:class:`zipline.pipeline.factors.RollingSpearmanOfReturns`
:class:`zipline.pipeline.factors.RollingLinearRegressionOfReturns`
"""
def __new__(cls,
target,
returns_length,
correlation_length,
mask=NotSpecified):
# Use the `SingleAsset` filter here because it protects against
# inputting a non-existent target asset.
returns = Returns(
window_length=returns_length,
mask=(AssetExists() | SingleAsset(asset=target)),
)
return super(RollingPearsonOfReturns, cls).__new__(
cls,
base_factor=returns,
target=returns[target],
correlation_length=correlation_length,
mask=mask,
)
class RollingSpearmanOfReturns(RollingSpearman):
"""
Calculates the Spearman rank correlation coefficient of the returns of the
given asset with the returns of all other assets.
Parameters
----------
target : zipline.assets.Asset
The asset to correlate with all other assets.
returns_length : int >= 2
Length of the lookback window over which to compute returns. Daily
returns require a window length of 2.
correlation_length : int >= 1
Length of the lookback window over which to compute each correlation
coefficient.
mask : zipline.pipeline.Filter, optional
A Filter describing which assets should have their correlation with the
target asset computed each day.
Notes
-----
Computing this factor over many assets can be time consuming. It is
recommended that a mask be used in order to limit the number of assets over
which correlations are computed.
See Also
--------
:class:`zipline.pipeline.factors.RollingPearsonOfReturns`
:class:`zipline.pipeline.factors.RollingLinearRegressionOfReturns`
"""
def __new__(cls,
target,
returns_length,
correlation_length,
mask=NotSpecified):
# Use the `SingleAsset` filter here because it protects against
# inputting a non-existent target asset.
returns = Returns(
window_length=returns_length,
mask=(AssetExists() | SingleAsset(asset=target)),
)
return super(RollingSpearmanOfReturns, cls).__new__(
cls,
base_factor=returns,
target=returns[target],
correlation_length=correlation_length,
mask=mask,
)
class RollingLinearRegressionOfReturns(RollingLinearRegression):
"""
Perform an ordinary least-squares regression predicting the returns of all
other assets on the given asset.
Parameters
----------
target : zipline.assets.Asset
The asset to regress against all other assets.
returns_length : int >= 2
Length of the lookback window over which to compute returns. Daily
returns require a window length of 2.
regression_length : int >= 1
Length of the lookback window over which to compute each regression.
mask : zipline.pipeline.Filter, optional
A Filter describing which assets should be regressed against the target
asset each day.
Notes
-----
Computing this factor over many assets can be time consuming. It is
recommended that a mask be used in order to limit the number of assets over
which regressions are computed.
This factor is designed to return five outputs:
- alpha, a factor that computes the intercepts of each regression.
- beta, a factor that computes the slopes of each regression.
- r_value, a factor that computes the correlation coefficient of each
regression.
- p_value, a factor that computes, for each regression, the two-sided
p-value for a hypothesis test whose null hypothesis is that the slope is
zero.
- stderr, a factor that computes the standard error of the estimate of each
regression.
For more help on factors with multiple outputs, see
:class:`zipline.pipeline.factors.CustomFactor`.
Examples
--------
Let the following be example 10-day returns for three different assets::
SPY MSFT FB
2017-03-13 -.03 .03 .04
2017-03-14 -.02 -.03 .02
2017-03-15 -.01 .02 .01
2017-03-16 0 -.02 .01
2017-03-17 .01 .04 -.01
2017-03-20 .02 -.03 -.02
2017-03-21 .03 .01 -.02
2017-03-22 .04 -.02 -.02
Suppose we are interested in predicting each stock's returns from SPY's
over rolling 5-day look back windows. We can compute rolling regression
coefficients (alpha and beta) from 2017-03-17 to 2017-03-22 by doing::
regression_factor = RollingLinearRegressionOfReturns(
target=sid(8554),
returns_length=10,
regression_length=5,
)
alpha = regression_factor.alpha
beta = regression_factor.beta
The result of computing ``alpha`` from 2017-03-17 to 2017-03-22 gives::
SPY MSFT FB
2017-03-17 0 .011 .003
2017-03-20 0 -.004 .004
2017-03-21 0 .007 .006
2017-03-22 0 .002 .008
And the result of computing ``beta`` from 2017-03-17 to 2017-03-22 gives::
SPY MSFT FB
2017-03-17 1 .3 -1.1
2017-03-20 1 .2 -1
2017-03-21 1 -.3 -1
2017-03-22 1 -.3 -.9
Note that SPY's column for alpha is all 0's and for beta is all 1's, as the
regression line of SPY with itself is simply the function y = x.
To understand how each of the other values were calculated, take for
example MSFT's ``alpha`` and ``beta`` values on 2017-03-17 (.011 and .3,
respectively). These values are the result of running a linear regression
predicting MSFT's returns from SPY's returns, using values starting at
2017-03-17 and looking back 5 days. That is, the regression was run with
x = [-.03, -.02, -.01, 0, .01] and y = [.03, -.03, .02, -.02, .04], and it
produced a slope of .3 and an intercept of .011.
See Also
--------
:class:`zipline.pipeline.factors.RollingPearsonOfReturns`
:class:`zipline.pipeline.factors.RollingSpearmanOfReturns`
"""
window_safe = True
def __new__(cls,
target,
returns_length,
regression_length,
mask=NotSpecified):
# Use the `SingleAsset` filter here because it protects against
# inputting a non-existent target asset.
returns = Returns(
window_length=returns_length,
mask=(AssetExists() | SingleAsset(asset=target)),
)
return super(RollingLinearRegressionOfReturns, cls).__new__(
cls,
dependent=returns,
independent=returns[target],
regression_length=regression_length,
mask=mask,
)
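# Illustrative sketch (not part of the original module): wiring one of the
# *OfReturns factors into a Pipeline. ``target_asset`` is a hypothetical
# Asset (e.g. the result of a ``symbol``/``sid`` lookup).
def _example_regression_pipeline(target_asset):
    from zipline.pipeline import Pipeline
    regression = RollingLinearRegressionOfReturns(
        target=target_asset,
        returns_length=10,
        regression_length=30,
    )
    return Pipeline(columns={
        'alpha': regression.alpha,
        'beta': regression.beta,
    })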
class SimpleBeta(CustomFactor, StandardOutputs):
"""
Factor producing the slope of a regression line between each asset's daily
returns and the daily returns of a single "target" asset.
Parameters
----------
target : zipline.Asset
Asset against which other assets should be regressed.
regression_length : int
Number of days of daily returns to use for the regression.
allowed_missing_percentage : float, optional
Percentage of returns observations (between 0 and 1) that are allowed
to be missing when calculating betas. Assets with more than this
percentage of returns observations missing will produce values of
NaN. Default behavior is that 25% of inputs can be missing.
"""
window_safe = True
dtype = float64_dtype
params = ('allowed_missing_count',)
@expect_types(
target=Asset,
regression_length=int,
allowed_missing_percentage=(int, float),
__funcname='SimpleBeta',
)
@expect_bounded(
regression_length=(3, None),
allowed_missing_percentage=(0.0, 1.0),
__funcname='SimpleBeta',
)
def __new__(cls,
target,
regression_length,
allowed_missing_percentage=0.25):
daily_returns = Returns(
window_length=2,
mask=(AssetExists() | SingleAsset(asset=target)),
)
allowed_missing_count = int(
allowed_missing_percentage * regression_length
)
return super(SimpleBeta, cls).__new__(
cls,
inputs=[daily_returns, daily_returns[target]],
window_length=regression_length,
allowed_missing_count=allowed_missing_count,
)
def compute(self,
today,
assets,
out,
all_returns,
target_returns,
allowed_missing_count):
vectorized_beta(
dependents=all_returns,
independent=target_returns,
allowed_missing=allowed_missing_count,
out=out,
)
def graph_repr(self):
return "{}({!r}, {}, {})".format(
type(self).__name__,
str(self.target.symbol), # coerce from unicode to str in py2.
self.window_length,
self.params['allowed_missing_count'],
)
@property
def target(self):
"""Get the target of the beta calculation.
"""
return self.inputs[1].asset
def __repr__(self):
return "{}({}, length={}, allowed_missing={})".format(
type(self).__name__,
self.target,
self.window_length,
self.params['allowed_missing_count'],
)
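# Illustrative sketch (not part of the original module): the covariance
# identity that ``vectorized_beta`` below implements, shown for a single
# fully-observed column. Hypothetical helper.
def _example_beta(dependent, independent):
    """Return Cov(X, Y) / Var(X) for 1-D arrays with no missing data."""
    import numpy as np
    x = np.asarray(independent, dtype=float)
    y = np.asarray(dependent, dtype=float)
    # Centering only x suffices; see the derivation in the comments below.
    x_res = x - x.mean()
    return (x_res * y).mean() / (x_res ** 2).mean()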
def vectorized_beta(dependents, independent, allowed_missing, out=None):
"""
Compute slopes of linear regressions between columns of ``dependents`` and
``independent``.
Parameters
----------
dependents : np.array[N, M]
Array with columns of data to be regressed against ``independent``.
independent : np.array[N, 1]
Independent variable of the regression
allowed_missing : int
Number of allowed missing (NaN) observations per column. Columns with
more than this many missing observations in ``dependents`` or
``independent`` will output NaN as the regression coefficient.
out : np.array[M], optional
Output array into which to write the results. If None, a new array is
allocated.
Returns
-------
slopes : np.array[M]
Linear regression coefficients for each column of ``dependents``.
"""
# Cache these as locals since we're going to call them multiple times.
nan = np.nan
isnan = np.isnan
N, M = dependents.shape
if out is None:
out = np.full(M, nan)
# Copy N times as a column vector and fill with nans to have the same
# missing value pattern as the dependent variable.
#
# PERF_TODO: We could probably avoid the space blowup by doing this in
# Cython.
# shape: (N, M)
independent = np.where(
isnan(dependents),
nan,
independent,
)
# Calculate beta as Cov(X, Y) / Cov(X, X).
# https://en.wikipedia.org/wiki/Simple_linear_regression#Fitting_the_regression_line # noqa
#
# NOTE: The usual formula for covariance is::
#
# mean((X - mean(X)) * (Y - mean(Y)))
#
# However, we don't actually need to take the mean of both sides of the
# product, because of the following equivalence::
#
# Let X_res = (X - mean(X)).
# We have:
#
# mean(X_res * (Y - mean(Y))) = mean(X_res * (Y - mean(Y)))
# (1) = mean((X_res * Y) - (X_res * mean(Y)))
# (2) = mean(X_res * Y) - mean(X_res * mean(Y))
# (3) = mean(X_res * Y) - mean(X_res) * mean(Y)
# (4) = mean(X_res * Y) - 0 * mean(Y)
# (5) = mean(X_res * Y)
#
#
# The tricky step in the above derivation is step (4). We know that
# mean(X_res) is zero because, for any X:
#
# mean(X - mean(X)) = mean(X) - mean(X) = 0.
#
# The upshot of this is that we only have to center one of `independent`
# and `dependent` when calculating covariances. Since we need the centered
# `independent` to calculate its variance in the next step, we choose to
# center `independent`.
# shape: (N, M)
ind_residual = independent - nanmean(independent, axis=0)
# shape: (M,)
covariances = nanmean(ind_residual * dependents, axis=0)
# We end up with different variances in each column here because each
# column may have a different subset of the data dropped due to missing
# data in the corresponding dependent column.
# shape: (M,)
independent_variances = nanmean(ind_residual ** 2, axis=0)
# shape: (M,)
np.divide(covariances, independent_variances, out=out)
# Write nans back to locations where we have more than the allowed number
# of missing entries.
nanlocs = isnan(independent).sum(axis=0) > allowed_missing
out[nanlocs] = nan
return out | zipline-live2-vk | /zipline-live2-vk-1.3.0.7.0.18.tar.gz/zipline-live2-vk-1.3.0.7.0.18/zipline/pipeline/factors/statistical.py | statistical.py |
from operator import attrgetter
from numbers import Number
from math import ceil
from numpy import empty_like, inf, isnan, nan, where
from scipy.stats import rankdata
from zipline.utils.compat import wraps
from zipline.errors import (
BadPercentileBounds,
UnknownRankMethod,
UnsupportedDataType,
)
from zipline.lib.normalize import naive_grouped_rowwise_apply
from zipline.lib.rank import masked_rankdata_2d, rankdata_1d_descending
from zipline.pipeline.api_utils import restrict_to_dtype
from zipline.pipeline.classifiers import Classifier, Everything, Quantiles
from zipline.pipeline.dtypes import (
CLASSIFIER_DTYPES,
FACTOR_DTYPES,
FILTER_DTYPES,
)
from zipline.pipeline.expression import (
BadBinaryOperator,
COMPARISONS,
is_comparison,
MATH_BINOPS,
method_name_for_op,
NumericalExpression,
NUMEXPR_MATH_FUNCS,
UNARY_OPS,
unary_op_name,
)
from zipline.pipeline.filters import (
Filter,
NumExprFilter,
PercentileFilter,
MaximumFilter,
NotNullFilter,
NullFilter,
)
from zipline.pipeline.mixins import (
AliasedMixin,
CustomTermMixin,
DownsampledMixin,
LatestMixin,
PositiveWindowLengthMixin,
RestrictedDTypeMixin,
SingleInputMixin,
)
from zipline.pipeline.sentinels import NotSpecified, NotSpecifiedType
from zipline.pipeline.term import ComputableTerm, Term
from zipline.utils.functional import with_doc, with_name
from zipline.utils.input_validation import expect_types
from zipline.utils.math_utils import nanmean, nanstd
from zipline.utils.memoize import classlazyval
from zipline.utils.numpy_utils import (
bool_dtype,
coerce_to_dtype,
float64_dtype,
)
_RANK_METHODS = frozenset(['average', 'min', 'max', 'dense', 'ordinal'])
def coerce_numbers_to_my_dtype(f):
"""
A decorator for methods whose signature is f(self, other) that coerces
``other`` to ``self.dtype``.
This is used to make comparison operations between numbers and `Factor`
instances work independently of whether the user supplies a float or
integer literal.
For example, if I write::
my_filter = my_factor > 3
my_factor probably has dtype float64, but 3 is an int, so we want to coerce
to float64 before doing the comparison.
"""
@wraps(f)
def method(self, other):
if isinstance(other, Number):
other = coerce_to_dtype(self.dtype, other)
return f(self, other)
return method
def binop_return_type(op):
if is_comparison(op):
return NumExprFilter
else:
return NumExprFactor
def binop_return_dtype(op, left, right):
"""
Compute the expected return dtype for the given binary operator.
Parameters
----------
op : str
Operator symbol, (e.g. '+', '-', ...).
left : numpy.dtype
Dtype of left hand side.
right : numpy.dtype
Dtype of right hand side.
Returns
-------
outdtype : numpy.dtype
The dtype of the result of `left <op> right`.
"""
if is_comparison(op):
if left != right:
raise TypeError(
"Don't know how to compute {left} {op} {right}.\n"
"Comparisons are only supported between Factors of equal "
"dtypes.".format(left=left, op=op, right=right)
)
return bool_dtype
elif left != float64_dtype or right != float64_dtype:
raise TypeError(
"Don't know how to compute {left} {op} {right}.\n"
"Arithmetic operators are only supported between Factors of "
"dtype 'float64'.".format(
left=left.name,
op=op,
right=right.name,
)
)
return float64_dtype
def binary_operator(op):
"""
Factory function for making binary operator methods on a Factor subclass.
Returns a function, "binary_operator" suitable for implementing functions
like __add__.
"""
# When combining a Factor with a NumericalExpression, we use this
# attrgetter instance to defer to the commuted implementation of the
# NumericalExpression operator.
commuted_method_getter = attrgetter(method_name_for_op(op, commute=True))
@with_doc("Binary Operator: '%s'" % op)
@with_name(method_name_for_op(op))
@coerce_numbers_to_my_dtype
def binary_operator(self, other):
# This can't be hoisted up a scope because the types returned by
# binop_return_type aren't defined when the top-level function is
# invoked in the class body of Factor.
return_type = binop_return_type(op)
if isinstance(self, NumExprFactor):
self_expr, other_expr, new_inputs = self.build_binary_op(
op, other,
)
return return_type(
"({left}) {op} ({right})".format(
left=self_expr,
op=op,
right=other_expr,
),
new_inputs,
dtype=binop_return_dtype(op, self.dtype, other.dtype),
)
elif isinstance(other, NumExprFactor):
# NumericalExpression overrides ops to correctly handle merging of
# inputs. Look up and call the appropriate reflected operator with
# ourself as the input.
return commuted_method_getter(other)(self)
elif isinstance(other, Term):
if self is other:
return return_type(
"x_0 {op} x_0".format(op=op),
(self,),
dtype=binop_return_dtype(op, self.dtype, other.dtype),
)
return return_type(
"x_0 {op} x_1".format(op=op),
(self, other),
dtype=binop_return_dtype(op, self.dtype, other.dtype),
)
elif isinstance(other, Number):
return return_type(
"x_0 {op} ({constant})".format(op=op, constant=other),
binds=(self,),
# .dtype access is safe here because coerce_numbers_to_my_dtype
# will convert any input numbers to numpy equivalents.
dtype=binop_return_dtype(op, self.dtype, other.dtype)
)
raise BadBinaryOperator(op, self, other)
return binary_operator
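# Illustrative sketch (not part of the original module): the factory above
# compiles operations like ``f1 + f2`` down to numexpr expression strings
# such as "(x_0) + (x_1)". A standalone demonstration with numexpr itself;
# the helper name is hypothetical.
def _example_numexpr_binop():
    import numexpr
    import numpy as np
    x_0 = np.array([1.0, 2.0, 3.0])
    x_1 = np.array([4.0, 5.0, 6.0])
    # numexpr resolves x_0/x_1 from the local namespace, much as a
    # NumExprFactor binds them to its inputs.
    return numexpr.evaluate("(x_0) + (x_1)")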
def reflected_binary_operator(op):
"""
Factory function for making binary operator methods on a Factor.
Returns a function, "reflected_binary_operator" suitable for implementing
functions like __radd__.
"""
assert not is_comparison(op)
@with_name(method_name_for_op(op, commute=True))
@coerce_numbers_to_my_dtype
def reflected_binary_operator(self, other):
if isinstance(self, NumericalExpression):
self_expr, other_expr, new_inputs = self.build_binary_op(
op, other
)
return NumExprFactor(
"({left}) {op} ({right})".format(
left=other_expr,
right=self_expr,
op=op,
),
new_inputs,
dtype=binop_return_dtype(op, other.dtype, self.dtype)
)
# Only have to handle the numeric case because in all other valid cases
# the corresponding left-binding method will be called.
elif isinstance(other, Number):
return NumExprFactor(
"{constant} {op} x_0".format(op=op, constant=other),
binds=(self,),
dtype=binop_return_dtype(op, other.dtype, self.dtype),
)
raise BadBinaryOperator(op, other, self)
return reflected_binary_operator
def unary_operator(op):
"""
Factory function for making unary operator methods for Factors.
"""
# Only negate is currently supported.
valid_ops = {'-'}
if op not in valid_ops:
raise ValueError("Invalid unary operator %s." % op)
@with_doc("Unary Operator: '%s'" % op)
@with_name(unary_op_name(op))
def unary_operator(self):
if self.dtype != float64_dtype:
raise TypeError(
"Can't apply unary operator {op!r} to instance of "
"{typename!r} with dtype {dtypename!r}.\n"
"{op!r} is only supported for Factors of dtype "
"'float64'.".format(
op=op,
typename=type(self).__name__,
dtypename=self.dtype.name,
)
)
# This can't be hoisted up a scope because the types returned by
# unary_op_return_type aren't defined when the top-level function is
# invoked.
if isinstance(self, NumericalExpression):
return NumExprFactor(
"{op}({expr})".format(op=op, expr=self._expr),
self.inputs,
dtype=float64_dtype,
)
else:
return NumExprFactor(
"{op}x_0".format(op=op),
(self,),
dtype=float64_dtype,
)
return unary_operator
def function_application(func):
"""
Factory function for producing function application methods for Factor
subclasses.
"""
if func not in NUMEXPR_MATH_FUNCS:
raise ValueError("Unsupported mathematical function '%s'" % func)
@with_doc(func)
@with_name(func)
def mathfunc(self):
if isinstance(self, NumericalExpression):
return NumExprFactor(
"{func}({expr})".format(func=func, expr=self._expr),
self.inputs,
dtype=float64_dtype,
)
else:
return NumExprFactor(
"{func}(x_0)".format(func=func),
(self,),
dtype=float64_dtype,
)
return mathfunc
# Decorators for Factor methods.
if_not_float64_tell_caller_to_use_isnull = restrict_to_dtype(
dtype=float64_dtype,
message_template=(
"{method_name}() was called on a factor of dtype {received_dtype}.\n"
"{method_name}() is only defined for dtype {expected_dtype}."
"To filter missing data, use isnull() or notnull()."
)
)
float64_only = restrict_to_dtype(
dtype=float64_dtype,
message_template=(
"{method_name}() is only defined on Factors of dtype {expected_dtype},"
" but it was called on a Factor of dtype {received_dtype}."
)
)
class Factor(RestrictedDTypeMixin, ComputableTerm):
"""
Pipeline API expression producing a numerical or date-valued output.
Factors are the most commonly-used Pipeline term, representing the result
of any computation producing a numerical result.
Factors can be combined, both with other Factors and with scalar values,
via any of the builtin mathematical operators (``+``, ``-``, ``*``, etc).
This makes it easy to write complex expressions that combine multiple
Factors. For example, constructing a Factor that computes the average of
two other Factors is simply::
>>> f1 = SomeFactor(...) # doctest: +SKIP
>>> f2 = SomeOtherFactor(...) # doctest: +SKIP
>>> average = (f1 + f2) / 2.0 # doctest: +SKIP
Factors can also be converted into :class:`zipline.pipeline.Filter` objects
via comparison operators: (``<``, ``<=``, ``!=``, ``eq``, ``>``, ``>=``).
There are many natural operators defined on Factors besides the basic
numerical operators. These include methods identifying missing or
extreme-valued outputs (isnull, notnull, isnan, notnan), methods for
normalizing outputs (rank, demean, zscore), and methods for constructing
Filters based on rank-order properties of results (top, bottom,
percentile_between).
"""
ALLOWED_DTYPES = FACTOR_DTYPES # Used by RestrictedDTypeMixin
# Dynamically add functions for creating NumExprFactor/NumExprFilter
# instances.
clsdict = locals()
clsdict.update(
{
method_name_for_op(op): binary_operator(op)
# Don't override __eq__ because it breaks comparisons on tuples of
# Factors.
for op in MATH_BINOPS.union(COMPARISONS - {'=='})
}
)
clsdict.update(
{
method_name_for_op(op, commute=True): reflected_binary_operator(op)
for op in MATH_BINOPS
}
)
clsdict.update(
{
unary_op_name(op): unary_operator(op)
for op in UNARY_OPS
}
)
clsdict.update(
{
funcname: function_application(funcname)
for funcname in NUMEXPR_MATH_FUNCS
}
)
__truediv__ = clsdict['__div__']
__rtruediv__ = clsdict['__rdiv__']
del clsdict # don't pollute the class namespace with this.
eq = binary_operator('==')
@expect_types(
mask=(Filter, NotSpecifiedType),
groupby=(Classifier, NotSpecifiedType),
)
@float64_only
def demean(self, mask=NotSpecified, groupby=NotSpecified):
"""
Construct a Factor that computes ``self`` and subtracts the mean from
each row of the result.
If ``mask`` is supplied, ignore values where ``mask`` returns False
when computing row means, and output NaN anywhere the mask is False.
If ``groupby`` is supplied, compute by partitioning each row based on
the values produced by ``groupby``, de-meaning the partitioned arrays,
and stitching the sub-results back together.
Parameters
----------
mask : zipline.pipeline.Filter, optional
A Filter defining values to ignore when computing means.
groupby : zipline.pipeline.Classifier, optional
A classifier defining partitions over which to compute means.
Examples
--------
Let ``f`` be a Factor which would produce the following output::
AAPL MSFT MCD BK
2017-03-13 1.0 2.0 3.0 4.0
2017-03-14 1.5 2.5 3.5 1.0
2017-03-15 2.0 3.0 4.0 1.5
2017-03-16 2.5 3.5 1.0 2.0
Let ``c`` be a Classifier producing the following output::
AAPL MSFT MCD BK
2017-03-13 1 1 2 2
2017-03-14 1 1 2 2
2017-03-15 1 1 2 2
2017-03-16 1 1 2 2
Let ``m`` be a Filter producing the following output::
AAPL MSFT MCD BK
2017-03-13 False True True True
2017-03-14 True False True True
2017-03-15 True True False True
2017-03-16 True True True False
Then ``f.demean()`` will subtract the mean from each row produced by
``f``.
::
AAPL MSFT MCD BK
2017-03-13 -1.500 -0.500 0.500 1.500
2017-03-14 -0.625 0.375 1.375 -1.125
2017-03-15 -0.625 0.375 1.375 -1.125
2017-03-16 0.250 1.250 -1.250 -0.250
``f.demean(mask=m)`` will subtract the mean from each row, but means
will be calculated ignoring values on the diagonal, and NaNs will be
written to the diagonal in the output. Diagonal values are ignored
because they are the locations where the mask ``m`` produced False.
::
AAPL MSFT MCD BK
2017-03-13 NaN -1.000 0.000 1.000
2017-03-14 -0.500 NaN 1.500 -1.000
2017-03-15 -0.166 0.833 NaN -0.666
2017-03-16 0.166 1.166 -1.333 NaN
``f.demean(groupby=c)`` will subtract the group-mean of AAPL/MSFT and
MCD/BK from their respective entries. The AAPL/MSFT are grouped
together because both assets always produce 1 in the output of the
classifier ``c``. Similarly, MCD/BK are grouped together because they
always produce 2.
::
AAPL MSFT MCD BK
2017-03-13 -0.500 0.500 -0.500 0.500
2017-03-14 -0.500 0.500 1.250 -1.250
2017-03-15 -0.500 0.500 1.250 -1.250
2017-03-16 -0.500 0.500 -0.500 0.500
``f.demean(mask=m, groupby=c)`` will also subtract the group-mean of
AAPL/MSFT and MCD/BK, but means will be calculated ignoring values on
the diagonal, and NaNs will be written to the diagonal in the output.
::
AAPL MSFT MCD BK
2017-03-13 NaN 0.000 -0.500 0.500
2017-03-14 0.000 NaN 1.250 -1.250
2017-03-15 -0.500 0.500 NaN 0.000
2017-03-16 -0.500 0.500 0.000 NaN
Notes
-----
Mean is sensitive to the magnitudes of outliers. When working with a
factor that can potentially produce large outliers, it is often useful
to use the ``mask`` parameter to discard values at the extremes of the
distribution::
>>> base = MyFactor(...) # doctest: +SKIP
>>> normalized = base.demean(
... mask=base.percentile_between(1, 99),
... ) # doctest: +SKIP
``demean()`` is only supported on Factors of dtype float64.
See Also
--------
:meth:`pandas.DataFrame.groupby`
"""
return GroupedRowTransform(
transform=demean,
transform_args=(),
factor=self,
groupby=groupby,
dtype=self.dtype,
missing_value=self.missing_value,
window_safe=self.window_safe,
mask=mask,
)
@expect_types(
mask=(Filter, NotSpecifiedType),
groupby=(Classifier, NotSpecifiedType),
)
@float64_only
def zscore(self, mask=NotSpecified, groupby=NotSpecified):
"""
Construct a Factor that Z-Scores each day's results.
The Z-Score of a row is defined as::
(row - row.mean()) / row.stddev()
If ``mask`` is supplied, ignore values where ``mask`` returns False
when computing row means and standard deviations, and output NaN
anywhere the mask is False.
If ``groupby`` is supplied, compute by partitioning each row based on
the values produced by ``groupby``, z-scoring the partitioned arrays,
and stitching the sub-results back together.
Parameters
----------
mask : zipline.pipeline.Filter, optional
A Filter defining values to ignore when Z-Scoring.
groupby : zipline.pipeline.Classifier, optional
A classifier defining partitions over which to compute Z-Scores.
Returns
-------
zscored : zipline.pipeline.Factor
A Factor producing that z-scores the output of self.
Notes
-----
Mean and standard deviation are sensitive to the magnitudes of
outliers. When working with a factor that can potentially produce large
outliers, it is often useful to use the ``mask`` parameter to discard
values at the extremes of the distribution::
>>> base = MyFactor(...) # doctest: +SKIP
>>> normalized = base.zscore(
... mask=base.percentile_between(1, 99),
... ) # doctest: +SKIP
``zscore()`` is only supported on Factors of dtype float64.
Examples
--------
See :meth:`~zipline.pipeline.factors.Factor.demean` for an in-depth
example of the semantics for ``mask`` and ``groupby``.
See Also
--------
:meth:`pandas.DataFrame.groupby`
"""
return GroupedRowTransform(
transform=zscore,
transform_args=(),
factor=self,
groupby=groupby,
dtype=self.dtype,
missing_value=self.missing_value,
mask=mask,
window_safe=True,
)
def rank(self,
method='ordinal',
ascending=True,
mask=NotSpecified,
groupby=NotSpecified):
"""
Construct a new Factor representing the sorted rank of each column
within each row.
Parameters
----------
method : str, {'ordinal', 'min', 'max', 'dense', 'average'}
The method used to assign ranks to tied elements. See
`scipy.stats.rankdata` for a full description of the semantics for
each ranking method. Default is 'ordinal'.
ascending : bool, optional
Whether to return sorted rank in ascending or descending order.
Default is True.
mask : zipline.pipeline.Filter, optional
A Filter representing assets to consider when computing ranks.
If mask is supplied, ranks are computed ignoring any asset/date
pairs for which `mask` produces a value of False.
groupby : zipline.pipeline.Classifier, optional
A classifier defining partitions over which to perform ranking.
Returns
-------
ranks : zipline.pipeline.factors.Rank
A new factor that will compute the ranking of the data produced by
`self`.
Notes
-----
The default value for `method` is different from the default for
`scipy.stats.rankdata`. See that function's documentation for a full
description of the valid inputs to `method`.
Missing or non-existent data on a given day will cause an asset to be
given a rank of NaN for that day.
See Also
--------
:func:`scipy.stats.rankdata`
:class:`zipline.pipeline.factors.factor.Rank`
"""
if groupby is NotSpecified:
return Rank(self, method=method, ascending=ascending, mask=mask)
return GroupedRowTransform(
transform=rankdata if ascending else rankdata_1d_descending,
transform_args=(method,),
factor=self,
groupby=groupby,
dtype=float64_dtype,
missing_value=nan,
mask=mask,
window_safe=True,
)
@expect_types(
target=Term, correlation_length=int, mask=(Filter, NotSpecifiedType),
)
def pearsonr(self, target, correlation_length, mask=NotSpecified):
"""
Construct a new Factor that computes rolling Pearson correlation
coefficients between `target` and the columns of `self`.
This method can only be called on factors which are deemed safe for use
as inputs to other factors. This includes `Returns` and any factors
created from `Factor.rank` or `Factor.zscore`.
Parameters
----------
target : zipline.pipeline.Term with a numeric dtype
The term used to compute correlations against each column of data
produced by `self`. This may be a Factor, a BoundColumn or a Slice.
If `target` is two-dimensional, correlations are computed
asset-wise.
correlation_length : int
Length of the lookback window over which to compute each
correlation coefficient.
mask : zipline.pipeline.Filter, optional
A Filter describing which assets should have their correlation with
the target slice computed each day.
Returns
-------
correlations : zipline.pipeline.factors.RollingPearson
A new Factor that will compute correlations between `target` and
the columns of `self`.
Examples
--------
Suppose we want to create a factor that computes the correlation
between AAPL's 10-day returns and the 10-day returns of all other
assets, computing each correlation over 30 days. This can be achieved
by doing the following::
returns = Returns(window_length=10)
returns_slice = returns[sid(24)]
aapl_correlations = returns.pearsonr(
target=returns_slice, correlation_length=30,
)
This is equivalent to doing::
aapl_correlations = RollingPearsonOfReturns(
target=sid(24), returns_length=10, correlation_length=30,
)
See Also
--------
:func:`scipy.stats.pearsonr`
:class:`zipline.pipeline.factors.RollingPearsonOfReturns`
:meth:`Factor.spearmanr`
"""
from .statistical import RollingPearson
return RollingPearson(
base_factor=self,
target=target,
correlation_length=correlation_length,
mask=mask,
)
@expect_types(
target=Term, correlation_length=int, mask=(Filter, NotSpecifiedType),
)
def spearmanr(self, target, correlation_length, mask=NotSpecified):
"""
Construct a new Factor that computes rolling Spearman rank correlation
coefficients between `target` and the columns of `self`.
This method can only be called on factors which are deemed safe for use
as inputs to other factors. This includes `Returns` and any factors
created from `Factor.rank` or `Factor.zscore`.
Parameters
----------
target : zipline.pipeline.Term with a numeric dtype
The term used to compute correlations against each column of data
produced by `self`. This may be a Factor, a BoundColumn or a Slice.
If `target` is two-dimensional, correlations are computed
asset-wise.
correlation_length : int
Length of the lookback window over which to compute each
correlation coefficient.
mask : zipline.pipeline.Filter, optional
A Filter describing which assets should have their correlation with
the target slice computed each day.
Returns
-------
correlations : zipline.pipeline.factors.RollingSpearman
A new Factor that will compute correlations between `target` and
the columns of `self`.
Examples
--------
Suppose we want to create a factor that computes the correlation
between AAPL's 10-day returns and the 10-day returns of all other
assets, computing each correlation over 30 days. This can be achieved
by doing the following::
returns = Returns(window_length=10)
returns_slice = returns[sid(24)]
aapl_correlations = returns.spearmanr(
target=returns_slice, correlation_length=30,
)
This is equivalent to doing::
aapl_correlations = RollingSpearmanOfReturns(
target=sid(24), returns_length=10, correlation_length=30,
)
See Also
--------
:func:`scipy.stats.spearmanr`
:class:`zipline.pipeline.factors.RollingSpearmanOfReturns`
:meth:`Factor.pearsonr`
"""
from .statistical import RollingSpearman
return RollingSpearman(
base_factor=self,
target=target,
correlation_length=correlation_length,
mask=mask,
)
@expect_types(
target=Term, regression_length=int, mask=(Filter, NotSpecifiedType),
)
def linear_regression(self, target, regression_length, mask=NotSpecified):
"""
Construct a new Factor that performs an ordinary least-squares
regression predicting the columns of `self` from `target`.
This method can only be called on factors which are deemed safe for use
as inputs to other factors. This includes `Returns` and any factors
created from `Factor.rank` or `Factor.zscore`.
Parameters
----------
target : zipline.pipeline.Term with a numeric dtype
The term to use as the predictor/independent variable in each
regression. This may be a Factor, a BoundColumn or a Slice. If
`target` is two-dimensional, regressions are computed asset-wise.
regression_length : int
Length of the lookback window over which to compute each
regression.
mask : zipline.pipeline.Filter, optional
A Filter describing which assets should be regressed with the
target slice each day.
Returns
-------
regressions : zipline.pipeline.factors.RollingLinearRegression
A new Factor that will compute linear regressions of `target`
against the columns of `self`.
Examples
--------
Suppose we want to create a factor that regresses AAPL's 10-day returns
against the 10-day returns of all other assets, computing each
regression over 30 days. This can be achieved by doing the following::
returns = Returns(window_length=10)
returns_slice = returns[sid(24)]
aapl_regressions = returns.linear_regression(
target=returns_slice, regression_length=30,
)
This is equivalent to doing::
aapl_regressions = RollingLinearRegressionOfReturns(
target=sid(24), returns_length=10, regression_length=30,
)
See Also
--------
:func:`scipy.stats.linregress`
:class:`zipline.pipeline.factors.RollingLinearRegressionOfReturns`
"""
from .statistical import RollingLinearRegression
return RollingLinearRegression(
dependent=self,
independent=target,
regression_length=regression_length,
mask=mask,
)
@expect_types(
min_percentile=(int, float),
max_percentile=(int, float),
mask=(Filter, NotSpecifiedType),
groupby=(Classifier, NotSpecifiedType),
)
@float64_only
def winsorize(self,
min_percentile,
max_percentile,
mask=NotSpecified,
groupby=NotSpecified):
"""
Construct a new factor that winsorizes the result of this factor.
Winsorizing changes values ranked less than the minimum percentile to
the value at the minimum percentile. Similarly, values ranking above
the maximum percentile are changed to the value at the maximum
percentile.
Winsorizing is useful for limiting the impact of extreme data points
without completely removing those points.
If ``mask`` is supplied, ignore values where ``mask`` returns False
when computing percentile cutoffs, and output NaN anywhere the mask is
False.
If ``groupby`` is supplied, winsorization is applied separately to each
group defined by ``groupby``.
Parameters
----------
min_percentile: float, int
Entries with values at or below this percentile will be replaced
with the (len(input) * min_percentile)th lowest value. If low
values should not be clipped, use 0.
max_percentile: float, int
Entries with values at or above this percentile will be replaced
with the (len(input) * max_percentile)th lowest value. If high
values should not be clipped, use 1.
mask : zipline.pipeline.Filter, optional
A Filter defining values to ignore when winsorizing.
groupby : zipline.pipeline.Classifier, optional
A classifier defining partitions over which to winsorize.
Returns
-------
winsorized : zipline.pipeline.Factor
A Factor producing a winsorized version of self.
Examples
--------
.. code-block:: python
price = USEquityPricing.close.latest
columns={
'PRICE': price,
'WINSOR_1': price.winsorize(
min_percentile=0.25, max_percentile=0.75
),
'WINSOR_2': price.winsorize(
min_percentile=0.50, max_percentile=1.0
),
'WINSOR_3': price.winsorize(
min_percentile=0.0, max_percentile=0.5
),
}
Given a pipeline with columns, defined above, the result for a
given day could look like:
::
'PRICE' 'WINSOR_1' 'WINSOR_2' 'WINSOR_3'
Asset_1 1 2 4 3
Asset_2 2 2 4 3
Asset_3 3 3 4 3
Asset_4 4 4 4 4
Asset_5 5 5 5 4
Asset_6 6 5 5 4
See Also
--------
:func:`scipy.stats.mstats.winsorize`
:meth:`pandas.DataFrame.groupby`
"""
if not 0.0 <= min_percentile < max_percentile <= 1.0:
raise BadPercentileBounds(
min_percentile=min_percentile,
max_percentile=max_percentile,
upper_bound=1.0,
)
return GroupedRowTransform(
transform=winsorize,
transform_args=(min_percentile, max_percentile),
factor=self,
groupby=groupby,
dtype=self.dtype,
missing_value=self.missing_value,
mask=mask,
window_safe=self.window_safe,
)
@expect_types(bins=int, mask=(Filter, NotSpecifiedType))
def quantiles(self, bins, mask=NotSpecified):
"""
Construct a Classifier computing quantiles of the output of ``self``.
Every non-NaN data point in the output is labelled with an integer value
from 0 to (bins - 1). NaNs are labelled with -1.
If ``mask`` is supplied, ignore data points in locations for which
``mask`` produces False, and emit a label of -1 at those locations.
Parameters
----------
bins : int
Number of bin labels to compute.
mask : zipline.pipeline.Filter, optional
Mask of values to ignore when computing quantiles.
Returns
-------
quantiles : zipline.pipeline.classifiers.Quantiles
A Classifier producing integer labels ranging from 0 to (bins - 1).
"""
if mask is NotSpecified:
mask = self.mask
return Quantiles(inputs=(self,), bins=bins, mask=mask)
@expect_types(mask=(Filter, NotSpecifiedType))
def quartiles(self, mask=NotSpecified):
"""
Construct a Classifier computing quartiles over the output of ``self``.
Every non-NaN data point in the output is labelled with a value of either
0, 1, 2, or 3, corresponding to the first, second, third, or fourth
quartile over each row. NaN data points are labelled with -1.
If ``mask`` is supplied, ignore data points in locations for which
``mask`` produces False, and emit a label of -1 at those locations.
Parameters
----------
mask : zipline.pipeline.Filter, optional
Mask of values to ignore when computing quartiles.
Returns
-------
quartiles : zipline.pipeline.classifiers.Quantiles
A Classifier producing integer labels ranging from 0 to 3.
"""
return self.quantiles(bins=4, mask=mask)
@expect_types(mask=(Filter, NotSpecifiedType))
def quintiles(self, mask=NotSpecified):
"""
Construct a Classifier computing quintile labels on ``self``.
Every non-NaN data point in the output is labelled with a value of
either 0, 1, 2, 3, or 4, corresponding to quintiles over each row. NaN data
points are labelled with -1.
If ``mask`` is supplied, ignore data points in locations for which
``mask`` produces False, and emit a label of -1 at those locations.
Parameters
----------
mask : zipline.pipeline.Filter, optional
Mask of values to ignore when computing quintiles.
Returns
-------
quintiles : zipline.pipeline.classifiers.Quantiles
A Classifier producing integer labels ranging from 0 to 4.
"""
return self.quantiles(bins=5, mask=mask)
@expect_types(mask=(Filter, NotSpecifiedType))
def deciles(self, mask=NotSpecified):
"""
Construct a Classifier computing decile labels on ``self``.
Every non-NaN data point in the output is labelled with a value from 0
to 9, corresponding to deciles over each row. NaN data points are labelled
with -1.
If ``mask`` is supplied, ignore data points in locations for which
``mask`` produces False, and emit a label of -1 at those locations.
Parameters
----------
mask : zipline.pipeline.Filter, optional
Mask of values to ignore when computing deciles.
Returns
-------
deciles : zipline.pipeline.classifiers.Quantiles
A Classifier producing integer labels ranging from 0 to 9.
"""
return self.quantiles(bins=10, mask=mask)
def top(self, N, mask=NotSpecified, groupby=NotSpecified):
"""
Construct a Filter matching the top N asset values of self each day.
If ``groupby`` is supplied, returns a Filter matching the top N asset
values for each group.
Parameters
----------
N : int
Number of assets passing the returned filter each day.
mask : zipline.pipeline.Filter, optional
A Filter representing assets to consider when computing ranks.
If mask is supplied, top values are computed ignoring any
asset/date pairs for which `mask` produces a value of False.
groupby : zipline.pipeline.Classifier, optional
A classifier defining partitions over which to perform ranking.
Returns
-------
filter : zipline.pipeline.filters.Filter
"""
if N == 1:
# Special case: if N == 1, we can avoid doing a full sort on every
# group, which is a big win.
return self._maximum(mask=mask, groupby=groupby)
return self.rank(ascending=False, mask=mask, groupby=groupby) <= N
def bottom(self, N, mask=NotSpecified, groupby=NotSpecified):
"""
Construct a Filter matching the bottom N asset values of self each day.
If ``groupby`` is supplied, returns a Filter matching the bottom N
asset values for each group.
Parameters
----------
N : int
Number of assets passing the returned filter each day.
mask : zipline.pipeline.Filter, optional
A Filter representing assets to consider when computing ranks.
If mask is supplied, bottom values are computed ignoring any
asset/date pairs for which `mask` produces a value of False.
groupby : zipline.pipeline.Classifier, optional
A classifier defining partitions over which to perform ranking.
Returns
-------
filter : zipline.pipeline.Filter
"""
return self.rank(ascending=True, mask=mask, groupby=groupby) <= N
def _maximum(self, mask=NotSpecified, groupby=NotSpecified):
return MaximumFilter(self, groupby=groupby, mask=mask)
def percentile_between(self,
min_percentile,
max_percentile,
mask=NotSpecified):
"""
Construct a new Filter representing entries from the output of this
Factor that fall within the percentile range defined by min_percentile
and max_percentile.
Parameters
----------
min_percentile : float [0.0, 100.0]
Return True for assets falling above this percentile in the data.
max_percentile : float [0.0, 100.0]
Return True for assets falling below this percentile in the data.
mask : zipline.pipeline.Filter, optional
A Filter representing assets to consider when calculating
percentile thresholds. If mask is supplied, percentile cutoffs
are computed each day using only assets for which ``mask`` returns
True. Assets for which ``mask`` produces False will produce False
in the output of this Factor as well.
Returns
-------
out : zipline.pipeline.filters.PercentileFilter
A new filter that will compute the specified percentile-range mask.
See Also
--------
zipline.pipeline.filters.filter.PercentileFilter
"""
return PercentileFilter(
self,
min_percentile=min_percentile,
max_percentile=max_percentile,
mask=mask,
)
def isnull(self):
"""
A Filter producing True for values where this Factor has missing data.
Equivalent to self.isnan() when ``self.dtype`` is float64.
Otherwise equivalent to ``self.eq(self.missing_value)``.
Returns
-------
filter : zipline.pipeline.filters.Filter
"""
if self.dtype == float64_dtype:
# Using isnan is more efficient when possible because we can fold
# the isnan computation with other NumExpr expressions.
return self.isnan()
else:
return NullFilter(self)
def notnull(self):
"""
A Filter producing True for values where this Factor has complete data.
Equivalent to ``~self.isnan()`` when ``self.dtype`` is float64.
Otherwise equivalent to ``(self != self.missing_value)``.
"""
return NotNullFilter(self)
@if_not_float64_tell_caller_to_use_isnull
def isnan(self):
"""
A Filter producing True for all values where this Factor is NaN.
Returns
-------
nanfilter : zipline.pipeline.filters.Filter
"""
return self != self
@if_not_float64_tell_caller_to_use_isnull
def notnan(self):
"""
A Filter producing True for values where this Factor is not NaN.
Returns
-------
nanfilter : zipline.pipeline.filters.Filter
"""
return ~self.isnan()
@if_not_float64_tell_caller_to_use_isnull
def isfinite(self):
"""
A Filter producing True for values where this Factor is anything but
NaN, inf, or -inf.
"""
return (-inf < self) & (self < inf)
@classlazyval
def _downsampled_type(self):
return DownsampledMixin.make_downsampled_type(Factor)
@classlazyval
def _aliased_type(self):
return AliasedMixin.make_aliased_type(Factor)
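# Usage sketch for the filter-producing methods above (a hypothetical
# example, not part of the library itself):
def _factor_filter_example():
    from zipline.pipeline.factors import AverageDollarVolume
    adv = AverageDollarVolume(window_length=30)
    top_500 = adv.top(500)  # Filter: the 500 highest-ADV assets each day.
    liquid = adv.percentile_between(90.0, 100.0)  # Top decile by ADV.
    return top_500, liquid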
class NumExprFactor(NumericalExpression, Factor):
"""
Factor computed from a numexpr expression.
Parameters
----------
expr : string
A string suitable for passing to numexpr. All variables in 'expr'
should be of the form "x_i", where i is the index of the corresponding
factor input in 'binds'.
binds : tuple
A tuple of factors to use as inputs.
Notes
-----
NumExprFactors are constructed by numerical operators like `+` and `-`.
Users should rarely need to construct a NumExprFactor directly.
"""
pass
class GroupedRowTransform(Factor):
"""
A Factor that transforms an input factor by applying a row-wise
shape-preserving transformation on classifier-defined groups of that
Factor.
This is most often useful for normalization operators like ``zscore`` or
``demean`` or for performing ranking using ``rank``.
Parameters
----------
transform : function[ndarray[ndim=1] -> ndarray[ndim=1]]
Function to apply over each row group.
factor : zipline.pipeline.Factor
The factor providing baseline data to transform.
mask : zipline.pipeline.Filter
Mask of entries to ignore when calculating transforms.
groupby : zipline.pipeline.Classifier
Classifier partitioning ``factor`` into groups to use when calculating
means.
transform_args : tuple[hashable]
Additional positional arguments to forward to ``transform``.
Notes
-----
Users should rarely construct instances of this factor directly. Instead,
they should construct instances via factor normalization methods like
``zscore`` and ``demean`` or using ``rank`` with ``groupby``.
See Also
--------
zipline.pipeline.factors.Factor.zscore
zipline.pipeline.factors.Factor.demean
zipline.pipeline.factors.Factor.rank
"""
window_length = 0
def __new__(cls,
transform,
transform_args,
factor,
groupby,
dtype,
missing_value,
mask,
**kwargs):
if mask is NotSpecified:
mask = factor.mask
else:
mask = mask & factor.mask
if groupby is NotSpecified:
groupby = Everything(mask=mask)
return super(GroupedRowTransform, cls).__new__(
GroupedRowTransform,
transform=transform,
transform_args=transform_args,
inputs=(factor, groupby),
missing_value=missing_value,
mask=mask,
dtype=dtype,
**kwargs
)
def _init(self, transform, transform_args, *args, **kwargs):
self._transform = transform
self._transform_args = transform_args
return super(GroupedRowTransform, self)._init(*args, **kwargs)
@classmethod
def _static_identity(cls, transform, transform_args, *args, **kwargs):
return (
super(GroupedRowTransform, cls)._static_identity(*args, **kwargs),
transform,
transform_args,
)
def _compute(self, arrays, dates, assets, mask):
data = arrays[0]
group_labels, null_label = self.inputs[1]._to_integral(arrays[1])
# Make a copy with the null code written to masked locations.
group_labels = where(mask, group_labels, null_label)
return where(
group_labels != null_label,
naive_grouped_rowwise_apply(
data=data,
group_labels=group_labels,
func=self._transform,
func_args=self._transform_args,
out=empty_like(data, dtype=self.dtype),
),
self.missing_value,
)
@property
def transform_name(self):
return self._transform.__name__
def graph_repr(self):
"""Short repr to use when rendering Pipeline graphs."""
return type(self).__name__ + '(%r)' % self.transform_name
class Rank(SingleInputMixin, Factor):
"""
A Factor representing the row-wise rank data of another Factor.
Parameters
----------
factor : zipline.pipeline.factors.Factor
The factor on which to compute ranks.
method : str, {'average', 'min', 'max', 'dense', 'ordinal'}
The method used to assign ranks to tied elements. See
`scipy.stats.rankdata` for a full description of the semantics for each
ranking method.
See Also
--------
:func:`scipy.stats.rankdata`
:class:`Factor.rank`
Notes
-----
Most users should call Factor.rank rather than directly construct an
instance of this class.
"""
window_length = 0
dtype = float64_dtype
window_safe = True
def __new__(cls, factor, method, ascending, mask):
return super(Rank, cls).__new__(
cls,
inputs=(factor,),
method=method,
ascending=ascending,
mask=mask,
)
def _init(self, method, ascending, *args, **kwargs):
self._method = method
self._ascending = ascending
return super(Rank, self)._init(*args, **kwargs)
@classmethod
def _static_identity(cls, method, ascending, *args, **kwargs):
return (
super(Rank, cls)._static_identity(*args, **kwargs),
method,
ascending,
)
def _validate(self):
"""
Verify that the stored rank method is valid.
"""
if self._method not in _RANK_METHODS:
raise UnknownRankMethod(
method=self._method,
choices=set(_RANK_METHODS),
)
return super(Rank, self)._validate()
def _compute(self, arrays, dates, assets, mask):
"""
For each row in the input, compute a like-shaped array of per-row
ranks.
"""
return masked_rankdata_2d(
arrays[0],
mask,
self.inputs[0].missing_value,
self._method,
self._ascending,
)
def __repr__(self):
return "{type}({input_}, method='{method}', mask={mask})".format(
type=type(self).__name__,
input_=self.inputs[0],
method=self._method,
mask=self.mask,
)
def graph_repr(self):
return "Rank:\l method: {!r}\l mask: {}\l".format(
self._method,
type(self.mask).__name__,
)
class CustomFactor(PositiveWindowLengthMixin, CustomTermMixin, Factor):
'''
Base class for user-defined Factors.
Parameters
----------
inputs : iterable, optional
An iterable of `BoundColumn` instances (e.g. USEquityPricing.close),
describing the data to load and pass to `self.compute`. If this
argument is not passed to the CustomFactor constructor, we look for a
class-level attribute named `inputs`.
outputs : iterable[str], optional
An iterable of strings which represent the names of each output this
factor should compute and return. If this argument is not passed to the
CustomFactor constructor, we look for a class-level attribute named
`outputs`.
window_length : int, optional
Number of rows to pass for each input. If this argument is not passed
to the CustomFactor constructor, we look for a class-level attribute
named `window_length`.
mask : zipline.pipeline.Filter, optional
A Filter describing the assets on which we should compute each day.
Each call to ``CustomFactor.compute`` will only receive assets for
which ``mask`` produced True on the day for which compute is being
called.
Notes
-----
Users implementing their own Factors should subclass CustomFactor and
implement a method named `compute` with the following signature:
.. code-block:: python
def compute(self, today, assets, out, *inputs):
...
On each simulation date, ``compute`` will be called with the current date,
an array of sids, an output array, and an input array for each expression
passed as inputs to the CustomFactor constructor.
The specific types of the values passed to `compute` are as follows::
today : np.datetime64[ns]
Row label for the last row of all arrays passed as `inputs`.
assets : np.array[int64, ndim=1]
        Column labels for `out` and `inputs`.
out : np.array[self.dtype, ndim=1]
Output array of the same shape as `assets`. `compute` should write
its desired return values into `out`. If multiple outputs are
specified, `compute` should write its desired return values into
`out.<output_name>` for each output name in `self.outputs`.
*inputs : tuple of np.array
Raw data arrays corresponding to the values of `self.inputs`.
``compute`` functions should expect to be passed NaN values for dates on
which no data was available for an asset. This may include dates on which
an asset did not yet exist.
For example, if a CustomFactor requires 10 rows of close price data, and
asset A started trading on Monday June 2nd, 2014, then on Tuesday, June
3rd, 2014, the column of input data for asset A will have 9 leading NaNs
for the preceding days on which data was not yet available.
Examples
--------
A CustomFactor with pre-declared defaults:
.. code-block:: python
class TenDayRange(CustomFactor):
"""
Computes the difference between the highest high in the last 10
days and the lowest low.
Pre-declares high and low as default inputs and `window_length` as
10.
"""
inputs = [USEquityPricing.high, USEquityPricing.low]
window_length = 10
def compute(self, today, assets, out, highs, lows):
from numpy import nanmin, nanmax
highest_highs = nanmax(highs, axis=0)
lowest_lows = nanmin(lows, axis=0)
out[:] = highest_highs - lowest_lows
# Doesn't require passing inputs or window_length because they're
# pre-declared as defaults for the TenDayRange class.
ten_day_range = TenDayRange()
A CustomFactor without defaults:
.. code-block:: python
class MedianValue(CustomFactor):
"""
Computes the median value of an arbitrary single input over an
            arbitrary window.
Does not declare any defaults, so values for `window_length` and
`inputs` must be passed explicitly on every construction.
"""
def compute(self, today, assets, out, data):
from numpy import nanmedian
                out[:] = nanmedian(data, axis=0)
# Values for `inputs` and `window_length` must be passed explicitly to
# MedianValue.
median_close10 = MedianValue([USEquityPricing.close], window_length=10)
median_low15 = MedianValue([USEquityPricing.low], window_length=15)
A CustomFactor with multiple outputs:
.. code-block:: python
class MultipleOutputs(CustomFactor):
inputs = [USEquityPricing.close]
outputs = ['alpha', 'beta']
window_length = N
def compute(self, today, assets, out, close):
computed_alpha, computed_beta = some_function(close)
out.alpha[:] = computed_alpha
out.beta[:] = computed_beta
# Each output is returned as its own Factor upon instantiation.
alpha, beta = MultipleOutputs()
# Equivalently, we can create a single factor instance and access each
# output as an attribute of that instance.
multiple_outputs = MultipleOutputs()
alpha = multiple_outputs.alpha
beta = multiple_outputs.beta
Note: If a CustomFactor has multiple outputs, all outputs must have the
same dtype. For instance, in the example above, if alpha is a float then
beta must also be a float.
'''
dtype = float64_dtype
def _validate(self):
try:
super(CustomFactor, self)._validate()
except UnsupportedDataType:
if self.dtype in CLASSIFIER_DTYPES:
raise UnsupportedDataType(
typename=type(self).__name__,
dtype=self.dtype,
hint='Did you mean to create a CustomClassifier?',
)
elif self.dtype in FILTER_DTYPES:
raise UnsupportedDataType(
typename=type(self).__name__,
dtype=self.dtype,
hint='Did you mean to create a CustomFilter?',
)
raise
def __getattribute__(self, name):
outputs = object.__getattribute__(self, 'outputs')
if outputs is NotSpecified:
return super(CustomFactor, self).__getattribute__(name)
elif name in outputs:
return RecarrayField(factor=self, attribute=name)
else:
try:
return super(CustomFactor, self).__getattribute__(name)
except AttributeError:
raise AttributeError(
'Instance of {factor} has no output named {attr!r}. '
'Possible choices are: {choices}.'.format(
factor=type(self).__name__,
attr=name,
choices=self.outputs,
)
)
def __iter__(self):
if self.outputs is NotSpecified:
raise ValueError(
'{factor} does not have multiple outputs.'.format(
factor=type(self).__name__,
)
)
return (RecarrayField(self, attr) for attr in self.outputs)
class RecarrayField(SingleInputMixin, Factor):
"""
A single field from a multi-output factor.
"""
def __new__(cls, factor, attribute):
return super(RecarrayField, cls).__new__(
cls,
attribute=attribute,
inputs=[factor],
window_length=0,
mask=factor.mask,
dtype=factor.dtype,
missing_value=factor.missing_value,
window_safe=factor.window_safe
)
def _init(self, attribute, *args, **kwargs):
self._attribute = attribute
return super(RecarrayField, self)._init(*args, **kwargs)
@classmethod
def _static_identity(cls, attribute, *args, **kwargs):
return (
super(RecarrayField, cls)._static_identity(*args, **kwargs),
attribute,
)
def _compute(self, windows, dates, assets, mask):
return windows[0][self._attribute]
def graph_repr(self):
return "{}.{}".format(self.inputs[0].graph_repr(), self._attribute)
class Latest(LatestMixin, CustomFactor):
"""
Factor producing the most recently-known value of `inputs[0]` on each day.
The `.latest` attribute of DataSet columns returns an instance of this
Factor.
"""
window_length = 1
def compute(self, today, assets, out, data):
out[:] = data[-1]
# Functions to be passed to GroupedRowTransform. These aren't defined inline
# because the transformation function is part of the instance hash key.
def demean(row):
return row - nanmean(row)
def zscore(row):
return (row - nanmean(row)) / nanstd(row)
def winsorize(row, min_percentile, max_percentile):
"""
This implementation is based on scipy.stats.mstats.winsorize
"""
a = row.copy()
nan_count = isnan(row).sum()
nonnan_count = a.size - nan_count
# NOTE: argsort() sorts nans to the end of the array.
idx = a.argsort()
# Set values at indices below the min percentile to the value of the entry
# at the cutoff.
if min_percentile > 0:
lower_cutoff = int(min_percentile * nonnan_count)
a[idx[:lower_cutoff]] = a[idx[lower_cutoff]]
# Set values at indices above the max percentile to the value of the entry
# at the cutoff.
if max_percentile < 1:
upper_cutoff = int(ceil(nonnan_count * max_percentile))
# if max_percentile is close to 1, then upper_cutoff might not
# remove any values.
if upper_cutoff < nonnan_count:
start_of_nans = (-nan_count) if nan_count else None
a[idx[upper_cutoff:start_of_nans]] = a[idx[upper_cutoff - 1]]
    return a
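# A minimal sanity-check sketch for ``winsorize`` above (hypothetical, not
# part of the zipline API). Percentiles are expressed as fractions in [0, 1]:
def _winsorize_example():
    from numpy import array
    row = array([1., 2., 3., 4., 5., 6., 7., 8., 9., 10.])
    # lower_cutoff = int(0.1 * 10) == 1, so the smallest value is clipped up
    # to the second-smallest; upper_cutoff = ceil(10 * 0.9) == 9, so the
    # largest value is clipped down to the ninth-smallest.
    # Expected result: array([2., 2., 3., 4., 5., 6., 7., 8., 9., 9.])
    return winsorize(row, min_percentile=0.1, max_percentile=0.9)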
from numbers import Number
from numpy import (
arange,
average,
exp,
fmax,
full,
isnan,
log,
NINF,
sqrt,
sum as np_sum,
)
from zipline.pipeline.data import USEquityPricing
from zipline.utils.input_validation import expect_types
from zipline.utils.math_utils import (
nanargmax,
nanmax,
nanmean,
nanstd,
nansum,
)
from zipline.utils.numpy_utils import (
float64_dtype,
ignore_nanwarnings,
)
from .factor import CustomFactor
from ..mixins import SingleInputMixin
class Returns(CustomFactor):
"""
Calculates the percent change in close price over the given window_length.
**Default Inputs**: [USEquityPricing.close]
"""
inputs = [USEquityPricing.close]
window_safe = True
def _validate(self):
super(Returns, self)._validate()
if self.window_length < 2:
raise ValueError(
"'Returns' expected a window length of at least 2, but was "
"given {window_length}. For daily returns, use a window "
"length of 2.".format(window_length=self.window_length)
)
def compute(self, today, assets, out, close):
out[:] = (close[-1] - close[0]) / close[0]
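# Worked example of the computation above (a hypothetical sketch, not part
# of the library): with window_length=3 the factor sees three rows of closes
# per asset, and a column of [10.0, 11.0, 12.0] yields the two-day percent
# change (12.0 - 10.0) / 10.0 == 0.2.
def _returns_example():
    from numpy import array
    close = array([[10.0], [11.0], [12.0]])  # window_length x num_assets
    return (close[-1] - close[0]) / close[0]  # -> array([0.2])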
class DailyReturns(Returns):
"""
Calculates daily percent change in close price.
**Default Inputs**: [USEquityPricing.close]
"""
inputs = [USEquityPricing.close]
window_safe = True
window_length = 2
class SimpleMovingAverage(CustomFactor, SingleInputMixin):
"""
Average Value of an arbitrary column
**Default Inputs**: None
**Default Window Length**: None
"""
# numpy's nan functions throw warnings when passed an array containing only
    # nans, but they still return the desired value (nan), so we ignore the
# warning.
ctx = ignore_nanwarnings()
def compute(self, today, assets, out, data):
out[:] = nanmean(data, axis=0)
class WeightedAverageValue(CustomFactor):
"""
Helper for VWAP-like computations.
**Default Inputs:** None
**Default Window Length:** None
"""
def compute(self, today, assets, out, base, weight):
out[:] = nansum(base * weight, axis=0) / nansum(weight, axis=0)
class VWAP(WeightedAverageValue):
"""
Volume Weighted Average Price
**Default Inputs:** [USEquityPricing.close, USEquityPricing.volume]
**Default Window Length:** None
"""
inputs = (USEquityPricing.close, USEquityPricing.volume)
class MaxDrawdown(CustomFactor, SingleInputMixin):
"""
Max Drawdown
**Default Inputs:** None
**Default Window Length:** None
"""
ctx = ignore_nanwarnings()
def compute(self, today, assets, out, data):
drawdowns = fmax.accumulate(data, axis=0) - data
drawdowns[isnan(drawdowns)] = NINF
drawdown_ends = nanargmax(drawdowns, axis=0)
# TODO: Accelerate this loop in Cython or Numba.
for i, end in enumerate(drawdown_ends):
peak = nanmax(data[:end + 1, i])
out[i] = (peak - data[end, i]) / data[end, i]
class AverageDollarVolume(CustomFactor):
"""
Average Daily Dollar Volume
**Default Inputs:** [USEquityPricing.close, USEquityPricing.volume]
**Default Window Length:** None
"""
inputs = [USEquityPricing.close, USEquityPricing.volume]
def compute(self, today, assets, out, close, volume):
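        # Note: the denominator is the full window length, so days on which
        # close * volume is NaN (and thus ignored by nansum) pull the
        # average toward zero rather than being excluded from the count.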
out[:] = nansum(close * volume, axis=0) / len(close)
def exponential_weights(length, decay_rate):
"""
Build a weight vector for an exponentially-weighted statistic.
The resulting ndarray is of the form::
        [decay_rate ** (length + 1), ..., decay_rate ** 3, decay_rate ** 2]
    which is proportional to ``[decay_rate ** length, ..., decay_rate]``; the
    common factor of ``decay_rate`` cancels wherever the weights are
    normalized.
Parameters
----------
length : int
The length of the desired weight vector.
decay_rate : float
The rate at which entries in the weight vector increase or decrease.
Returns
-------
weights : ndarray[float64]
"""
return full(length, decay_rate, float64_dtype) ** arange(length + 1, 1, -1)
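# Quick sanity check of the weight vector above (a hypothetical sketch, not
# part of the library):
def _exponential_weights_example():
    # With length=3 and decay_rate=0.5 the exponents are [4, 3, 2], giving
    # [0.0625, 0.125, 0.25]: the newest row receives the largest weight.
    return exponential_weights(3, 0.5)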
class _ExponentialWeightedFactor(SingleInputMixin, CustomFactor):
"""
Base class for factors implementing exponential-weighted operations.
**Default Inputs:** None
**Default Window Length:** None
Parameters
----------
inputs : length-1 list or tuple of BoundColumn
The expression over which to compute the average.
window_length : int > 0
Length of the lookback window over which to compute the average.
decay_rate : float, 0 < decay_rate <= 1
Weighting factor by which to discount past observations.
When calculating historical averages, rows are multiplied by the
sequence::
decay_rate, decay_rate ** 2, decay_rate ** 3, ...
Methods
-------
weights
from_span
from_halflife
from_center_of_mass
"""
params = ('decay_rate',)
@classmethod
@expect_types(span=Number)
def from_span(cls, inputs, window_length, span, **kwargs):
"""
Convenience constructor for passing `decay_rate` in terms of `span`.
Forwards `decay_rate` as `1 - (2.0 / (1 + span))`. This provides the
behavior equivalent to passing `span` to pandas.ewma.
Examples
--------
.. code-block:: python
# Equivalent to:
# my_ewma = EWMA(
# inputs=[USEquityPricing.close],
# window_length=30,
# decay_rate=(1 - (2.0 / (1 + 15.0))),
# )
my_ewma = EWMA.from_span(
inputs=[USEquityPricing.close],
window_length=30,
span=15,
)
Notes
-----
This classmethod is provided by both
:class:`ExponentialWeightedMovingAverage` and
:class:`ExponentialWeightedMovingStdDev`.
"""
if span <= 1:
raise ValueError(
"`span` must be a positive number. %s was passed." % span
)
decay_rate = (1.0 - (2.0 / (1.0 + span)))
assert 0.0 < decay_rate <= 1.0
return cls(
inputs=inputs,
window_length=window_length,
decay_rate=decay_rate,
**kwargs
)
@classmethod
@expect_types(halflife=Number)
def from_halflife(cls, inputs, window_length, halflife, **kwargs):
"""
Convenience constructor for passing ``decay_rate`` in terms of half
life.
Forwards ``decay_rate`` as ``exp(log(.5) / halflife)``. This provides
the behavior equivalent to passing `halflife` to pandas.ewma.
Examples
--------
.. code-block:: python
# Equivalent to:
# my_ewma = EWMA(
# inputs=[USEquityPricing.close],
# window_length=30,
# decay_rate=np.exp(np.log(0.5) / 15),
# )
my_ewma = EWMA.from_halflife(
inputs=[USEquityPricing.close],
window_length=30,
halflife=15,
)
Notes
-----
This classmethod is provided by both
:class:`ExponentialWeightedMovingAverage` and
:class:`ExponentialWeightedMovingStdDev`.
"""
if halflife <= 0:
raise ValueError(
"`span` must be a positive number. %s was passed." % halflife
)
decay_rate = exp(log(.5) / halflife)
assert 0.0 < decay_rate <= 1.0
return cls(
inputs=inputs,
window_length=window_length,
decay_rate=decay_rate,
**kwargs
)
@classmethod
def from_center_of_mass(cls,
inputs,
window_length,
center_of_mass,
**kwargs):
"""
Convenience constructor for passing `decay_rate` in terms of center of
mass.
        Forwards `decay_rate` as `1 - (1 / (1 + center_of_mass))`. This
        provides
behavior equivalent to passing `center_of_mass` to pandas.ewma.
Examples
--------
.. code-block:: python
# Equivalent to:
# my_ewma = EWMA(
# inputs=[USEquityPricing.close],
# window_length=30,
# decay_rate=(1 - (1 / 15.0)),
# )
my_ewma = EWMA.from_center_of_mass(
inputs=[USEquityPricing.close],
window_length=30,
center_of_mass=15,
)
Notes
-----
This classmethod is provided by both
:class:`ExponentialWeightedMovingAverage` and
:class:`ExponentialWeightedMovingStdDev`.
"""
return cls(
inputs=inputs,
window_length=window_length,
decay_rate=(1.0 - (1.0 / (1.0 + center_of_mass))),
**kwargs
)
class ExponentialWeightedMovingAverage(_ExponentialWeightedFactor):
"""
Exponentially Weighted Moving Average
**Default Inputs:** None
**Default Window Length:** None
Parameters
----------
inputs : length-1 list/tuple of BoundColumn
The expression over which to compute the average.
window_length : int > 0
Length of the lookback window over which to compute the average.
decay_rate : float, 0 < decay_rate <= 1
Weighting factor by which to discount past observations.
When calculating historical averages, rows are multiplied by the
sequence::
decay_rate, decay_rate ** 2, decay_rate ** 3, ...
Notes
-----
- This class can also be imported under the name ``EWMA``.
See Also
--------
:func:`pandas.ewma`
"""
def compute(self, today, assets, out, data, decay_rate):
out[:] = average(
data,
axis=0,
weights=exponential_weights(len(data), decay_rate),
)
class ExponentialWeightedMovingStdDev(_ExponentialWeightedFactor):
"""
Exponentially Weighted Moving Standard Deviation
**Default Inputs:** None
**Default Window Length:** None
Parameters
----------
inputs : length-1 list/tuple of BoundColumn
The expression over which to compute the average.
window_length : int > 0
Length of the lookback window over which to compute the average.
decay_rate : float, 0 < decay_rate <= 1
Weighting factor by which to discount past observations.
When calculating historical averages, rows are multiplied by the
sequence::
decay_rate, decay_rate ** 2, decay_rate ** 3, ...
Notes
-----
- This class can also be imported under the name ``EWMSTD``.
See Also
--------
:func:`pandas.ewmstd`
"""
def compute(self, today, assets, out, data, decay_rate):
weights = exponential_weights(len(data), decay_rate)
mean = average(data, axis=0, weights=weights)
variance = average((data - mean) ** 2, axis=0, weights=weights)
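        # Unbias the weighted variance using the reliability-weights
        # correction: with V1 = sum(w) and V2 = sum(w ** 2), scale by
        # V1 ** 2 / (V1 ** 2 - V2). For equal weights this reduces to the
        # familiar n / (n - 1) Bessel factor.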
squared_weight_sum = (np_sum(weights) ** 2)
bias_correction = (
squared_weight_sum / (squared_weight_sum - np_sum(weights ** 2))
)
out[:] = sqrt(variance * bias_correction)
class LinearWeightedMovingAverage(CustomFactor, SingleInputMixin):
"""
Weighted Average Value of an arbitrary column
**Default Inputs**: None
**Default Window Length**: None
"""
# numpy's nan functions throw warnings when passed an array containing only
    # nans, but they still return the desired value (nan), so we ignore the
# warning.
ctx = ignore_nanwarnings()
def compute(self, today, assets, out, data):
ndays = data.shape[0]
# Initialize weights array
weights = arange(1, ndays + 1, dtype=float64_dtype).reshape(ndays, 1)
# Compute normalizer
normalizer = (ndays * (ndays + 1)) / 2
# Weight the data
weighted_data = data * weights
# Compute weighted averages
out[:] = nansum(weighted_data, axis=0) / normalizer
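# Worked example of the linear weighting above (a hypothetical sketch, not
# part of the library):
def _lwma_example():
    from numpy import arange, array, nansum
    data = array([[1.0], [2.0], [3.0]])  # 3 days x 1 asset
    weights = arange(1, 4, dtype=float).reshape(3, 1)
    normalizer = (3 * 4) / 2  # sum of weights 1 + 2 + 3 == 6
    # (1*1 + 2*2 + 3*3) / 6 == 14 / 6, tilted toward the most recent row.
    return nansum(data * weights, axis=0) / normalizer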
class AnnualizedVolatility(CustomFactor):
"""
Volatility. The degree of variation of a series over time as measured by
the standard deviation of daily returns.
https://en.wikipedia.org/wiki/Volatility_(finance)
**Default Inputs:** :data:`zipline.pipeline.factors.Returns(window_length=2)` # noqa
Parameters
----------
annualization_factor : float, optional
The number of time units per year. Defaults is 252, the number of NYSE
trading days in a normal year.
"""
inputs = [Returns(window_length=2)]
params = {'annualization_factor': 252.0}
window_length = 252
def compute(self, today, assets, out, returns, annualization_factor):
out[:] = nanstd(returns, axis=0) * (annualization_factor ** .5)
# Convenience aliases
EWMA = ExponentialWeightedMovingAverage
EWMSTD = ExponentialWeightedMovingStdDev
from numpy import newaxis
from zipline.utils.numpy_utils import (
NaTD,
busday_count_mask_NaT,
datetime64D_dtype,
float64_dtype,
)
from .factor import Factor
class BusinessDaysSincePreviousEvent(Factor):
"""
Abstract class for business days since a previous event.
Returns the number of **business days** (not trading days!) since
the most recent event date for each asset.
    This doesn't use trading days for symmetry with
    BusinessDaysUntilNextEvent.
Assets which announced or will announce the event today will produce a
value of 0.0. Assets that announced the event on the previous business
day will produce a value of 1.0.
Assets for which the event date is `NaT` will produce a value of `NaN`.
"""
window_length = 0
dtype = float64_dtype
def _compute(self, arrays, dates, assets, mask):
# Coerce from [ns] to [D] for numpy busday_count.
announce_dates = arrays[0].astype(datetime64D_dtype)
# Set masked values to NaT.
announce_dates[~mask] = NaTD
# Convert row labels into a column vector for broadcasted comparison.
reference_dates = dates.values.astype(datetime64D_dtype)[:, newaxis]
return busday_count_mask_NaT(announce_dates, reference_dates)
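# Sketch of the underlying business-day arithmetic (hypothetical, using
# numpy directly rather than zipline's NaT-aware wrapper):
def _busday_example():
    from numpy import busday_count
    # An event announced Monday 2014-01-06, evaluated Wednesday 2014-01-08,
    # produces 2; weekend days never contribute to the count.
    return busday_count('2014-01-06', '2014-01-08')  # -> 2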
class BusinessDaysUntilNextEvent(Factor):
"""
    Abstract class for business days until the next event.
Returns the number of **business days** (not trading days!) until
the next known event date for each asset.
This doesn't use trading days because the trading calendar includes
information that may not have been available to the algorithm at the time
when `compute` is called.
For example, the NYSE closings September 11th 2001, would not have been
known to the algorithm on September 10th.
Assets that announced or will announce the event today will produce a value
of 0.0. Assets that will announce the event on the next upcoming business
day will produce a value of 1.0.
Assets for which the event date is `NaT` will produce a value of `NaN`.
"""
window_length = 0
dtype = float64_dtype
def _compute(self, arrays, dates, assets, mask):
# Coerce from [ns] to [D] for numpy busday_count.
announce_dates = arrays[0].astype(datetime64D_dtype)
# Set masked values to NaT.
announce_dates[~mask] = NaTD
# Convert row labels into a column vector for broadcasted comparison.
reference_dates = dates.values.astype(datetime64D_dtype)[:, newaxis]
        return busday_count_mask_NaT(reference_dates, announce_dates)
from functools import partial
from numbers import Number
import operator
import re
from numpy import where, isnan, nan, zeros
import pandas as pd
from zipline.errors import UnsupportedDataType
from zipline.lib.labelarray import LabelArray
from zipline.lib.quantiles import quantiles
from zipline.pipeline.api_utils import restrict_to_dtype
from zipline.pipeline.dtypes import (
CLASSIFIER_DTYPES,
FACTOR_DTYPES,
FILTER_DTYPES,
)
from zipline.pipeline.sentinels import NotSpecified
from zipline.pipeline.term import ComputableTerm
from zipline.utils.compat import unicode
from zipline.utils.input_validation import expect_types, expect_dtypes
from zipline.utils.memoize import classlazyval
from zipline.utils.numpy_utils import (
categorical_dtype,
int64_dtype,
vectorized_is_element,
)
from ..filters import ArrayPredicate, NotNullFilter, NullFilter, NumExprFilter
from ..mixins import (
AliasedMixin,
CustomTermMixin,
DownsampledMixin,
LatestMixin,
PositiveWindowLengthMixin,
RestrictedDTypeMixin,
SingleInputMixin,
StandardOutputs,
)
string_classifiers_only = restrict_to_dtype(
dtype=categorical_dtype,
message_template=(
"{method_name}() is only defined on Classifiers producing strings"
" but it was called on a Classifier of dtype {received_dtype}."
)
)
class Classifier(RestrictedDTypeMixin, ComputableTerm):
"""
A Pipeline expression computing a categorical output.
Classifiers are most commonly useful for describing grouping keys for
complex transformations on Factor outputs. For example, Factor.demean() and
Factor.zscore() can be passed a Classifier in their ``groupby`` argument,
indicating that means/standard deviations should be computed on assets for
which the classifier produced the same label.
"""
# Used by RestrictedDTypeMixin
ALLOWED_DTYPES = CLASSIFIER_DTYPES
categories = NotSpecified
def isnull(self):
"""
A Filter producing True for values where this term has missing data.
"""
return NullFilter(self)
def notnull(self):
"""
A Filter producing True for values where this term has complete data.
"""
return NotNullFilter(self)
# We explicitly don't support classifier to classifier comparisons, since
# the stored values likely don't mean the same thing. This may be relaxed
# in the future, but for now we're starting conservatively.
def eq(self, other):
"""
Construct a Filter returning True for asset/date pairs where the output
of ``self`` matches ``other``.
"""
# We treat this as an error because missing_values have NaN semantics,
# which means this would return an array of all False, which is almost
# certainly not what the user wants.
if other == self.missing_value:
raise ValueError(
"Comparison against self.missing_value ({value!r}) in"
" {typename}.eq().\n"
"Missing values have NaN semantics, so the "
"requested comparison would always produce False.\n"
"Use the isnull() method to check for missing values.".format(
value=other,
typename=(type(self).__name__),
)
)
if isinstance(other, Number) != (self.dtype == int64_dtype):
raise InvalidClassifierComparison(self, other)
if isinstance(other, Number):
return NumExprFilter.create(
"x_0 == {other}".format(other=int(other)),
binds=(self,),
)
else:
return ArrayPredicate(
term=self,
op=operator.eq,
opargs=(other,),
)
def __ne__(self, other):
"""
        Construct a Filter returning True for asset/date pairs where the output
        of ``self`` does not match ``other``.
"""
if isinstance(other, Number) != (self.dtype == int64_dtype):
raise InvalidClassifierComparison(self, other)
if isinstance(other, Number):
return NumExprFilter.create(
"((x_0 != {other}) & (x_0 != {missing}))".format(
other=int(other),
missing=self.missing_value,
),
binds=(self,),
)
else:
# Numexpr doesn't know how to use LabelArrays.
return ArrayPredicate(term=self, op=operator.ne, opargs=(other,))
def bad_compare(opname, other):
raise TypeError('cannot compare classifiers with %s' % opname)
__gt__ = partial(bad_compare, '>')
__ge__ = partial(bad_compare, '>=')
__le__ = partial(bad_compare, '<=')
__lt__ = partial(bad_compare, '<')
del bad_compare
@string_classifiers_only
@expect_types(prefix=(bytes, unicode))
def startswith(self, prefix):
"""
Construct a Filter matching values starting with ``prefix``.
Parameters
----------
prefix : str
String prefix against which to compare values produced by ``self``.
Returns
-------
matches : Filter
Filter returning True for all sid/date pairs for which ``self``
produces a string starting with ``prefix``.
"""
return ArrayPredicate(
term=self,
op=LabelArray.startswith,
opargs=(prefix,),
)
@string_classifiers_only
@expect_types(suffix=(bytes, unicode))
def endswith(self, suffix):
"""
Construct a Filter matching values ending with ``suffix``.
Parameters
----------
suffix : str
String suffix against which to compare values produced by ``self``.
Returns
-------
matches : Filter
Filter returning True for all sid/date pairs for which ``self``
            produces a string ending with ``suffix``.
"""
return ArrayPredicate(
term=self,
op=LabelArray.endswith,
opargs=(suffix,),
)
@string_classifiers_only
@expect_types(substring=(bytes, unicode))
def has_substring(self, substring):
"""
Construct a Filter matching values containing ``substring``.
Parameters
----------
substring : str
Sub-string against which to compare values produced by ``self``.
Returns
-------
matches : Filter
Filter returning True for all sid/date pairs for which ``self``
produces a string containing ``substring``.
"""
return ArrayPredicate(
term=self,
op=LabelArray.has_substring,
opargs=(substring,),
)
@string_classifiers_only
@expect_types(pattern=(bytes, unicode, type(re.compile(''))))
def matches(self, pattern):
"""
Construct a Filter that checks regex matches against ``pattern``.
Parameters
----------
pattern : str
Regex pattern against which to compare values produced by ``self``.
Returns
-------
matches : Filter
Filter returning True for all sid/date pairs for which ``self``
produces a string matched by ``pattern``.
See Also
--------
:mod:`Python Regular Expressions <re>`
"""
return ArrayPredicate(
term=self,
op=LabelArray.matches,
opargs=(pattern,),
)
# TODO: Support relabeling for integer dtypes.
@string_classifiers_only
def relabel(self, relabeler):
"""
Convert ``self`` into a new classifier by mapping a function over each
element produced by ``self``.
Parameters
----------
relabeler : function[str -> str or None]
A function to apply to each unique value produced by ``self``.
Returns
-------
relabeled : Classifier
A classifier produced by applying ``relabeler`` to each unique
value produced by ``self``.
"""
return Relabel(term=self, relabeler=relabeler)
def element_of(self, choices):
"""
Construct a Filter indicating whether values are in ``choices``.
Parameters
----------
choices : iterable[str or int]
An iterable of choices.
Returns
-------
matches : Filter
Filter returning True for all sid/date pairs for which ``self``
produces an entry in ``choices``.
"""
try:
choices = frozenset(choices)
except Exception as e:
raise TypeError(
"Expected `choices` to be an iterable of hashable values,"
" but got {} instead.\n"
"This caused the following error: {!r}.".format(choices, e)
)
if self.missing_value in choices:
raise ValueError(
"Found self.missing_value ({mv!r}) in choices supplied to"
" {typename}.{meth_name}().\n"
"Missing values have NaN semantics, so the"
" requested comparison would always produce False.\n"
"Use the isnull() method to check for missing values.\n"
"Received choices were {choices}.".format(
mv=self.missing_value,
typename=(type(self).__name__),
choices=sorted(choices),
meth_name=self.element_of.__name__,
)
)
def only_contains(type_, values):
return all(isinstance(v, type_) for v in values)
if self.dtype == int64_dtype:
if only_contains(int, choices):
return ArrayPredicate(
term=self,
op=vectorized_is_element,
opargs=(choices,),
)
else:
raise TypeError(
"Found non-int in choices for {typename}.element_of.\n"
"Supplied choices were {choices}.".format(
typename=type(self).__name__,
choices=choices,
)
)
elif self.dtype == categorical_dtype:
if only_contains((bytes, unicode), choices):
return ArrayPredicate(
term=self,
op=LabelArray.element_of,
opargs=(choices,),
)
else:
raise TypeError(
"Found non-string in choices for {typename}.element_of.\n"
"Supplied choices were {choices}.".format(
typename=type(self).__name__,
choices=choices,
)
)
assert False, "Unknown dtype in Classifier.element_of %s." % self.dtype
def postprocess(self, data):
if self.dtype == int64_dtype:
return data
if not isinstance(data, LabelArray):
raise AssertionError("Expected a LabelArray, got %s." % type(data))
return data.as_categorical()
def to_workspace_value(self, result, assets):
"""
Called with the result of a pipeline. This needs to return an object
which can be put into the workspace to continue doing computations.
This is the inverse of :func:`~zipline.pipeline.term.Term.postprocess`.
"""
if self.dtype == int64_dtype:
return super(Classifier, self).to_workspace_value(result, assets)
assert isinstance(result.values, pd.Categorical), (
'Expected a Categorical, got %r.' % type(result.values)
)
with_missing = pd.Series(
data=pd.Categorical(
result.values,
result.values.categories.union([self.missing_value]),
),
index=result.index,
)
return LabelArray(
super(Classifier, self).to_workspace_value(
with_missing,
assets,
),
self.missing_value,
)
@classlazyval
def _downsampled_type(self):
return DownsampledMixin.make_downsampled_type(Classifier)
@classlazyval
def _aliased_type(self):
return AliasedMixin.make_aliased_type(Classifier)
def _to_integral(self, output_array):
"""
Convert an array produced by this classifier into an array of integer
labels and a missing value label.
"""
if self.dtype == int64_dtype:
group_labels = output_array
null_label = self.missing_value
elif self.dtype == categorical_dtype:
# Coerce LabelArray into an isomorphic array of ints. This is
# necessary because np.where doesn't know about LabelArrays or the
# void dtype.
group_labels = output_array.as_int_array()
null_label = output_array.missing_value_code
else:
raise AssertionError(
"Unexpected Classifier dtype: %s." % self.dtype
)
return group_labels, null_label
class Everything(Classifier):
"""
A trivial classifier that classifies everything the same.
"""
dtype = int64_dtype
window_length = 0
inputs = ()
missing_value = -1
def _compute(self, arrays, dates, assets, mask):
return where(
mask,
zeros(shape=mask.shape, dtype=int64_dtype),
self.missing_value,
)
class Quantiles(SingleInputMixin, Classifier):
"""
A classifier computing quantiles over an input.
"""
params = ('bins',)
dtype = int64_dtype
window_length = 0
missing_value = -1
def _compute(self, arrays, dates, assets, mask):
data = arrays[0]
bins = self.params['bins']
to_bin = where(mask, data, nan)
result = quantiles(to_bin, bins)
# Write self.missing_value into nan locations, whether they were
# generated by our input mask or not.
result[isnan(result)] = self.missing_value
return result.astype(int64_dtype)
def graph_repr(self):
"""Short repr to use when rendering Pipeline graphs."""
return type(self).__name__ + '(%d)' % self.params['bins']
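# Illustration of the labeling performed above (a hypothetical sketch; the
# exact edge behavior is determined by zipline.lib.quantiles):
def _quantiles_example():
    from numpy import array, nan
    data = array([[1.0, 2.0, 3.0, 4.0, nan]])
    # With bins=2 the lower half of each row is expected to label as 0 and
    # the upper half as 1; the NaN survives here and is rewritten to the
    # classifier's missing_value (-1) in Quantiles._compute above.
    return quantiles(data, 2)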
class Relabel(SingleInputMixin, Classifier):
"""
A classifier applying a relabeling function on the result of another
classifier.
Parameters
----------
arg : zipline.pipeline.Classifier
        Term producing the input to be relabeled.
relabel_func : function(LabelArray) -> LabelArray
Function to apply to the result of `term`.
"""
window_length = 0
params = ('relabeler',)
# TODO: Support relabeling for integer dtypes.
@expect_dtypes(term=categorical_dtype)
@expect_types(term=Classifier)
def __new__(cls, term, relabeler):
return super(Relabel, cls).__new__(
cls,
inputs=(term,),
dtype=term.dtype,
mask=term.mask,
relabeler=relabeler,
)
def _compute(self, arrays, dates, assets, mask):
relabeler = self.params['relabeler']
data = arrays[0]
if isinstance(data, LabelArray):
result = data.map(relabeler)
result[~mask] = data.missing_value
else:
raise NotImplementedError(
"Relabeling is not currently supported for "
"int-dtype classifiers."
)
return result
class CustomClassifier(PositiveWindowLengthMixin,
StandardOutputs,
CustomTermMixin,
Classifier):
"""
Base class for user-defined Classifiers.
    Does not support multiple outputs.
See Also
--------
zipline.pipeline.CustomFactor
zipline.pipeline.CustomFilter
"""
def _validate(self):
try:
super(CustomClassifier, self)._validate()
except UnsupportedDataType:
if self.dtype in FACTOR_DTYPES:
raise UnsupportedDataType(
typename=type(self).__name__,
dtype=self.dtype,
hint='Did you mean to create a CustomFactor?',
)
elif self.dtype in FILTER_DTYPES:
raise UnsupportedDataType(
typename=type(self).__name__,
dtype=self.dtype,
hint='Did you mean to create a CustomFilter?',
)
raise
def _allocate_output(self, windows, shape):
"""
Override the default array allocation to produce a LabelArray when we
have a string-like dtype.
"""
if self.dtype == int64_dtype:
return super(CustomClassifier, self)._allocate_output(
windows,
shape,
)
# This is a little bit of a hack. We might not know what the
# categories for a LabelArray are until it's actually been loaded, so
# we need to look at the underlying data.
return windows[0].data.empty_like(shape)
class Latest(LatestMixin, CustomClassifier):
"""
A classifier producing the latest value of an input.
See Also
--------
zipline.pipeline.data.dataset.BoundColumn.latest
zipline.pipeline.factors.factor.Latest
zipline.pipeline.filters.filter.Latest
"""
pass
class InvalidClassifierComparison(TypeError):
def __init__(self, classifier, compval):
super(InvalidClassifierComparison, self).__init__(
"Can't compare classifier of dtype"
" {dtype} to value {value} of type {type}.".format(
dtype=classifier.dtype,
value=compval,
type=type(compval).__name__,
)
        )
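# Usage sketch (hypothetical names): string-classifier filters compose like
# any other pipeline terms. Given a categorical ``exchange`` classifier and
# a categorical ``share_class`` classifier, one might write:
#
#     nyse_listed = exchange.element_of(['NYSE', 'NYSE MKT'])
#     a_shares = share_class.startswith('A')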
from functools import total_ordering
from six import (
iteritems,
with_metaclass,
)
from toolz import first
from zipline.pipeline.classifiers import Classifier, Latest as LatestClassifier
from zipline.pipeline.factors import Factor, Latest as LatestFactor
from zipline.pipeline.filters import Filter, Latest as LatestFilter
from zipline.pipeline.sentinels import NotSpecified
from zipline.pipeline.term import (
AssetExists,
LoadableTerm,
validate_dtype,
)
from zipline.utils.input_validation import ensure_dtype
from zipline.utils.numpy_utils import NoDefaultMissingValue
from zipline.utils.preprocess import preprocess
class Column(object):
"""
An abstract column of data, not yet associated with a dataset.
"""
@preprocess(dtype=ensure_dtype)
def __init__(self,
dtype,
missing_value=NotSpecified,
doc=None,
metadata=None):
self.dtype = dtype
self.missing_value = missing_value
self.doc = doc
self.metadata = metadata.copy() if metadata is not None else {}
def bind(self, name):
"""
Bind a `Column` object to its name.
"""
return _BoundColumnDescr(
dtype=self.dtype,
missing_value=self.missing_value,
name=name,
doc=self.doc,
metadata=self.metadata,
)
class _BoundColumnDescr(object):
"""
Intermediate class that sits on `DataSet` objects and returns memoized
`BoundColumn` objects when requested.
This exists so that subclasses of DataSets don't share columns with their
parent classes.
"""
def __init__(self, dtype, missing_value, name, doc, metadata):
# Validating and calculating default missing values here guarantees
        # that we fail quickly if the user passes an unsupported dtype or fails
# to provide a missing value for a dtype that requires one
# (e.g. int64), but still enables us to provide an error message that
# points to the name of the failing column.
try:
self.dtype, self.missing_value = validate_dtype(
termname="Column(name={name!r})".format(name=name),
dtype=dtype,
missing_value=missing_value,
)
except NoDefaultMissingValue:
# Re-raise with a more specific message.
raise NoDefaultMissingValue(
"Failed to create Column with name {name!r} and"
" dtype {dtype} because no missing_value was provided\n\n"
"Columns with dtype {dtype} require a missing_value.\n"
"Please pass missing_value to Column() or use a different"
" dtype.".format(dtype=dtype, name=name)
)
self.name = name
self.doc = doc
self.metadata = metadata
def __get__(self, instance, owner):
"""
Produce a concrete BoundColumn object when accessed.
We don't bind to datasets at class creation time so that subclasses of
DataSets produce different BoundColumns.
"""
return BoundColumn(
dtype=self.dtype,
missing_value=self.missing_value,
dataset=owner,
name=self.name,
doc=self.doc,
metadata=self.metadata,
)
class BoundColumn(LoadableTerm):
"""
A column of data that's been concretely bound to a particular dataset.
Instances of this class are dynamically created upon access to attributes
of DataSets (for example, USEquityPricing.close is an instance of this
class).
Attributes
----------
dtype : numpy.dtype
The dtype of data produced when this column is loaded.
latest : zipline.pipeline.data.Factor or zipline.pipeline.data.Filter
A Filter, Factor, or Classifier computing the most recently known value
of this column on each date.
Produces a Filter if self.dtype == ``np.bool_``.
        Produces a Classifier if self.dtype == ``np.int64``.
Otherwise produces a Factor.
dataset : zipline.pipeline.data.DataSet
The dataset to which this column is bound.
name : str
The name of this column.
metadata : dict
Extra metadata associated with this column.
"""
mask = AssetExists()
window_safe = True
def __new__(cls, dtype, missing_value, dataset, name, doc, metadata):
return super(BoundColumn, cls).__new__(
cls,
domain=dataset.domain,
dtype=dtype,
missing_value=missing_value,
dataset=dataset,
name=name,
ndim=dataset.ndim,
doc=doc,
metadata=metadata,
)
def _init(self, dataset, name, doc, metadata, *args, **kwargs):
self._dataset = dataset
self._name = name
self.__doc__ = doc
self._metadata = metadata
return super(BoundColumn, self)._init(*args, **kwargs)
@classmethod
def _static_identity(cls, dataset, name, doc, metadata, *args, **kwargs):
return (
super(BoundColumn, cls)._static_identity(*args, **kwargs),
dataset,
name,
doc,
frozenset(sorted(metadata.items(), key=first)),
)
@property
def dataset(self):
"""
The dataset to which this column is bound.
"""
return self._dataset
@property
def name(self):
"""
The name of this column.
"""
return self._name
@property
def metadata(self):
"""
A copy of the metadata for this column.
"""
return self._metadata.copy()
@property
def qualname(self):
"""
The fully-qualified name of this column.
Generated by doing '.'.join([self.dataset.__name__, self.name]).
"""
return '.'.join([self.dataset.__name__, self.name])
@property
def latest(self):
dtype = self.dtype
if dtype in Filter.ALLOWED_DTYPES:
Latest = LatestFilter
elif dtype in Classifier.ALLOWED_DTYPES:
Latest = LatestClassifier
else:
assert dtype in Factor.ALLOWED_DTYPES, "Unknown dtype %s." % dtype
Latest = LatestFactor
return Latest(
inputs=(self,),
dtype=dtype,
missing_value=self.missing_value,
ndim=self.ndim,
)
def __repr__(self):
return "{qualname}::{dtype}".format(
qualname=self.qualname,
dtype=self.dtype.name,
)
def graph_repr(self):
"""Short repr to use when rendering Pipeline graphs."""
return "BoundColumn:\l Dataset: {}\l Column: {}\l".format(
self.dataset.__name__,
self.name
)
def recursive_repr(self):
"""Short repr used to render in recursive contexts."""
return self.qualname
@total_ordering
class DataSetMeta(type):
"""
Metaclass for DataSets
Supplies name and dataset information to Column attributes.
"""
def __new__(mcls, name, bases, dict_):
newtype = super(DataSetMeta, mcls).__new__(mcls, name, bases, dict_)
# collect all of the column names that we inherit from our parents
column_names = set().union(
*(getattr(base, '_column_names', ()) for base in bases)
)
for maybe_colname, maybe_column in iteritems(dict_):
if isinstance(maybe_column, Column):
# add column names defined on our class
bound_column_descr = maybe_column.bind(maybe_colname)
setattr(newtype, maybe_colname, bound_column_descr)
column_names.add(maybe_colname)
newtype._column_names = frozenset(column_names)
return newtype
@property
def columns(self):
return frozenset(
getattr(self, colname) for colname in self._column_names
)
def __lt__(self, other):
return id(self) < id(other)
def __repr__(self):
return '<DataSet: %r>' % self.__name__
class DataSet(with_metaclass(DataSetMeta, object)):
"""
Base class for describing inputs to Pipeline expressions.
A DataSet is a collection of :class:`zipline.pipeline.data.Column` that
describes a collection of logically-related inputs to the Pipeline API.
To create a new Pipeline dataset, subclass from this class and create
columns at class scope for each attribute of your dataset. Each column
requires a dtype that describes the type of data that should be produced by
a loader for the dataset. Integer columns must also provide a
``missing_value`` to be used when no value is available for a given
asset/date combination.
Examples
--------
The built-in USEquityPricing dataset is defined as follows::
class EquityPricing(DataSet):
open = Column(float)
high = Column(float)
low = Column(float)
close = Column(float)
volume = Column(float)
Columns can have types other than float. A dataset containing assorted
company metadata might be defined like this::
class CompanyMetadata(DataSet):
# Use float for semantically-numeric data, even if it's always
# integral valued (see Notes section below). The default missing
# value for floats is NaN.
shares_outstanding = Column(float)
# Use object-dtype for string columns. The default missing value
# for object-dtype columns is None.
ticker = Column(object)
# Use integers for integer-valued categorical data like sector or
# industry codes. Integer-dtype columns require an explicit missing
# value.
sector_code = Column(int, missing_value=-1)
# The default missing value for bool-dtype columns is False.
is_primary_share = Column(bool)
Notes
-----
Because numpy has no native support for integers with missing values, users
are strongly encouraged to use floats for any data that's semantically
numeric. Doing so enables the use of `NaN` as a natural missing value,
which has useful propagation semantics.
"""
domain = None
    ndim = 2
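# Usage sketch (hypothetical dataset): columns bind lazily per subclass, so
# attribute access on the class yields a BoundColumn, and ``.latest`` yields
# a Factor, Filter, or Classifier according to the column's dtype.
def _dataset_example():
    class MyData(DataSet):
        score = Column(float)
        flagged = Column(bool)
    return MyData.score.latest, MyData.flagged.latest  # Factor, Filter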
from itertools import chain
from operator import attrgetter
from numpy import (
any as np_any,
float64,
nan,
nanpercentile,
uint8,
)
from zipline.errors import (
BadPercentileBounds,
NonExistentAssetInTimeFrame,
UnsupportedDataType,
)
from zipline.lib.labelarray import LabelArray
from zipline.lib.rank import is_missing, grouped_masked_is_maximal
from zipline.pipeline.dtypes import (
CLASSIFIER_DTYPES,
FACTOR_DTYPES,
FILTER_DTYPES,
)
from zipline.pipeline.expression import (
BadBinaryOperator,
FILTER_BINOPS,
method_name_for_op,
NumericalExpression,
)
from zipline.pipeline.mixins import (
AliasedMixin,
CustomTermMixin,
DownsampledMixin,
LatestMixin,
PositiveWindowLengthMixin,
RestrictedDTypeMixin,
SingleInputMixin,
StandardOutputs,
)
from zipline.pipeline.term import ComputableTerm, Term
from zipline.utils.input_validation import expect_types
from zipline.utils.memoize import classlazyval
from zipline.utils.numpy_utils import (
bool_dtype,
int64_dtype,
repeat_first_axis,
)
from ..sentinels import NotSpecified
def concat_tuples(*tuples):
"""
Concatenate a sequence of tuples into one tuple.
"""
return tuple(chain(*tuples))
def binary_operator(op):
"""
Factory function for making binary operator methods on a Filter subclass.
Returns a function "binary_operator" suitable for implementing functions
like __and__ or __or__.
"""
# When combining a Filter with a NumericalExpression, we use this
# attrgetter instance to defer to the commuted interpretation of the
# NumericalExpression operator.
commuted_method_getter = attrgetter(method_name_for_op(op, commute=True))
def binary_operator(self, other):
if isinstance(self, NumericalExpression):
self_expr, other_expr, new_inputs = self.build_binary_op(
op, other,
)
return NumExprFilter.create(
"({left}) {op} ({right})".format(
left=self_expr,
op=op,
right=other_expr,
),
new_inputs,
)
elif isinstance(other, NumericalExpression):
# NumericalExpression overrides numerical ops to correctly handle
# merging of inputs. Look up and call the appropriate
# right-binding operator with ourself as the input.
return commuted_method_getter(other)(self)
elif isinstance(other, Term):
if other.dtype != bool_dtype:
raise BadBinaryOperator(op, self, other)
if self is other:
return NumExprFilter.create(
"x_0 {op} x_0".format(op=op),
(self,),
)
return NumExprFilter.create(
"x_0 {op} x_1".format(op=op),
(self, other),
)
elif isinstance(other, int): # Note that this is true for bool as well
return NumExprFilter.create(
"x_0 {op} {constant}".format(op=op, constant=int(other)),
binds=(self,),
)
raise BadBinaryOperator(op, self, other)
binary_operator.__doc__ = "Binary Operator: '%s'" % op
return binary_operator
def unary_operator(op):
"""
Factory function for making unary operator methods for Filters.
"""
valid_ops = {'~'}
if op not in valid_ops:
raise ValueError("Invalid unary operator %s." % op)
def unary_operator(self):
# This can't be hoisted up a scope because the types returned by
# unary_op_return_type aren't defined when the top-level function is
# invoked.
if isinstance(self, NumericalExpression):
return NumExprFilter.create(
"{op}({expr})".format(op=op, expr=self._expr),
self.inputs,
)
else:
return NumExprFilter.create("{op}x_0".format(op=op), (self,))
unary_operator.__doc__ = "Unary Operator: '%s'" % op
return unary_operator
class Filter(RestrictedDTypeMixin, ComputableTerm):
"""
Pipeline expression computing a boolean output.
Filters are most commonly useful for describing sets of assets to include
or exclude for some particular purpose. Many Pipeline API functions accept
a ``mask`` argument, which can be supplied a Filter indicating that only
values passing the Filter should be considered when performing the
requested computation. For example, :meth:`zipline.pipeline.Factor.top`
accepts a mask indicating that ranks should be computed only on assets that
passed the specified Filter.
The most common way to construct a Filter is via one of the comparison
operators (``<``, ``<=``, ``!=``, ``eq``, ``>``, ``>=``) of
:class:`~zipline.pipeline.Factor`. For example, a natural way to construct
a Filter for stocks with a 10-day VWAP less than $20.0 is to first
construct a Factor computing 10-day VWAP and compare it to the scalar value
20.0::
>>> from zipline.pipeline.factors import VWAP
>>> vwap_10 = VWAP(window_length=10)
>>> vwaps_under_20 = (vwap_10 <= 20)
Filters can also be constructed via comparisons between two Factors. For
example, to construct a Filter producing True for asset/date pairs where
    the asset's 10-day VWAP was greater than its 30-day VWAP::
>>> short_vwap = VWAP(window_length=10)
>>> long_vwap = VWAP(window_length=30)
>>> higher_short_vwap = (short_vwap > long_vwap)
Filters can be combined via the ``&`` (and) and ``|`` (or) operators.
``&``-ing together two filters produces a new Filter that produces True if
**both** of the inputs produced True.
``|``-ing together two filters produces a new Filter that produces True if
**either** of its inputs produced True.
The ``~`` operator can be used to invert a Filter, swapping all True values
with Falses and vice-versa.
Filters may be set as the ``screen`` attribute of a Pipeline, indicating
asset/date pairs for which the filter produces False should be excluded
from the Pipeline's output. This is useful both for reducing noise in the
output of a Pipeline and for reducing memory consumption of Pipeline
results.
"""
# Filters are window-safe by default, since a yes/no decision means the
# same thing from all temporal perspectives.
window_safe = True
# Used by RestrictedDTypeMixin
ALLOWED_DTYPES = FILTER_DTYPES
dtype = bool_dtype
clsdict = locals()
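    # Programmatically install the binary operators (``&``, ``|``) and their
    # reflected variants so that Filters combine via NumExprFilter.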
clsdict.update(
{
method_name_for_op(op): binary_operator(op)
for op in FILTER_BINOPS
}
)
clsdict.update(
{
method_name_for_op(op, commute=True): binary_operator(op)
for op in FILTER_BINOPS
}
)
__invert__ = unary_operator('~')
def _validate(self):
# Run superclass validation first so that we handle `dtype not passed`
# before this.
retval = super(Filter, self)._validate()
if self.dtype != bool_dtype:
raise UnsupportedDataType(
typename=type(self).__name__,
dtype=self.dtype
)
return retval
@classlazyval
def _downsampled_type(self):
return DownsampledMixin.make_downsampled_type(Filter)
@classlazyval
def _aliased_type(self):
return AliasedMixin.make_aliased_type(Filter)
class NumExprFilter(NumericalExpression, Filter):
"""
A Filter computed from a numexpr expression.
"""
@classmethod
def create(cls, expr, binds):
"""
Helper for creating new NumExprFactors.
This is just a wrapper around NumericalExpression.__new__ that always
forwards `bool` as the dtype, since Filters can only be of boolean
dtype.
"""
return cls(expr=expr, binds=binds, dtype=bool_dtype)
def _compute(self, arrays, dates, assets, mask):
"""
Compute our result with numexpr, then re-apply `mask`.
"""
return super(NumExprFilter, self)._compute(
arrays,
dates,
assets,
mask,
) & mask
class NullFilter(SingleInputMixin, Filter):
"""
A Filter indicating whether input values are missing from an input.
Parameters
----------
factor : zipline.pipeline.Term
The factor to compare against its missing_value.
"""
window_length = 0
def __new__(cls, term):
return super(NullFilter, cls).__new__(
cls,
inputs=(term,),
)
def _compute(self, arrays, dates, assets, mask):
data = arrays[0]
if isinstance(data, LabelArray):
return data.is_missing()
return is_missing(arrays[0], self.inputs[0].missing_value)
class NotNullFilter(SingleInputMixin, Filter):
"""
A Filter indicating whether input values are **not** missing from an input.
Parameters
----------
factor : zipline.pipeline.Term
The factor to compare against its missing_value.
"""
window_length = 0
def __new__(cls, term):
return super(NotNullFilter, cls).__new__(
cls,
inputs=(term,),
)
def _compute(self, arrays, dates, assets, mask):
data = arrays[0]
if isinstance(data, LabelArray):
return ~data.is_missing()
return ~is_missing(arrays[0], self.inputs[0].missing_value)
class PercentileFilter(SingleInputMixin, Filter):
"""
A Filter representing assets falling between percentile bounds of a Factor.
Parameters
----------
factor : zipline.pipeline.factor.Factor
The factor over which to compute percentile bounds.
min_percentile : float [0.0, 100.0]
The minimum percentile rank of an asset that will pass the filter.
max_percentile : float [0.0, 100.0]
The maximum percentile rank of an asset that will pass the filter.
"""
window_length = 0
def __new__(cls, factor, min_percentile, max_percentile, mask):
return super(PercentileFilter, cls).__new__(
cls,
inputs=(factor,),
mask=mask,
min_percentile=min_percentile,
max_percentile=max_percentile,
)
def _init(self, min_percentile, max_percentile, *args, **kwargs):
self._min_percentile = min_percentile
self._max_percentile = max_percentile
return super(PercentileFilter, self)._init(*args, **kwargs)
@classmethod
def _static_identity(cls, min_percentile, max_percentile, *args, **kwargs):
return (
super(PercentileFilter, cls)._static_identity(*args, **kwargs),
min_percentile,
max_percentile,
)
def _validate(self):
"""
Ensure that our percentile bounds are well-formed.
"""
if not 0.0 <= self._min_percentile < self._max_percentile <= 100.0:
raise BadPercentileBounds(
min_percentile=self._min_percentile,
max_percentile=self._max_percentile,
upper_bound=100.0
)
return super(PercentileFilter, self)._validate()
def _compute(self, arrays, dates, assets, mask):
"""
For each row in the input, compute a mask of all values falling between
the given percentiles.
"""
# TODO: Review whether there's a better way of handling small numbers
# of columns.
data = arrays[0].copy().astype(float64)
data[~mask] = nan
# FIXME: np.nanpercentile **should** support computing multiple bounds
# at once, but there's a bug in the logic for multiple bounds in numpy
# 1.9.2. It will be fixed in 1.10.
# c.f. https://github.com/numpy/numpy/pull/5981
lower_bounds = nanpercentile(
data,
self._min_percentile,
axis=1,
keepdims=True,
)
upper_bounds = nanpercentile(
data,
self._max_percentile,
axis=1,
keepdims=True,
)
return (lower_bounds <= data) & (data <= upper_bounds)
def graph_repr(self):
return "{}:\l min: {}, max: {}\l".format(
type(self).__name__,
self._min_percentile,
self._max_percentile,
)
class CustomFilter(PositiveWindowLengthMixin, CustomTermMixin, Filter):
"""
Base class for user-defined Filters.
Parameters
----------
inputs : iterable, optional
An iterable of `BoundColumn` instances (e.g. USEquityPricing.close),
describing the data to load and pass to `self.compute`. If this
argument is passed to the CustomFilter constructor, we look for a
class-level attribute named `inputs`.
window_length : int, optional
Number of rows to pass for each input. If this argument is not passed
to the CustomFilter constructor, we look for a class-level attribute
named `window_length`.
Notes
-----
Users implementing their own Filters should subclass CustomFilter and
implement a method named `compute` with the following signature:
.. code-block:: python
def compute(self, today, assets, out, *inputs):
...
On each simulation date, ``compute`` will be called with the current date,
an array of sids, an output array, and an input array for each expression
passed as inputs to the CustomFilter constructor.
The specific types of the values passed to `compute` are as follows::
today : np.datetime64[ns]
Row label for the last row of all arrays passed as `inputs`.
assets : np.array[int64, ndim=1]
Column labels for `out` and `inputs`.
out : np.array[bool, ndim=1]
Output array of the same shape as `assets`. `compute` should write
its desired return values into `out`.
*inputs : tuple of np.array
Raw data arrays corresponding to the values of `self.inputs`.
See the documentation for
:class:`~zipline.pipeline.factors.factor.CustomFactor` for more details on
implementing a custom ``compute`` method.
See Also
--------
zipline.pipeline.factors.factor.CustomFactor
"""
def _validate(self):
try:
super(CustomFilter, self)._validate()
except UnsupportedDataType:
if self.dtype in CLASSIFIER_DTYPES:
raise UnsupportedDataType(
typename=type(self).__name__,
dtype=self.dtype,
hint='Did you mean to create a CustomClassifier?',
)
elif self.dtype in FACTOR_DTYPES:
raise UnsupportedDataType(
typename=type(self).__name__,
dtype=self.dtype,
hint='Did you mean to create a CustomFactor?',
)
raise
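# A minimal sketch of a user-defined CustomFilter (illustrative; the class
# name and threshold are invented for this example): it passes assets whose
# latest close is above their trailing 10-day mean close.
#
#     from zipline.pipeline.filters import CustomFilter
#     from zipline.pipeline.data import USEquityPricing
#
#     class CloseAboveMean(CustomFilter):
#         inputs = [USEquityPricing.close]
#         window_length = 10
#
#         def compute(self, today, assets, out, close):
#             # `close` has shape (window_length, len(assets)).
#             out[:] = close[-1] > close.mean(axis=0)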
class ArrayPredicate(SingleInputMixin, Filter):
"""
A filter applying a function from (ndarray, *args) -> ndarray[bool].
Parameters
----------
term : zipline.pipeline.Term
Term producing the array over which the predicate will be computed.
op : function(ndarray, *args) -> ndarray[bool]
Function to apply to the result of `term`.
opargs : tuple[hashable]
Additional argument to apply to ``op``.
"""
params = ('op', 'opargs')
window_length = 0
@expect_types(term=Term, opargs=tuple)
def __new__(cls, term, op, opargs):
hash(opargs) # fail fast if opargs isn't hashable.
return super(ArrayPredicate, cls).__new__(
ArrayPredicate,
op=op,
opargs=opargs,
inputs=(term,),
mask=term.mask,
)
def _compute(self, arrays, dates, assets, mask):
params = self.params
data = arrays[0]
return params['op'](data, *params['opargs']) & mask
def graph_repr(self):
return "{}:\l op: {}.{}()".format(
type(self).__name__,
self.params['op'].__module__,
self.params['op'].__name__,
)
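# Usage sketch (illustrative; ``some_factor`` stands in for any Factor):
# ArrayPredicate backs element-wise Factor predicates such as ``isnan``; a
# "strictly positive" filter could be spelled as:
#
#     import numpy as np
#     positive = ArrayPredicate(term=some_factor, op=np.greater, opargs=(0,))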
class Latest(LatestMixin, CustomFilter):
"""
Filter producing the most recently-known value of `inputs[0]` on each day.
"""
pass
class SingleAsset(Filter):
"""
A Filter that computes to True only for the given asset.
"""
inputs = []
window_length = 1
def __new__(cls, asset):
return super(SingleAsset, cls).__new__(cls, asset=asset)
def _init(self, asset, *args, **kwargs):
self._asset = asset
return super(SingleAsset, self)._init(*args, **kwargs)
@classmethod
def _static_identity(cls, asset, *args, **kwargs):
return (
super(SingleAsset, cls)._static_identity(*args, **kwargs), asset,
)
def _compute(self, arrays, dates, assets, mask):
is_my_asset = (assets == self._asset.sid)
out = repeat_first_axis(is_my_asset, len(mask))
# Raise an exception if `self._asset` does not exist for the entirety
# of the timeframe over which we are computing.
if (is_my_asset.sum() != 1) or ((out & mask).sum() != len(mask)):
raise NonExistentAssetInTimeFrame(
asset=self._asset, start_date=dates[0], end_date=dates[-1],
)
return out
def graph_repr(self):
return "SingleAsset:\l asset: {!r}\l".format(self._asset)
class StaticSids(Filter):
"""
A Filter that computes True for a specific set of predetermined sids.
``StaticSids`` is mostly useful for debugging or for interactively
computing pipeline terms for a fixed set of sids that are known ahead of
time.
Parameters
----------
sids : iterable[int]
An iterable of sids for which to filter.
"""
inputs = ()
window_length = 0
params = ('sids',)
def __new__(cls, sids):
sids = frozenset(sids)
return super(StaticSids, cls).__new__(cls, sids=sids)
def _compute(self, arrays, dates, sids, mask):
my_columns = sids.isin(self.params['sids'])
return repeat_first_axis(my_columns, len(mask)) & mask
class StaticAssets(StaticSids):
"""
A Filter that computes True for a specific set of predetermined assets.
``StaticAssets`` is mostly useful for debugging or for interactively
computing pipeline terms for a fixed set of assets that are known ahead of
time.
Parameters
----------
assets : iterable[Asset]
An iterable of assets for which to filter.
"""
def __new__(cls, assets):
sids = frozenset(asset.sid for asset in assets)
return super(StaticAssets, cls).__new__(cls, sids)
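# Usage sketch (illustrative; assumes ``aapl`` and ``msft`` are Equity
# objects already looked up from an asset finder):
#
#     from zipline.pipeline import Pipeline
#     universe = StaticAssets([aapl, msft])
#     pipe = Pipeline(screen=universe)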
class AllPresent(CustomFilter, SingleInputMixin, StandardOutputs):
"""Pipeline filter indicating input term has data for a given window.
"""
def _validate(self):
if isinstance(self.inputs[0], Filter):
raise TypeError(
"Input to filter `AllPresent` cannot be a Filter."
)
return super(AllPresent, self)._validate()
def compute(self, today, assets, out, value):
if isinstance(value, LabelArray):
out[:] = ~np_any(value.is_missing(), axis=0)
else:
out[:] = ~np_any(
is_missing(value, self.inputs[0].missing_value),
axis=0,
)
class MaximumFilter(Filter, StandardOutputs):
"""Pipeline filter that selects the top asset, possibly grouped and masked.
"""
window_length = 0
def __new__(cls, factor, groupby, mask):
if groupby is NotSpecified:
from zipline.pipeline.classifiers import Everything
groupby = Everything()
return super(MaximumFilter, cls).__new__(
cls,
inputs=(factor, groupby),
mask=mask,
)
def _compute(self, arrays, dates, assets, mask):
# XXX: We're doing a lot of unnecessary work here if `groupby` isn't
# specified.
data = arrays[0]
group_labels, null_label = self.inputs[1]._to_integral(arrays[1])
effective_mask = (
mask
& (group_labels != null_label)
& ~is_missing(data, self.inputs[0].missing_value)
).view(uint8)
return grouped_masked_is_maximal(
# Unconditionally view the data as int64.
# This is safe because casting from float64 to int64 is an
# order-preserving operation.
data.view(int64_dtype),
# PERF: Consider supporting different sizes of group labels.
group_labels.astype(int64_dtype),
effective_mask,
)
def __repr__(self):
return "Maximum({!r}, groupby={!r}, mask={!r})".format(
self.inputs[0], self.inputs[1], self.mask,
)
def graph_repr(self):
return "Maximum:\l groupby: {}\l mask: {}\l".format(
type(self.inputs[1]).__name__,
type(self.mask).__name__,
)
# end of file: zipline/pipeline/filters/filter.py
from numpy import (
iinfo,
uint32,
)
from trading_calendars import get_calendar
from zipline.data.us_equity_pricing import (
BcolzDailyBarReader,
SQLiteAdjustmentReader,
)
from zipline.lib.adjusted_array import AdjustedArray
from .base import PipelineLoader
from .utils import shift_dates
UINT32_MAX = iinfo(uint32).max
class USEquityPricingLoader(PipelineLoader):
"""
PipelineLoader for US Equity Pricing data
Delegates loading of baselines and adjustments.
"""
def __init__(self, raw_price_loader, adjustments_loader):
self.raw_price_loader = raw_price_loader
self.adjustments_loader = adjustments_loader
cal = self.raw_price_loader.trading_calendar or \
get_calendar("NYSE")
self._all_sessions = cal.all_sessions
@classmethod
def from_files(cls, pricing_path, adjustments_path):
"""
Create a loader from a bcolz equity pricing dir and a SQLite
adjustments path.
Parameters
----------
pricing_path : str
Path to a bcolz directory written by a BcolzDailyBarWriter.
adjustments_path : str
Path to an adjustments db written by a SQLiteAdjustmentWriter.
"""
return cls(
BcolzDailyBarReader(pricing_path),
SQLiteAdjustmentReader(adjustments_path)
)
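# Usage sketch (illustrative; both paths are hypothetical):
#
#     loader = USEquityPricingLoader.from_files(
#         '/data/daily_equities.bcolz',   # dir written by a BcolzDailyBarWriter
#         '/data/adjustments.sqlite',     # db written by a SQLiteAdjustmentWriter
#     )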
def load_adjusted_array(self, columns, dates, assets, mask):
# load_adjusted_array is called with dates on which the user's algo
# will be shown data, which means we need to return the data that would
# be known at the start of each date. We assume that the latest data
# known on day N is the data from day (N - 1), so we shift all query
# dates back by a day.
start_date, end_date = shift_dates(
self._all_sessions, dates[0], dates[-1], shift=1,
)
colnames = [c.name for c in columns]
raw_arrays = self.raw_price_loader.load_raw_arrays(
colnames,
start_date,
end_date,
assets,
)
adjustments = self.adjustments_loader.load_adjustments(
colnames,
dates,
assets,
)
out = {}
for c, c_raw, c_adjs in zip(columns, raw_arrays, adjustments):
out[c] = AdjustedArray(
c_raw.astype(c.dtype),
c_adjs,
c.missing_value,
)
return out
# end of file: zipline/pipeline/loaders/equity_pricing_loader.py
from numpy import (
arange,
array,
eye,
float64,
full,
iinfo,
uint32,
)
from numpy.random import RandomState
from pandas import DataFrame, Timestamp
from six import iteritems
from sqlite3 import connect as sqlite3_connect
from .base import PipelineLoader
from .frame import DataFrameLoader
from zipline.data.us_equity_pricing import (
SQLiteAdjustmentReader,
SQLiteAdjustmentWriter,
US_EQUITY_PRICING_BCOLZ_COLUMNS,
)
from zipline.utils.numpy_utils import (
bool_dtype,
datetime64ns_dtype,
float64_dtype,
int64_dtype,
object_dtype,
)
UINT_32_MAX = iinfo(uint32).max
def nanos_to_seconds(nanos):
return nanos / (1000 * 1000 * 1000)
class PrecomputedLoader(PipelineLoader):
"""
Synthetic PipelineLoader that uses a pre-computed array for each column.
Parameters
----------
constants : dict
Map from column to values to use for that column.
Values can be anything that can be passed as the first positional
argument to a DataFrame whose indices are ``dates`` and ``sids``
dates : iterable[datetime-like]
Row labels for input data. Can be anything that pd.DataFrame will
coerce to a DatetimeIndex.
sids : iterable[int-like]
Column labels for input data. Can be anything that pd.DataFrame will
coerce to an Int64Index.
Notes
-----
Adjustments are unsupported by this loader.
"""
def __init__(self, constants, dates, sids):
loaders = {}
for column, const in iteritems(constants):
frame = DataFrame(
const,
index=dates,
columns=sids,
dtype=column.dtype,
)
loaders[column] = DataFrameLoader(
column=column,
baseline=frame,
adjustments=None,
)
self._loaders = loaders
def load_adjusted_array(self, columns, dates, assets, mask):
"""
Load by delegating to sub-loaders.
"""
out = {}
for col in columns:
try:
loader = self._loaders[col]
except KeyError:
raise ValueError("Couldn't find loader for %s" % col)
out.update(
loader.load_adjusted_array([col], dates, assets, mask)
)
return out
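# Usage sketch (illustrative; assumes pandas imported as ``pd`` and that
# USEquityPricing is importable): a scalar in ``constants`` is broadcast to a
# full (dates x sids) frame by the DataFrame constructor in __init__.
#
#     from zipline.pipeline.data import USEquityPricing
#     loader = PrecomputedLoader(
#         constants={USEquityPricing.close: 100.0},
#         dates=pd.date_range('2014-01-02', periods=5),
#         sids=[1, 2, 3],
#     )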
class EyeLoader(PrecomputedLoader):
"""
A PrecomputedLoader that emits arrays containing 1s on the diagonal and 0s
elsewhere.
Parameters
----------
columns : list[BoundColumn]
Columns that this loader should know about.
dates : iterable[datetime-like]
Same as PrecomputedLoader.
sids : iterable[int-like]
Same as PrecomputedLoader
"""
def __init__(self, columns, dates, sids):
shape = (len(dates), len(sids))
super(EyeLoader, self).__init__(
# np.eye takes N and M as separate arguments rather than a shape tuple.
{column: eye(*shape, dtype=column.dtype) for column in columns},
dates,
sids,
)
class SeededRandomLoader(PrecomputedLoader):
"""
A PrecomputedLoader that emits arrays randomly-generated with a given seed.
Parameters
----------
seed : int
Seed for numpy.random.RandomState.
columns : list[BoundColumn]
Columns that this loader should know about.
dates : iterable[datetime-like]
Same as PrecomputedLoader.
sids : iterable[int-like]
Same as PrecomputedLoader
"""
def __init__(self, seed, columns, dates, sids):
self._seed = seed
super(SeededRandomLoader, self).__init__(
{c: self.values(c.dtype, dates, sids) for c in columns},
dates,
sids,
)
def values(self, dtype, dates, sids):
"""
Make a random array of shape (len(dates), len(sids)) with ``dtype``.
"""
shape = (len(dates), len(sids))
return {
datetime64ns_dtype: self._datetime_values,
float64_dtype: self._float_values,
int64_dtype: self._int_values,
bool_dtype: self._bool_values,
object_dtype: self._object_values,
}[dtype](shape)
@property
def state(self):
"""
Make a new RandomState from our seed.
This ensures that every call to _*_values produces the same output
every time for a given SeededRandomLoader instance.
"""
return RandomState(self._seed)
def _float_values(self, shape):
"""
Return uniformly-distributed floats between 0.0 and 100.0.
"""
return self.state.uniform(low=0.0, high=100.0, size=shape)
def _int_values(self, shape):
"""
Return uniformly-distributed integers between 0 and 100.
"""
return (self.state.randint(low=0, high=100, size=shape)
.astype('int64')) # default is system int
def _datetime_values(self, shape):
"""
Return uniformly-distributed dates in 2014.
"""
start = Timestamp('2014', tz='UTC').asm8
offsets = self.state.randint(
low=0,
high=364,
size=shape,
).astype('timedelta64[D]')
return start + offsets
def _bool_values(self, shape):
"""
Return uniformly-distributed True/False values.
"""
return self.state.randn(*shape) < 0
def _object_values(self, shape):
res = self._int_values(shape).astype(str).astype(object)
return res
OHLCV = ('open', 'high', 'low', 'close', 'volume')
OHLC = ('open', 'high', 'low', 'close')
PSEUDO_EPOCH = Timestamp('2000-01-01', tz='UTC')
def asset_start(asset_info, asset):
ret = asset_info.loc[asset]['start_date']
if ret.tz is None:
ret = ret.tz_localize('UTC')
assert ret.tzname() == 'UTC', "Unexpected non-UTC timestamp"
return ret
def asset_end(asset_info, asset):
ret = asset_info.loc[asset]['end_date']
if ret.tz is None:
ret = ret.tz_localize('UTC')
assert ret.tzname() == 'UTC', "Unexpected non-UTC timestamp"
return ret
def make_bar_data(asset_info, calendar):
"""
For a given asset/date/column combination, we generate a corresponding raw
value using the following formula for OHLCV columns:
data(asset, date, column) = (100,000 * asset_id)
+ (1,000 * column_num)
+ (date - Jan 1 2000).days # ~6000 for 2015
where:
column_num('open') = 0
column_num('high') = 1
column_num('low') = 2
column_num('close') = 3
column_num('volume') = 4
We use days since Jan 1, 2000 to guarantee that there are no collisions
while also keeping the produced values smaller than UINT_32_MAX / 1000.
For 'day' and 'id', we use the standard format expected by the base class.
Parameters
----------
asset_info : DataFrame
DataFrame with asset_id as index and 'start_date'/'end_date' columns.
calendar : pd.DatetimeIndex
The trading calendar to use.
Yields
------
p : (int, pd.DataFrame)
A sid, data pair to be passed to BcolzDailyBarWriter.write
"""
assert (
# Using .value here to avoid having to care about UTC-aware dates.
PSEUDO_EPOCH.value <
calendar.normalize().min().value <=
asset_info['start_date'].min().value
), "calendar.min(): %s\nasset_info['start_date'].min(): %s" % (
calendar.min(),
asset_info['start_date'].min(),
)
assert (asset_info['start_date'] < asset_info['end_date']).all()
def _raw_data_for_asset(asset_id):
"""
Generate 'raw' data that encodes information about the asset.
See docstring for a description of the data format.
"""
# Get the dates for which this asset existed according to our asset
# info.
datetimes = calendar[calendar.slice_indexer(
asset_start(asset_info, asset_id),
asset_end(asset_info, asset_id),
)]
data = full(
(len(datetimes), len(US_EQUITY_PRICING_BCOLZ_COLUMNS)),
asset_id * 100 * 1000,
dtype=uint32,
)
# Add 1,000 * column-index to OHLCV columns
data[:, :5] += arange(5, dtype=uint32) * 1000
# Add days since Jan 1 2000 (PSEUDO_EPOCH) for OHLCV columns.
data[:, :5] += (datetimes - PSEUDO_EPOCH).days[:, None].astype(uint32)
frame = DataFrame(
data,
index=datetimes,
columns=US_EQUITY_PRICING_BCOLZ_COLUMNS,
)
frame['day'] = nanos_to_seconds(datetimes.asi8)
frame['id'] = asset_id
return frame
for asset in asset_info.index:
yield asset, _raw_data_for_asset(asset)
def expected_bar_value(asset_id, date, colname):
"""
Check that the raw value for an asset/date/column triple is as
expected.
Used by tests to verify data written by a writer.
"""
from_asset = asset_id * 100000
from_colname = OHLCV.index(colname) * 1000
from_date = (date - PSEUDO_EPOCH).days
return from_asset + from_colname + from_date
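# Worked example (illustrative): asset 2, column 'close' (index 3 in OHLCV),
# 10 days after PSEUDO_EPOCH:
#
#     2 * 100000 + 3 * 1000 + 10 == 203010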
def expected_bar_values_2d(dates, asset_info, colname):
"""
Return a 2D array containing expected_bar_value(asset_id, date,
colname) for each date/asset pair in the inputs.
Values before/after an asset's lifetime are filled with 0 for volume and
NaN for price columns.
"""
if colname == 'volume':
dtype = uint32
missing = 0
else:
dtype = float64
missing = float('nan')
assets = asset_info.index
data = full((len(dates), len(assets)), missing, dtype=dtype)
for j, asset in enumerate(assets):
start = asset_start(asset_info, asset)
end = asset_end(asset_info, asset)
for i, date in enumerate(dates):
# No value expected for dates outside the asset's start/end
# date.
if not (start <= date <= end):
continue
data[i, j] = expected_bar_value(asset, date, colname)
return data
class NullAdjustmentReader(SQLiteAdjustmentReader):
"""
A SQLiteAdjustmentReader that stores no adjustments and uses in-memory
SQLite.
"""
def __init__(self):
conn = sqlite3_connect(':memory:')
writer = SQLiteAdjustmentWriter(conn, None, None)
empty = DataFrame({
'sid': array([], dtype=uint32),
'effective_date': array([], dtype=uint32),
'ratio': array([], dtype=float),
})
empty_dividends = DataFrame({
'sid': array([], dtype=uint32),
'amount': array([], dtype=float64),
'record_date': array([], dtype='datetime64[ns]'),
'ex_date': array([], dtype='datetime64[ns]'),
'declared_date': array([], dtype='datetime64[ns]'),
'pay_date': array([], dtype='datetime64[ns]'),
})
writer.write(splits=empty, mergers=empty, dividends=empty_dividends)
super(NullAdjustmentReader, self).__init__(conn)
# end of file: zipline/pipeline/loaders/synthetic.py
from functools import partial
from numpy import (
ix_,
zeros,
)
from pandas import (
DataFrame,
DatetimeIndex,
Index,
Int64Index,
)
from zipline.lib.adjusted_array import AdjustedArray
from zipline.lib.adjustment import make_adjustment_from_labels
from zipline.utils.numpy_utils import as_column
from .base import PipelineLoader
ADJUSTMENT_COLUMNS = Index([
'sid',
'value',
'kind',
'start_date',
'end_date',
'apply_date',
])
class DataFrameLoader(PipelineLoader):
"""
A PipelineLoader that reads its input from DataFrames.
Mostly useful for testing, but can also be used for real work if your data
fits in memory.
Parameters
----------
column : zipline.pipeline.data.BoundColumn
The column whose data is loadable by this loader.
baseline : pandas.DataFrame
A DataFrame with index of type DatetimeIndex and columns of type
Int64Index. Dates should be labelled with the first date on which a
value would be **available** to an algorithm. This means that OHLCV
data should generally be shifted back by a trading day before being
supplied to this class.
adjustments : pandas.DataFrame, default=None
A DataFrame with the following columns:
sid : int
value : any
kind : int (zipline.pipeline.loaders.frame.ADJUSTMENT_TYPES)
start_date : datetime64 (can be NaT)
end_date : datetime64 (must be set)
apply_date : datetime64 (must be set)
The default of None is interpreted as "no adjustments to the baseline".
"""
def __init__(self, column, baseline, adjustments=None):
self.column = column
self.baseline = baseline.values.astype(self.column.dtype)
self.dates = baseline.index
self.assets = baseline.columns
if adjustments is None:
adjustments = DataFrame(
index=DatetimeIndex([]),
columns=ADJUSTMENT_COLUMNS,
)
else:
# Ensure that columns are in the correct order.
adjustments = adjustments.reindex_axis(ADJUSTMENT_COLUMNS, axis=1)
adjustments.sort_values(['apply_date', 'sid'], inplace=True)
self.adjustments = adjustments
self.adjustment_apply_dates = DatetimeIndex(adjustments.apply_date)
self.adjustment_end_dates = DatetimeIndex(adjustments.end_date)
self.adjustment_sids = Int64Index(adjustments.sid)
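# Usage sketch (illustrative; ``closes`` is a hypothetical DataFrame of
# close prices indexed by trading day, with sid columns):
#
#     from zipline.pipeline.data import USEquityPricing
#     loader = DataFrameLoader(USEquityPricing.close, baseline=closes)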
def format_adjustments(self, dates, assets):
"""
Build a dict of Adjustment objects in the format expected by
AdjustedArray.
Returns a dict of the form:
{
# Integer index into `dates` for the date on which we should
# apply the list of adjustments.
1 : [
Float64Multiply(first_row=2, last_row=4, col=3, value=0.5),
Float64Overwrite(first_row=3, last_row=5, col=1, value=2.0),
...
],
...
}
"""
make_adjustment = partial(make_adjustment_from_labels, dates, assets)
min_date, max_date = dates[[0, -1]]
# TODO: Consider porting this to Cython.
if len(self.adjustments) == 0:
return {}
# Mask for adjustments whose apply_dates are in the requested window of
# dates.
date_bounds = self.adjustment_apply_dates.slice_indexer(
min_date,
max_date,
)
dates_filter = zeros(len(self.adjustments), dtype='bool')
dates_filter[date_bounds] = True
# Ignore adjustments whose apply_date is in range, but whose end_date
# is out of range.
dates_filter &= (self.adjustment_end_dates >= min_date)
# Mask for adjustments whose sids are in the requested assets.
sids_filter = self.adjustment_sids.isin(assets.values)
adjustments_to_use = self.adjustments.loc[
dates_filter & sids_filter
].set_index('apply_date')
# For each apply_date on which we have an adjustment, compute
# the integer index of that adjustment's apply_date in `dates`.
# Then build a list of Adjustment objects for that apply_date.
# This logic relies on `self.adjustments` having been sorted by
# ['apply_date', 'sid'] in the constructor.
out = {}
previous_apply_date = object()
for row in adjustments_to_use.itertuples():
# This expansion depends on the ordering of the DataFrame columns,
# defined above.
apply_date, sid, value, kind, start_date, end_date = row
if apply_date != previous_apply_date:
# Get the next apply date if no exact match.
row_loc = dates.get_loc(apply_date, method='bfill')
current_date_adjustments = out[row_loc] = []
previous_apply_date = apply_date
# Look up the appropriate Adjustment constructor based on the value
# of `kind`.
current_date_adjustments.append(
make_adjustment(start_date, end_date, sid, kind, value)
)
return out
def load_adjusted_array(self, columns, dates, assets, mask):
"""
Load data from our stored baseline.
"""
column = self.column
if len(columns) != 1:
raise ValueError(
"Can't load multiple columns with DataFrameLoader"
)
elif columns[0] != column:
raise ValueError("Can't load unknown column %s" % columns[0])
date_indexer = self.dates.get_indexer(dates)
assets_indexer = self.assets.get_indexer(assets)
# Boolean arrays with True on matched entries
good_dates = (date_indexer != -1)
good_assets = (assets_indexer != -1)
data = self.baseline[ix_(date_indexer, assets_indexer)]
mask = (good_assets & as_column(good_dates)) & mask
# Mask out requested columns/rows that didn't match.
data[~mask] = column.missing_value
return {
column: AdjustedArray(
# Pull out requested columns/rows from our baseline data.
data=data,
adjustments=self.format_adjustments(dates, assets),
missing_value=column.missing_value,
),
}
# end of file: zipline/pipeline/loaders/frame.py
import datetime
import numpy as np
import pandas as pd
from zipline.errors import NoFurtherDataError
from zipline.pipeline.common import TS_FIELD_NAME, SID_FIELD_NAME
from zipline.utils.numpy_utils import categorical_dtype
from zipline.utils.pandas_utils import mask_between_time
def is_sorted_ascending(a):
"""Check if a numpy array is sorted."""
return (np.fmax.accumulate(a) <= a).all()
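# Examples (illustrative):
#
#     >>> is_sorted_ascending(np.array([1, 2, 2, 3]))
#     True
#     >>> is_sorted_ascending(np.array([3, 1, 2]))
#     False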
def validate_event_metadata(event_dates,
event_timestamps,
event_sids):
assert is_sorted_ascending(event_dates), "event dates must be sorted"
assert len(event_sids) == len(event_dates) == len(event_timestamps), \
"mismatched arrays: %d != %d != %d" % (
len(event_sids),
len(event_dates),
len(event_timestamps),
)
def next_event_indexer(all_dates,
all_sids,
event_dates,
event_timestamps,
event_sids):
"""
Construct an index array that, when applied to an array of values, produces
a 2D array containing the values associated with the next event for each
sid at each moment in time.
Locations where no next event was known will be filled with -1.
Parameters
----------
all_dates : ndarray[datetime64[ns], ndim=1]
Row labels for the target output.
all_sids : ndarray[int, ndim=1]
Column labels for the target output.
event_dates : ndarray[datetime64[ns], ndim=1]
Dates on which each input event occurred/will occur. ``event_dates``
must be in sorted order, and may not contain any NaT values.
event_timestamps : ndarray[datetime64[ns], ndim=1]
Dates on which we learned about each input event.
event_sids : ndarray[int, ndim=1]
Sids associated with each input event.
Returns
-------
indexer : ndarray[int, ndim=2]
An array of shape (len(all_dates), len(all_sids)) of indices into
``event_{dates,timestamps,sids}``.
"""
validate_event_metadata(event_dates, event_timestamps, event_sids)
out = np.full((len(all_dates), len(all_sids)), -1, dtype=np.int64)
sid_ixs = all_sids.searchsorted(event_sids)
# side='right' here ensures that we include the event date itself
# if it's in all_dates.
dt_ixs = all_dates.searchsorted(event_dates, side='right')
ts_ixs = all_dates.searchsorted(event_timestamps)
# Walk backward through the events, writing the index of the event into
# slots ranging from the event's timestamp to its asof. This depends for
# correctness on the fact that event_dates is sorted in ascending order,
# because we need to overwrite later events with earlier ones if their
# eligible windows overlap.
for i in range(len(event_sids) - 1, -1, -1):
start_ix = ts_ixs[i]
end_ix = dt_ixs[i]
out[start_ix:end_ix, sid_ixs[i]] = i
return out
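# Usage sketch (illustrative; the ``ev_*`` names are stand-ins for the event
# arrays described above): fan per-event values out to a (dates x sids) grid,
# then mask the slots where no next event was known:
#
#     indexer = next_event_indexer(dates, sids, ev_dates, ev_ts, ev_sids)
#     out = event_values[indexer]          # fancy-index per-event values
#     out[indexer == -1] = missing_value   # -1 means "no next event"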
def previous_event_indexer(all_dates,
all_sids,
event_dates,
event_timestamps,
event_sids):
"""
Construct an index array that, when applied to an array of values, produces
a 2D array containing the values associated with the previous event for
each sid at each moment in time.
Locations where no previous event was known will be filled with -1.
Parameters
----------
all_dates : ndarray[datetime64[ns], ndim=1]
Row labels for the target output.
all_sids : ndarray[int, ndim=1]
Column labels for the target output.
event_dates : ndarray[datetime64[ns], ndim=1]
Dates on which each input event occurred/will occur. ``event_dates``
must be in sorted order, and may not contain any NaT values.
event_timestamps : ndarray[datetime64[ns], ndim=1]
Dates on which we learned about each input event.
event_sids : ndarray[int, ndim=1]
Sids associated with each input event.
Returns
-------
indexer : ndarray[int, ndim=2]
An array of shape (len(all_dates), len(all_sids)) of indices into
``event_{dates,timestamps,sids}``.
"""
validate_event_metadata(event_dates, event_timestamps, event_sids)
out = np.full((len(all_dates), len(all_sids)), -1, dtype=np.int64)
eff_dts = np.maximum(event_dates, event_timestamps)
sid_ixs = all_sids.searchsorted(event_sids)
dt_ixs = all_dates.searchsorted(eff_dts)
# Walk backwards through the events, writing the index of the event into
# slots ranging from max(event_date, event_timestamp) to the start of the
# previously-written event. This depends for correctness on the fact that
# event_dates is sorted in ascending order, because we need to have written
# later events so we know where to stop forward-filling earlier events.
last_written = {}
for i in range(len(event_dates) - 1, -1, -1):
sid_ix = sid_ixs[i]
dt_ix = dt_ixs[i]
out[dt_ix:last_written.get(sid_ix, None), sid_ix] = i
last_written[sid_ix] = dt_ix
return out
def normalize_data_query_time(dt, time, tz):
"""Apply the correct time and timezone to a date.
Parameters
----------
dt : pd.Timestamp
The original datetime that represents the date.
time : datetime.time
The time of day to use as the cutoff point for new data. Data points
that you learn about after this time will become available to your
algorithm on the next trading day.
tz : tzinfo
The timezone to normalize your dates to before comparing against
`time`.
Returns
-------
query_dt : pd.Timestamp
The timestamp with the correct time and date in utc.
"""
# merge the correct date with the time in the given timezone then convert
# back to utc
return pd.Timestamp(
datetime.datetime.combine(dt.date(), time),
tz=tz,
).tz_convert('utc')
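# Worked example (illustrative): with an 8:45 US/Eastern cutoff, 2014-01-02
# (EST, UTC-5) normalizes to 13:45 UTC:
#
#     >>> import pytz
#     >>> normalize_data_query_time(
#     ...     pd.Timestamp('2014-01-02'),
#     ...     datetime.time(8, 45),
#     ...     pytz.timezone('US/Eastern'),
#     ... )
#     Timestamp('2014-01-02 13:45:00+0000', tz='UTC')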
def normalize_data_query_bounds(lower, upper, time, tz):
"""Adjust the first and last dates in the requested datetime index based on
the provided query time and tz.
Parameters
----------
lower : pd.Timestamp
The lower date requested.
upper : pd.Timestamp
The upper date requested.
time : datetime.time
The time of day to use as the cutoff point for new data. Data points
that you learn about after this time will become available to your
algorithm on the next trading day.
tz : tzinfo
The timezone to normalize your dates to before comparing against
`time`.
"""
# Subtract one day to grab things that happened on the first day we are
# requesting. This doesn't need to be a trading day, we are only adding
# a lower bound to limit the amount of in memory filtering that needs
# to happen.
lower -= datetime.timedelta(days=1)
if time is not None:
return normalize_data_query_time(
lower,
time,
tz,
), normalize_data_query_time(
upper,
time,
tz,
)
return lower, upper
_midnight = datetime.time(0, 0)
def normalize_timestamp_to_query_time(df,
time,
tz,
inplace=False,
ts_field='timestamp'):
"""Update the timestamp field of a dataframe to normalize dates around
some data query time/timezone.
Parameters
----------
df : pd.DataFrame
The dataframe to update. This needs a column named ``ts_field``.
time : datetime.time
The time of day to use as the cutoff point for new data. Data points
that you learn about after this time will become available to your
algorithm on the next trading day.
tz : tzinfo
The timezone to normalize your dates to before comparing against
`time`.
inplace : bool, optional
Update the dataframe in place.
ts_field : str, optional
The name of the timestamp field in ``df``.
Returns
-------
df : pd.DataFrame
The dataframe with the timestamp field normalized. If ``inplace`` is
true, then this will be the same object as ``df`` otherwise this will
be a copy.
"""
if not inplace:
# don't mutate the dataframe in place
df = df.copy()
# There is a pandas bug (0.18.1) where if the timestamps in a
# normalized DatetimeIndex are not sorted and one calls `tz_localize(None)`
# on that DatetimeIndex, some of the dates will be shifted by an hour
# (similar to the pandas 0.16.1 bug mentioned below). Therefore, we must
# sort the df here to ensure that we get the normalization correctly.
df.sort_values(ts_field, inplace=True)
dtidx = pd.DatetimeIndex(df.loc[:, ts_field], tz='utc')
dtidx_local_time = dtidx.tz_convert(tz)
to_roll_forward = mask_between_time(
dtidx_local_time,
time,
_midnight,
include_end=False,
)
# For all of the times that are greater than our query time add 1
# day and truncate to the date.
# We normalize twice here because of a bug in pandas 0.16.1 that causes
# tz_localize() to shift some timestamps by an hour if they are not grouped
# together by DST/EST.
df.loc[to_roll_forward, ts_field] = (
dtidx_local_time[to_roll_forward] + datetime.timedelta(days=1)
).normalize().tz_localize(None).tz_localize('utc').normalize()
df.loc[~to_roll_forward, ts_field] = dtidx[~to_roll_forward].normalize()
return df
def check_data_query_args(data_query_time, data_query_tz):
"""Checks the data_query_time and data_query_tz arguments for loaders
and raises a standard exception if one is None and the other is not.
Parameters
----------
data_query_time : datetime.time or None
data_query_tz : tzinfo or None
Raises
------
ValueError
Raised when only one of the arguments is None.
"""
if (data_query_time is None) ^ (data_query_tz is None):
raise ValueError(
"either 'data_query_time' and 'data_query_tz' must both be"
" None or neither may be None (got %r, %r)" % (
data_query_time,
data_query_tz,
),
)
def last_in_date_group(df,
dates,
assets,
reindex=True,
have_sids=True,
extra_groupers=None):
"""
Determine the last piece of information known on each date in the date
index for each group. Input df MUST be sorted such that the correct last
item is chosen from each group.
Parameters
----------
df : pd.DataFrame
The DataFrame containing the data to be grouped. Must be sorted so that
the correct last item is chosen from each group.
dates : pd.DatetimeIndex
The dates to use for grouping and reindexing.
assets : pd.Int64Index
The assets that should be included in the column multiindex.
reindex : bool
Whether or not the DataFrame should be reindexed against the date
index. This will add back any dates to the index that were grouped
away.
have_sids : bool
Whether or not the DataFrame has sids. If it does, they will be used
in the groupby.
extra_groupers : list of str
Any extra field names that should be included in the groupby.
Returns
-------
last_in_group : pd.DataFrame
A DataFrame with dates as the index and fields used in the groupby as
levels of a multiindex of columns.
"""
idx = [dates[dates.searchsorted(
df[TS_FIELD_NAME].values.astype('datetime64[D]')
)]]
if have_sids:
idx += [SID_FIELD_NAME]
if extra_groupers is None:
extra_groupers = []
idx += extra_groupers
last_in_group = df.drop(TS_FIELD_NAME, axis=1).groupby(
idx,
sort=False,
).last()
# For the number of things that we're grouping by (except TS), unstack
# the df. Done this way because of an unresolved pandas bug whereby
# passing a list of levels with mixed dtypes to unstack causes the
# resulting DataFrame to have all object-type columns.
for _ in range(len(idx) - 1):
last_in_group = last_in_group.unstack(-1)
if reindex:
if have_sids:
cols = last_in_group.columns
last_in_group = last_in_group.reindex(
index=dates,
columns=pd.MultiIndex.from_product(
tuple(cols.levels[0:len(extra_groupers) + 1]) + (assets,),
names=cols.names,
),
)
else:
last_in_group = last_in_group.reindex(dates)
return last_in_group
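# Usage sketch (illustrative; ``events`` is a hypothetical frame with
# 'timestamp' and 'sid' columns plus one column per field): collapse to the
# last record known per (date, sid), producing a frame indexed by
# ``sessions`` with a (field, sid) column MultiIndex:
#
#     panel = last_in_date_group(events, dates=sessions, assets=sids)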
def ffill_across_cols(df, columns, name_map):
"""
Forward fill values in a DataFrame with special logic to handle cases
that pd.DataFrame.ffill cannot and cast columns to appropriate types.
Parameters
----------
df : pd.DataFrame
The DataFrame to do forward-filling on.
columns : list of BoundColumn
The BoundColumns that correspond to columns in the DataFrame to which
special filling and/or casting logic should be applied.
name_map : map of string -> string
Mapping from the name of each BoundColumn to the associated column
name in `df`.
"""
df.ffill(inplace=True)
# Fill in missing values specified by each column. This is made
# significantly more complex by the fact that we need to work around
# two pandas issues:
# 1) When we have sids, if there are no records for a given sid for any
# dates, pandas will generate a column full of NaNs for that sid.
# This means that some of the columns in `dense_output` are now
# float instead of the intended dtype, so we have to coerce back to
# our expected type and convert NaNs into the desired missing value.
# 2) DataFrame.ffill assumes that receiving None as a fill-value means
# that no value was passed. Consequently, there's no way to tell
# pandas to replace NaNs in an object column with None using fillna,
# so we have to roll our own instead using df.where.
for column in columns:
column_name = name_map[column.name]
# Special logic for strings since `fillna` doesn't work if the
# missing value is `None`.
if column.dtype == categorical_dtype:
df[column_name] = df[
column_name
].where(pd.notnull(df[column_name]),
column.missing_value)
else:
# We need to execute `fillna` before `astype` in case the
# column contains NaNs and needs to be cast to bool or int.
# This is so that the NaNs are replaced first, since pandas
# can't convert NaNs for those types.
df[column_name] = df[
column_name
].fillna(column.missing_value).astype(column.dtype)
def shift_dates(dates, start_date, end_date, shift):
"""
Shift dates of a pipeline query back by `shift` days.
load_adjusted_array is called with dates on which the user's algo
will be shown data, which means we need to return the data that would
be known at the start of each date. This is often labeled with a
previous date in the underlying data (e.g. at the start of today, we
have the data as of yesterday). In this case, we can shift the query
dates back to query the appropriate values.
Parameters
----------
dates : DatetimeIndex
All known dates.
start_date : pd.Timestamp
Start date of the pipeline query.
end_date : pd.Timestamp
End date of the pipeline query.
shift : int
The number of days to shift back the query dates.
"""
try:
start = dates.get_loc(start_date)
except KeyError:
if start_date < dates[0]:
raise NoFurtherDataError(
msg=(
"Pipeline Query requested data starting on {query_start}, "
"but first known date is {calendar_start}"
).format(
query_start=str(start_date),
calendar_start=str(dates[0]),
)
)
else:
raise ValueError("Query start %s not in calendar" % start_date)
# Make sure that shifting doesn't push us out of the calendar.
if start < shift:
raise NoFurtherDataError(
msg=(
"Pipeline Query requested data from {shift}"
" days before {query_start}, but first known date is only "
"{start} days earlier."
).format(shift=shift, query_start=start_date, start=start),
)
try:
end = dates.get_loc(end_date)
except KeyError:
if end_date > dates[-1]:
raise NoFurtherDataError(
msg=(
"Pipeline Query requesting data up to {query_end}, "
"but last known date is {calendar_end}"
).format(
query_end=end_date,
calendar_end=dates[-1],
)
)
else:
raise ValueError("Query end %s not in calendar" % end_date)
return dates[start - shift], dates[end - shift]
# end of file: zipline/pipeline/loaders/utils.py
from abc import abstractmethod, abstractproperty
import numpy as np
import pandas as pd
from six import viewvalues
from toolz import groupby
from zipline.lib.adjusted_array import AdjustedArray
from zipline.lib.adjustment import (
Datetime641DArrayOverwrite,
Datetime64Overwrite,
Float641DArrayOverwrite,
Float64Multiply,
Float64Overwrite,
)
from zipline.pipeline.common import (
EVENT_DATE_FIELD_NAME,
FISCAL_QUARTER_FIELD_NAME,
FISCAL_YEAR_FIELD_NAME,
SID_FIELD_NAME,
TS_FIELD_NAME,
)
from zipline.pipeline.loaders.base import PipelineLoader
from zipline.utils.numpy_utils import datetime64ns_dtype, float64_dtype
from zipline.pipeline.loaders.utils import (
ffill_across_cols,
last_in_date_group
)
INVALID_NUM_QTRS_MESSAGE = "Passed invalid number of quarters %s; " \
"must pass a number of quarters >= 0"
NEXT_FISCAL_QUARTER = 'next_fiscal_quarter'
NEXT_FISCAL_YEAR = 'next_fiscal_year'
NORMALIZED_QUARTERS = 'normalized_quarters'
PREVIOUS_FISCAL_QUARTER = 'previous_fiscal_quarter'
PREVIOUS_FISCAL_YEAR = 'previous_fiscal_year'
SHIFTED_NORMALIZED_QTRS = 'shifted_normalized_quarters'
SIMULATION_DATES = 'dates'
def normalize_quarters(years, quarters):
return years * 4 + quarters - 1
def split_normalized_quarters(normalized_quarters):
years = normalized_quarters // 4
quarters = normalized_quarters % 4
return years, quarters + 1
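# Worked example (illustrative): 2014Q3 round-trips through a single
# sortable integer:
#
#     >>> normalize_quarters(2014, 3)
#     8058
#     >>> split_normalized_quarters(8058)
#     (2014, 3)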
# These metadata columns are used to align event indexers.
metadata_columns = frozenset({
TS_FIELD_NAME,
SID_FIELD_NAME,
EVENT_DATE_FIELD_NAME,
FISCAL_QUARTER_FIELD_NAME,
FISCAL_YEAR_FIELD_NAME,
})
def required_estimates_fields(columns):
"""
Compute the set of resource columns required to serve
`columns`.
"""
# We also expect any of the field names that our loadable columns
# are mapped to.
return metadata_columns.union(viewvalues(columns))
def validate_column_specs(events, columns):
"""
Verify that the columns of ``events`` can be used by a
EarningsEstimatesLoader to serve the BoundColumns described by
`columns`.
"""
required = required_estimates_fields(columns)
received = set(events.columns)
missing = required - received
if missing:
raise ValueError(
"EarningsEstimatesLoader missing required columns {missing}.\n"
"Got Columns: {received}\n"
"Expected Columns: {required}".format(
missing=sorted(missing),
received=sorted(received),
required=sorted(required),
)
)
def add_new_adjustments(adjustments_dict,
adjustments,
column_name,
ts):
try:
adjustments_dict[column_name][ts].extend(adjustments)
except KeyError:
adjustments_dict[column_name][ts] = adjustments
class EarningsEstimatesLoader(PipelineLoader):
"""
An abstract pipeline loader for estimates data that can load data a
variable number of quarters forwards/backwards from calendar dates
depending on the `num_announcements` attribute of the columns' dataset.
If split adjustments are to be applied, a loader, split-adjusted columns,
and the split-adjusted asof-date must be supplied.
Parameters
----------
estimates : pd.DataFrame
The raw estimates data.
``estimates`` must contain at least 5 columns:
sid : int64
The asset id associated with each estimate.
event_date : datetime64[ns]
The date on which the event that the estimate is for will/has
occurred..
timestamp : datetime64[ns]
The date on which we learned about the estimate.
fiscal_quarter : int64
The quarter during which the event has/will occur.
fiscal_year : int64
The year during which the event has/will occur.
name_map : dict[str -> str]
A map of names of BoundColumns that this loader will load to the
names of the corresponding columns in `events`.
"""
def __init__(self,
estimates,
name_map):
validate_column_specs(
estimates,
name_map
)
self.estimates = estimates[
estimates[EVENT_DATE_FIELD_NAME].notnull() &
estimates[FISCAL_QUARTER_FIELD_NAME].notnull() &
estimates[FISCAL_YEAR_FIELD_NAME].notnull()
]
self.estimates[NORMALIZED_QUARTERS] = normalize_quarters(
self.estimates[FISCAL_YEAR_FIELD_NAME],
self.estimates[FISCAL_QUARTER_FIELD_NAME],
)
self.array_overwrites_dict = {
datetime64ns_dtype: Datetime641DArrayOverwrite,
float64_dtype: Float641DArrayOverwrite,
}
self.scalar_overwrites_dict = {
datetime64ns_dtype: Datetime64Overwrite,
float64_dtype: Float64Overwrite,
}
self.name_map = name_map
@abstractmethod
def get_zeroth_quarter_idx(self, stacked_last_per_qtr):
raise NotImplementedError('get_zeroth_quarter_idx')
@abstractmethod
def get_shifted_qtrs(self, zero_qtrs, num_announcements):
raise NotImplementedError('get_shifted_qtrs')
@abstractmethod
def create_overwrite_for_estimate(self,
column,
column_name,
last_per_qtr,
next_qtr_start_idx,
requested_quarter,
sid,
sid_idx,
col_to_split_adjustments,
split_adjusted_asof_idx):
raise NotImplementedError('create_overwrite_for_estimate')
@abstractproperty
def searchsorted_side(self):
raise NotImplementedError('searchsorted_side')
def get_requested_quarter_data(self,
zero_qtr_data,
zeroth_quarter_idx,
stacked_last_per_qtr,
num_announcements,
dates):
"""
Selects the requested data for each date.
Parameters
----------
zero_qtr_data : pd.DataFrame
The 'time zero' data for each calendar date per sid.
zeroth_quarter_idx : pd.Index
An index of calendar dates, sid, and normalized quarters, for only
the rows that have a next or previous earnings estimate.
stacked_last_per_qtr : pd.DataFrame
The latest estimate known with the dates, normalized quarter, and
sid as the index.
num_announcements : int
The number of announcements out the user requested relative to
each date in the calendar dates.
dates : pd.DatetimeIndex
The calendar dates for which estimates data is requested.
Returns
-------
requested_qtr_data : pd.DataFrame
The DataFrame with the latest values for the requested quarter
for all columns; `dates` are the index and columns are a MultiIndex
with sids at the top level and the dataset columns on the bottom.
"""
zero_qtr_data_idx = zero_qtr_data.index
requested_qtr_idx = pd.MultiIndex.from_arrays(
[
zero_qtr_data_idx.get_level_values(0),
zero_qtr_data_idx.get_level_values(1),
self.get_shifted_qtrs(
zeroth_quarter_idx.get_level_values(
NORMALIZED_QUARTERS,
),
num_announcements,
),
],
names=[
zero_qtr_data_idx.names[0],
zero_qtr_data_idx.names[1],
SHIFTED_NORMALIZED_QTRS,
],
)
requested_qtr_data = stacked_last_per_qtr.loc[requested_qtr_idx]
requested_qtr_data = requested_qtr_data.reset_index(
SHIFTED_NORMALIZED_QTRS,
)
# Calculate the actual year/quarter being requested and add those in
# as columns.
(requested_qtr_data[FISCAL_YEAR_FIELD_NAME],
requested_qtr_data[FISCAL_QUARTER_FIELD_NAME]) = \
split_normalized_quarters(
requested_qtr_data[SHIFTED_NORMALIZED_QTRS]
)
# Once we're left with just dates as the index, we can reindex by all
# dates so that we have a value for each calendar date.
return requested_qtr_data.unstack(SID_FIELD_NAME).reindex(dates)
def get_split_adjusted_asof_idx(self, dates):
"""
Compute the index in `dates` where the split-adjusted-asof-date
falls. This is the date up to which, and including which, we will
need to unapply all adjustments for and then re-apply them as they
come in. After this date, adjustments are applied as normal.
Parameters
----------
dates : pd.DatetimeIndex
The calendar dates over which the Pipeline is being computed.
Returns
-------
split_adjusted_asof_idx : int
The index in `dates` at which the data should be split.
"""
split_adjusted_asof_idx = dates.searchsorted(
self._split_adjusted_asof
)
# The split-asof date is after the date index.
if split_adjusted_asof_idx == len(dates):
split_adjusted_asof_idx = len(dates) - 1
elif self._split_adjusted_asof < dates[0].tz_localize(None):
split_adjusted_asof_idx = -1
return split_adjusted_asof_idx
def collect_overwrites_for_sid(self,
group,
dates,
requested_qtr_data,
last_per_qtr,
sid_idx,
columns,
all_adjustments_for_sid,
sid):
"""
Given a sid, collect all overwrites that should be applied for this
sid at each quarter boundary.
Parameters
----------
group : pd.DataFrame
The data for `sid`.
dates : pd.DatetimeIndex
The calendar dates for which estimates data is requested.
requested_qtr_data : pd.DataFrame
The DataFrame with the latest values for the requested quarter
for all columns.
last_per_qtr : pd.DataFrame
A DataFrame with a column MultiIndex of [self.estimates.columns,
normalized_quarters, sid] that allows easily getting the timeline
of estimates for a particular sid for a particular quarter.
sid_idx : int
The sid's index in the asset index.
columns : list of BoundColumn
The columns for which the overwrites should be computed.
all_adjustments_for_sid : dict[int -> AdjustedArray]
A dictionary of the integer index of each timestamp into the date
index, mapped to adjustments that should be applied at that
index for the given sid (`sid`). This dictionary is modified as
adjustments are collected.
sid : int
The sid for which overwrites should be computed.
"""
# If data was requested for only 1 date, there can never be any
# overwrites, so skip the extra work.
if len(dates) == 1:
return
next_qtr_start_indices = dates.searchsorted(
group[EVENT_DATE_FIELD_NAME].values,
side=self.searchsorted_side,
)
qtrs_with_estimates = group.index.get_level_values(
NORMALIZED_QUARTERS
).values
for idx in next_qtr_start_indices:
if 0 < idx < len(dates):
# Find the quarter being requested in the quarter we're
# crossing into.
requested_quarter = requested_qtr_data[
SHIFTED_NORMALIZED_QTRS, sid,
].iloc[idx]
# Only add adjustments if the next quarter starts somewhere
# in our date index for this sid. Our 'next' quarter can
# never start at index 0; a starting index of 0 means that
# the next quarter's event date was NaT.
self.create_overwrites_for_quarter(
all_adjustments_for_sid,
idx,
last_per_qtr,
qtrs_with_estimates,
requested_quarter,
sid,
sid_idx,
columns
)
def get_adjustments_for_sid(self,
group,
dates,
requested_qtr_data,
last_per_qtr,
sid_to_idx,
columns,
col_to_all_adjustments,
**kwargs):
"""
Parameters
----------
group : pd.DataFrame
The data for the given sid.
dates : pd.DatetimeIndex
The calendar dates for which estimates data is requested.
requested_qtr_data : pd.DataFrame
The DataFrame with the latest values for the requested quarter
for all columns.
last_per_qtr : pd.DataFrame
A DataFrame with a column MultiIndex of [self.estimates.columns,
normalized_quarters, sid] that allows easily getting the timeline
of estimates for a particular sid for a particular quarter.
sid_to_idx : dict[int -> int]
A dictionary mapping sid to the sid's index in the asset index.
columns : list of BoundColumn
The columns for which the overwrites should be computed.
col_to_all_adjustments : dict[int -> AdjustedArray]
A dictionary of the integer index of each timestamp into the date
index, mapped to adjustments that should be applied at that
index. This dictionary is for adjustments for ALL sids. It is
modified as adjustments are collected.
kwargs :
Additional arguments used in collecting adjustments; unused here.
"""
# Collect all adjustments for a given sid.
all_adjustments_for_sid = {}
sid = int(group.name)
self.collect_overwrites_for_sid(group,
dates,
requested_qtr_data,
last_per_qtr,
sid_to_idx[sid],
columns,
all_adjustments_for_sid,
sid)
self.merge_into_adjustments_for_all_sids(
all_adjustments_for_sid, col_to_all_adjustments
)
def merge_into_adjustments_for_all_sids(self,
all_adjustments_for_sid,
col_to_all_adjustments):
"""
Merge adjustments for a particular sid into a dictionary containing
adjustments for all sids.
Parameters
----------
all_adjustments_for_sid : dict[int -> AdjustedArray]
All adjustments for a particular sid.
col_to_all_adjustments : dict[int -> AdjustedArray]
All adjustments for all sids.
"""
for col_name in all_adjustments_for_sid:
if col_name not in col_to_all_adjustments:
col_to_all_adjustments[col_name] = {}
for ts in all_adjustments_for_sid[col_name]:
adjs = all_adjustments_for_sid[col_name][ts]
add_new_adjustments(col_to_all_adjustments,
adjs,
col_name,
ts)
def get_adjustments(self,
zero_qtr_data,
requested_qtr_data,
last_per_qtr,
dates,
assets,
columns,
**kwargs):
"""
Creates an AdjustedArray from the given estimates data for the given
dates.
Parameters
----------
zero_qtr_data : pd.DataFrame
The 'time zero' data for each calendar date per sid.
requested_qtr_data : pd.DataFrame
The requested quarter data for each calendar date per sid.
last_per_qtr : pd.DataFrame
A DataFrame with a column MultiIndex of [self.estimates.columns,
normalized_quarters, sid] that allows easily getting the timeline
of estimates for a particular sid for a particular quarter.
dates : pd.DatetimeIndex
The calendar dates for which estimates data is requested.
assets : pd.Int64Index
An index of all the assets from the raw data.
columns : list of BoundColumn
The columns for which adjustments need to be calculated.
kwargs :
Additional keyword arguments that should be forwarded to
`get_adjustments_for_sid` and to be used in computing adjustments
for each sid.
Returns
-------
col_to_all_adjustments : dict[int -> AdjustedArray]
A dictionary of all adjustments that should be applied.
"""
zero_qtr_data.sort_index(inplace=True)
# Here we want to get the LAST record from each group of records
# corresponding to a single quarter. This is to ensure that we select
# the most up-to-date event date in case the event date changes.
quarter_shifts = zero_qtr_data.groupby(
level=[SID_FIELD_NAME, NORMALIZED_QUARTERS]
).nth(-1)
col_to_all_adjustments = {}
sid_to_idx = dict(zip(assets, range(len(assets))))
quarter_shifts.groupby(level=SID_FIELD_NAME).apply(
self.get_adjustments_for_sid,
dates,
requested_qtr_data,
last_per_qtr,
sid_to_idx,
columns,
col_to_all_adjustments,
**kwargs
)
return col_to_all_adjustments
def create_overwrites_for_quarter(self,
col_to_overwrites,
next_qtr_start_idx,
last_per_qtr,
quarters_with_estimates_for_sid,
requested_quarter,
sid,
sid_idx,
columns):
"""
Add entries to the dictionary of columns to adjustments for the given
sid and the given quarter.
Parameters
----------
col_to_overwrites : dict [column_name -> list of ArrayAdjustment]
A dictionary mapping column names to all overwrites for those
columns.
next_qtr_start_idx : int
The index of the first day of the next quarter in the calendar
dates.
last_per_qtr : pd.DataFrame
A DataFrame with a column MultiIndex of [self.estimates.columns,
normalized_quarters, sid] that allows easily getting the timeline
of estimates for a particular sid for a particular quarter; this
is particularly useful for getting adjustments for 'next'
estimates.
quarters_with_estimates_for_sid : np.array
An array of all quarters for which there are estimates for the
given sid.
requested_quarter : float
The quarter for which the overwrite should be created.
sid : int
The sid for which to create overwrites.
sid_idx : int
The index of the sid in `assets`.
columns : list of BoundColumn
The columns for which to create overwrites.
"""
for col in columns:
column_name = self.name_map[col.name]
if column_name not in col_to_overwrites:
col_to_overwrites[column_name] = {}
# If there are estimates for the requested quarter,
# overwrite all values going up to the starting index of
# that quarter with estimates for that quarter.
if requested_quarter in quarters_with_estimates_for_sid:
adjs = self.create_overwrite_for_estimate(
col,
column_name,
last_per_qtr,
next_qtr_start_idx,
requested_quarter,
sid,
sid_idx,
)
add_new_adjustments(col_to_overwrites,
adjs,
column_name,
next_qtr_start_idx)
# There are no estimates for the quarter. Overwrite all
# values going up to the starting index of that quarter
# with the missing value for this column.
else:
adjs = [self.overwrite_with_null(
col,
next_qtr_start_idx,
sid_idx)]
add_new_adjustments(col_to_overwrites,
adjs,
column_name,
next_qtr_start_idx)
def overwrite_with_null(self,
column,
next_qtr_start_idx,
sid_idx):
return self.scalar_overwrites_dict[column.dtype](
0,
next_qtr_start_idx - 1,
sid_idx,
sid_idx,
column.missing_value
)
def load_adjusted_array(self, columns, dates, assets, mask):
# Separate out getting the columns' datasets and the datasets'
# num_announcements attributes to ensure that we're catching the right
# AttributeError.
col_to_datasets = {col: col.dataset for col in columns}
try:
groups = groupby(lambda col:
col_to_datasets[col].num_announcements,
col_to_datasets)
except AttributeError:
raise AttributeError("Datasets loaded via the "
"EarningsEstimatesLoader must define a "
"`num_announcements` attribute that defines "
"how many quarters out the loader should load"
" the data relative to `dates`.")
if any(num_qtr < 0 for num_qtr in groups):
raise ValueError(
INVALID_NUM_QTRS_MESSAGE % ','.join(
str(qtr) for qtr in groups if qtr < 0
)
)
out = {}
# To optimize performance, only work below on assets that are
# actually in the raw data.
assets_with_data = set(assets) & set(self.estimates[SID_FIELD_NAME])
last_per_qtr, stacked_last_per_qtr = self.get_last_data_per_qtr(
assets_with_data,
columns,
dates
)
# Determine which quarter is immediately next/previous for each
# date.
zeroth_quarter_idx = self.get_zeroth_quarter_idx(stacked_last_per_qtr)
zero_qtr_data = stacked_last_per_qtr.loc[zeroth_quarter_idx]
for num_announcements, columns in groups.items():
requested_qtr_data = self.get_requested_quarter_data(
zero_qtr_data,
zeroth_quarter_idx,
stacked_last_per_qtr,
num_announcements,
dates,
)
# Calculate all adjustments for the given quarter and accumulate
# them for each column.
col_to_adjustments = self.get_adjustments(
zero_qtr_data,
requested_qtr_data,
last_per_qtr,
dates,
assets,
columns
)
            # Look up the asset indexer once so that we can reindex the
            # assets returned into the assets requested for each column.
            # This depends on the fact that our column multiindex has the
            # same sids for each field, which lets us do the lookup once on
            # level 1 instead of once per value in level 0.
asset_indexer = assets.get_indexer_for(
requested_qtr_data.columns.levels[1],
)
for col in columns:
column_name = self.name_map[col.name]
# allocate the empty output with the correct missing value
output_array = np.full(
(len(dates), len(assets)),
col.missing_value,
dtype=col.dtype,
)
# overwrite the missing value with values from the computed
# data
output_array[
:,
asset_indexer,
] = requested_qtr_data[column_name].values
out[col] = AdjustedArray(
output_array,
                    # There may not be any adjustments at all (e.g. if
                    # len(dates) == 1), so provide a default.
dict(col_to_adjustments.get(column_name, {})),
col.missing_value,
)
return out
def get_last_data_per_qtr(self, assets_with_data, columns, dates):
"""
Determine the last piece of information we know for each column on each
date in the index for each sid and quarter.
Parameters
----------
assets_with_data : pd.Index
Index of all assets that appear in the raw data given to the
loader.
columns : iterable of BoundColumn
The columns that need to be loaded from the raw data.
dates : pd.DatetimeIndex
The calendar of dates for which data should be loaded.
        Returns
        -------
        last_per_qtr : pd.DataFrame
            A DataFrame with columns that are a MultiIndex of
            [self.estimates.columns, normalized_quarters, sid].
        stacked_last_per_qtr : pd.DataFrame
            A DataFrame indexed by [dates, sid, normalized_quarters] that has
            the latest information for each row of the index, sorted by event
            date.
"""
# Get a DataFrame indexed by date with a MultiIndex of columns of [
# self.estimates.columns, normalized_quarters, sid], where each cell
# contains the latest data for that day.
last_per_qtr = last_in_date_group(
self.estimates,
dates,
assets_with_data,
reindex=True,
extra_groupers=[NORMALIZED_QUARTERS],
)
# Forward fill values for each quarter/sid/dataset column.
ffill_across_cols(last_per_qtr, columns, self.name_map)
# Stack quarter and sid into the index.
stacked_last_per_qtr = last_per_qtr.stack(
[SID_FIELD_NAME, NORMALIZED_QUARTERS],
)
# Set date index name for ease of reference
stacked_last_per_qtr.index.set_names(
SIMULATION_DATES,
level=0,
inplace=True,
)
stacked_last_per_qtr = stacked_last_per_qtr.sort_values(
EVENT_DATE_FIELD_NAME,
)
stacked_last_per_qtr[EVENT_DATE_FIELD_NAME] = pd.to_datetime(
stacked_last_per_qtr[EVENT_DATE_FIELD_NAME]
)
return last_per_qtr, stacked_last_per_qtr
class NextEarningsEstimatesLoader(EarningsEstimatesLoader):
searchsorted_side = 'right'
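    # A minimal illustration of the difference between the two
    # ``searchsorted`` sides used by the next/previous loaders:
    #
    #     np.searchsorted([1, 2, 3], 2, side='right')  # -> 2
    #     np.searchsorted([1, 2, 3], 2, side='left')   # -> 1
    #
    # 'right' places a date that exactly matches an existing entry after
    # that entry; 'left' places it before.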
def create_overwrite_for_estimate(self,
column,
column_name,
last_per_qtr,
next_qtr_start_idx,
requested_quarter,
sid,
sid_idx,
col_to_split_adjustments=None,
split_adjusted_asof_idx=None):
return [self.array_overwrites_dict[column.dtype](
0,
next_qtr_start_idx - 1,
sid_idx,
sid_idx,
last_per_qtr[
column_name,
requested_quarter,
sid,
].values[:next_qtr_start_idx],
)]
def get_shifted_qtrs(self, zero_qtrs, num_announcements):
return zero_qtrs + (num_announcements - 1)
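    # For example, treating normalized quarters as consecutive integers
    # (hypothetical encoding): num_announcements=1 requests the upcoming
    # (zeroth) quarter itself, num_announcements=2 the quarter after it:
    #
    #     get_shifted_qtrs(100, 1)  # -> 100
    #     get_shifted_qtrs(100, 2)  # -> 101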
def get_zeroth_quarter_idx(self, stacked_last_per_qtr):
"""
Filters for releases that are on or after each simulation date and
determines the next quarter by picking out the upcoming release for
each date in the index.
Parameters
----------
stacked_last_per_qtr : pd.DataFrame
A DataFrame with index of calendar dates, sid, and normalized
quarters with each row being the latest estimate for the row's
index values, sorted by event date.
Returns
-------
next_releases_per_date_index : pd.MultiIndex
An index of calendar dates, sid, and normalized quarters, for only
the rows that have a next event.
"""
next_releases_per_date = stacked_last_per_qtr.loc[
stacked_last_per_qtr[EVENT_DATE_FIELD_NAME] >=
stacked_last_per_qtr.index.get_level_values(SIMULATION_DATES)
].groupby(
level=[SIMULATION_DATES, SID_FIELD_NAME],
as_index=False,
# Here we take advantage of the fact that `stacked_last_per_qtr` is
# sorted by event date.
).nth(0)
return next_releases_per_date.index
class PreviousEarningsEstimatesLoader(EarningsEstimatesLoader):
searchsorted_side = 'left'
def create_overwrite_for_estimate(self,
column,
column_name,
dates,
next_qtr_start_idx,
requested_quarter,
sid,
sid_idx,
col_to_split_adjustments=None,
split_adjusted_asof_idx=None,
split_dict=None):
return [self.overwrite_with_null(
column,
next_qtr_start_idx,
sid_idx,
)]
def get_shifted_qtrs(self, zero_qtrs, num_announcements):
return zero_qtrs - (num_announcements - 1)
def get_zeroth_quarter_idx(self, stacked_last_per_qtr):
"""
        Filters for releases that are on or before each simulation date and
        determines the previous quarter by picking out the most recent
        release relative to each date in the index.
Parameters
----------
stacked_last_per_qtr : pd.DataFrame
A DataFrame with index of calendar dates, sid, and normalized
quarters with each row being the latest estimate for the row's
index values, sorted by event date.
Returns
-------
previous_releases_per_date_index : pd.MultiIndex
An index of calendar dates, sid, and normalized quarters, for only
the rows that have a previous event.
"""
previous_releases_per_date = stacked_last_per_qtr.loc[
stacked_last_per_qtr[EVENT_DATE_FIELD_NAME] <=
stacked_last_per_qtr.index.get_level_values(SIMULATION_DATES)
].groupby(
level=[SIMULATION_DATES, SID_FIELD_NAME],
as_index=False,
# Here we take advantage of the fact that `stacked_last_per_qtr` is
# sorted by event date.
).nth(-1)
return previous_releases_per_date.index
def validate_split_adjusted_column_specs(name_map, columns):
to_be_split = set(columns)
available = set(name_map.keys())
extra = to_be_split - available
if extra:
raise ValueError(
"EarningsEstimatesLoader got the following extra columns to be "
"split-adjusted: {extra}.\n"
"Got Columns: {to_be_split}\n"
"Available Columns: {available}".format(
extra=sorted(extra),
to_be_split=sorted(to_be_split),
available=sorted(available),
)
)
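# A sketch of the failure mode (hypothetical column names): asking for a
# split-adjusted column that the loader does not know about fails loudly at
# construction time, e.g.
#
#     validate_split_adjusted_column_specs(
#         name_map={'estimate': 'estimate'},
#         columns={'estimate', 'eps'},
#     )  # raises ValueError: ... extra columns ...: ['eps']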
class SplitAdjustedEstimatesLoader(EarningsEstimatesLoader):
"""
Estimates loader that loads data that needs to be split-adjusted.
Parameters
----------
split_adjustments_loader : SQLiteAdjustmentReader
The loader to use for reading split adjustments.
split_adjusted_column_names : iterable of str
The column names that should be split-adjusted.
split_adjusted_asof : pd.Timestamp
The date that separates data into 2 halves: the first half is the set
of dates up to and including the split_adjusted_asof date. All
adjustments occurring during this first half are applied to all
dates in this first half. The second half is the set of dates after
the split_adjusted_asof date. All adjustments occurring during this
second half are applied sequentially as they appear in the timeline.
"""
def __init__(self,
estimates,
name_map,
split_adjustments_loader,
split_adjusted_column_names,
split_adjusted_asof):
validate_split_adjusted_column_specs(name_map,
split_adjusted_column_names)
self._split_adjustments = split_adjustments_loader
self._split_adjusted_column_names = split_adjusted_column_names
self._split_adjusted_asof = split_adjusted_asof
self._split_adjustment_dict = {}
super(SplitAdjustedEstimatesLoader, self).__init__(
estimates,
name_map
)
@abstractmethod
def collect_split_adjustments(self,
adjustments_for_sid,
requested_qtr_data,
dates,
sid,
sid_idx,
sid_estimates,
split_adjusted_asof_idx,
pre_adjustments,
post_adjustments,
requested_split_adjusted_columns):
raise NotImplementedError('collect_split_adjustments')
def get_adjustments_for_sid(self,
group,
dates,
requested_qtr_data,
last_per_qtr,
sid_to_idx,
columns,
col_to_all_adjustments,
split_adjusted_asof_idx=None,
split_adjusted_cols_for_group=None):
"""
Collects both overwrites and adjustments for a particular sid.
Parameters
----------
split_adjusted_asof_idx : int
The integer index of the date on which the data was split-adjusted.
split_adjusted_cols_for_group : list of str
The names of requested columns that should also be split-adjusted.
"""
all_adjustments_for_sid = {}
sid = int(group.name)
self.collect_overwrites_for_sid(group,
dates,
requested_qtr_data,
last_per_qtr,
sid_to_idx[sid],
columns,
all_adjustments_for_sid,
sid)
(pre_adjustments,
post_adjustments) = self.retrieve_split_adjustment_data_for_sid(
dates, sid, split_adjusted_asof_idx
)
sid_estimates = self.estimates[
self.estimates[SID_FIELD_NAME] == sid
]
# We might not have any overwrites but still have
# adjustments, and we will need to manually add columns if
# that is the case.
for col_name in split_adjusted_cols_for_group:
if col_name not in all_adjustments_for_sid:
all_adjustments_for_sid[col_name] = {}
self.collect_split_adjustments(
all_adjustments_for_sid,
requested_qtr_data,
dates,
sid,
sid_to_idx[sid],
sid_estimates,
split_adjusted_asof_idx,
pre_adjustments,
post_adjustments,
split_adjusted_cols_for_group
)
self.merge_into_adjustments_for_all_sids(
all_adjustments_for_sid, col_to_all_adjustments
)
def get_adjustments(self,
zero_qtr_data,
requested_qtr_data,
last_per_qtr,
dates,
assets,
columns,
**kwargs):
"""
Calculates both split adjustments and overwrites for all sids.
"""
split_adjusted_cols_for_group = [
self.name_map[col.name]
for col in columns
if self.name_map[col.name] in self._split_adjusted_column_names
]
        # Compute the index in `dates` as-of which the data was
        # split-adjusted.
split_adjusted_asof_idx = self.get_split_adjusted_asof_idx(
dates
)
return super(SplitAdjustedEstimatesLoader, self).get_adjustments(
zero_qtr_data,
requested_qtr_data,
last_per_qtr,
dates,
assets,
columns,
split_adjusted_cols_for_group=split_adjusted_cols_for_group,
split_adjusted_asof_idx=split_adjusted_asof_idx
)
def determine_end_idx_for_adjustment(self,
adjustment_ts,
dates,
upper_bound,
requested_quarter,
sid_estimates):
"""
Determines the date until which the adjustment at the given date
index should be applied for the given quarter.
Parameters
----------
adjustment_ts : pd.Timestamp
The timestamp at which the adjustment occurs.
dates : pd.DatetimeIndex
The calendar dates over which the Pipeline is being computed.
upper_bound : int
The index of the upper bound in the calendar dates. This is the
            index until which the adjustment will be applied unless there is
information for the requested quarter that comes in on or before
that date.
requested_quarter : float
The quarter for which we are determining how the adjustment
should be applied.
sid_estimates : pd.DataFrame
The DataFrame of estimates data for the sid for which we're
applying the given adjustment.
Returns
-------
end_idx : int
The last index to which the adjustment should be applied for the
given quarter/sid.
"""
end_idx = upper_bound
# Find the next newest kd that happens on or after
# the date of this adjustment
newest_kd_for_qtr = sid_estimates[
(sid_estimates[NORMALIZED_QUARTERS] == requested_quarter) &
(sid_estimates[TS_FIELD_NAME] >= adjustment_ts)
][TS_FIELD_NAME].min()
if pd.notnull(newest_kd_for_qtr):
newest_kd_idx = dates.searchsorted(
newest_kd_for_qtr
)
# We have fresh information that comes in
# before the end of the overwrite and
# presumably is already split-adjusted to the
# current split. We should stop applying the
# adjustment the day before this new
# information comes in.
if newest_kd_idx <= upper_bound:
end_idx = newest_kd_idx - 1
return end_idx
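    # Worked example (hypothetical indexes): for an adjustment with
    # upper_bound=10, if the first estimate for the requested quarter with a
    # knowledge date on or after the adjustment's timestamp arrives at index
    # 8, that new data is assumed to already reflect the split, so the
    # adjustment is applied only through end_idx=7.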
def collect_pre_split_asof_date_adjustments(
self,
split_adjusted_asof_date_idx,
sid_idx,
pre_adjustments,
requested_split_adjusted_columns
):
"""
Collect split adjustments that occur before the
split-adjusted-asof-date. All those adjustments must first be
UN-applied at the first date index and then re-applied on the
appropriate dates in order to match point in time share pricing data.
Parameters
----------
split_adjusted_asof_date_idx : int
The index in the calendar dates as-of which all data was
split-adjusted.
sid_idx : int
The index of the sid for which adjustments should be collected in
the adjusted array.
pre_adjustments : tuple(list(float), list(int))
            The adjustment values and their indexes in `dates` for
            adjustments that happened on or before the split-asof-date.
requested_split_adjusted_columns : list of str
The requested split adjusted columns.
Returns
-------
col_to_split_adjustments : dict[str -> dict[int -> list of Adjustment]]
The adjustments for this sid that occurred on or before the
split-asof-date.
"""
col_to_split_adjustments = {}
if len(pre_adjustments[0]):
adjustment_values, date_indexes = pre_adjustments
for column_name in requested_split_adjusted_columns:
col_to_split_adjustments[column_name] = {}
# We need to undo all adjustments that happen before the
# split_asof_date here by reversing the split ratio.
col_to_split_adjustments[column_name][0] = [Float64Multiply(
0,
split_adjusted_asof_date_idx,
sid_idx,
sid_idx,
                    1 / past_adjustment
                ) for past_adjustment in adjustment_values]
for adjustment, date_index in zip(adjustment_values,
date_indexes):
adj = Float64Multiply(
0,
split_adjusted_asof_date_idx,
sid_idx,
sid_idx,
adjustment
)
add_new_adjustments(col_to_split_adjustments,
[adj],
column_name,
date_index)
return col_to_split_adjustments
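    # Illustrative sketch (hypothetical numbers): a 2-for-1 split (ratio
    # 0.5) known at date index 3, with split_adjusted_asof_date_idx=5,
    # yields for each requested column:
    #
    #     {0: [Float64Multiply(0, 5, sid_idx, sid_idx, 1 / 0.5)],  # un-apply
    #      3: [Float64Multiply(0, 5, sid_idx, sid_idx, 0.5)]}      # re-apply
    #
    # so the data is point-in-time correct both before and after the split
    # becomes known.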
def collect_post_asof_split_adjustments(self,
post_adjustments,
requested_qtr_data,
sid,
sid_idx,
sid_estimates,
requested_split_adjusted_columns):
"""
Collect split adjustments that occur after the
split-adjusted-asof-date. Each adjustment needs to be applied to all
dates on which knowledge for the requested quarter was older than the
date of the adjustment.
Parameters
----------
post_adjustments : tuple(list(float), list(int), pd.DatetimeIndex)
The adjustment values, indexes in `dates`, and timestamps for
adjustments that happened after the split-asof-date.
requested_qtr_data : pd.DataFrame
The requested quarter data for each calendar date per sid.
sid : int
The sid for which adjustments need to be collected.
sid_idx : int
The index of `sid` in the adjusted array.
sid_estimates : pd.DataFrame
The raw estimates data for this sid.
requested_split_adjusted_columns : list of str
The requested split adjusted columns.
Returns
-------
col_to_split_adjustments : dict[str -> dict[int -> list of Adjustment]]
The adjustments for this sid that occurred after the
split-asof-date.
"""
col_to_split_adjustments = {}
if post_adjustments:
            # Get an integer-indexed timeline of which quarter was requested
            # on each date for this sid.
requested_qtr_timeline = requested_qtr_data[
SHIFTED_NORMALIZED_QTRS
][sid].reset_index()
requested_qtr_timeline = requested_qtr_timeline[
requested_qtr_timeline[sid].notnull()
]
            # Split the timeline into ranges of contiguous dates over which
            # the same quarter was requested.
qtr_ranges_idxs = np.split(
requested_qtr_timeline.index,
np.where(np.diff(requested_qtr_timeline[sid]) != 0)[0] + 1
)
requested_quarters_per_range = [requested_qtr_timeline[sid][r[0]]
for r in qtr_ranges_idxs]
# Try to apply each adjustment to each quarter range.
for i, qtr_range in enumerate(qtr_ranges_idxs):
for adjustment, date_index, timestamp in zip(
*post_adjustments
):
# In the default case, apply through the end of the quarter
upper_bound = qtr_range[-1]
# Find the smallest KD in estimates that is on or after the
# date of the given adjustment. Apply the given adjustment
# until that KD.
end_idx = self.determine_end_idx_for_adjustment(
timestamp,
requested_qtr_data.index,
upper_bound,
requested_quarters_per_range[i],
sid_estimates
)
# In the default case, apply adjustment on the first day of
# the quarter.
start_idx = qtr_range[0]
# If the adjustment happens during this quarter, apply the
# adjustment on the day it happens.
if date_index > start_idx:
start_idx = date_index
# We only want to apply the adjustment if we have any stale
# data to apply it to.
if qtr_range[0] <= end_idx:
for column_name in requested_split_adjusted_columns:
if column_name not in col_to_split_adjustments:
col_to_split_adjustments[column_name] = {}
adj = Float64Multiply(
# Always apply from first day of qtr
qtr_range[0],
end_idx,
sid_idx,
sid_idx,
adjustment
)
add_new_adjustments(
col_to_split_adjustments,
[adj],
column_name,
start_idx
)
return col_to_split_adjustments
def retrieve_split_adjustment_data_for_sid(self,
dates,
sid,
split_adjusted_asof_idx):
"""
dates : pd.DatetimeIndex
The calendar dates.
sid : int
The sid for which we want to retrieve adjustments.
split_adjusted_asof_idx : int
The index in `dates` as-of which the data is split adjusted.
Returns
-------
        pre_adjustments : tuple(list(float), list(int))
            The adjustment values and their indexes in `dates` for
            adjustments that happened on or before the split-asof-date.
post_adjustments : tuple(list(float), list(int), pd.DatetimeIndex)
The adjustment values, indexes in `dates`, and timestamps for
adjustments that happened after the split-asof-date.
"""
adjustments = self._split_adjustments.get_adjustments_for_sid(
'splits', sid
)
        # `sorted` returns a new list rather than sorting in place, so the
        # result must be assigned back.
        adjustments = sorted(adjustments, key=lambda adj: adj[0])
# Get rid of any adjustments that happen outside of our date index.
adjustments = list(filter(lambda x: dates[0] <= x[0] <= dates[-1],
adjustments))
adjustment_values = np.array([adj[1] for adj in adjustments])
timestamps = pd.DatetimeIndex([adj[0] for adj in adjustments])
# We need the first date on which we would have known about each
# adjustment.
date_indexes = dates.searchsorted(timestamps)
pre_adjustment_idxs = np.where(
date_indexes <= split_adjusted_asof_idx
)[0]
last_adjustment_split_asof_idx = -1
if len(pre_adjustment_idxs):
last_adjustment_split_asof_idx = pre_adjustment_idxs.max()
pre_adjustments = (
adjustment_values[:last_adjustment_split_asof_idx + 1],
date_indexes[:last_adjustment_split_asof_idx + 1]
)
post_adjustments = (
adjustment_values[last_adjustment_split_asof_idx + 1:],
date_indexes[last_adjustment_split_asof_idx + 1:],
timestamps[last_adjustment_split_asof_idx + 1:]
)
return pre_adjustments, post_adjustments
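    # For example (hypothetical data): with splits known at date indexes
    # [2, 9] and split_adjusted_asof_idx=5, the index-2 split lands in
    # ``pre_adjustments`` (values and date indexes only) while the index-9
    # split lands in ``post_adjustments`` (values, date indexes, and
    # timestamps).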
def _collect_adjustments(self,
requested_qtr_data,
sid,
sid_idx,
sid_estimates,
split_adjusted_asof_idx,
pre_adjustments,
post_adjustments,
requested_split_adjusted_columns):
pre_adjustments_dict = self.collect_pre_split_asof_date_adjustments(
split_adjusted_asof_idx,
sid_idx,
pre_adjustments,
requested_split_adjusted_columns
)
post_adjustments_dict = self.collect_post_asof_split_adjustments(
post_adjustments,
requested_qtr_data,
sid,
sid_idx,
sid_estimates,
requested_split_adjusted_columns
)
return pre_adjustments_dict, post_adjustments_dict
def merge_split_adjustments_with_overwrites(
self,
pre,
post,
overwrites,
requested_split_adjusted_columns
):
"""
Merge split adjustments with the dict containing overwrites.
Parameters
----------
pre : dict[str -> dict[int -> list]]
The adjustments that occur before the split-adjusted-asof-date.
post : dict[str -> dict[int -> list]]
The adjustments that occur after the split-adjusted-asof-date.
overwrites : dict[str -> dict[int -> list]]
The overwrites across all time. Adjustments will be merged into
this dictionary.
requested_split_adjusted_columns : list of str
List of names of split adjusted columns that are being requested.
"""
for column_name in requested_split_adjusted_columns:
# We can do a merge here because the timestamps in 'pre' and
# 'post' are guaranteed to not overlap.
if pre:
# Either empty or contains all columns.
for ts in pre[column_name]:
add_new_adjustments(
overwrites,
pre[column_name][ts],
column_name,
ts
)
if post:
# Either empty or contains all columns.
for ts in post[column_name]:
add_new_adjustments(
overwrites,
post[column_name][ts],
column_name,
ts
)
class PreviousSplitAdjustedEarningsEstimatesLoader(
SplitAdjustedEstimatesLoader, PreviousEarningsEstimatesLoader
):
def collect_split_adjustments(self,
adjustments_for_sid,
requested_qtr_data,
dates,
sid,
sid_idx,
sid_estimates,
split_adjusted_asof_idx,
pre_adjustments,
post_adjustments,
requested_split_adjusted_columns):
"""
Collect split adjustments for previous quarters and apply them to the
given dictionary of splits for the given sid. Since overwrites just
replace all estimates before the new quarter with NaN, we don't need to
worry about re-applying split adjustments.
Parameters
----------
adjustments_for_sid : dict[str -> dict[int -> list]]
The dictionary of adjustments to which splits need to be added.
Initially it contains only overwrites.
requested_qtr_data : pd.DataFrame
The requested quarter data for each calendar date per sid.
dates : pd.DatetimeIndex
The calendar dates for which estimates data is requested.
sid : int
The sid for which adjustments need to be collected.
sid_idx : int
The index of `sid` in the adjusted array.
sid_estimates : pd.DataFrame
The raw estimates data for the given sid.
split_adjusted_asof_idx : int
The index in `dates` as-of which the data is split adjusted.
        pre_adjustments : tuple(list(float), list(int))
            The adjustment values and their indexes in `dates` for
            adjustments that happened on or before the split-asof-date.
post_adjustments : tuple(list(float), list(int), pd.DatetimeIndex)
The adjustment values, indexes in `dates`, and timestamps for
adjustments that happened after the split-asof-date.
requested_split_adjusted_columns : list of str
List of requested split adjusted column names.
"""
(pre_adjustments_dict,
post_adjustments_dict) = self._collect_adjustments(
requested_qtr_data,
sid,
sid_idx,
sid_estimates,
split_adjusted_asof_idx,
pre_adjustments,
post_adjustments,
requested_split_adjusted_columns
)
self.merge_split_adjustments_with_overwrites(
pre_adjustments_dict,
post_adjustments_dict,
adjustments_for_sid,
requested_split_adjusted_columns
)
class NextSplitAdjustedEarningsEstimatesLoader(
SplitAdjustedEstimatesLoader, NextEarningsEstimatesLoader
):
def collect_split_adjustments(self,
adjustments_for_sid,
requested_qtr_data,
dates,
sid,
sid_idx,
sid_estimates,
split_adjusted_asof_idx,
pre_adjustments,
post_adjustments,
requested_split_adjusted_columns):
"""
Collect split adjustments for future quarters. Re-apply adjustments
that would be overwritten by overwrites. Merge split adjustments with
overwrites into the given dictionary of splits for the given sid.
Parameters
----------
adjustments_for_sid : dict[str -> dict[int -> list]]
The dictionary of adjustments to which splits need to be added.
Initially it contains only overwrites.
requested_qtr_data : pd.DataFrame
The requested quarter data for each calendar date per sid.
dates : pd.DatetimeIndex
The calendar dates for which estimates data is requested.
sid : int
The sid for which adjustments need to be collected.
sid_idx : int
The index of `sid` in the adjusted array.
sid_estimates : pd.DataFrame
The raw estimates data for the given sid.
split_adjusted_asof_idx : int
The index in `dates` as-of which the data is split adjusted.
        pre_adjustments : tuple(list(float), list(int))
            The adjustment values and their indexes in `dates` for
            adjustments that happened on or before the split-asof-date.
post_adjustments : tuple(list(float), list(int), pd.DatetimeIndex)
The adjustment values, indexes in `dates`, and timestamps for
adjustments that happened after the split-asof-date.
requested_split_adjusted_columns : list of str
List of requested split adjusted column names.
"""
(pre_adjustments_dict,
post_adjustments_dict) = self._collect_adjustments(
requested_qtr_data,
sid,
sid_idx,
sid_estimates,
split_adjusted_asof_idx,
pre_adjustments,
post_adjustments,
requested_split_adjusted_columns,
)
for column_name in requested_split_adjusted_columns:
for overwrite_ts in adjustments_for_sid[column_name]:
# We need to cumulatively re-apply all adjustments up to the
# split-adjusted-asof-date. We might not have any
# pre-adjustments, so we should check for that.
                if (overwrite_ts <= split_adjusted_asof_idx
                        and pre_adjustments_dict):
for split_ts in pre_adjustments_dict[column_name]:
# The split has to have occurred during the span of
# the overwrite.
if split_ts < overwrite_ts:
# Create new adjustments here so that we can
# re-apply all applicable adjustments to ONLY
# the dates being overwritten.
adjustments_for_sid[
column_name
][overwrite_ts].extend([
Float64Multiply(
0,
overwrite_ts - 1,
sid_idx,
sid_idx,
adjustment.value
)
for adjustment
in pre_adjustments_dict[
column_name
][split_ts]
])
# After the split-adjusted-asof-date, we need to re-apply all
# adjustments that occur after that date and within the
# bounds of the overwrite. They need to be applied starting
# from the first date and until an end date. The end date is
# the date of the newest information we get about
# `requested_quarter` that is >= `split_ts`, or if there is no
# new knowledge before `overwrite_ts`, then it is the date
# before `overwrite_ts`.
else:
# Overwrites happen at the first index of a new quarter,
# so determine here which quarter that is.
requested_quarter = requested_qtr_data[
SHIFTED_NORMALIZED_QTRS, sid
].iloc[overwrite_ts]
for adjustment_value, date_index, timestamp in zip(
*post_adjustments
):
if split_adjusted_asof_idx < date_index < overwrite_ts:
# Assume the entire overwrite contains stale data
upper_bound = overwrite_ts - 1
end_idx = self.determine_end_idx_for_adjustment(
timestamp,
dates,
upper_bound,
requested_quarter,
sid_estimates
)
adjustments_for_sid[
column_name
][overwrite_ts].append(
Float64Multiply(
0,
end_idx,
sid_idx,
sid_idx,
adjustment_value
)
)
self.merge_split_adjustments_with_overwrites(
pre_adjustments_dict,
post_adjustments_dict,
adjustments_for_sid,
requested_split_adjusted_columns
        )
import numpy as np
import pandas as pd
from six import viewvalues
from toolz import groupby, merge
from .base import PipelineLoader
from zipline.pipeline.common import (
EVENT_DATE_FIELD_NAME,
SID_FIELD_NAME,
TS_FIELD_NAME,
)
from zipline.pipeline.loaders.frame import DataFrameLoader
from zipline.pipeline.loaders.utils import (
next_event_indexer,
previous_event_indexer,
)
def required_event_fields(next_value_columns, previous_value_columns):
"""
Compute the set of resource columns required to serve
``next_value_columns`` and ``previous_value_columns``.
"""
# These metadata columns are used to align event indexers.
return {
TS_FIELD_NAME,
SID_FIELD_NAME,
EVENT_DATE_FIELD_NAME,
}.union(
# We also expect any of the field names that our loadable columns
# are mapped to.
viewvalues(next_value_columns),
viewvalues(previous_value_columns),
)
def validate_column_specs(events, next_value_columns, previous_value_columns):
"""
Verify that the columns of ``events`` can be used by an EventsLoader to
serve the BoundColumns described by ``next_value_columns`` and
``previous_value_columns``.
"""
required = required_event_fields(next_value_columns,
previous_value_columns)
received = set(events.columns)
missing = required - received
if missing:
raise ValueError(
"EventsLoader missing required columns {missing}.\n"
"Got Columns: {received}\n"
"Expected Columns: {required}".format(
missing=sorted(missing),
received=sorted(received),
required=sorted(required),
)
)
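# A sketch of a valid input frame (hypothetical values): the metadata
# columns plus whatever raw fields the value columns map to, e.g.
#
#     events = pd.DataFrame({
#         'sid': [1, 1],
#         'timestamp': pd.to_datetime(['2014-01-05', '2014-01-10']),
#         'event_date': pd.to_datetime(['2014-01-15', '2014-04-15']),
#         'value': [1.0, 2.0],
#     })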
class EventsLoader(PipelineLoader):
"""
Base class for PipelineLoaders that supports loading the next and previous
value of an event field.
Does not currently support adjustments.
Parameters
----------
events : pd.DataFrame
A DataFrame representing events (e.g. share buybacks or
earnings announcements) associated with particular companies.
``events`` must contain at least three columns::
sid : int64
The asset id associated with each event.
event_date : datetime64[ns]
The date on which the event occurred.
timestamp : datetime64[ns]
The date on which we learned about the event.
next_value_columns : dict[BoundColumn -> str]
Map from dataset columns to raw field names that should be used when
searching for a next event value.
previous_value_columns : dict[BoundColumn -> str]
Map from dataset columns to raw field names that should be used when
searching for a previous event value.
"""
def __init__(self,
events,
next_value_columns,
previous_value_columns):
validate_column_specs(
events,
next_value_columns,
previous_value_columns,
)
events = events[events[EVENT_DATE_FIELD_NAME].notnull()]
# We always work with entries from ``events`` directly as numpy arrays,
# so we coerce from a frame to a dict of arrays here.
self.events = {
name: np.asarray(series)
for name, series in (
events.sort_values(EVENT_DATE_FIELD_NAME).iteritems()
)
}
# Columns to load with self.load_next_events.
self.next_value_columns = next_value_columns
# Columns to load with self.load_previous_events.
self.previous_value_columns = previous_value_columns
def split_next_and_previous_event_columns(self, requested_columns):
"""
Split requested columns into columns that should load the next known
value and columns that should load the previous known value.
Parameters
----------
requested_columns : iterable[BoundColumn]
Returns
-------
next_cols, previous_cols : iterable[BoundColumn], iterable[BoundColumn]
            ``requested_columns``, partitioned into sub-sequences based on
            whether the column should produce values from the next event or
            the previous event.
"""
def next_or_previous(c):
if c in self.next_value_columns:
return 'next'
elif c in self.previous_value_columns:
return 'previous'
raise ValueError(
"{c} not found in next_value_columns "
"or previous_value_columns".format(c=c)
)
groups = groupby(next_or_previous, requested_columns)
return groups.get('next', ()), groups.get('previous', ())
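    # E.g. (hypothetical columns): with next_value_columns={a: 'value'} and
    # previous_value_columns={b: 'value'},
    #
    #     self.split_next_and_previous_event_columns([a, b])  # -> ([a], [b])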
def next_event_indexer(self, dates, sids):
return next_event_indexer(
dates,
sids,
self.events[EVENT_DATE_FIELD_NAME],
self.events[TS_FIELD_NAME],
self.events[SID_FIELD_NAME],
)
def previous_event_indexer(self, dates, sids):
return previous_event_indexer(
dates,
sids,
self.events[EVENT_DATE_FIELD_NAME],
self.events[TS_FIELD_NAME],
self.events[SID_FIELD_NAME],
)
def load_next_events(self, columns, dates, sids, mask):
if not columns:
return {}
return self._load_events(
name_map=self.next_value_columns,
indexer=self.next_event_indexer(dates, sids),
columns=columns,
dates=dates,
sids=sids,
mask=mask,
)
def load_previous_events(self, columns, dates, sids, mask):
if not columns:
return {}
return self._load_events(
name_map=self.previous_value_columns,
indexer=self.previous_event_indexer(dates, sids),
columns=columns,
dates=dates,
sids=sids,
mask=mask,
)
def _load_events(self, name_map, indexer, columns, dates, sids, mask):
def to_frame(array):
return pd.DataFrame(array, index=dates, columns=sids)
assert indexer.shape == (len(dates), len(sids))
out = {}
for c in columns:
# Array holding the value for column `c` for every event we have.
col_array = self.events[name_map[c]]
if not len(col_array):
                # We don't have **any** events, so return col.missing_value
                # every day for every sid. We have to special-case empty
                # events because in the normal branch we depend on being
                # able to index with -1 for missing values, which fails if
                # there are no events at all.
raw = np.full(
(len(dates), len(sids)), c.missing_value, dtype=c.dtype
)
else:
# Slot event values into sid/date locations using `indexer`.
# This produces a 2D array of the same shape as `indexer`,
                # which must be (len(dates), len(sids)).
raw = col_array[indexer]
# indexer will be -1 for locations where we don't have a known
# value. Overwrite those locations with c.missing_value.
raw[indexer < 0] = c.missing_value
# Delegate the actual array formatting logic to a DataFrameLoader.
loader = DataFrameLoader(c, to_frame(raw), adjustments=None)
out[c] = loader.load_adjusted_array([c], dates, sids, mask)[c]
return out
def load_adjusted_array(self, columns, dates, sids, mask):
n, p = self.split_next_and_previous_event_columns(columns)
return merge(
self.load_next_events(n, dates, sids, mask),
self.load_previous_events(p, dates, sids, mask),
        )
from __future__ import division, absolute_import
from abc import ABCMeta, abstractproperty
from collections import namedtuple
from functools import partial
from itertools import count
import warnings
from weakref import WeakKeyDictionary
import blaze as bz
from datashape import (
Date,
DateTime,
Option,
String,
isrecord,
isscalar,
integral,
)
import numpy as np
from odo import odo
import pandas as pd
from six import with_metaclass, PY2, itervalues, iteritems
from toolz import (
complement,
compose,
first,
flip,
groupby,
memoize,
merge,
)
import toolz.curried.operator as op
from toolz.curried.operator import getitem
from zipline.pipeline.common import (
AD_FIELD_NAME,
SID_FIELD_NAME,
TS_FIELD_NAME
)
from zipline.pipeline.data.dataset import DataSet, Column
from zipline.pipeline.loaders.utils import (
check_data_query_args,
normalize_data_query_bounds,
)
from zipline.pipeline.sentinels import NotSpecified
from zipline.lib.adjusted_array import can_represent_dtype
from zipline.utils.input_validation import (
expect_element,
ensure_timezone,
optionally,
)
from zipline.utils.pool import SequentialPool
from zipline.utils.preprocess import preprocess
from ._core import ( # noqa
adjusted_arrays_from_rows_with_assets,
adjusted_arrays_from_rows_without_assets,
baseline_arrays_from_rows_with_assets, # reexport
baseline_arrays_from_rows_without_assets, # reexport
getname,
)
valid_deltas_node_types = (
bz.expr.Field,
bz.expr.ReLabel,
bz.expr.Symbol,
)
traversable_nodes = (
bz.expr.Field,
bz.expr.Label,
)
is_invalid_deltas_node = complement(flip(isinstance, valid_deltas_node_types))
get__name__ = op.attrgetter('__name__')
class InvalidField(with_metaclass(ABCMeta)):
"""A field that raises an exception indicating that the
field was invalid.
Parameters
----------
field : str
The name of the field.
type_ : dshape
The shape of the field.
"""
@abstractproperty
def error_format(self): # pragma: no cover
raise NotImplementedError('error_format')
def __init__(self, field, type_):
self._field = field
self._type = type_
def __get__(self, instance, owner):
raise AttributeError(
self.error_format.format(field=self._field, type_=self._type),
)
class NonNumpyField(InvalidField):
error_format = (
"field '{field}' was a non numpy compatible type: '{type_}'"
)
class NonPipelineField(InvalidField):
error_format = (
"field '{field}' was a non Pipeline API compatible type: '{type_}'"
)
_new_names = ('BlazeDataSet_%d' % n for n in count())
def datashape_type_to_numpy(type_):
"""
Given a datashape type, return the associated numpy type. Maps
datashape's DateTime type to numpy's `datetime64[ns]` dtype, since the
numpy datetime returned by datashape isn't supported by pipeline.
Parameters
----------
type_: datashape.coretypes.Type
The datashape type.
Returns
-------
    type_ : np.dtype
The numpy dtype.
"""
if isinstance(type_, Option):
type_ = type_.ty
if isinstance(type_, DateTime):
return np.dtype('datetime64[ns]')
if isinstance(type_, String):
return np.dtype(object)
if type_ in integral:
return np.dtype('int64')
else:
return type_.to_numpy_dtype()
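# For example:
#
#     datashape_type_to_numpy(DateTime())        # -> np.dtype('datetime64[ns]')
#     datashape_type_to_numpy(Option(String()))  # -> np.dtype(object)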
@memoize
def new_dataset(expr, missing_values):
"""
Creates or returns a dataset from a blaze expression.
Parameters
----------
expr : Expr
The blaze expression representing the values.
    missing_values : frozenset of (name, value) pairs
        Association pairs of column name and missing_value for that column.
This needs to be a frozenset rather than a dict or tuple of tuples
because we want a collection that's unordered but still hashable.
Returns
-------
ds : type
A new dataset type.
Notes
-----
    This function is memoized. Repeated calls with the same inputs will
    return the same type.
"""
missing_values = dict(missing_values)
class_dict = {'ndim': 2 if SID_FIELD_NAME in expr.fields else 1}
for name, type_ in expr.dshape.measure.fields:
        # Don't generate a column for sid or timestamp, since they're
        # implicitly the labels of the arrays that will be passed to
        # pipeline Terms.
if name in (SID_FIELD_NAME, TS_FIELD_NAME):
continue
type_ = datashape_type_to_numpy(type_)
if can_represent_dtype(type_):
col = Column(
type_,
missing_values.get(name, NotSpecified),
)
else:
col = NonPipelineField(name, type_)
class_dict[name] = col
name = expr._name
if name is None:
name = next(_new_names)
# unicode is a name error in py3 but the branch is only hit
# when we are in python 2.
if PY2 and isinstance(name, unicode): # pragma: no cover # noqa
name = name.encode('utf-8')
return type(name, (DataSet,), class_dict)
def _check_resources(name, expr, resources):
"""Validate that the expression and resources passed match up.
Parameters
----------
name : str
The name of the argument we are checking.
expr : Expr
The potentially bound expr.
resources
The explicitly passed resources to compute expr.
Raises
------
ValueError
If the resources do not match for an expression.
"""
if expr is None:
return
bound = expr._resources()
if not bound and resources is None:
raise ValueError('no resources provided to compute %s' % name)
if bound and resources:
raise ValueError(
'explicit and implicit resources provided to compute %s' % name,
)
def _check_datetime_field(name, measure):
"""Check that a field is a datetime inside some measure.
Parameters
----------
name : str
The name of the field to check.
measure : Record
The record to check the field of.
Raises
------
TypeError
If the field is not a datetime inside ``measure``.
"""
if not isinstance(measure[name], (Date, DateTime)):
raise TypeError(
"'{name}' field must be a '{dt}', not: '{dshape}'".format(
name=name,
dt=DateTime(),
dshape=measure[name],
),
)
class NoMetaDataWarning(UserWarning):
"""Warning used to signal that no deltas or checkpoints could be found and
none were provided.
Parameters
----------
expr : Expr
The expression that was searched.
field : {'deltas', 'checkpoints'}
The field that was looked up.
"""
def __init__(self, expr, field):
self._expr = expr
self._field = field
def __str__(self):
return 'No %s could be inferred from expr: %s' % (
self._field,
self._expr,
)
no_metadata_rules = frozenset({'warn', 'raise', 'ignore'})
def _get_metadata(field, expr, metadata_expr, no_metadata_rule):
"""Find the correct metadata expression for the expression.
Parameters
----------
field : {'deltas', 'checkpoints'}
The kind of metadata expr to lookup.
expr : Expr
The baseline expression.
metadata_expr : Expr, 'auto', or None
The metadata argument. If this is 'auto', then the metadata table will
be searched for by walking up the expression tree. If this cannot be
reflected, then an action will be taken based on the
``no_metadata_rule``.
no_metadata_rule : {'warn', 'raise', 'ignore'}
How to handle the case where the metadata_expr='auto' but no expr
could be found.
Returns
-------
metadata : Expr or None
The deltas or metadata table to use.
"""
if isinstance(metadata_expr, bz.Expr) or metadata_expr is None:
return metadata_expr
try:
return expr._child['_'.join(((expr._name or ''), field))]
except (ValueError, AttributeError):
if no_metadata_rule == 'raise':
raise ValueError(
"no %s table could be reflected for %s" % (field, expr)
)
elif no_metadata_rule == 'warn':
warnings.warn(NoMetaDataWarning(expr, field), stacklevel=4)
return None
def _ad_as_ts(expr):
"""Duplicate the asof_date column as the timestamp column.
Parameters
----------
expr : Expr or None
The expression to change the columns of.
Returns
-------
transformed : Expr or None
The transformed expression or None if ``expr`` is None.
"""
return (
None
if expr is None else
bz.transform(expr, **{TS_FIELD_NAME: expr[AD_FIELD_NAME]})
)
def _ensure_timestamp_field(dataset_expr, deltas, checkpoints):
"""Verify that the baseline and deltas expressions have a timestamp field.
If there is not a ``TS_FIELD_NAME`` on either of the expressions, it will
be copied from the ``AD_FIELD_NAME``. If one is provided, then we will
verify that it is the correct dshape.
Parameters
----------
dataset_expr : Expr
The baseline expression.
deltas : Expr or None
The deltas expression if any was provided.
checkpoints : Expr or None
The checkpoints expression if any was provided.
Returns
-------
dataset_expr, deltas : Expr
The new baseline and deltas expressions to use.
"""
measure = dataset_expr.dshape.measure
if TS_FIELD_NAME not in measure.names:
dataset_expr = bz.transform(
dataset_expr,
**{TS_FIELD_NAME: dataset_expr[AD_FIELD_NAME]}
)
deltas = _ad_as_ts(deltas)
checkpoints = _ad_as_ts(checkpoints)
else:
_check_datetime_field(TS_FIELD_NAME, measure)
return dataset_expr, deltas, checkpoints
@expect_element(
no_deltas_rule=no_metadata_rules,
no_checkpoints_rule=no_metadata_rules,
)
def from_blaze(expr,
deltas='auto',
checkpoints='auto',
loader=None,
resources=None,
odo_kwargs=None,
missing_values=None,
no_deltas_rule='warn',
no_checkpoints_rule='warn'):
"""Create a Pipeline API object from a blaze expression.
Parameters
----------
expr : Expr
The blaze expression to use.
deltas : Expr, 'auto' or None, optional
The expression to use for the point in time adjustments.
If the string 'auto' is passed, a deltas expr will be looked up
by stepping up the expression tree and looking for another field
with the name of ``expr._name`` + '_deltas'. If None is passed, no
deltas will be used.
checkpoints : Expr, 'auto' or None, optional
The expression to use for the forward fill checkpoints.
If the string 'auto' is passed, a checkpoints expr will be looked up
by stepping up the expression tree and looking for another field
with the name of ``expr._name`` + '_checkpoints'. If None is passed,
no checkpoints will be used.
loader : BlazeLoader, optional
The blaze loader to attach this pipeline dataset to. If None is passed,
the global blaze loader is used.
resources : dict or any, optional
The data to execute the blaze expressions against. This is used as the
scope for ``bz.compute``.
odo_kwargs : dict, optional
The keyword arguments to pass to odo when evaluating the expressions.
missing_values : dict[str -> any], optional
A dict mapping column names to missing values for those columns.
Missing values are required for integral columns.
no_deltas_rule : {'warn', 'raise', 'ignore'}, optional
What should happen if ``deltas='auto'`` but no deltas can be found.
'warn' says to raise a warning but continue.
'raise' says to raise an exception if no deltas can be found.
'ignore' says take no action and proceed with no deltas.
no_checkpoints_rule : {'warn', 'raise', 'ignore'}, optional
What should happen if ``checkpoints='auto'`` but no checkpoints can be
found. 'warn' says to raise a warning but continue.
'raise' says to raise an exception if no deltas can be found.
'ignore' says take no action and proceed with no deltas.
Returns
-------
pipeline_api_obj : DataSet or BoundColumn
Either a new dataset or bound column based on the shape of the expr
passed in. If a table shaped expression is passed, this will return
a ``DataSet`` that represents the whole table. If an array-like shape
is passed, a ``BoundColumn`` on the dataset that would be constructed
from passing the parent is returned.
"""
if 'auto' in {deltas, checkpoints}:
invalid_nodes = tuple(filter(is_invalid_deltas_node, expr._subterms()))
if invalid_nodes:
raise TypeError(
'expression with auto %s may only contain (%s) nodes,'
" found: %s" % (
' or '.join(
                        # Parenthesize both conditionals: without parens,
                        # `+` binds tighter than `if`/`else`, so the first
                        # branch would swallow the second list.
                        (['deltas'] if deltas is not None else []) +
                        (['checkpoints'] if checkpoints is not None else []),
),
', '.join(map(get__name__, valid_deltas_node_types)),
', '.join(
set(map(compose(get__name__, type), invalid_nodes)),
),
),
)
deltas = _get_metadata(
'deltas',
expr,
deltas,
no_deltas_rule,
)
checkpoints = _get_metadata(
'checkpoints',
expr,
checkpoints,
no_checkpoints_rule,
)
# Check if this is a single column out of a dataset.
if bz.ndim(expr) != 1:
raise TypeError(
'expression was not tabular or array-like,'
' %s dimensions: %d' % (
'too many' if bz.ndim(expr) > 1 else 'not enough',
bz.ndim(expr),
),
)
single_column = None
if isscalar(expr.dshape.measure):
# This is a single column. Record which column we are to return
# but create the entire dataset.
single_column = rename = expr._name
field_hit = False
if not isinstance(expr, traversable_nodes):
raise TypeError(
"expression '%s' was array-like but not a simple field of"
" some larger table" % str(expr),
)
while isinstance(expr, traversable_nodes):
if isinstance(expr, bz.expr.Field):
if not field_hit:
field_hit = True
else:
break
rename = expr._name
expr = expr._child
dataset_expr = expr.relabel({rename: single_column})
else:
dataset_expr = expr
measure = dataset_expr.dshape.measure
if not isrecord(measure) or AD_FIELD_NAME not in measure.names:
raise TypeError(
"The dataset must be a collection of records with at least an"
" '{ad}' field. Fields provided: '{fields}'\nhint: maybe you need"
" to use `relabel` to change your field names".format(
ad=AD_FIELD_NAME,
fields=measure,
),
)
_check_datetime_field(AD_FIELD_NAME, measure)
dataset_expr, deltas, checkpoints = _ensure_timestamp_field(
dataset_expr,
deltas,
checkpoints,
)
if deltas is not None and (sorted(deltas.dshape.measure.fields) !=
sorted(measure.fields)):
raise TypeError(
'baseline measure != deltas measure:\n%s != %s' % (
measure,
deltas.dshape.measure,
),
)
if (checkpoints is not None and
(sorted(checkpoints.dshape.measure.fields) !=
sorted(measure.fields))):
raise TypeError(
'baseline measure != checkpoints measure:\n%s != %s' % (
measure,
checkpoints.dshape.measure,
),
)
# Ensure that we have a data resource to execute the query against.
_check_resources('expr', dataset_expr, resources)
_check_resources('deltas', deltas, resources)
_check_resources('checkpoints', checkpoints, resources)
# Create or retrieve the Pipeline API dataset.
if missing_values is None:
missing_values = {}
ds = new_dataset(dataset_expr, frozenset(missing_values.items()))
# Register our new dataset with the loader.
(loader if loader is not None else global_loader).register_dataset(
ds,
bind_expression_to_resources(dataset_expr, resources),
bind_expression_to_resources(deltas, resources)
if deltas is not None else
None,
bind_expression_to_resources(checkpoints, resources)
if checkpoints is not None else
None,
odo_kwargs=odo_kwargs,
)
if single_column is not None:
# We were passed a single column, extract and return it.
return getattr(ds, single_column)
return ds
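# A minimal usage sketch (hypothetical data; assumes ``df`` carries
# ``asof_date``, ``timestamp``, ``sid`` and ``value`` columns):
#
#     expr = bz.data(df, name='my_dataset')
#     ds = from_blaze(expr, deltas=None, checkpoints=None,
#                     missing_values={'value': -1})
#     ds.value  # a BoundColumn usable in a Pipeline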
getdataset = op.attrgetter('dataset')
_expr_data_base = namedtuple(
'ExprData', 'expr deltas checkpoints odo_kwargs'
)
class ExprData(_expr_data_base):
"""A pair of expressions and data resources. The expressions will be
computed using the resources as the starting scope.
Parameters
----------
expr : Expr
The baseline values.
deltas : Expr, optional
The deltas for the data.
checkpoints : Expr, optional
The forward fill checkpoints for the data.
odo_kwargs : dict, optional
The keyword arguments to forward to the odo calls internally.
"""
def __new__(cls,
expr,
deltas=None,
checkpoints=None,
odo_kwargs=None):
return super(ExprData, cls).__new__(
cls,
expr,
deltas,
checkpoints,
odo_kwargs or {},
)
def __repr__(self):
        # If the expressions have bound resources, rendering their repr
        # would drive computation, so we render their str instead.
cls = type(self)
return super(ExprData, cls).__repr__(cls(
str(self.expr),
str(self.deltas),
str(self.checkpoints),
self.odo_kwargs,
))
def __hash__(self):
return id(self)
def __eq__(self, other):
return self is other
class BlazeLoader(object):
"""A PipelineLoader for datasets constructed with ``from_blaze``.
Parameters
----------
dsmap : mapping, optional
An initial mapping of datasets to ``ExprData`` objects.
NOTE: Further mutations to this map will not be reflected by this
object.
data_query_time : time, optional
The time to use for the data query cutoff.
data_query_tz : tzinfo or str, optional
        The timezone to use for the data query cutoff.
pool : Pool, optional
The pool to use to run blaze queries concurrently. This object must
support ``imap_unordered``, ``apply`` and ``apply_async`` methods.
Attributes
----------
pool : Pool
The pool to use to run blaze queries concurrently. This object must
support ``imap_unordered``, ``apply`` and ``apply_async`` methods.
It is possible to change the pool after the loader has been
constructed. This allows us to set a new pool for the ``global_loader``
like: ``global_loader.pool = multiprocessing.Pool(4)``.
See Also
--------
:class:`zipline.utils.pool.SequentialPool`
:class:`multiprocessing.Pool`
"""
@preprocess(data_query_tz=optionally(ensure_timezone))
def __init__(self,
dsmap=None,
data_query_time=None,
data_query_tz=None,
pool=SequentialPool()):
check_data_query_args(data_query_time, data_query_tz)
self._data_query_time = data_query_time
self._data_query_tz = data_query_tz
# explicitly public
self.pool = pool
self._table_expressions = (dsmap or {}).copy()
@classmethod
@memoize(cache=WeakKeyDictionary())
def global_instance(cls):
return cls()
def __hash__(self):
return id(self)
def __contains__(self, column):
return column in self._table_expressions
def __getitem__(self, column):
return self._table_expressions[column]
def __iter__(self):
return iter(self._table_expressions)
def __len__(self):
return len(self._table_expressions)
def __call__(self, column):
if column in self:
return self
raise KeyError(column)
def register_dataset(self,
dataset,
expr,
deltas=None,
checkpoints=None,
odo_kwargs=None):
"""Explicitly map a datset to a collection of blaze expressions.
Parameters
----------
dataset : DataSet
The pipeline dataset to map to the given expressions.
expr : Expr
The baseline values.
deltas : Expr, optional
The deltas for the data.
checkpoints : Expr, optional
The forward fill checkpoints for the data.
odo_kwargs : dict, optional
The keyword arguments to forward to the odo calls internally.
See Also
--------
:func:`zipline.pipeline.loaders.blaze.from_blaze`
"""
expr_data = ExprData(
expr,
deltas,
checkpoints,
odo_kwargs,
)
for column in dataset.columns:
self._table_expressions[column] = expr_data
def register_column(self,
column,
expr,
deltas=None,
checkpoints=None,
odo_kwargs=None):
"""Explicitly map a single bound column to a collection of blaze
        expressions. The expressions need to have ``timestamp`` and
        ``asof_date`` columns.
Parameters
----------
column : BoundColumn
The pipeline dataset to map to the given expressions.
expr : Expr
The baseline values.
deltas : Expr, optional
The deltas for the data.
checkpoints : Expr, optional
The forward fill checkpoints for the data.
odo_kwargs : dict, optional
The keyword arguments to forward to the odo calls internally.
See Also
--------
:func:`zipline.pipeline.loaders.blaze.from_blaze`
"""
self._table_expressions[column] = ExprData(
expr,
deltas,
checkpoints,
odo_kwargs,
)
def load_adjusted_array(self, columns, dates, assets, mask):
return merge(
self.pool.imap_unordered(
partial(self._load_dataset, dates, assets, mask),
itervalues(groupby(getitem(self._table_expressions), columns)),
),
)
def _load_dataset(self, dates, assets, mask, columns):
try:
(expr_data,) = {self._table_expressions[c] for c in columns}
except ValueError:
raise AssertionError(
'all columns must share the same expression data',
)
expr, deltas, checkpoints, odo_kwargs = expr_data
have_sids = (first(columns).dataset.ndim == 2)
added_query_fields = {AD_FIELD_NAME, TS_FIELD_NAME} | (
{SID_FIELD_NAME} if have_sids else set()
)
requested_columns = set(map(getname, columns))
colnames = sorted(added_query_fields | requested_columns)
data_query_time = self._data_query_time
data_query_tz = self._data_query_tz
lower_dt, upper_dt = normalize_data_query_bounds(
dates[0],
dates[-1],
data_query_time,
data_query_tz,
)
def collect_expr(e, lower):
"""Materialize the expression as a dataframe.
Parameters
----------
e : Expr
The baseline or deltas expression.
lower : datetime
The lower time bound to query.
Returns
-------
result : pd.DataFrame
The resulting dataframe.
Notes
-----
This can return more data than needed. The in memory reindex will
handle this.
"""
predicate = e[TS_FIELD_NAME] < upper_dt
if lower is not None:
predicate &= e[TS_FIELD_NAME] >= lower
return odo(e[predicate][colnames], pd.DataFrame, **odo_kwargs)
lower, materialized_checkpoints = get_materialized_checkpoints(
checkpoints, colnames, lower_dt, odo_kwargs
)
materialized_expr_deferred = self.pool.apply_async(
collect_expr,
(expr, lower),
)
materialized_deltas = (
self.pool.apply(collect_expr, (deltas, lower))
if deltas is not None else
None
)
all_rows = pd.concat(
filter(
lambda df: df is not None, (
materialized_checkpoints,
materialized_expr_deferred.get(),
materialized_deltas,
),
),
ignore_index=True,
copy=False,
)
all_rows[TS_FIELD_NAME] = all_rows[TS_FIELD_NAME].astype(
'datetime64[ns]',
)
all_rows.sort_values([TS_FIELD_NAME, AD_FIELD_NAME], inplace=True)
if have_sids:
return adjusted_arrays_from_rows_with_assets(
dates,
data_query_time,
data_query_tz,
assets,
columns,
all_rows,
)
else:
return adjusted_arrays_from_rows_without_assets(
dates,
data_query_time,
data_query_tz,
columns,
all_rows,
)
global_loader = BlazeLoader.global_instance()
def bind_expression_to_resources(expr, resources):
"""
Bind a Blaze expression to resources.
Parameters
----------
expr : bz.Expr
The expression to which we want to bind resources.
resources : dict[bz.Symbol -> any]
Mapping from the loadable terms of ``expr`` to actual data resources.
Returns
-------
bound_expr : bz.Expr
``expr`` with bound resources.
"""
# bind the resources into the expression
if resources is None:
resources = {}
# _subs stands for substitute. It's not actually private, blaze just
# prefixes symbol-manipulation methods with underscores to prevent
# collisions with data column names.
return expr._subs({
k: bz.data(v, dshape=k.dshape) for k, v in iteritems(resources)
})
def get_materialized_checkpoints(checkpoints, colnames, lower_dt, odo_kwargs):
"""
    Computes a lower bound and a DataFrame of materialized checkpoints.
Parameters
----------
checkpoints : Expr
Bound blaze expression for a checkpoints table from which to get a
computed lower bound.
colnames : iterable of str
The names of the columns for which checkpoints should be computed.
lower_dt : pd.Timestamp
The lower date being queried for that serves as an upper bound for
checkpoints.
odo_kwargs : dict, optional
The extra keyword arguments to pass to ``odo``.
"""
if checkpoints is not None:
ts = checkpoints[TS_FIELD_NAME]
checkpoints_ts = odo(
ts[ts <= lower_dt].max(),
pd.Timestamp,
**odo_kwargs
)
if pd.isnull(checkpoints_ts):
# We don't have a checkpoint for before our start date so just
# don't constrain the lower date.
materialized_checkpoints = pd.DataFrame(columns=colnames)
lower = None
else:
materialized_checkpoints = odo(
checkpoints[ts == checkpoints_ts][colnames],
pd.DataFrame,
**odo_kwargs
)
lower = checkpoints_ts
else:
materialized_checkpoints = pd.DataFrame(columns=colnames)
lower = None # we don't have a good lower date constraint
return lower, materialized_checkpoints
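# For example (hypothetical checkpoints table): if ``lower_dt`` is
# 2014-06-01 and checkpoints exist at 2014-01-01 and 2014-04-01, the
# 2014-04-01 snapshot is materialized and returned as the new lower bound,
# letting the main query skip everything before that date.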
def ffill_query_in_range(expr,
lower,
upper,
checkpoints=None,
odo_kwargs=None,
ts_field=TS_FIELD_NAME):
"""Query a blaze expression in a given time range properly forward filling
from values that fall before the lower date.
Parameters
----------
expr : Expr
Bound blaze expression.
lower : datetime
The lower date to query for.
upper : datetime
The upper date to query for.
checkpoints : Expr, optional
Bound blaze expression for a checkpoints table from which to get a
computed lower bound.
odo_kwargs : dict, optional
The extra keyword arguments to pass to ``odo``.
ts_field : str, optional
The name of the timestamp field in the given blaze expression.
Returns
-------
raw : pd.DataFrame
        A materialized (strict, in-memory) dataframe for the data in the
        given date range. This may start before the requested start date if
        a value is needed to ffill.
"""
odo_kwargs = odo_kwargs or {}
computed_lower, materialized_checkpoints = get_materialized_checkpoints(
checkpoints,
expr.fields,
lower,
odo_kwargs,
)
pred = expr[ts_field] <= upper
if computed_lower is not None:
# only constrain the lower date if we computed a new lower date
pred &= expr[ts_field] >= computed_lower
raw = pd.concat(
(
materialized_checkpoints,
odo(
expr[pred],
pd.DataFrame,
**odo_kwargs
),
),
ignore_index=True,
)
raw.loc[:, ts_field] = raw.loc[:, ts_field].astype('datetime64[ns]')
    return raw
# --- end of zipline/pipeline/loaders/blaze/core.py ---
from datashape import istabular
from .core import (
bind_expression_to_resources,
)
from zipline.pipeline.common import (
EVENT_DATE_FIELD_NAME,
FISCAL_QUARTER_FIELD_NAME,
FISCAL_YEAR_FIELD_NAME,
SID_FIELD_NAME,
TS_FIELD_NAME,
)
from zipline.pipeline.loaders.base import PipelineLoader
from zipline.pipeline.loaders.blaze.utils import load_raw_data
from zipline.pipeline.loaders.earnings_estimates import (
NextEarningsEstimatesLoader,
PreviousEarningsEstimatesLoader,
required_estimates_fields,
metadata_columns,
PreviousSplitAdjustedEarningsEstimatesLoader,
NextSplitAdjustedEarningsEstimatesLoader)
from zipline.pipeline.loaders.utils import (
check_data_query_args,
)
from zipline.utils.input_validation import ensure_timezone, optionally
from zipline.utils.preprocess import preprocess
class BlazeEstimatesLoader(PipelineLoader):
"""An abstract pipeline loader for the estimates datasets that loads
data from a blaze expression.
Parameters
----------
expr : Expr
The expression representing the data to load.
columns : dict[str -> str]
A dict mapping BoundColumn names to the associated names in `expr`.
resources : dict, optional
Mapping from the loadable terms of ``expr`` to actual data resources.
odo_kwargs : dict, optional
Extra keyword arguments to pass to odo when executing the expression.
data_query_time : time, optional
The time to use for the data query cutoff.
data_query_tz : tzinfo or str
        The timezone to use for the data query cutoff.
checkpoints : Expr, optional
The expression representing checkpointed data to be used for faster
forward-filling of data from `expr`.
Notes
-----
The expression should have a tabular dshape of::
Dim * {{
{SID_FIELD_NAME}: int64,
{TS_FIELD_NAME}: datetime,
{FISCAL_YEAR_FIELD_NAME}: float64,
{FISCAL_QUARTER_FIELD_NAME}: float64,
{EVENT_DATE_FIELD_NAME}: datetime,
}}
And other dataset-specific fields, where each row of the table is a
record including the sid to identify the company, the timestamp where we
learned about the announcement, and the date of the event.
If the '{TS_FIELD_NAME}' field is not included it is assumed that we
start the backtest with knowledge of all announcements.
"""
__doc__ = __doc__.format(
SID_FIELD_NAME=SID_FIELD_NAME,
TS_FIELD_NAME=TS_FIELD_NAME,
FISCAL_YEAR_FIELD_NAME=FISCAL_YEAR_FIELD_NAME,
FISCAL_QUARTER_FIELD_NAME=FISCAL_QUARTER_FIELD_NAME,
EVENT_DATE_FIELD_NAME=EVENT_DATE_FIELD_NAME,
)
@preprocess(data_query_tz=optionally(ensure_timezone))
def __init__(self,
expr,
columns,
resources=None,
odo_kwargs=None,
data_query_time=None,
data_query_tz=None,
checkpoints=None):
dshape = expr.dshape
if not istabular(dshape):
raise ValueError(
'expression dshape must be tabular, got: %s' % dshape,
)
required_cols = list(
required_estimates_fields(columns)
)
self._expr = bind_expression_to_resources(
expr[required_cols],
resources,
)
self._columns = columns
self._odo_kwargs = odo_kwargs if odo_kwargs is not None else {}
check_data_query_args(data_query_time, data_query_tz)
self._data_query_time = data_query_time
self._data_query_tz = data_query_tz
self._checkpoints = checkpoints
def load_adjusted_array(self, columns, dates, assets, mask):
# Only load requested columns.
requested_column_names = [self._columns[column.name]
for column in columns]
raw = load_raw_data(
assets,
dates,
self._data_query_time,
self._data_query_tz,
self._expr[sorted(metadata_columns.union(requested_column_names))],
self._odo_kwargs,
checkpoints=self._checkpoints,
)
return self.loader(
raw,
{column.name: self._columns[column.name] for column in columns},
).load_adjusted_array(
columns,
dates,
assets,
mask,
)
class BlazeNextEstimatesLoader(BlazeEstimatesLoader):
loader = NextEarningsEstimatesLoader
class BlazePreviousEstimatesLoader(BlazeEstimatesLoader):
loader = PreviousEarningsEstimatesLoader
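# A hedged construction sketch (hypothetical resource and column names,
# assuming the resource carries the required sid/timestamp/fiscal/event-date
# fields):
#
#     import blaze as bz
#
#     expr = bz.data('estimates.csv')  # any odo-compatible resource
#     loader = BlazeNextEstimatesLoader(
#         expr,
#         columns={'estimate': 'estimate'},
#     )
#     # ``loader`` can then serve the estimates DataSet's columns when
#     # building a pipeline engine.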
class BlazeSplitAdjustedEstimatesLoader(BlazeEstimatesLoader):
def __init__(self,
expr,
columns,
split_adjustments_loader,
split_adjusted_column_names,
split_adjusted_asof,
**kwargs):
self._split_adjustments = split_adjustments_loader
self._split_adjusted_column_names = split_adjusted_column_names
self._split_adjusted_asof = split_adjusted_asof
super(BlazeSplitAdjustedEstimatesLoader, self).__init__(
expr,
columns,
**kwargs
)
def load_adjusted_array(self, columns, dates, assets, mask):
# Only load requested columns.
requested_column_names = [self._columns[column.name]
for column in columns]
        requested_split_adjusted_columns = [
column_name
for column_name in self._split_adjusted_column_names
if column_name in requested_column_names
]
raw = load_raw_data(
assets,
dates,
self._data_query_time,
self._data_query_tz,
self._expr[sorted(metadata_columns.union(requested_column_names))],
self._odo_kwargs,
checkpoints=self._checkpoints,
)
return self.loader(
raw,
{column.name: self._columns[column.name] for column in columns},
self._split_adjustments,
            requested_split_adjusted_columns,
self._split_adjusted_asof,
).load_adjusted_array(
columns,
dates,
assets,
mask,
)
class BlazeNextSplitAdjustedEstimatesLoader(BlazeSplitAdjustedEstimatesLoader):
loader = NextSplitAdjustedEarningsEstimatesLoader
class BlazePreviousSplitAdjustedEstimatesLoader(
BlazeSplitAdjustedEstimatesLoader
):
    loader = PreviousSplitAdjustedEarningsEstimatesLoader
# --- end of zipline/pipeline/loaders/blaze/estimates.py ---
from zipline.pipeline.common import SID_FIELD_NAME, TS_FIELD_NAME
from zipline.pipeline.loaders.blaze.core import ffill_query_in_range
from zipline.pipeline.loaders.utils import (
normalize_data_query_bounds,
normalize_timestamp_to_query_time,
)
def load_raw_data(assets,
dates,
data_query_time,
data_query_tz,
expr,
odo_kwargs,
checkpoints=None):
"""
Given an expression representing data to load, perform normalization and
forward-filling and return the data, materialized. Only accepts data with a
`sid` field.
Parameters
----------
    assets : pd.Int64Index
        The assets to load data for.
    dates : pd.DatetimeIndex
        The simulation dates to load data for.
    data_query_time : datetime.time
        The time used as the cutoff for new information.
    data_query_tz : tzinfo
        The timezone to normalize the dates to before comparing against
        ``data_query_time``.
    expr : Expr
        The expression representing the data to load.
    odo_kwargs : dict
        Extra keyword arguments to pass to odo when executing the expression.
    checkpoints : Expr, optional
        The expression representing the checkpointed data for ``expr``.
    Returns
    -------
    raw : pd.DataFrame
        The result of computing ``expr`` and materializing the result as a
        dataframe.
"""
lower_dt, upper_dt = normalize_data_query_bounds(
dates[0],
dates[-1],
data_query_time,
data_query_tz,
)
raw = ffill_query_in_range(
expr,
lower_dt,
upper_dt,
checkpoints=checkpoints,
odo_kwargs=odo_kwargs,
)
sids = raw[SID_FIELD_NAME]
raw.drop(
sids[~sids.isin(assets)].index,
inplace=True
)
if data_query_time is not None:
normalize_timestamp_to_query_time(
raw,
data_query_time,
data_query_tz,
inplace=True,
ts_field=TS_FIELD_NAME,
)
    return raw
# --- end of zipline/pipeline/loaders/blaze/utils.py ---
from datashape import istabular
from .core import (
bind_expression_to_resources,
)
from zipline.pipeline.common import SID_FIELD_NAME, TS_FIELD_NAME, \
EVENT_DATE_FIELD_NAME
from zipline.pipeline.loaders.base import PipelineLoader
from zipline.pipeline.loaders.blaze.utils import load_raw_data
from zipline.pipeline.loaders.events import (
EventsLoader,
required_event_fields,
)
from zipline.pipeline.loaders.utils import (
check_data_query_args,
)
from zipline.utils.input_validation import ensure_timezone, optionally
from zipline.utils.preprocess import preprocess
class BlazeEventsLoader(PipelineLoader):
"""An abstract pipeline loader for the events datasets that loads
data from a blaze expression.
Parameters
----------
expr : Expr
The expression representing the data to load.
next_value_columns : dict[BoundColumn -> raw column name]
A dict mapping 'next' BoundColumns to their column names in `expr`.
previous_value_columns : dict[BoundColumn -> raw column name]
A dict mapping 'previous' BoundColumns to their column names in `expr`.
resources : dict, optional
Mapping from the loadable terms of ``expr`` to actual data resources.
odo_kwargs : dict, optional
Extra keyword arguments to pass to odo when executing the expression.
data_query_time : time, optional
The time to use for the data query cutoff.
data_query_tz : tzinfo or str
The timezone to use for the data query cutoff.
Notes
-----
The expression should have a tabular dshape of::
Dim * {{
{SID_FIELD_NAME}: int64,
{TS_FIELD_NAME}: datetime,
{EVENT_DATE_FIELD_NAME}: datetime,
}}
And other dataset-specific fields, where each row of the table is a
record including the sid to identify the company, the timestamp where we
learned about the announcement, and the event date.
If the '{TS_FIELD_NAME}' field is not included it is assumed that we
start the backtest with knowledge of all announcements.
"""
__doc__ = __doc__.format(SID_FIELD_NAME=SID_FIELD_NAME,
TS_FIELD_NAME=TS_FIELD_NAME,
EVENT_DATE_FIELD_NAME=EVENT_DATE_FIELD_NAME)
@preprocess(data_query_tz=optionally(ensure_timezone))
def __init__(self,
expr,
next_value_columns,
previous_value_columns,
resources=None,
odo_kwargs=None,
data_query_time=None,
data_query_tz=None):
dshape = expr.dshape
if not istabular(dshape):
raise ValueError(
'expression dshape must be tabular, got: %s' % dshape,
)
required_cols = list(
required_event_fields(next_value_columns, previous_value_columns)
)
self._expr = bind_expression_to_resources(
expr[required_cols],
resources,
)
self._next_value_columns = next_value_columns
self._previous_value_columns = previous_value_columns
self._odo_kwargs = odo_kwargs if odo_kwargs is not None else {}
check_data_query_args(data_query_time, data_query_tz)
self._data_query_time = data_query_time
self._data_query_tz = data_query_tz
def load_adjusted_array(self, columns, dates, assets, mask):
raw = load_raw_data(assets,
dates,
self._data_query_time,
self._data_query_tz,
self._expr,
self._odo_kwargs)
return EventsLoader(
events=raw,
next_value_columns=self._next_value_columns,
previous_value_columns=self._previous_value_columns,
).load_adjusted_array(
columns,
dates,
assets,
mask,
        )
# --- end of zipline/pipeline/loaders/blaze/events.py ---
from abc import ABCMeta
import array
import binascii
from collections import deque, namedtuple
from functools import partial
from numbers import Integral
from operator import itemgetter, attrgetter
import struct
from logbook import Logger
import numpy as np
import pandas as pd
from pandas import isnull
from six import with_metaclass, string_types, viewkeys, iteritems
import sqlalchemy as sa
from toolz import (
compose,
concat,
concatv,
curry,
groupby,
merge,
partition_all,
sliding_window,
valmap,
)
from toolz.curried import operator as op
from zipline.errors import (
EquitiesNotFound,
FutureContractsNotFound,
MapAssetIdentifierIndexError,
MultipleSymbolsFound,
MultipleValuesFoundForField,
MultipleValuesFoundForSid,
NoValueForSid,
ValueNotFoundForField,
SidsNotFound,
SymbolNotFound,
)
from . import (
Asset, Equity, Future,
)
from . continuous_futures import (
ADJUSTMENT_STYLES,
CHAIN_PREDICATES,
ContinuousFuture,
OrderedContracts,
)
from .asset_writer import (
check_version_info,
split_delimited_symbol,
asset_db_table_names,
symbol_columns,
SQLITE_MAX_VARIABLE_NUMBER,
)
from .asset_db_schema import (
ASSET_DB_VERSION
)
from .exchange_info import ExchangeInfo
from zipline.utils.functional import invert
from zipline.utils.memoize import lazyval
from zipline.utils.numpy_utils import as_column
from zipline.utils.preprocess import preprocess
from zipline.utils.sqlite_utils import group_into_chunks, coerce_string_to_eng
log = Logger('assets.py')
# A set of fields that need to be converted to strings before building an
# Asset to avoid unicode fields
_asset_str_fields = frozenset({
'symbol',
'asset_name',
'exchange',
})
# A set of fields that need to be converted to timestamps in UTC
_asset_timestamp_fields = frozenset({
'start_date',
'end_date',
'first_traded',
'notice_date',
'expiration_date',
'auto_close_date',
})
OwnershipPeriod = namedtuple('OwnershipPeriod', 'start end sid value')
def merge_ownership_periods(mappings):
"""
Given a dict of mappings where the values are lists of
OwnershipPeriod objects, returns a dict with the same structure with
new OwnershipPeriod objects adjusted so that the periods have no
gaps.
Orders the periods chronologically, and pushes forward the end date
of each period to match the start date of the following period. The
    end date of the last period is pushed forward to the max Timestamp.
"""
return valmap(
lambda v: tuple(
OwnershipPeriod(
a.start,
b.start,
a.sid,
a.value,
) for a, b in sliding_window(
2,
concatv(
sorted(v),
# concat with a fake ownership object to make the last
# end date be max timestamp
[OwnershipPeriod(
pd.Timestamp.max.tz_localize('utc'),
None,
None,
None,
)],
),
)
),
mappings,
)
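# A hedged worked example (hypothetical dates). Input periods for one key:
#
#     OwnershipPeriod(2010-01-01, 2012-01-01, sid=1, value='A')
#     OwnershipPeriod(2014-01-01, 2015-01-01, sid=2, value='A')
#
# become, after merging:
#
#     OwnershipPeriod(2010-01-01, 2014-01-01, sid=1, value='A')
#     OwnershipPeriod(2014-01-01, Timestamp.max, sid=2, value='A')
#
# Each end date is pulled forward to the next period's start, and the final
# period is left open-ended.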
def _build_ownership_map_from_rows(rows, key_from_row, value_from_row):
mappings = {}
for row in rows:
mappings.setdefault(
key_from_row(row),
[],
).append(
OwnershipPeriod(
pd.Timestamp(row.start_date, unit='ns', tz='utc'),
pd.Timestamp(row.end_date, unit='ns', tz='utc'),
row.sid,
value_from_row(row),
),
)
return merge_ownership_periods(mappings)
def build_ownership_map(table, key_from_row, value_from_row):
"""
Builds a dict mapping to lists of OwnershipPeriods, from a db table.
"""
return _build_ownership_map_from_rows(
sa.select(table.c).execute().fetchall(),
key_from_row,
value_from_row,
)
def build_grouped_ownership_map(table,
key_from_row,
value_from_row,
group_key):
"""
    Builds a dict mapping group keys to maps of keys to lists of
OwnershipPeriods, from a db table.
"""
grouped_rows = groupby(
group_key,
sa.select(table.c).execute().fetchall(),
)
return {
key: _build_ownership_map_from_rows(
rows,
key_from_row,
value_from_row,
)
for key, rows in grouped_rows.items()
}
@curry
def _filter_kwargs(names, dict_):
"""Filter out kwargs from a dictionary.
Parameters
----------
names : set[str]
The names to select from ``dict_``.
dict_ : dict[str, any]
The dictionary to select from.
Returns
-------
kwargs : dict[str, any]
``dict_`` where the keys intersect with ``names`` and the values are
not None.
"""
return {k: v for k, v in dict_.items() if k in names and v is not None}
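# A hedged sketch: because ``_filter_kwargs`` is curried, supplying only the
# name set yields a reusable filter, which is how the two partials below are
# built.
#
#     only_ab = _filter_kwargs({'a', 'b'})
#     only_ab({'a': 1, 'b': None, 'c': 3})  # -> {'a': 1}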
_filter_future_kwargs = _filter_kwargs(Future._kwargnames)
_filter_equity_kwargs = _filter_kwargs(Equity._kwargnames)
def _convert_asset_timestamp_fields(dict_):
"""
Takes in a dict of Asset init args and converts dates to pd.Timestamps
"""
for key in _asset_timestamp_fields & viewkeys(dict_):
value = pd.Timestamp(dict_[key], tz='UTC')
dict_[key] = None if isnull(value) else value
return dict_
SID_TYPE_IDS = {
# Asset would be 0,
ContinuousFuture: 1,
}
CONTINUOUS_FUTURE_ROLL_STYLE_IDS = {
'calendar': 0,
'volume': 1,
}
CONTINUOUS_FUTURE_ADJUSTMENT_STYLE_IDS = {
None: 0,
'div': 1,
'add': 2,
}
def _encode_continuous_future_sid(root_symbol,
offset,
roll_style,
adjustment_style):
s = struct.Struct("B 2B B B B 2B")
# B - sid type
# 2B - root symbol
# B - offset (could be packed smaller since offsets of greater than 12 are
# probably unneeded.)
# B - roll type
# B - adjustment
# 2B - empty space left for parameterized roll types
# The root symbol currently supports 2 characters. If 3 char root symbols
# are needed, the size of the root symbol does not need to change, however
# writing the string directly will need to change to a scheme of writing
# the A-Z values in 5-bit chunks.
a = array.array('B', [0] * s.size)
rs = bytearray(root_symbol, 'ascii')
values = (SID_TYPE_IDS[ContinuousFuture],
rs[0],
rs[1],
offset,
CONTINUOUS_FUTURE_ROLL_STYLE_IDS[roll_style],
CONTINUOUS_FUTURE_ADJUSTMENT_STYLE_IDS[adjustment_style],
0, 0)
s.pack_into(a, 0, *values)
return int(binascii.hexlify(a), 16)
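# A hedged sketch (hypothetical root symbol): the encoding packs the
# type/symbol/offset/roll/adjustment bytes and reads them back as a single
# integer, so it is deterministic, e.g.
#
#     _encode_continuous_future_sid('CL', 0, 'calendar', None)
#     # -> int('01434c0000000000', 16)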
class AssetFinder(object):
"""
An AssetFinder is an interface to a database of Asset metadata written by
an ``AssetDBWriter``.
This class provides methods for looking up assets by unique integer id or
by symbol. For historical reasons, we refer to these unique ids as 'sids'.
Parameters
----------
engine : str or SQLAlchemy.engine
An engine with a connection to the asset database to use, or a string
that can be parsed by SQLAlchemy as a URI.
future_chain_predicates : dict
A dict mapping future root symbol to a predicate function which accepts
a contract as a parameter and returns whether or not the contract should be
included in the chain.
See Also
--------
:class:`zipline.assets.AssetDBWriter`
"""
@preprocess(engine=coerce_string_to_eng(require_exists=True))
def __init__(self, engine, future_chain_predicates=CHAIN_PREDICATES):
self.engine = engine
metadata = sa.MetaData(bind=engine)
metadata.reflect(only=asset_db_table_names)
for table_name in asset_db_table_names:
setattr(self, table_name, metadata.tables[table_name])
# Check the version info of the db for compatibility
check_version_info(engine, self.version_info, ASSET_DB_VERSION)
# Cache for lookup of assets by sid, the objects in the asset lookup
# may be shared with the results from equity and future lookup caches.
#
# The top level cache exists to minimize lookups on the asset type
# routing.
#
# The caches are read through, i.e. accessing an asset through
# retrieve_asset will populate the cache on first retrieval.
self._asset_cache = {}
self._asset_type_cache = {}
self._caches = (self._asset_cache, self._asset_type_cache)
self._future_chain_predicates = future_chain_predicates \
if future_chain_predicates is not None else {}
self._ordered_contracts = {}
# Populated on first call to `lifetimes`.
self._asset_lifetimes = {}
@lazyval
def exchange_info(self):
es = sa.select(self.exchanges.c).execute().fetchall()
return {
name: ExchangeInfo(name, canonical_name, country_code)
for name, canonical_name, country_code in es
}
def _reset_caches(self):
"""
Reset our asset caches.
You probably shouldn't call this method.
"""
# This method exists as a workaround for the in-place mutating behavior
# of `TradingAlgorithm._write_and_map_id_index_to_sids`. No one else
# should be calling this.
for cache in self._caches:
cache.clear()
self.reload_symbol_maps()
del type(self).exchanges[self]
def reload_symbol_maps(self):
"""Clear the in memory symbol lookup maps.
This will make any changes to the underlying db available to the
symbol maps.
"""
# clear the lazyval caches, the next access will requery
for attr in dir(type(self)):
value = getattr(self, attr)
if isinstance(value, lazyval):
del value[self]
@lazyval
def symbol_ownership_map(self):
out = {}
for mappings in self.symbol_ownership_maps_by_country_code.values():
for key, ownership_periods in mappings.items():
out.setdefault(key, []).extend(ownership_periods)
return out
@lazyval
def symbol_ownership_maps_by_country_code(self):
sid_to_country_code = dict(
sa.select((
self.equities.c.sid,
self.exchanges.c.country_code,
)).where(
self.equities.c.exchange == self.exchanges.c.exchange
).execute().fetchall(),
)
return build_grouped_ownership_map(
table=self.equity_symbol_mappings,
key_from_row=(
lambda row: (row.company_symbol, row.share_class_symbol)
),
value_from_row=lambda row: row.symbol,
group_key=lambda row: sid_to_country_code[row.sid],
)
@staticmethod
def _fuzzify_symbol_ownership_map(ownership_map):
fuzzy_mappings = {}
for (cs, scs), owners in iteritems(ownership_map):
fuzzy_owners = fuzzy_mappings.setdefault(
cs + scs,
[],
)
fuzzy_owners.extend(owners)
fuzzy_owners.sort()
return fuzzy_mappings
@lazyval
def fuzzy_symbol_ownership_map(self):
return self._fuzzify_symbol_ownership_map(self.symbol_ownership_map)
@lazyval
def fuzzy_symbol_ownership_maps_by_country_code(self):
return valmap(
self._fuzzify_symbol_ownership_map,
self.symbol_ownership_maps_by_country_code,
)
@lazyval
def equity_supplementary_map(self):
return build_ownership_map(
table=self.equity_supplementary_mappings,
key_from_row=lambda row: (row.field, row.value),
value_from_row=lambda row: row.value,
)
@lazyval
def equity_supplementary_map_by_sid(self):
return build_ownership_map(
table=self.equity_supplementary_mappings,
key_from_row=lambda row: (row.field, row.sid),
value_from_row=lambda row: row.value,
)
def lookup_asset_types(self, sids):
"""
Retrieve asset types for a list of sids.
Parameters
----------
sids : list[int]
Returns
-------
types : dict[sid -> str or None]
Asset types for the provided sids.
"""
found = {}
missing = set()
for sid in sids:
try:
found[sid] = self._asset_type_cache[sid]
except KeyError:
missing.add(sid)
if not missing:
return found
router_cols = self.asset_router.c
for assets in group_into_chunks(missing):
query = sa.select((router_cols.sid, router_cols.asset_type)).where(
self.asset_router.c.sid.in_(map(int, assets))
)
for sid, type_ in query.execute().fetchall():
missing.remove(sid)
found[sid] = self._asset_type_cache[sid] = type_
for sid in missing:
found[sid] = self._asset_type_cache[sid] = None
return found
def group_by_type(self, sids):
"""
Group a list of sids by asset type.
Parameters
----------
sids : list[int]
Returns
-------
types : dict[str or None -> list[int]]
A dict mapping unique asset types to lists of sids drawn from sids.
If we fail to look up an asset, we assign it a key of None.
"""
return invert(self.lookup_asset_types(sids))
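    # A hedged sketch (hypothetical sids): with sids 1-2 routed to equities,
    # 3 to futures, and 4 unknown, ``finder.group_by_type([1, 2, 3, 4])``
    # groups them as equity -> {1, 2}, future -> {3}, and None -> {4}.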
def retrieve_asset(self, sid, default_none=False):
"""
Retrieve the Asset for a given sid.
"""
try:
asset = self._asset_cache[sid]
if asset is None and not default_none:
raise SidsNotFound(sids=[sid])
return asset
except KeyError:
return self.retrieve_all((sid,), default_none=default_none)[0]
def retrieve_all(self, sids, default_none=False):
"""
Retrieve all assets in `sids`.
Parameters
----------
sids : iterable of int
Assets to retrieve.
default_none : bool
If True, return None for failed lookups.
If False, raise `SidsNotFound`.
Returns
-------
assets : list[Asset or None]
A list of the same length as `sids` containing Assets (or Nones)
corresponding to the requested sids.
Raises
------
SidsNotFound
When a requested sid is not found and default_none=False.
"""
sids = list(sids)
hits, missing, failures = {}, set(), []
for sid in sids:
try:
asset = self._asset_cache[sid]
if not default_none and asset is None:
# Bail early if we've already cached that we don't know
# about an asset.
raise SidsNotFound(sids=[sid])
hits[sid] = asset
except KeyError:
missing.add(sid)
# All requests were cache hits. Return requested sids in order.
if not missing:
return [hits[sid] for sid in sids]
update_hits = hits.update
# Look up cache misses by type.
type_to_assets = self.group_by_type(missing)
# Handle failures
failures = {failure: None for failure in type_to_assets.pop(None, ())}
update_hits(failures)
self._asset_cache.update(failures)
if failures and not default_none:
raise SidsNotFound(sids=list(failures))
# We don't update the asset cache here because it should already be
# updated by `self.retrieve_equities`.
update_hits(self.retrieve_equities(type_to_assets.pop('equity', ())))
update_hits(
self.retrieve_futures_contracts(type_to_assets.pop('future', ()))
)
# We shouldn't know about any other asset types.
if type_to_assets:
raise AssertionError(
"Found asset types: %s" % list(type_to_assets.keys())
)
return [hits[sid] for sid in sids]
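    # A hedged sketch (hypothetical sids): ``retrieve_all`` reads through the
    # cache and preserves request order, so
    #
    #     finder.retrieve_all([24, 24, 7], default_none=True)
    #
    # returns three entries (duplicates included), with None standing in for
    # any sid that cannot be resolved.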
def retrieve_equities(self, sids):
"""
Retrieve Equity objects for a list of sids.
        Users generally shouldn't need to use this method (instead, they
        should prefer the more general/friendly `retrieve_all`), but it has a
documented interface and tests because it's used upstream.
Parameters
----------
sids : iterable[int]
Returns
-------
equities : dict[int -> Equity]
Raises
------
EquitiesNotFound
When any requested asset isn't found.
"""
return self._retrieve_assets(sids, self.equities, Equity)
def _retrieve_equity(self, sid):
return self.retrieve_equities((sid,))[sid]
def retrieve_futures_contracts(self, sids):
"""
Retrieve Future objects for an iterable of sids.
        Users generally shouldn't need to use this method (instead, they
        should prefer the more general/friendly `retrieve_all`), but it has a
documented interface and tests because it's used upstream.
Parameters
----------
sids : iterable[int]
Returns
-------
        futures : dict[int -> Future]
        Raises
        ------
        FutureContractsNotFound
When any requested asset isn't found.
"""
return self._retrieve_assets(sids, self.futures_contracts, Future)
@staticmethod
def _select_assets_by_sid(asset_tbl, sids):
return sa.select([asset_tbl]).where(
asset_tbl.c.sid.in_(map(int, sids))
)
@staticmethod
def _select_asset_by_symbol(asset_tbl, symbol):
return sa.select([asset_tbl]).where(asset_tbl.c.symbol == symbol)
def _select_most_recent_symbols_chunk(self, sid_group):
"""Retrieve the most recent symbol for a set of sids.
Parameters
----------
sid_group : iterable[int]
The sids to lookup. The length of this sequence must be less than
or equal to SQLITE_MAX_VARIABLE_NUMBER because the sids will be
passed in as sql bind params.
Returns
-------
sel : Selectable
The sqlalchemy selectable that will query for the most recent
symbol for each sid.
Notes
-----
This is implemented as an inner select of the columns of interest
ordered by the end date of the (sid, symbol) mapping. We then group
that inner select on the sid with no aggregations to select the last
row per group which gives us the most recently active symbol for all
of the sids.
"""
symbol_cols = self.equity_symbol_mappings.c
inner = sa.select(
(symbol_cols.sid,) +
tuple(map(
op.getitem(symbol_cols),
symbol_columns,
)),
).where(
symbol_cols.sid.in_(map(int, sid_group)),
).order_by(
symbol_cols.end_date.asc(),
)
return sa.select(inner.c).group_by(inner.c.sid)
def _lookup_most_recent_symbols(self, sids):
symbols = {
row.sid: {c: row[c] for c in symbol_columns}
for row in concat(
self.engine.execute(
self._select_most_recent_symbols_chunk(sid_group),
).fetchall()
for sid_group in partition_all(
SQLITE_MAX_VARIABLE_NUMBER,
sids
),
)
}
if len(symbols) != len(sids):
raise EquitiesNotFound(
sids=set(sids) - set(symbols),
plural=True,
)
return symbols
def _retrieve_asset_dicts(self, sids, asset_tbl, querying_equities):
if not sids:
return
if querying_equities:
def mkdict(row,
exchanges=self.exchange_info,
symbols=self._lookup_most_recent_symbols(sids)):
d = dict(row)
d['exchange_info'] = exchanges[d.pop('exchange')]
return merge(d, symbols[row['sid']])
else:
def mkdict(row, exchanges=self.exchange_info):
d = dict(row)
d['exchange_info'] = exchanges[d.pop('exchange')]
return d
for assets in group_into_chunks(sids):
# Load misses from the db.
query = self._select_assets_by_sid(asset_tbl, assets)
for row in query.execute().fetchall():
yield _convert_asset_timestamp_fields(mkdict(row))
def _retrieve_assets(self, sids, asset_tbl, asset_type):
"""
Internal function for loading assets from a table.
This should be the only method of `AssetFinder` that writes Assets into
self._asset_cache.
Parameters
        ----------
sids : iterable of int
Asset ids to look up.
asset_tbl : sqlalchemy.Table
Table from which to query assets.
asset_type : type
Type of asset to be constructed.
Returns
-------
assets : dict[int -> Asset]
Dict mapping requested sids to the retrieved assets.
"""
# Fastpath for empty request.
if not sids:
return {}
cache = self._asset_cache
hits = {}
querying_equities = issubclass(asset_type, Equity)
filter_kwargs = (
_filter_equity_kwargs
if querying_equities else
_filter_future_kwargs
)
rows = self._retrieve_asset_dicts(sids, asset_tbl, querying_equities)
for row in rows:
sid = row['sid']
asset = asset_type(**filter_kwargs(row))
hits[sid] = cache[sid] = asset
# If we get here, it means something in our code thought that a
# particular sid was an equity/future and called this function with a
# concrete type, but we couldn't actually resolve the asset. This is
# an error in our code, not a user-input error.
misses = tuple(set(sids) - viewkeys(hits))
if misses:
if querying_equities:
raise EquitiesNotFound(sids=misses)
else:
raise FutureContractsNotFound(sids=misses)
return hits
def _lookup_symbol_strict(self,
ownership_map,
multi_country,
symbol,
as_of_date):
"""
Resolve a symbol to an asset object without fuzzy matching.
Parameters
----------
ownership_map : dict[(str, str), list[OwnershipPeriod]]
The mapping from split symbols to ownership periods.
multi_country : bool
Does this mapping span multiple countries?
symbol : str
The symbol to look up.
as_of_date : datetime or None
            If multiple assets have held this symbol, which day should the
            resolution be checked against? If this value is None and multiple
            assets have held the ticker, then a MultipleSymbolsFound error
            will be raised.
Returns
-------
asset : Asset
The asset that held the given symbol.
Raises
------
SymbolNotFound
Raised when the symbol or symbol as_of_date pair do not map to
any assets.
MultipleSymbolsFound
Raised when multiple assets held the symbol. This happens if
multiple assets held the symbol at disjoint times and
``as_of_date`` is None, or if multiple assets held the symbol at
            the same time and ``multi_country`` is True.
Notes
-----
The resolution algorithm is as follows:
- Split the symbol into the company and share class component.
- Do a dictionary lookup of the
``(company_symbol, share_class_symbol)`` in the provided ownership
map.
- If there is no entry in the dictionary, we don't know about this
symbol so raise a ``SymbolNotFound`` error.
- If ``as_of_date`` is None:
          - If there is more than one owner, raise
``MultipleSymbolsFound``
- Otherwise, because the list mapped to a symbol cannot be empty,
return the single asset.
- Iterate through all of the owners:
- If the ``as_of_date`` is between the start and end of the ownership
period:
- If multi_country is False, return the found asset.
- Otherwise, put the asset in a list.
- At the end of the loop, if there are no candidate assets, raise a
``SymbolNotFound``.
- If there is exactly one candidate, return it.
        - Otherwise, raise ``MultipleSymbolsFound`` because the ticker is not
unique across countries.
"""
# split the symbol into the components, if there are no
# company/share class parts then share_class_symbol will be empty
company_symbol, share_class_symbol = split_delimited_symbol(symbol)
try:
owners = ownership_map[company_symbol, share_class_symbol]
assert owners, 'empty owners list for %r' % symbol
except KeyError:
# no equity has ever held this symbol
raise SymbolNotFound(symbol=symbol)
if not as_of_date:
if len(owners) > 1:
                # more than one equity has held this ticker, this is ambiguous
# without the date
raise MultipleSymbolsFound(
symbol=symbol,
options=set(map(
compose(self.retrieve_asset, attrgetter('sid')),
owners,
)),
)
# exactly one equity has ever held this symbol, we may resolve
# without the date
return self.retrieve_asset(owners[0].sid)
options = []
for start, end, sid, _ in owners:
if start <= as_of_date < end:
# find the equity that owned it on the given asof date
asset = self.retrieve_asset(sid)
if not multi_country:
return asset
else:
options.append(asset)
if len(options) == 1:
return options[0]
if not options:
# no equity held the ticker on the given asof date
raise SymbolNotFound(symbol=symbol)
raise MultipleSymbolsFound(symbol=symbol, options=options)
def _lookup_symbol_fuzzy(self,
ownership_map,
multi_country,
symbol,
as_of_date):
symbol = symbol.upper()
company_symbol, share_class_symbol = split_delimited_symbol(symbol)
try:
owners = ownership_map[company_symbol + share_class_symbol]
assert owners, 'empty owners list for %r' % symbol
except KeyError:
# no equity has ever held a symbol matching the fuzzy symbol
raise SymbolNotFound(symbol=symbol)
if not as_of_date:
if len(owners) == 1:
# only one valid match
return self.retrieve_asset(owners[0].sid)
options = []
for _, _, sid, sym in owners:
if sym == symbol:
# there are multiple options, look for exact matches
options.append(self.retrieve_asset(sid))
if len(options) == 1:
# there was only one exact match
return options[0]
# there are more than one exact match for this fuzzy symbol
raise MultipleSymbolsFound(
symbol=symbol,
options=self.retrieve_all(owner.sid for owner in owners),
)
options = {}
for start, end, sid, sym in owners:
if start <= as_of_date < end:
# see which fuzzy symbols were owned on the asof date.
options[sid] = sym
if not options:
# no equity owned the fuzzy symbol on the date requested
raise SymbolNotFound(symbol=symbol)
sid_keys = list(options.keys())
# If there was only one owner, or there is a fuzzy and non-fuzzy which
# map to the same sid, return it.
if len(options) == 1:
return self.retrieve_asset(sid_keys[0])
exact_options = []
for sid, sym in options.items():
# Possible to have a scenario where multiple fuzzy matches have the
# same date. Want to find the one where symbol and share class
# match.
if ((company_symbol, share_class_symbol) ==
split_delimited_symbol(sym)):
asset = self.retrieve_asset(sid)
if not multi_country:
return asset
else:
exact_options.append(asset)
if len(exact_options) == 1:
return exact_options[0]
# multiple equities held tickers matching the fuzzy ticker but
# there are no exact matches
raise MultipleSymbolsFound(
symbol=symbol,
options=self.retrieve_all(owner.sid for owner in owners),
)
def _choose_fuzzy_symbol_ownership_map(self, country_code):
if country_code is None:
return self.fuzzy_symbol_ownership_map
return self.fuzzy_symbol_ownership_maps_by_country_code.get(
country_code,
)
def _choose_symbol_ownership_map(self, country_code):
if country_code is None:
return self.symbol_ownership_map
return self.symbol_ownership_maps_by_country_code.get(country_code)
def lookup_symbol(self,
symbol,
as_of_date,
fuzzy=False,
country_code=None):
"""Lookup an equity by symbol.
Parameters
----------
symbol : str
The ticker symbol to resolve.
as_of_date : datetime or None
Look up the last owner of this symbol as of this datetime.
If ``as_of_date`` is None, then this can only resolve the equity
if exactly one equity has ever owned the ticker.
fuzzy : bool, optional
Should fuzzy symbol matching be used? Fuzzy symbol matching
attempts to resolve differences in representations for
shareclasses. For example, some people may represent the ``A``
            shareclass of ``BRK`` as ``BRK.A``, whereas others could write
``BRK_A``.
country_code : str or None, optional
The country to limit searches to. If not provided, the search will
span all countries which increases the likelihood of an ambiguous
lookup.
Returns
-------
equity : Equity
The equity that held ``symbol`` on the given ``as_of_date``, or the
only equity to hold ``symbol`` if ``as_of_date`` is None.
Raises
------
SymbolNotFound
Raised when no equity has ever held the given symbol.
MultipleSymbolsFound
Raised when no ``as_of_date`` is given and more than one equity
has held ``symbol``. This is also raised when ``fuzzy=True`` and
there are multiple candidates for the given ``symbol`` on the
``as_of_date``. Also raised when no ``country_code`` is given and
the symbol is ambiguous across multiple countries.
"""
if symbol is None:
raise TypeError("Cannot lookup asset for symbol of None for "
"as of date %s." % as_of_date)
if fuzzy:
f = self._lookup_symbol_fuzzy
mapping = self._choose_fuzzy_symbol_ownership_map(country_code)
else:
f = self._lookup_symbol_strict
mapping = self._choose_symbol_ownership_map(country_code)
if mapping is None:
raise SymbolNotFound(symbol=symbol)
return f(
mapping,
country_code is None,
symbol,
as_of_date,
)
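    # A hedged usage sketch (hypothetical ticker):
    #
    #     finder.lookup_symbol(
    #         'AAPL',
    #         as_of_date=pd.Timestamp('2014-01-02', tz='UTC'),
    #     )
    #     # returns the Equity that owned 'AAPL' on that date, raising
    #     # SymbolNotFound or MultipleSymbolsFound on failure or ambiguity.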
def lookup_symbols(self,
symbols,
as_of_date,
fuzzy=False,
country_code=None):
"""
Lookup a list of equities by symbol.
Equivalent to::
[finder.lookup_symbol(s, as_of, fuzzy) for s in symbols]
but potentially faster because repeated lookups are memoized.
Parameters
----------
symbols : sequence[str]
Sequence of ticker symbols to resolve.
as_of_date : pd.Timestamp
Forwarded to ``lookup_symbol``.
fuzzy : bool, optional
Forwarded to ``lookup_symbol``.
country_code : str or None, optional
The country to limit searches to. If not provided, the search will
span all countries which increases the likelihood of an ambiguous
lookup.
Returns
-------
equities : list[Equity]
"""
if not symbols:
return []
multi_country = country_code is None
if fuzzy:
f = self._lookup_symbol_fuzzy
mapping = self._choose_fuzzy_symbol_ownership_map(country_code)
else:
f = self._lookup_symbol_strict
mapping = self._choose_symbol_ownership_map(country_code)
if mapping is None:
raise SymbolNotFound(symbol=symbols[0])
memo = {}
out = []
append_output = out.append
for sym in symbols:
if sym in memo:
append_output(memo[sym])
else:
equity = memo[sym] = f(
mapping,
multi_country,
sym,
as_of_date,
)
append_output(equity)
return out
def lookup_future_symbol(self, symbol):
"""Lookup a future contract by symbol.
Parameters
----------
symbol : str
The symbol of the desired contract.
Returns
-------
future : Future
The future contract referenced by ``symbol``.
Raises
------
SymbolNotFound
Raised when no contract named 'symbol' is found.
"""
data = self._select_asset_by_symbol(self.futures_contracts, symbol)\
.execute().fetchone()
# If no data found, raise an exception
if not data:
raise SymbolNotFound(symbol=symbol)
return self.retrieve_asset(data['sid'])
def lookup_by_supplementary_field(self, field_name, value, as_of_date):
try:
owners = self.equity_supplementary_map[
field_name,
value,
]
            assert owners, 'empty owners list for %r' % ((field_name, value),)
except KeyError:
# no equity has ever held this value
raise ValueNotFoundForField(field=field_name, value=value)
if not as_of_date:
if len(owners) > 1:
                # more than one equity has held this value, this is ambiguous
# without the date
raise MultipleValuesFoundForField(
field=field_name,
value=value,
options=set(map(
compose(self.retrieve_asset, attrgetter('sid')),
owners,
)),
)
# exactly one equity has ever held this value, we may resolve
# without the date
return self.retrieve_asset(owners[0].sid)
for start, end, sid, _ in owners:
if start <= as_of_date < end:
# find the equity that owned it on the given asof date
return self.retrieve_asset(sid)
# no equity held the value on the given asof date
raise ValueNotFoundForField(field=field_name, value=value)
def get_supplementary_field(
self,
sid,
field_name,
as_of_date,
):
"""Get the value of a supplementary field for an asset.
Parameters
----------
sid : int
The sid of the asset to query.
field_name : str
Name of the supplementary field.
as_of_date : pd.Timestamp, None
The last known value on this date is returned. If None, a
value is returned only if we've only ever had one value for
this sid. If None and we've had multiple values,
MultipleValuesFoundForSid is raised.
Raises
------
NoValueForSid
            If we have no values for this asset, or no value was known
on this as_of_date.
MultipleValuesFoundForSid
If we have had multiple values for this asset over time, and
None was passed for as_of_date.
"""
try:
periods = self.equity_supplementary_map_by_sid[
field_name,
sid,
]
            assert periods, 'empty periods list for %r' % ((field_name, sid),)
except KeyError:
raise NoValueForSid(field=field_name, sid=sid)
if not as_of_date:
if len(periods) > 1:
                # This equity has held more than one value, this is ambiguous
# without the date
raise MultipleValuesFoundForSid(
field=field_name,
sid=sid,
options={p.value for p in periods},
)
# this equity has only ever held this value, we may resolve
# without the date
return periods[0].value
for start, end, _, value in periods:
if start <= as_of_date < end:
return value
# Could not find a value for this sid on the as_of_date.
raise NoValueForSid(field=field_name, sid=sid)
def _get_contract_sids(self, root_symbol):
fc_cols = self.futures_contracts.c
return [r.sid for r in
list(sa.select((fc_cols.sid,)).where(
(fc_cols.root_symbol == root_symbol) &
(fc_cols.start_date != pd.NaT.value)).order_by(
fc_cols.sid).execute().fetchall())]
def _get_root_symbol_exchange(self, root_symbol):
fc_cols = self.futures_root_symbols.c
fields = (fc_cols.exchange,)
exchange = sa.select(fields).where(
fc_cols.root_symbol == root_symbol).execute().scalar()
if exchange is not None:
return exchange
else:
raise SymbolNotFound(symbol=root_symbol)
def get_ordered_contracts(self, root_symbol):
try:
return self._ordered_contracts[root_symbol]
except KeyError:
contract_sids = self._get_contract_sids(root_symbol)
contracts = deque(self.retrieve_all(contract_sids))
chain_predicate = self._future_chain_predicates.get(root_symbol,
None)
oc = OrderedContracts(root_symbol, contracts, chain_predicate)
self._ordered_contracts[root_symbol] = oc
return oc
def create_continuous_future(self,
root_symbol,
offset,
roll_style,
adjustment):
if adjustment not in ADJUSTMENT_STYLES:
raise ValueError(
'Invalid adjustment style {!r}. Allowed adjustment styles are '
'{}.'.format(adjustment, list(ADJUSTMENT_STYLES))
)
oc = self.get_ordered_contracts(root_symbol)
exchange = self._get_root_symbol_exchange(root_symbol)
sid = _encode_continuous_future_sid(root_symbol, offset,
roll_style,
None)
mul_sid = _encode_continuous_future_sid(root_symbol, offset,
roll_style,
'div')
add_sid = _encode_continuous_future_sid(root_symbol, offset,
roll_style,
'add')
cf_template = partial(
ContinuousFuture,
root_symbol=root_symbol,
offset=offset,
roll_style=roll_style,
start_date=oc.start_date,
end_date=oc.end_date,
exchange_info=self.exchange_info[exchange],
)
cf = cf_template(sid=sid)
mul_cf = cf_template(sid=mul_sid, adjustment='mul')
add_cf = cf_template(sid=add_sid, adjustment='add')
self._asset_cache[cf.sid] = cf
self._asset_cache[mul_cf.sid] = mul_cf
self._asset_cache[add_cf.sid] = add_cf
return {None: cf, 'mul': mul_cf, 'add': add_cf}[adjustment]
def _make_sids(tblattr):
def _(self):
return tuple(map(
itemgetter('sid'),
sa.select((
getattr(self, tblattr).c.sid,
)).execute().fetchall(),
))
return _
sids = property(
_make_sids('asset_router'),
doc='All the sids in the asset finder.',
)
equities_sids = property(
_make_sids('equities'),
doc='All of the sids for equities in the asset finder.',
)
futures_sids = property(
_make_sids('futures_contracts'),
        doc='All of the sids for futures contracts in the asset finder.',
)
del _make_sids
@lazyval
def _symbol_lookups(self):
"""
An iterable of symbol lookup functions to use with ``lookup_generic``
Attempts equities lookup, then futures.
"""
return (
self.lookup_symbol,
# lookup_future_symbol method does not use as_of date, since
# symbols are unique.
#
# Wrap the function in a lambda so that both methods share a
# signature, so that when the functions are iterated over
# the consumer can use the same arguments with both methods.
lambda symbol, _: self.lookup_future_symbol(symbol)
)
def _lookup_generic_scalar(self,
asset_convertible,
as_of_date,
matches,
missing):
"""
Convert asset_convertible to an asset.
On success, append to matches.
On failure, append to missing.
"""
if isinstance(asset_convertible, Asset):
matches.append(asset_convertible)
elif isinstance(asset_convertible, Integral):
try:
result = self.retrieve_asset(int(asset_convertible))
except SidsNotFound:
missing.append(asset_convertible)
return None
matches.append(result)
elif isinstance(asset_convertible, string_types):
for lookup in self._symbol_lookups:
try:
matches.append(lookup(asset_convertible, as_of_date))
return
except SymbolNotFound:
continue
else:
missing.append(asset_convertible)
return None
else:
raise NotAssetConvertible(
"Input was %s, not AssetConvertible."
% asset_convertible
)
def lookup_generic(self,
asset_convertible_or_iterable,
as_of_date):
"""
        Convert an AssetConvertible or iterable of AssetConvertibles into
a list of Asset objects.
This method exists primarily as a convenience for implementing
user-facing APIs that can handle multiple kinds of input. It should
not be used for internal code where we already know the expected types
of our inputs.
Returns a pair of objects, the first of which is the result of the
conversion, and the second of which is a list containing any values
that couldn't be resolved.
"""
matches = []
missing = []
# Interpret input as scalar.
if isinstance(asset_convertible_or_iterable, AssetConvertible):
self._lookup_generic_scalar(
asset_convertible=asset_convertible_or_iterable,
as_of_date=as_of_date,
matches=matches,
missing=missing,
)
try:
return matches[0], missing
except IndexError:
if hasattr(asset_convertible_or_iterable, '__int__'):
raise SidsNotFound(sids=[asset_convertible_or_iterable])
else:
raise SymbolNotFound(symbol=asset_convertible_or_iterable)
# If the input is a ContinuousFuture just return it as-is.
elif isinstance(asset_convertible_or_iterable, ContinuousFuture):
return asset_convertible_or_iterable, missing
# Interpret input as iterable.
try:
iterator = iter(asset_convertible_or_iterable)
except TypeError:
            raise NotAssetConvertible(
                "Input was not an AssetConvertible "
                "or iterable of AssetConvertible."
)
for obj in iterator:
if isinstance(obj, ContinuousFuture):
matches.append(obj)
else:
self._lookup_generic_scalar(obj, as_of_date, matches, missing)
return matches, missing
def map_identifier_index_to_sids(self, index, as_of_date):
"""
This method is for use in sanitizing a user's DataFrame or Panel
inputs.
Takes the given index of identifiers, checks their types, builds assets
if necessary, and returns a list of the sids that correspond to the
input index.
Parameters
----------
index : Iterable
An iterable containing ints, strings, or Assets
as_of_date : pandas.Timestamp
A date to be used to resolve any dual-mapped symbols
Returns
-------
List
A list of integer sids corresponding to the input index
"""
# This method assumes that the type of the objects in the index is
# consistent and can, therefore, be taken from the first identifier
first_identifier = index[0]
# Ensure that input is AssetConvertible (integer, string, or Asset)
if not isinstance(first_identifier, AssetConvertible):
raise MapAssetIdentifierIndexError(obj=first_identifier)
# If sids are provided, no mapping is necessary
if isinstance(first_identifier, Integral):
return index
# Look up all Assets for mapping
matches = []
missing = []
for identifier in index:
self._lookup_generic_scalar(identifier, as_of_date,
matches, missing)
if missing:
raise ValueError("Missing assets for identifiers: %s" % missing)
# Return a list of the sids of the found assets
return [asset.sid for asset in matches]
def _compute_asset_lifetimes(self, country_codes):
"""
Compute and cache a recarray of asset lifetimes.
"""
equities_cols = self.equities.c
if country_codes:
buf = np.array(
tuple(
sa.select((
equities_cols.sid,
equities_cols.start_date,
equities_cols.end_date,
)).where(
(self.exchanges.c.exchange == equities_cols.exchange) &
(self.exchanges.c.country_code.in_(country_codes))
).execute(),
),
dtype='f8', # use doubles so we get NaNs
)
else:
buf = np.array([], dtype='f8')
lifetimes = np.recarray(
buf=buf,
shape=(len(buf),),
dtype=[
('sid', 'f8'),
('start', 'f8'),
('end', 'f8')
],
)
start = lifetimes.start
end = lifetimes.end
start[np.isnan(start)] = 0 # convert missing starts to 0
end[np.isnan(end)] = np.iinfo(int).max # convert missing end to INTMAX
# Cast the results back down to int.
return lifetimes.astype([
('sid', 'i8'),
('start', 'i8'),
('end', 'i8'),
])
def lifetimes(self, dates, include_start_date, country_codes):
"""
Compute a DataFrame representing asset lifetimes for the specified date
range.
Parameters
----------
dates : pd.DatetimeIndex
The dates for which to compute lifetimes.
include_start_date : bool
Whether or not to count the asset as alive on its start_date.
This is useful in a backtesting context where `lifetimes` is being
used to signify "do I have data for this asset as of the morning of
this date?" For many financial metrics, (e.g. daily close), data
isn't available for an asset until the end of the asset's first
day.
country_codes : iterable[str]
The country codes to get lifetimes for.
Returns
-------
lifetimes : pd.DataFrame
A frame of dtype bool with `dates` as index and an Int64Index of
assets as columns. The value at `lifetimes.loc[date, asset]` will
be True iff `asset` existed on `date`. If `include_start_date` is
False, then lifetimes.loc[date, asset] will be false when date ==
asset.start_date.
See Also
--------
numpy.putmask
zipline.pipeline.engine.SimplePipelineEngine._compute_root_mask
"""
# normalize this to a cache-key
country_codes = frozenset(country_codes)
# This is a less than ideal place to do this, because if someone adds
# assets to the finder after we've touched lifetimes we won't have
# those new assets available. Mutability is not my favorite
# programming feature.
lifetimes = self._asset_lifetimes.get(country_codes)
if lifetimes is None:
self._asset_lifetimes[country_codes] = lifetimes = (
self._compute_asset_lifetimes(country_codes)
)
raw_dates = as_column(dates.asi8)
if include_start_date:
mask = lifetimes.start <= raw_dates
else:
mask = lifetimes.start < raw_dates
mask &= (raw_dates <= lifetimes.end)
return pd.DataFrame(mask, index=dates, columns=lifetimes.sid)
class AssetConvertible(with_metaclass(ABCMeta)):
"""
ABC for types that are convertible to integer-representations of
Assets.
Includes Asset, six.string_types, and Integral
"""
pass
AssetConvertible.register(Integral)
AssetConvertible.register(Asset)
# Use six.string_types for Python2/3 compatibility
for _type in string_types:
AssetConvertible.register(_type)
class NotAssetConvertible(ValueError):
pass
class PricingDataAssociable(with_metaclass(ABCMeta)):
"""
ABC for types that can be associated with pricing data.
Includes Asset, Future, ContinuousFuture
"""
pass
PricingDataAssociable.register(Asset)
PricingDataAssociable.register(Future)
PricingDataAssociable.register(ContinuousFuture)
def was_active(reference_date_value, asset):
"""
Whether or not `asset` was active at the time corresponding to
`reference_date_value`.
Parameters
----------
reference_date_value : int
Date, represented as nanoseconds since EPOCH, for which we want to know
if `asset` was alive. This is generally the result of accessing the
`value` attribute of a pandas Timestamp.
asset : Asset
The asset object to check.
Returns
-------
was_active : bool
Whether or not the `asset` existed at the specified time.
"""
return (
asset.start_date.value
<= reference_date_value
<= asset.end_date.value
)
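# A hedged sketch: the reference value is the nanosecond representation of a
# Timestamp (``asset`` is hypothetical here):
#
#     ts = pd.Timestamp('2014-01-02', tz='UTC')
#     was_active(ts.value, asset)  # True iff the asset was alive on ts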
def only_active_assets(reference_date_value, assets):
"""
Filter an iterable of Asset objects down to just assets that were alive at
the time corresponding to `reference_date_value`.
Parameters
----------
reference_date_value : int
Date, represented as nanoseconds since EPOCH, for which we want to know
if `asset` was alive. This is generally the result of accessing the
`value` attribute of a pandas Timestamp.
assets : iterable[Asset]
The assets to filter.
Returns
-------
active_assets : list
List of the active assets from `assets` on the requested date.
"""
    return [a for a in assets if was_active(reference_date_value, a)]
# --- end of zipline/assets/assets.py ---
from itertools import product
from string import ascii_uppercase
import pandas as pd
from pandas.tseries.offsets import MonthBegin
from six import iteritems
from .futures import CME_CODE_TO_MONTH
def make_rotating_equity_info(num_assets,
first_start,
frequency,
periods_between_starts,
asset_lifetime,
exchange='TEST'):
"""
Create a DataFrame representing lifetimes of assets that are constantly
rotating in and out of existence.
Parameters
----------
num_assets : int
How many assets to create.
first_start : pd.Timestamp
The start date for the first asset.
frequency : str or pd.tseries.offsets.Offset (e.g. trading_day)
Frequency used to interpret next two arguments.
periods_between_starts : int
        Create a new asset every `frequency` * `periods_between_starts`.
asset_lifetime : int
Each asset exists for `frequency` * `asset_lifetime` days.
exchange : str, optional
The exchange name.
Returns
-------
info : pd.DataFrame
DataFrame representing newly-created assets.
"""
return pd.DataFrame(
{
'symbol': [chr(ord('A') + i) for i in range(num_assets)],
# Start a new asset every `periods_between_starts` days.
'start_date': pd.date_range(
first_start,
freq=(periods_between_starts * frequency),
periods=num_assets,
),
# Each asset lasts for `asset_lifetime` days.
'end_date': pd.date_range(
first_start + (asset_lifetime * frequency),
freq=(periods_between_starts * frequency),
periods=num_assets,
),
'exchange': exchange,
},
index=range(num_assets),
)
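# A hedged usage sketch (hypothetical parameters): five assets starting five
# business days apart, each living 30 business days.
#
#     from pandas.tseries.offsets import BDay
#
#     info = make_rotating_equity_info(
#         num_assets=5,
#         first_start=pd.Timestamp('2014-01-06', tz='UTC'),
#         frequency=BDay(),
#         periods_between_starts=5,
#         asset_lifetime=30,
#     )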
def make_simple_equity_info(sids,
start_date,
end_date,
symbols=None,
names=None,
exchange='TEST'):
"""
Create a DataFrame representing assets that exist for the full duration
between `start_date` and `end_date`.
Parameters
----------
    sids : array-like of int
        The sids to assign to the created assets.
    start_date : pd.Timestamp
        The start date for all of the created assets.
    end_date : pd.Timestamp
        The end date for all of the created assets.
symbols : list, optional
Symbols to use for the assets.
If not provided, symbols are generated from the sequence 'A', 'B', ...
names : list, optional
Names to use for the assets.
If not provided, names are generated by adding " INC." to each of the
symbols (which might also be auto-generated).
exchange : str, optional
The exchange name.
Returns
-------
info : pd.DataFrame
DataFrame representing newly-created assets.
"""
num_assets = len(sids)
if symbols is None:
symbols = list(ascii_uppercase[:num_assets])
else:
symbols = list(symbols)
if names is None:
names = [str(s) + " INC." for s in symbols]
return pd.DataFrame(
{
'symbol': symbols,
'start_date': pd.to_datetime([start_date] * num_assets),
'end_date': pd.to_datetime([end_date] * num_assets),
'asset_name': list(names),
'exchange': exchange,
},
index=sids,
columns=(
'start_date',
'end_date',
'symbol',
'exchange',
'asset_name',
),
)
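# A hedged usage sketch (hypothetical sids and dates):
#
#     info = make_simple_equity_info(
#         sids=[1, 2, 3],
#         start_date=pd.Timestamp('2014-01-01', tz='UTC'),
#         end_date=pd.Timestamp('2014-12-31', tz='UTC'),
#     )
#     # Symbols default to 'A', 'B', 'C' and names to 'A INC.', 'B INC.', ...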
def make_jagged_equity_info(num_assets,
start_date,
first_end,
frequency,
periods_between_ends,
auto_close_delta):
"""
Create a DataFrame representing assets that all begin at the same start
date, but have cascading end dates.
Parameters
----------
num_assets : int
How many assets to create.
start_date : pd.Timestamp
The start date for all the assets.
first_end : pd.Timestamp
The date at which the first equity will end.
frequency : str or pd.tseries.offsets.Offset (e.g. trading_day)
Frequency used to interpret the next argument.
    periods_between_ends : int
        Starting after the first end date, end each asset every
        `frequency` * `periods_between_ends`.
    auto_close_delta : offset or None
        Offset added to each asset's ``end_date`` to produce its
        ``auto_close_date``. Pass None to omit the ``auto_close_date``
        column entirely.
Returns
-------
info : pd.DataFrame
DataFrame representing newly-created assets.
"""
frame = pd.DataFrame(
{
'symbol': [chr(ord('A') + i) for i in range(num_assets)],
'start_date': start_date,
'end_date': pd.date_range(
first_end,
freq=(periods_between_ends * frequency),
periods=num_assets,
),
'exchange': 'TEST',
},
index=range(num_assets),
)
# Explicitly pass None to disable setting the auto_close_date column.
if auto_close_delta is not None:
frame['auto_close_date'] = frame['end_date'] + auto_close_delta
return frame
def make_future_info(first_sid,
root_symbols,
years,
notice_date_func,
expiration_date_func,
start_date_func,
month_codes=None,
multiplier=500):
"""
    Create a DataFrame representing futures for `root_symbols` during `years`.
Generates a contract per triple of (symbol, year, month) supplied to
`root_symbols`, `years`, and `month_codes`.
Parameters
----------
first_sid : int
The first sid to use for assigning sids to the created contracts.
root_symbols : list[str]
A list of root symbols for which to create futures.
years : list[int or str]
Years (e.g. 2014), for which to produce individual contracts.
notice_date_func : (Timestamp) -> Timestamp
Function to generate notice dates from first of the month associated
with asset month code. Return NaT to simulate futures with no notice
date.
expiration_date_func : (Timestamp) -> Timestamp
Function to generate expiration dates from first of the month
associated with asset month code.
    start_date_func : (Timestamp) -> Timestamp
        Function to generate start dates from first of the month associated
        with each asset month code.
month_codes : dict[str -> [1..12]], optional
Dictionary of month codes for which to create contracts. Entries
should be strings mapped to values from 1 (January) to 12 (December).
Default is zipline.futures.CME_CODE_TO_MONTH
multiplier : int
The contract multiplier.
Returns
-------
futures_info : pd.DataFrame
DataFrame of futures data suitable for passing to an AssetDBWriter.
"""
if month_codes is None:
month_codes = CME_CODE_TO_MONTH
year_strs = list(map(str, years))
years = [pd.Timestamp(s, tz='UTC') for s in year_strs]
# Pairs of string/date like ('K06', 2006-05-01)
contract_suffix_to_beginning_of_month = tuple(
(month_code + year_str[-2:], year + MonthBegin(month_num))
for ((year, year_str), (month_code, month_num))
in product(
zip(years, year_strs),
iteritems(month_codes),
)
)
contracts = []
parts = product(root_symbols, contract_suffix_to_beginning_of_month)
for sid, (root_sym, (suffix, month_begin)) in enumerate(parts, first_sid):
contracts.append({
'sid': sid,
'root_symbol': root_sym,
'symbol': root_sym + suffix,
'start_date': start_date_func(month_begin),
'notice_date': notice_date_func(month_begin),
            'expiration_date': expiration_date_func(month_begin),
'multiplier': multiplier,
'exchange': "TEST",
})
return pd.DataFrame.from_records(contracts, index='sid')
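# Illustrative usage sketch: January ('F') and February ('G') contracts for a
# single made-up root symbol, with simple offset-based date functions.
def _example_make_future_info():
    return make_future_info(
        first_sid=1000,
        root_symbols=['CL'],
        years=[2014],
        notice_date_func=lambda dt: dt - MonthBegin(2),
        expiration_date_func=lambda dt: dt - MonthBegin(1),
        start_date_func=lambda dt: dt - MonthBegin(12),
        month_codes={'F': 1, 'G': 2},
    )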
def make_commodity_future_info(first_sid,
root_symbols,
years,
month_codes=None,
multiplier=500):
"""
Make futures testing data that simulates the notice/expiration date
behavior of physical commodities like oil.
Parameters
----------
first_sid : int
The first sid to use for assigning sids to the created contracts.
root_symbols : list[str]
A list of root symbols for which to create futures.
years : list[int or str]
Years (e.g. 2014), for which to produce individual contracts.
month_codes : dict[str -> [1..12]], optional
Dictionary of month codes for which to create contracts. Entries
should be strings mapped to values from 1 (January) to 12 (December).
Default is zipline.futures.CME_CODE_TO_MONTH
multiplier : int
The contract multiplier.
    Notes
    -----
    Expiration dates are on the 20th of the month prior to the month code.
    Notice dates are on the 20th two months prior to the month code.
    Start dates are one year before the contract month.
See Also
--------
make_future_info
"""
nineteen_days = pd.Timedelta(days=19)
one_year = pd.Timedelta(days=365)
return make_future_info(
first_sid=first_sid,
root_symbols=root_symbols,
years=years,
notice_date_func=lambda dt: dt - MonthBegin(2) + nineteen_days,
expiration_date_func=lambda dt: dt - MonthBegin(1) + nineteen_days,
start_date_func=lambda dt: dt - one_year,
month_codes=month_codes,
multiplier=multiplier,
) | zipline-live2-vk | /zipline-live2-vk-1.3.0.7.0.18.tar.gz/zipline-live2-vk-1.3.0.7.0.18/zipline/assets/synthetic.py | synthetic.py |
from collections import namedtuple
import re
from contextlib2 import ExitStack
import numpy as np
import pandas as pd
import sqlalchemy as sa
from toolz import first
from zipline.errors import AssetDBVersionError
from zipline.assets.asset_db_schema import (
ASSET_DB_VERSION,
asset_db_table_names,
asset_router,
equities as equities_table,
equity_symbol_mappings,
equity_supplementary_mappings as equity_supplementary_mappings_table,
futures_contracts as futures_contracts_table,
exchanges as exchanges_table,
futures_root_symbols,
metadata,
version_info,
)
from zipline.utils.preprocess import preprocess
from zipline.utils.range import from_tuple, intersecting_ranges
from zipline.utils.sqlite_utils import coerce_string_to_eng
# Define a namedtuple for use with the load_data and _load_data methods
AssetData = namedtuple(
'AssetData', (
'equities',
'equities_mappings',
'futures',
'exchanges',
'root_symbols',
'equity_supplementary_mappings',
),
)
SQLITE_MAX_VARIABLE_NUMBER = 999
symbol_columns = frozenset({
'symbol',
'company_symbol',
'share_class_symbol',
})
mapping_columns = symbol_columns | {'start_date', 'end_date'}
def _default_none(df, column):
return None
def _no_default(df, column):
if not df.empty:
raise ValueError('no default value for column %r' % column)
# Default values for the equities DataFrame
_equities_defaults = {
'symbol': _default_none,
'asset_name': _default_none,
'start_date': lambda df, col: 0,
'end_date': lambda df, col: np.iinfo(np.int64).max,
'first_traded': _default_none,
'auto_close_date': _default_none,
# the full exchange name
'exchange': _no_default,
}
# Default values for the futures DataFrame
_futures_defaults = {
'symbol': _default_none,
'root_symbol': _default_none,
'asset_name': _default_none,
'start_date': lambda df, col: 0,
'end_date': lambda df, col: np.iinfo(np.int64).max,
'first_traded': _default_none,
'exchange': _default_none,
'notice_date': _default_none,
'expiration_date': _default_none,
'auto_close_date': _default_none,
'tick_size': _default_none,
'multiplier': lambda df, col: 1,
}
# Default values for the exchanges DataFrame
_exchanges_defaults = {
'canonical_name': lambda df, col: df.index,
'country_code': lambda df, col: '??',
}
# Default values for the root_symbols DataFrame
_root_symbols_defaults = {
'root_symbol_id': _default_none,
'sector': _default_none,
'description': _default_none,
'exchange': _default_none,
}
# Default values for the equity_supplementary_mappings DataFrame
_equity_supplementary_mappings_defaults = {
'sid': _default_none,
'value': _default_none,
'field': _default_none,
'start_date': lambda df, col: 0,
'end_date': lambda df, col: np.iinfo(np.int64).max,
}
# Fuzzy symbol delimiters that may break up a company symbol and share class
_delimited_symbol_delimiters_regex = re.compile(r'[./\-_]')
_delimited_symbol_default_triggers = frozenset({np.nan, None, ''})
def split_delimited_symbol(symbol):
"""
    Takes in a symbol that may be delimited and splits it into a company
    symbol and share class symbol.
Parameters
----------
symbol : str
The possibly-delimited symbol to be split
Returns
-------
company_symbol : str
The company part of the symbol.
share_class_symbol : str
The share class part of a symbol.
"""
# return blank strings for any bad fuzzy symbols, like NaN or None
if symbol in _delimited_symbol_default_triggers:
return '', ''
symbol = symbol.upper()
split_list = re.split(
pattern=_delimited_symbol_delimiters_regex,
string=symbol,
maxsplit=1,
)
    # Break the list up into its two components, the company symbol and the
    # share class symbol
company_symbol = split_list[0]
if len(split_list) > 1:
share_class_symbol = split_list[1]
else:
share_class_symbol = ''
return company_symbol, share_class_symbol
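# Behavior sketch for the splitting rules documented above.
def _example_split_delimited_symbol():
    assert split_delimited_symbol('BRK.A') == ('BRK', 'A')
    assert split_delimited_symbol('aapl') == ('AAPL', '')
    assert split_delimited_symbol(None) == ('', '')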
def _generate_output_dataframe(data_subset, defaults):
"""
Generates an output dataframe from the given subset of user-provided
data, the given column names, and the given default values.
Parameters
----------
data_subset : DataFrame
A DataFrame, usually from an AssetData object,
that contains the user's input metadata for the asset type being
processed
defaults : dict
A dict where the keys are the names of the columns of the desired
output DataFrame and the values are a function from dataframe and
column name to the default values to insert in the DataFrame if no user
data is provided
Returns
-------
DataFrame
A DataFrame containing all user-provided metadata, and default values
wherever user-provided metadata was missing
"""
# The columns provided.
cols = set(data_subset.columns)
desired_cols = set(defaults)
# Drop columns with unrecognised headers.
data_subset.drop(cols - desired_cols,
axis=1,
inplace=True)
# Get those columns which we need but
# for which no data has been supplied.
for col in desired_cols - cols:
# write the default value for any missing columns
data_subset[col] = defaults[col](data_subset, col)
return data_subset
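# Sketch of the defaulting behavior above: unrecognised columns are dropped
# and missing desired columns are filled from the defaults dict. The input
# frame here is made up.
def _example_generate_output_dataframe():
    df = pd.DataFrame({
        'symbol': ['AYY'],
        'exchange': ['TEST'],  # required: ``_no_default`` raises if missing
        'unknown': [1],
    })
    out = _generate_output_dataframe(df, _equities_defaults)
    assert 'unknown' not in out.columns
    assert out['start_date'].iloc[0] == 0  # filled from the default
    return out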
def _check_asset_group(group):
row = group.sort_values('end_date').iloc[-1]
row.start_date = group.start_date.min()
row.end_date = group.end_date.max()
row.drop(list(symbol_columns), inplace=True)
return row
def _format_range(r):
return (
str(pd.Timestamp(r.start, unit='ns')),
str(pd.Timestamp(r.stop, unit='ns')),
)
def _split_symbol_mappings(df, exchanges):
"""Split out the symbol: sid mappings from the raw data.
Parameters
----------
    df : pd.DataFrame
        The dataframe with multiple rows for each symbol: sid pair.
    exchanges : pd.DataFrame
        The exchanges table, indexed by exchange name, with a
        ``country_code`` column.
Returns
-------
asset_info : pd.DataFrame
The asset info with one row per asset.
symbol_mappings : pd.DataFrame
        The dataframe of just symbol: sid mappings. The index will be
        the sid, with the symbol columns (symbol, company_symbol,
        share_class_symbol) plus start_date and end_date.
"""
mappings = df[list(mapping_columns)].copy()
mappings['country_code'] = exchanges['country_code'][df['exchange']].values
    ambiguous = {}
def check_intersections(persymbol):
intersections = list(intersecting_ranges(map(
from_tuple,
zip(persymbol.start_date, persymbol.end_date),
)))
if intersections:
data = persymbol[
['start_date', 'end_date']
].astype('datetime64[ns]')
# indent the dataframe string, also compute this early because
# ``persymbol`` is a view and ``astype`` doesn't copy the index
# correctly in pandas 0.22
msg_component = '\n '.join(str(data).splitlines())
            ambiguous[persymbol.name] = intersections, msg_component
mappings.groupby(['symbol', 'country_code']).apply(check_intersections)
    if ambiguous:
raise ValueError(
'Ambiguous ownership for %d symbol%s, multiple assets held the'
' following symbols:\n%s' % (
                len(ambiguous),
                '' if len(ambiguous) == 1 else 's',
'\n'.join(
'%s (%s):\n intersections: %s\n %s' % (
symbol,
country_code,
tuple(map(_format_range, intersections)),
cs,
)
for (symbol, country_code), (intersections, cs) in sorted(
                        ambiguous.items(),
key=first,
),
),
)
)
return (
df.groupby(level=0).apply(_check_asset_group),
df[list(mapping_columns)],
)
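# Sketch of the ambiguity check above: two sids holding the same symbol over
# overlapping (epoch-ns) date ranges should raise ValueError. The frames
# here are made up and minimal.
def _example_ambiguous_symbol_detection():
    df = pd.DataFrame({
        'symbol': ['DUP', 'DUP'],
        'company_symbol': ['DUP', 'DUP'],
        'share_class_symbol': ['', ''],
        'start_date': [0, 5],
        'end_date': [10, 15],
        'exchange': ['TEST', 'TEST'],
    }, index=[1, 2])
    exchanges = pd.DataFrame({'country_code': ['US']}, index=['TEST'])
    try:
        _split_symbol_mappings(df, exchanges)
    except ValueError:
        return True  # expected: ambiguous ownership of 'DUP'
    return False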
def _dt_to_epoch_ns(dt_series):
"""Convert a timeseries into an Int64Index of nanoseconds since the epoch.
Parameters
----------
dt_series : pd.Series
The timeseries to convert.
Returns
-------
idx : pd.Int64Index
The index converted to nanoseconds since the epoch.
"""
index = pd.to_datetime(dt_series.values)
if index.tzinfo is None:
index = index.tz_localize('UTC')
else:
index = index.tz_convert('UTC')
return index.view(np.int64)
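# Sketch: naive datetimes are treated as UTC; one day past the epoch is
# 86400 * 10**9 nanoseconds.
def _example_dt_to_epoch_ns():
    s = pd.Series(pd.to_datetime(['1970-01-01', '1970-01-02']))
    assert list(_dt_to_epoch_ns(s)) == [0, 86400 * 10 ** 9]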
def check_version_info(conn, version_table, expected_version):
"""
Checks for a version value in the version table.
Parameters
----------
conn : sa.Connection
The connection to use to perform the check.
version_table : sa.Table
The version table of the asset database
expected_version : int
The expected version of the asset database
Raises
------
AssetDBVersionError
If the version is in the table and not equal to ASSET_DB_VERSION.
"""
# Read the version out of the table
version_from_table = conn.execute(
sa.select((version_table.c.version,)),
).scalar()
# A db without a version is considered v0
if version_from_table is None:
version_from_table = 0
# Raise an error if the versions do not match
if (version_from_table != expected_version):
raise AssetDBVersionError(db_version=version_from_table,
expected_version=expected_version)
def write_version_info(conn, version_table, version_value):
"""
Inserts the version value in to the version table.
Parameters
----------
conn : sa.Connection
The connection to use to execute the insert.
version_table : sa.Table
The version table of the asset database
version_value : int
The version to write in to the database
"""
conn.execute(sa.insert(version_table, values={'version': version_value}))
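# Sketch: round-trip the schema version through a fresh in-memory database
# using the two helpers above. ``metadata`` and ``version_info`` come from
# the asset db schema imported at the top of this module.
def _example_version_round_trip():
    engine = sa.create_engine('sqlite://')
    metadata.create_all(engine)
    with engine.begin() as conn:
        write_version_info(conn, version_info, ASSET_DB_VERSION)
        check_version_info(conn, version_info, ASSET_DB_VERSION)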
class _empty(object):
columns = ()
class AssetDBWriter(object):
"""Class used to write data to an assets db.
Parameters
----------
engine : Engine or str
An SQLAlchemy engine or path to a SQL database.
"""
DEFAULT_CHUNK_SIZE = SQLITE_MAX_VARIABLE_NUMBER
@preprocess(engine=coerce_string_to_eng(require_exists=False))
def __init__(self, engine):
self.engine = engine
def write(self,
equities=None,
futures=None,
exchanges=None,
root_symbols=None,
equity_supplementary_mappings=None,
chunk_size=DEFAULT_CHUNK_SIZE):
"""Write asset metadata to a sqlite database.
Parameters
----------
equities : pd.DataFrame, optional
The equity metadata. The columns for this dataframe are:
symbol : str
The ticker symbol for this equity.
asset_name : str
The full name for this asset.
start_date : datetime
The date when this asset was created.
end_date : datetime, optional
The last date we have trade data for this asset.
first_traded : datetime, optional
The first date we have trade data for this asset.
auto_close_date : datetime, optional
The date on which to close any positions in this asset.
exchange : str
The exchange where this asset is traded.
The index of this dataframe should contain the sids.
futures : pd.DataFrame, optional
The future contract metadata. The columns for this dataframe are:
symbol : str
The ticker symbol for this futures contract.
root_symbol : str
The root symbol, or the symbol with the expiration stripped
out.
asset_name : str
The full name for this asset.
start_date : datetime, optional
The date when this asset was created.
end_date : datetime, optional
The last date we have trade data for this asset.
first_traded : datetime, optional
The first date we have trade data for this asset.
exchange : str
The exchange where this asset is traded.
notice_date : datetime
The date when the owner of the contract may be forced
to take physical delivery of the contract's asset.
expiration_date : datetime
The date when the contract expires.
auto_close_date : datetime
The date when the broker will automatically close any
positions in this contract.
tick_size : float
The minimum price movement of the contract.
multiplier: float
The amount of the underlying asset represented by this
contract.
exchanges : pd.DataFrame, optional
The exchanges where assets can be traded. The columns of this
dataframe are:
exchange : str
The full name of the exchange.
canonical_name : str
The canonical name of the exchange.
country_code : str
The ISO 3166 alpha-2 country code of the exchange.
root_symbols : pd.DataFrame, optional
The root symbols for the futures contracts. The columns for this
dataframe are:
root_symbol : str
The root symbol name.
root_symbol_id : int
The unique id for this root symbol.
sector : string, optional
The sector of this root symbol.
description : string, optional
A short description of this root symbol.
exchange : str
The exchange where this root symbol is traded.
equity_supplementary_mappings : pd.DataFrame, optional
            Additional mappings from values of arbitrary type to assets.
chunk_size : int, optional
            The number of rows to write to the SQLite table at once.
            This defaults to the default number of bind params in sqlite.
            If you have compiled sqlite3 with more or fewer bind params you
            may want to pass that value here.
See Also
--------
zipline.assets.asset_finder
"""
if exchanges is None:
exchange_names = [
df['exchange']
for df in (equities, futures, root_symbols)
if df is not None
]
if exchange_names:
exchanges = pd.DataFrame({
'exchange': pd.concat(exchange_names).unique(),
})
with self.engine.begin() as conn:
            # The ``foreign_keys`` pragma would enforce foreign key
            # constraints on inserts, but it applies only to a given
            # connection (not the whole database) and is left disabled here.
            # conn.execute('PRAGMA foreign_keys = ON')
# Create SQL tables if they do not exist.
self.init_db(conn)
# Get the data to add to SQL.
data = self._load_data(
equities if equities is not None else pd.DataFrame(),
futures if futures is not None else pd.DataFrame(),
exchanges if exchanges is not None else pd.DataFrame(),
root_symbols if root_symbols is not None else pd.DataFrame(),
(
equity_supplementary_mappings
if equity_supplementary_mappings is not None
else pd.DataFrame()
),
)
# Write the data to SQL.
self._write_df_to_table(
exchanges_table,
data.exchanges,
conn,
chunk_size,
)
self._write_df_to_table(
futures_root_symbols,
data.root_symbols,
conn,
chunk_size,
)
self._write_df_to_table(
equity_supplementary_mappings_table,
data.equity_supplementary_mappings,
conn,
chunk_size,
idx=False,
)
self._write_assets(
'future',
data.futures,
conn,
chunk_size,
)
self._write_assets(
'equity',
data.equities,
conn,
chunk_size,
mapping_data=data.equities_mappings,
)
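    @staticmethod
    def _example_write_usage():
        # Illustrative sketch, not part of the public API: write a minimal
        # equities frame to an in-memory sqlite database. The asset data
        # here is made up.
        writer = AssetDBWriter(sa.create_engine('sqlite://'))
        writer.write(
            equities=pd.DataFrame(
                {
                    'symbol': ['AYY'],
                    'asset_name': ['Ayy Inc.'],
                    'exchange': ['TEST'],
                    'start_date': [pd.Timestamp('2014-01-01')],
                    'end_date': [pd.Timestamp('2014-12-31')],
                },
                index=[0],
            ),
        )
        return writer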
def _write_df_to_table(
self,
tbl,
df,
txn,
chunk_size,
idx=True,
idx_label=None,
):
df.to_sql(
tbl.name,
txn.connection,
index=idx,
index_label=(
idx_label
if idx_label is not None else
first(tbl.primary_key.columns).name
),
if_exists='append',
chunksize=chunk_size,
)
def _write_assets(self,
asset_type,
assets,
txn,
chunk_size,
mapping_data=None):
if asset_type == 'future':
tbl = futures_contracts_table
if mapping_data is not None:
raise TypeError('no mapping data expected for futures')
elif asset_type == 'equity':
tbl = equities_table
if mapping_data is None:
raise TypeError('mapping data required for equities')
# write the symbol mapping data.
self._write_df_to_table(
equity_symbol_mappings,
mapping_data,
txn,
chunk_size,
idx_label='sid',
)
else:
raise ValueError(
"asset_type must be in {'future', 'equity'}, got: %s" %
asset_type,
)
self._write_df_to_table(tbl, assets, txn, chunk_size)
pd.DataFrame({
asset_router.c.sid.name: assets.index.values,
asset_router.c.asset_type.name: asset_type,
}).to_sql(
asset_router.name,
txn.connection,
if_exists='append',
index=False,
chunksize=chunk_size
)
def _all_tables_present(self, txn):
"""
Checks if any tables are present in the current assets database.
Parameters
----------
txn : Transaction
The open transaction to check in.
Returns
-------
has_tables : bool
True if any tables are present, otherwise False.
"""
conn = txn.connect()
for table_name in asset_db_table_names:
if txn.dialect.has_table(conn, table_name):
return True
return False
def init_db(self, txn=None):
"""Connect to database and create tables.
Parameters
----------
txn : sa.engine.Connection, optional
The transaction to execute in. If this is not provided, a new
transaction will be started with the engine provided.
Returns
-------
metadata : sa.MetaData
The metadata that describes the new assets db.
"""
with ExitStack() as stack:
if txn is None:
txn = stack.enter_context(self.engine.begin())
tables_already_exist = self._all_tables_present(txn)
# Create the SQL tables if they do not already exist.
metadata.create_all(txn, checkfirst=True)
if tables_already_exist:
check_version_info(txn, version_info, ASSET_DB_VERSION)
else:
write_version_info(txn, version_info, ASSET_DB_VERSION)
def _normalize_equities(self, equities, exchanges):
# HACK: If 'company_name' is provided, map it to asset_name
if ('company_name' in equities.columns and
'asset_name' not in equities.columns):
equities['asset_name'] = equities['company_name']
# remap 'file_name' to 'symbol' if provided
if 'file_name' in equities.columns:
equities['symbol'] = equities['file_name']
equities_output = _generate_output_dataframe(
data_subset=equities,
defaults=_equities_defaults,
)
# Split symbols to company_symbols and share_class_symbols
tuple_series = equities_output['symbol'].apply(split_delimited_symbol)
split_symbols = pd.DataFrame(
tuple_series.tolist(),
columns=['company_symbol', 'share_class_symbol'],
index=tuple_series.index
)
equities_output = pd.concat((equities_output, split_symbols), axis=1)
# Upper-case all symbol data
for col in symbol_columns:
equities_output[col] = equities_output[col].str.upper()
# Convert date columns to UNIX Epoch integers (nanoseconds)
for col in ('start_date',
'end_date',
'first_traded',
'auto_close_date'):
equities_output[col] = _dt_to_epoch_ns(equities_output[col])
return _split_symbol_mappings(equities_output, exchanges)
def _normalize_futures(self, futures):
futures_output = _generate_output_dataframe(
data_subset=futures,
defaults=_futures_defaults,
)
for col in ('symbol', 'root_symbol'):
futures_output[col] = futures_output[col].str.upper()
for col in ('start_date',
'end_date',
'first_traded',
'notice_date',
'expiration_date',
'auto_close_date'):
futures_output[col] = _dt_to_epoch_ns(futures_output[col])
return futures_output
def _normalize_equity_supplementary_mappings(self, mappings):
mappings_output = _generate_output_dataframe(
data_subset=mappings,
defaults=_equity_supplementary_mappings_defaults,
)
for col in ('start_date', 'end_date'):
mappings_output[col] = _dt_to_epoch_ns(mappings_output[col])
return mappings_output
def _load_data(
self,
equities,
futures,
exchanges,
root_symbols,
equity_supplementary_mappings,
):
"""
        Returns a standard set of pandas.DataFrames bundled in an
        ``AssetData`` namedtuple: equities, equities_mappings, futures,
        exchanges, root_symbols, equity_supplementary_mappings.
"""
# Check whether identifier columns have been provided.
# If they have, set the index to this column.
        # If not, assume the index already contains the identifier
        # information.
for df, id_col in [(equities, 'sid'),
(futures, 'sid'),
(exchanges, 'exchange'),
(root_symbols, 'root_symbol')]:
if id_col in df.columns:
df.set_index(id_col, inplace=True)
futures_output = self._normalize_futures(futures)
equity_supplementary_mappings_output = (
self._normalize_equity_supplementary_mappings(
equity_supplementary_mappings,
)
)
exchanges_output = _generate_output_dataframe(
data_subset=exchanges,
defaults=_exchanges_defaults,
)
equities_output, equities_mappings = self._normalize_equities(
equities,
exchanges_output,
)
root_symbols_output = _generate_output_dataframe(
data_subset=root_symbols,
defaults=_root_symbols_defaults,
)
return AssetData(
equities=equities_output,
equities_mappings=equities_mappings,
futures=futures_output,
exchanges=exchanges_output,
root_symbols=root_symbols_output,
equity_supplementary_mappings=equity_supplementary_mappings_output,
) | zipline-live2-vk | /zipline-live2-vk-1.3.0.7.0.18.tar.gz/zipline-live2-vk-1.3.0.7.0.18/zipline/assets/asset_writer.py | asset_writer.py |
from alembic.migration import MigrationContext
from alembic.operations import Operations
import sqlalchemy as sa
from toolz.curried import do, operator
from zipline.assets.asset_writer import write_version_info
from zipline.utils.compat import wraps
from zipline.errors import AssetDBImpossibleDowngrade
from zipline.utils.preprocess import preprocess
from zipline.utils.sqlite_utils import coerce_string_to_eng
def alter_columns(op, name, *columns, **kwargs):
"""Alter columns from a table.
Parameters
----------
    op : Operations
        The alembic operations object to issue the DDL through.
    name : str
The name of the table.
*columns
The new columns to have.
selection_string : str, optional
The string to use in the selection. If not provided, it will select all
of the new columns from the old table.
Notes
-----
The columns are passed explicitly because this should only be used in a
downgrade where ``zipline.assets.asset_db_schema`` could change.
"""
selection_string = kwargs.pop('selection_string', None)
if kwargs:
raise TypeError(
'alter_columns received extra arguments: %r' % sorted(kwargs),
)
if selection_string is None:
selection_string = ', '.join(column.name for column in columns)
tmp_name = '_alter_columns_' + name
op.rename_table(name, tmp_name)
for column in columns:
# Clear any indices that already exist on this table, otherwise we will
# fail to create the table because the indices will already be present.
# When we create the table below, the indices that we want to preserve
# will just get recreated.
for table in name, tmp_name:
try:
op.drop_index('ix_%s_%s' % (table, column.name))
except sa.exc.OperationalError:
pass
op.create_table(name, *columns)
op.execute(
'insert into %s select %s from %s' % (
name,
selection_string,
tmp_name,
),
)
op.drop_table(tmp_name)
@preprocess(engine=coerce_string_to_eng(require_exists=True))
def downgrade(engine, desired_version):
"""Downgrades the assets db at the given engine to the desired version.
Parameters
----------
engine : Engine
An SQLAlchemy engine to the assets database.
desired_version : int
The desired resulting version for the assets database.
"""
# Check the version of the db at the engine
with engine.begin() as conn:
metadata = sa.MetaData(conn)
metadata.reflect()
version_info_table = metadata.tables['version_info']
starting_version = sa.select((version_info_table.c.version,)).scalar()
# Check for accidental upgrade
if starting_version < desired_version:
raise AssetDBImpossibleDowngrade(db_version=starting_version,
desired_version=desired_version)
# Check if the desired version is already the db version
if starting_version == desired_version:
# No downgrade needed
return
# Create alembic context
ctx = MigrationContext.configure(conn)
op = Operations(ctx)
# Integer keys of downgrades to run
# E.g.: [5, 4, 3, 2] would downgrade v6 to v2
downgrade_keys = range(desired_version, starting_version)[::-1]
# Disable foreign keys until all downgrades are complete
_pragma_foreign_keys(conn, False)
# Execute the downgrades in order
for downgrade_key in downgrade_keys:
_downgrade_methods[downgrade_key](op, conn, version_info_table)
# Re-enable foreign keys
_pragma_foreign_keys(conn, True)
def _pragma_foreign_keys(connection, on):
"""Sets the PRAGMA foreign_keys state of the SQLite database. Disabling
the pragma allows for batch modification of tables with foreign keys.
Parameters
----------
connection : Connection
A SQLAlchemy connection to the db
on : bool
If true, PRAGMA foreign_keys will be set to ON. Otherwise, the PRAGMA
foreign_keys will be set to OFF.
"""
connection.execute("PRAGMA foreign_keys=%s" % ("ON" if on else "OFF"))
# This dict contains references to downgrade methods that can be applied to an
# assets db. The resulting db's version is the key.
# e.g. The method at key '0' is the downgrade method from v1 to v0
_downgrade_methods = {}
def downgrades(src):
"""Decorator for marking that a method is a downgrade to a version to the
previous version.
Parameters
----------
src : int
The version this downgrades from.
Returns
-------
decorator : callable[(callable) -> callable]
The decorator to apply.
"""
def _(f):
destination = src - 1
@do(operator.setitem(_downgrade_methods, destination))
@wraps(f)
def wrapper(op, conn, version_info_table):
conn.execute(version_info_table.delete()) # clear the version
f(op)
write_version_info(conn, version_info_table, destination)
return wrapper
return _
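# Sketch of how the decorator above populates the registry: decorating with
# ``@downgrades(N)`` stores the wrapped method under key N - 1, the version
# that results from running it. The version number below is hypothetical,
# and calling this mutates the module-level registry; illustrative only.
def _example_downgrades_registry():
    @downgrades(99)
    def _downgrade_v99(op):
        pass
    assert 98 in _downgrade_methods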
@downgrades(1)
def _downgrade_v1(op):
"""
Downgrade assets db by removing the 'tick_size' column and renaming the
'multiplier' column.
"""
# Drop indices before batch
# This is to prevent index collision when creating the temp table
op.drop_index('ix_futures_contracts_root_symbol')
op.drop_index('ix_futures_contracts_symbol')
# Execute batch op to allow column modification in SQLite
with op.batch_alter_table('futures_contracts') as batch_op:
# Rename 'multiplier'
batch_op.alter_column(column_name='multiplier',
new_column_name='contract_multiplier')
# Delete 'tick_size'
batch_op.drop_column('tick_size')
# Recreate indices after batch
op.create_index('ix_futures_contracts_root_symbol',
table_name='futures_contracts',
columns=['root_symbol'])
op.create_index('ix_futures_contracts_symbol',
table_name='futures_contracts',
columns=['symbol'],
unique=True)
@downgrades(2)
def _downgrade_v2(op):
"""
Downgrade assets db by removing the 'auto_close_date' column.
"""
# Drop indices before batch
# This is to prevent index collision when creating the temp table
op.drop_index('ix_equities_fuzzy_symbol')
op.drop_index('ix_equities_company_symbol')
# Execute batch op to allow column modification in SQLite
with op.batch_alter_table('equities') as batch_op:
batch_op.drop_column('auto_close_date')
# Recreate indices after batch
op.create_index('ix_equities_fuzzy_symbol',
table_name='equities',
columns=['fuzzy_symbol'])
op.create_index('ix_equities_company_symbol',
table_name='equities',
columns=['company_symbol'])
@downgrades(3)
def _downgrade_v3(op):
"""
Downgrade assets db by adding a not null constraint on
``equities.first_traded``
"""
op.create_table(
'_new_equities',
sa.Column(
'sid',
sa.Integer,
unique=True,
nullable=False,
primary_key=True,
),
sa.Column('symbol', sa.Text),
sa.Column('company_symbol', sa.Text),
sa.Column('share_class_symbol', sa.Text),
sa.Column('fuzzy_symbol', sa.Text),
sa.Column('asset_name', sa.Text),
sa.Column('start_date', sa.Integer, default=0, nullable=False),
sa.Column('end_date', sa.Integer, nullable=False),
sa.Column('first_traded', sa.Integer, nullable=False),
sa.Column('auto_close_date', sa.Integer),
sa.Column('exchange', sa.Text),
)
op.execute(
"""
insert into _new_equities
select * from equities
where equities.first_traded is not null
""",
)
op.drop_table('equities')
op.rename_table('_new_equities', 'equities')
# we need to make sure the indices have the proper names after the rename
op.create_index(
'ix_equities_company_symbol',
'equities',
['company_symbol'],
)
op.create_index(
'ix_equities_fuzzy_symbol',
'equities',
['fuzzy_symbol'],
)
@downgrades(4)
def _downgrade_v4(op):
"""
Downgrades assets db by copying the `exchange_full` column to `exchange`,
then dropping the `exchange_full` column.
"""
op.drop_index('ix_equities_fuzzy_symbol')
op.drop_index('ix_equities_company_symbol')
op.execute("UPDATE equities SET exchange = exchange_full")
with op.batch_alter_table('equities') as batch_op:
batch_op.drop_column('exchange_full')
op.create_index('ix_equities_fuzzy_symbol',
table_name='equities',
columns=['fuzzy_symbol'])
op.create_index('ix_equities_company_symbol',
table_name='equities',
columns=['company_symbol'])
@downgrades(5)
def _downgrade_v5(op):
op.create_table(
'_new_equities',
sa.Column(
'sid',
sa.Integer,
unique=True,
nullable=False,
primary_key=True,
),
sa.Column('symbol', sa.Text),
sa.Column('company_symbol', sa.Text),
sa.Column('share_class_symbol', sa.Text),
sa.Column('fuzzy_symbol', sa.Text),
sa.Column('asset_name', sa.Text),
sa.Column('start_date', sa.Integer, default=0, nullable=False),
sa.Column('end_date', sa.Integer, nullable=False),
sa.Column('first_traded', sa.Integer),
sa.Column('auto_close_date', sa.Integer),
sa.Column('exchange', sa.Text),
sa.Column('exchange_full', sa.Text)
)
op.execute(
"""
insert into _new_equities
select
equities.sid as sid,
sym.symbol as symbol,
sym.company_symbol as company_symbol,
sym.share_class_symbol as share_class_symbol,
sym.company_symbol || sym.share_class_symbol as fuzzy_symbol,
equities.asset_name as asset_name,
equities.start_date as start_date,
equities.end_date as end_date,
equities.first_traded as first_traded,
equities.auto_close_date as auto_close_date,
equities.exchange as exchange,
equities.exchange_full as exchange_full
from
equities
inner join
-- Nested select here to take the most recently held ticker
-- for each sid. The group by with no aggregation function will
-- take the last element in the group, so we first order by
-- the end date ascending to ensure that the groupby takes
-- the last ticker.
(select
*
from
(select
*
from
equity_symbol_mappings
order by
equity_symbol_mappings.end_date asc)
group by
sid) sym
on
equities.sid == sym.sid
""",
)
op.drop_table('equity_symbol_mappings')
op.drop_table('equities')
op.rename_table('_new_equities', 'equities')
    # we need to make sure the indices have the proper names after the rename
op.create_index(
'ix_equities_company_symbol',
'equities',
['company_symbol'],
)
op.create_index(
'ix_equities_fuzzy_symbol',
'equities',
['fuzzy_symbol'],
)
@downgrades(6)
def _downgrade_v6(op):
op.drop_table('equity_supplementary_mappings')
@downgrades(7)
def _downgrade_v7(op):
tmp_name = '_new_equities'
op.create_table(
tmp_name,
sa.Column(
'sid',
sa.Integer,
unique=True,
nullable=False,
primary_key=True,
),
sa.Column('asset_name', sa.Text),
sa.Column('start_date', sa.Integer, default=0, nullable=False),
sa.Column('end_date', sa.Integer, nullable=False),
sa.Column('first_traded', sa.Integer),
sa.Column('auto_close_date', sa.Integer),
# remove foreign key to exchange
sa.Column('exchange', sa.Text),
# add back exchange full column
sa.Column('exchange_full', sa.Text),
)
op.execute(
"""
insert into
_new_equities
select
eq.sid,
eq.asset_name,
eq.start_date,
eq.end_date,
eq.first_traded,
eq.auto_close_date,
ex.canonical_name,
ex.exchange
from
equities eq
inner join
exchanges ex
on
eq.exchange == ex.exchange
where
ex.country_code in ('US', '??')
""",
)
op.drop_table('equities')
op.rename_table(tmp_name, 'equities')
# rebuild all tables without a foreign key to ``exchanges``
alter_columns(
op,
'futures_root_symbols',
sa.Column(
'root_symbol',
sa.Text,
unique=True,
nullable=False,
primary_key=True,
),
sa.Column('root_symbol_id', sa.Integer),
sa.Column('sector', sa.Text),
sa.Column('description', sa.Text),
sa.Column('exchange', sa.Text),
)
alter_columns(
op,
'futures_contracts',
sa.Column(
'sid',
sa.Integer,
unique=True,
nullable=False,
primary_key=True,
),
sa.Column('symbol', sa.Text, unique=True, index=True),
sa.Column('root_symbol', sa.Text, index=True),
sa.Column('asset_name', sa.Text),
sa.Column('start_date', sa.Integer, default=0, nullable=False),
sa.Column('end_date', sa.Integer, nullable=False),
sa.Column('first_traded', sa.Integer),
sa.Column('exchange', sa.Text),
sa.Column('notice_date', sa.Integer, nullable=False),
sa.Column('expiration_date', sa.Integer, nullable=False),
sa.Column('auto_close_date', sa.Integer, nullable=False),
sa.Column('multiplier', sa.Float),
sa.Column('tick_size', sa.Float),
)
# drop the ``country_code`` and ``canonical_name`` columns
alter_columns(
op,
'exchanges',
sa.Column(
'exchange',
sa.Text,
unique=True,
nullable=False,
primary_key=True,
),
sa.Column('timezone', sa.Text),
# Set the timezone to NULL because we don't know what it was before.
# Nothing in zipline reads the timezone so it doesn't matter.
selection_string="exchange, NULL",
)
op.rename_table('exchanges', 'futures_exchanges')
# add back the foreign keys that previously existed
alter_columns(
op,
'futures_root_symbols',
sa.Column(
'root_symbol',
sa.Text,
unique=True,
nullable=False,
primary_key=True,
),
sa.Column('root_symbol_id', sa.Integer),
sa.Column('sector', sa.Text),
sa.Column('description', sa.Text),
sa.Column(
'exchange',
sa.Text,
sa.ForeignKey('futures_exchanges.exchange'),
),
)
alter_columns(
op,
'futures_contracts',
sa.Column(
'sid',
sa.Integer,
unique=True,
nullable=False,
primary_key=True,
),
sa.Column('symbol', sa.Text, unique=True, index=True),
sa.Column(
'root_symbol',
sa.Text,
sa.ForeignKey('futures_root_symbols.root_symbol'),
index=True
),
sa.Column('asset_name', sa.Text),
sa.Column('start_date', sa.Integer, default=0, nullable=False),
sa.Column('end_date', sa.Integer, nullable=False),
sa.Column('first_traded', sa.Integer),
sa.Column(
'exchange',
sa.Text,
sa.ForeignKey('futures_exchanges.exchange'),
),
sa.Column('notice_date', sa.Integer, nullable=False),
sa.Column('expiration_date', sa.Integer, nullable=False),
sa.Column('auto_close_date', sa.Integer, nullable=False),
sa.Column('multiplier', sa.Float),
sa.Column('tick_size', sa.Float),
) | zipline-live2-vk | /zipline-live2-vk-1.3.0.7.0.18.tar.gz/zipline-live2-vk-1.3.0.7.0.18/zipline/assets/asset_db_migrations.py | asset_db_migrations.py |
from abc import ABCMeta, abstractmethod
from six import with_metaclass
# Number of days over which to compute rolls when finding the current contract
# for a volume-rolling contract chain. For more details on why this is needed,
# see `VolumeRollFinder.get_contract_center`.
ROLL_DAYS_FOR_CURRENT_CONTRACT = 90
class RollFinder(with_metaclass(ABCMeta, object)):
"""
Abstract base class for calculating when futures contracts are the active
contract.
"""
@abstractmethod
def _active_contract(self, oc, front, back, dt):
raise NotImplementedError
def _get_active_contract_at_offset(self, root_symbol, dt, offset):
"""
For the given root symbol, find the contract that is considered active
on a specific date at a specific offset.
"""
oc = self.asset_finder.get_ordered_contracts(root_symbol)
session = self.trading_calendar.minute_to_session_label(dt)
front = oc.contract_before_auto_close(session.value)
back = oc.contract_at_offset(front, 1, dt.value)
if back is None:
return front
primary = self._active_contract(oc, front, back, session)
return oc.contract_at_offset(primary, offset, session.value)
def get_contract_center(self, root_symbol, dt, offset):
"""
Parameters
----------
root_symbol : str
The root symbol for the contract chain.
dt : Timestamp
The datetime for which to retrieve the current contract.
offset : int
The offset from the primary contract.
0 is the primary, 1 is the secondary, etc.
Returns
-------
Future
The active future contract at the given dt.
"""
return self._get_active_contract_at_offset(root_symbol, dt, offset)
def get_rolls(self, root_symbol, start, end, offset):
"""
        Get the rolls, i.e. the sessions at which to hop from contract to
        contract in the chain.
Parameters
----------
root_symbol : str
The root symbol for which to calculate rolls.
start : Timestamp
Start of the date range.
end : Timestamp
End of the date range.
offset : int
Offset from the primary.
Returns
-------
        rolls : list[tuple(sid, roll_date)]
            A list of rolls, where the first value is the first active `sid`,
and the `roll_date` on which to hop to the next contract.
The last pair in the chain has a value of `None` since the roll
is after the range.
"""
oc = self.asset_finder.get_ordered_contracts(root_symbol)
front = self._get_active_contract_at_offset(root_symbol, end, 0)
back = oc.contract_at_offset(front, 1, end.value)
if back is not None:
end_session = self.trading_calendar.minute_to_session_label(end)
first = self._active_contract(oc, front, back, end_session)
else:
first = front
first_contract = oc.sid_to_contract[first]
rolls = [((first_contract >> offset).contract.sid, None)]
tc = self.trading_calendar
sessions = tc.sessions_in_range(tc.minute_to_session_label(start),
tc.minute_to_session_label(end))
freq = sessions.freq
if first == front:
# This is a bit tricky to grasp. Once we have the active contract
# on the given end date, we want to start walking backwards towards
# the start date and checking for rolls. For this, we treat the
# previous month's contract as the 'first' contract, and the
# contract we just found to be active as the 'back'. As we walk
# towards the start date, if the 'back' is no longer active, we add
# that date as a roll.
curr = first_contract << 1
else:
curr = first_contract << 2
session = sessions[-1]
while session > start and curr is not None:
front = curr.contract.sid
back = rolls[0][0]
prev_c = curr.prev
while session > start:
prev = session - freq
if prev_c is not None:
if prev < prev_c.contract.auto_close_date:
break
if back != self._active_contract(oc, front, back, prev):
# TODO: Instead of listing each contract with its roll date
# as tuples, create a series which maps every day to the
# active contract on that day.
rolls.insert(0, ((curr >> offset).contract.sid, session))
break
session = prev
curr = curr.prev
if curr is not None:
session = min(session, curr.contract.auto_close_date + freq)
return rolls
class CalendarRollFinder(RollFinder):
"""
The CalendarRollFinder calculates contract rolls based purely on the
contract's auto close date.
"""
def __init__(self, trading_calendar, asset_finder):
self.trading_calendar = trading_calendar
self.asset_finder = asset_finder
def _active_contract(self, oc, front, back, dt):
contract = oc.sid_to_contract[front].contract
auto_close_date = contract.auto_close_date
auto_closed = dt >= auto_close_date
return back if auto_closed else front
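# Hypothetical usage sketch: ``finder`` is assumed to be a constructed
# CalendarRollFinder (or VolumeRollFinder) and ``dt`` a tz-aware session
# timestamp. Asks for the active contract at ``dt`` and the roll schedule
# over roughly the following month of sessions.
def _example_roll_queries(finder, root_symbol, dt):
    primary = finder.get_contract_center(root_symbol, dt, offset=0)
    end = dt + finder.trading_calendar.day * 21
    rolls = finder.get_rolls(root_symbol, dt, end, offset=0)
    return primary, rolls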
class VolumeRollFinder(RollFinder):
"""
    The VolumeRollFinder calculates contract rolls based on when
    volume activity transfers from one contract to another.
"""
GRACE_DAYS = 7
THRESHOLD = 0.10
def __init__(self, trading_calendar, asset_finder, session_reader):
self.trading_calendar = trading_calendar
self.asset_finder = asset_finder
self.session_reader = session_reader
def _active_contract(self, oc, front, back, dt):
"""
Return the active contract based on the previous trading day's volume.
In the rare case that a double volume switch occurs we treat the first
switch as the roll. Take the following case for example:
| +++++ _____
| + __ / <--- 'G'
| ++/++\++++/++
| _/ \__/ +
| / +
| ____/ + <--- 'F'
|_________|__|___|________
a b c <--- Switches
We should treat 'a' as the roll date rather than 'c' because from the
perspective of 'a', if a switch happens and we are pretty close to the
auto-close date, we would probably assume it is time to roll. This
means that for every date after 'a', `data.current(cf, 'contract')`
should return the 'G' contract.
"""
front_contract = oc.sid_to_contract[front].contract
back_contract = oc.sid_to_contract[back].contract
tc = self.trading_calendar
trading_day = tc.day
prev = dt - trading_day
get_value = self.session_reader.get_value
# If the front contract is past its auto close date it cannot be the
# active contract, so return the back contract. Similarly, if the back
# contract has not even started yet, just return the front contract.
# The reason for using 'prev' to see if the contracts are alive instead
# of using 'dt' is because we need to get each contract's volume on the
# previous day, so we need to make sure that each contract exists on
# 'prev' in order to call 'get_value' below.
if dt > min(front_contract.auto_close_date, front_contract.end_date):
return back
elif front_contract.start_date > prev:
return back
elif dt > min(back_contract.auto_close_date, back_contract.end_date):
return front
elif back_contract.start_date > prev:
return front
front_vol = get_value(front, prev, 'volume')
back_vol = get_value(back, prev, 'volume')
if back_vol > front_vol:
return back
gap_start = max(
back_contract.start_date,
front_contract.auto_close_date - (trading_day * self.GRACE_DAYS),
)
gap_end = prev - trading_day
if dt < gap_start:
return front
# If we are within `self.GRACE_DAYS` of the front contract's auto close
# date, and a volume flip happened during that period, return the back
# contract as the active one.
sessions = tc.sessions_in_range(
tc.minute_to_session_label(gap_start),
tc.minute_to_session_label(gap_end),
)
for session in sessions:
front_vol = get_value(front, session, 'volume')
back_vol = get_value(back, session, 'volume')
if back_vol > front_vol:
return back
return front
def get_contract_center(self, root_symbol, dt, offset):
"""
Parameters
----------
root_symbol : str
The root symbol for the contract chain.
dt : Timestamp
The datetime for which to retrieve the current contract.
offset : int
The offset from the primary contract.
0 is the primary, 1 is the secondary, etc.
Returns
-------
Future
The active future contract at the given dt.
"""
# When determining the center contract on a specific day using volume
# rolls, simply picking the contract with the highest volume could
# cause flip-flopping between active contracts each day if the front
# and back contracts are close in volume. Therefore, information about
# the surrounding rolls is required. The `get_rolls` logic prevents
# contracts from being considered active once they have rolled, so
# incorporating that logic here prevents flip-flopping.
day = self.trading_calendar.day
end_date = min(
dt + (ROLL_DAYS_FOR_CURRENT_CONTRACT * day),
self.session_reader.last_available_dt,
)
rolls = self.get_rolls(
root_symbol=root_symbol, start=dt, end=end_date, offset=offset,
)
sid, acd = rolls[0]
return self.asset_finder.retrieve_asset(sid) | zipline-live2-vk | /zipline-live2-vk-1.3.0.7.0.18.tar.gz/zipline-live2-vk-1.3.0.7.0.18/zipline/assets/roll_finder.py | roll_finder.py |
import sys
import logbook
import numpy as np
from zipline.finance import commission, slippage
zipline_logging = logbook.NestedSetup([
logbook.NullHandler(),
logbook.StreamHandler(sys.stdout, level=logbook.INFO),
logbook.StreamHandler(sys.stderr, level=logbook.ERROR),
])
zipline_logging.push_application()
STOCKS = ['AMD', 'CERN', 'COST', 'DELL', 'GPS', 'INTC', 'MMM']
# On-Line Portfolio Moving Average Reversion
# More info can be found in the corresponding paper:
# http://icml.cc/2012/papers/168.pdf
def initialize(algo, eps=1, window_length=5):
algo.stocks = STOCKS
algo.sids = [algo.symbol(symbol) for symbol in algo.stocks]
algo.m = len(algo.stocks)
algo.price = {}
algo.b_t = np.ones(algo.m) / algo.m
algo.last_desired_port = np.ones(algo.m) / algo.m
algo.eps = eps
algo.init = True
algo.days = 0
algo.window_length = window_length
algo.set_commission(commission.PerShare(cost=0, min_trade_cost=1.0))
algo.set_slippage(slippage.VolumeShareSlippage())
def handle_data(algo, data):
algo.days += 1
if algo.days < algo.window_length:
return
if algo.init:
rebalance_portfolio(algo, data, algo.b_t)
algo.init = False
return
m = algo.m
x_tilde = np.zeros(m)
# find relative moving average price for each asset
mavgs = data.history(algo.sids, 'price', algo.window_length, '1d').mean()
for i, sid in enumerate(algo.sids):
price = data.current(sid, "price")
# Relative mean deviation
x_tilde[i] = mavgs[sid] / price
###########################
# Inside of OLMAR (algo 2)
x_bar = x_tilde.mean()
# market relative deviation
mark_rel_dev = x_tilde - x_bar
# Expected return with current portfolio
exp_return = np.dot(algo.b_t, x_tilde)
weight = algo.eps - exp_return
variability = (np.linalg.norm(mark_rel_dev)) ** 2
# test for divide-by-zero case
if variability == 0.0:
step_size = 0
else:
step_size = max(0, weight / variability)
b = algo.b_t + step_size * mark_rel_dev
b_norm = simplex_projection(b)
np.testing.assert_almost_equal(b_norm.sum(), 1)
rebalance_portfolio(algo, data, b_norm)
# update portfolio
algo.b_t = b_norm
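# Illustrative sketch (not called by the algorithm): the OLMAR update from
# ``handle_data`` in isolation, with made-up price relatives and eps = 1.
def _example_olmar_update():
    b_t = np.ones(3) / 3
    x_tilde = np.array([0.9, 0.95, 1.0])  # hypothetical mavg / price ratios
    x_bar = x_tilde.mean()
    mark_rel_dev = x_tilde - x_bar
    exp_return = np.dot(b_t, x_tilde)
    # eps minus the expected return, clipped at zero, scaled by variability.
    step_size = max(0, (1 - exp_return) / np.linalg.norm(mark_rel_dev) ** 2)
    b_norm = simplex_projection(b_t + step_size * mark_rel_dev)
    assert abs(b_norm.sum() - 1) < 1e-9  # weights stay on the simplex
    return b_norm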
def rebalance_portfolio(algo, data, desired_port):
# rebalance portfolio
desired_amount = np.zeros_like(desired_port)
current_amount = np.zeros_like(desired_port)
prices = np.zeros_like(desired_port)
if algo.init:
positions_value = algo.portfolio.starting_cash
else:
positions_value = algo.portfolio.positions_value + \
algo.portfolio.cash
for i, sid in enumerate(algo.sids):
current_amount[i] = algo.portfolio.positions[sid].amount
prices[i] = data.current(sid, "price")
desired_amount = np.round(desired_port * positions_value / prices)
algo.last_desired_port = desired_port
diff_amount = desired_amount - current_amount
for i, sid in enumerate(algo.sids):
algo.order(sid, diff_amount[i])
def simplex_projection(v, b=1):
"""Projection vectors to the simplex domain
Implemented according to the paper: Efficient projections onto the
l1-ball for learning in high dimensions, John Duchi, et al. ICML 2008.
Implementation Time: 2011 June 17 by Bin@libin AT pmail.ntu.edu.sg
    Optimization Problem: min_{w}\| w - v \|_{2}^{2}
    s.t. \sum_{i=1}^{m} w_{i} = b, w_{i} \geq 0
    Input: A vector v \in R^{m}, and a scalar b > 0 (default=1)
Output: Projection vector w
:Example:
>>> proj = simplex_projection([.4 ,.3, -.4, .5])
>>> proj # doctest: +NORMALIZE_WHITESPACE
array([ 0.33333333, 0.23333333, 0. , 0.43333333])
>>> print(proj.sum())
1.0
Original matlab implementation: John Duchi ([email protected])
Python-port: Copyright 2013 by Thomas Wiecki ([email protected]).
"""
v = np.asarray(v)
p = len(v)
# Sort v into u in descending order
v = (v > 0) * v
u = np.sort(v)[::-1]
sv = np.cumsum(u)
rho = np.where(u > (sv - b) / np.arange(1, p + 1))[0][-1]
theta = np.max([0, (sv[rho] - b) / (rho + 1)])
w = (v - theta)
w[w < 0] = 0
return w
# Note: this function can be removed if running
# this algorithm on quantopian.com
def analyze(context=None, results=None):
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111)
results.portfolio_value.plot(ax=ax)
ax.set_ylabel('Portfolio value (USD)')
plt.show()
def _test_args():
"""Extra arguments to use when zipline's automated tests run this example.
"""
import pandas as pd
return {
'start': pd.Timestamp('2004', tz='utc'),
'end': pd.Timestamp('2008', tz='utc'),
} | zipline-live2-vk | /zipline-live2-vk-1.3.0.7.0.18.tar.gz/zipline-live2-vk-1.3.0.7.0.18/zipline/examples/olmar.py | olmar.py |
from zipline.api import order, record, symbol
from zipline.finance import commission, slippage
# Import exponential moving average from talib wrapper
from talib import EMA
def initialize(context):
context.asset = symbol('AAPL')
# To keep track of whether we invested in the stock or not
context.invested = False
# Explicitly set the commission/slippage to the "old" value until we can
# rebuild example data.
# github.com/quantopian/zipline/blob/master/tests/resources/
# rebuild_example_data#L105
context.set_commission(commission.PerShare(cost=.0075, min_trade_cost=1.0))
context.set_slippage(slippage.VolumeShareSlippage())
def handle_data(context, data):
trailing_window = data.history(context.asset, 'price', 40, '1d')
if trailing_window.isnull().values.any():
return
short_ema = EMA(trailing_window.values, timeperiod=20)
long_ema = EMA(trailing_window.values, timeperiod=40)
buy = False
sell = False
if (short_ema[-1] > long_ema[-1]) and not context.invested:
order(context.asset, 100)
context.invested = True
buy = True
elif (short_ema[-1] < long_ema[-1]) and context.invested:
order(context.asset, -100)
context.invested = False
sell = True
record(AAPL=data.current(context.asset, "price"),
short_ema=short_ema[-1],
long_ema=long_ema[-1],
buy=buy,
sell=sell)
# Note: this function can be removed if running
# this algorithm on quantopian.com
def analyze(context=None, results=None):
import matplotlib.pyplot as plt
import logbook
logbook.StderrHandler().push_application()
log = logbook.Logger('Algorithm')
fig = plt.figure()
ax1 = fig.add_subplot(211)
results.portfolio_value.plot(ax=ax1)
ax1.set_ylabel('Portfolio value (USD)')
ax2 = fig.add_subplot(212)
ax2.set_ylabel('Price (USD)')
# If data has been record()ed, then plot it.
# Otherwise, log the fact that no data has been recorded.
if 'AAPL' in results and 'short_ema' in results and 'long_ema' in results:
results[['AAPL', 'short_ema', 'long_ema']].plot(ax=ax2)
ax2.plot(
results.index[results.buy],
results.loc[results.buy, 'long_ema'],
'^',
markersize=10,
color='m',
)
ax2.plot(
results.index[results.sell],
results.loc[results.sell, 'short_ema'],
'v',
markersize=10,
color='k',
)
plt.legend(loc=0)
plt.gcf().set_size_inches(18, 8)
else:
msg = 'AAPL, short_ema and long_ema data not captured using record().'
ax2.annotate(msg, xy=(0.1, 0.5))
log.info(msg)
plt.show()
def _test_args():
"""Extra arguments to use when zipline's automated tests run this example.
"""
import pandas as pd
return {
'start': pd.Timestamp('2014-01-01', tz='utc'),
'end': pd.Timestamp('2014-11-01', tz='utc'),
} | zipline-live2-vk | /zipline-live2-vk-1.3.0.7.0.18.tar.gz/zipline-live2-vk-1.3.0.7.0.18/zipline/examples/dual_ema_talib.py | dual_ema_talib.py |
from six import viewkeys
from zipline.api import (
attach_pipeline,
date_rules,
order_target_percent,
pipeline_output,
record,
schedule_function,
)
from zipline.finance import commission, slippage
from zipline.pipeline import Pipeline
from zipline.pipeline.factors import RSI
def make_pipeline():
rsi = RSI()
return Pipeline(
columns={
'longs': rsi.top(3),
'shorts': rsi.bottom(3),
},
)
def rebalance(context, data):
# Pipeline data will be a dataframe with boolean columns named 'longs' and
# 'shorts'.
pipeline_data = context.pipeline_data
all_assets = pipeline_data.index
longs = all_assets[pipeline_data.longs]
shorts = all_assets[pipeline_data.shorts]
record(universe_size=len(all_assets))
# Build a 2x-leveraged, equal-weight, long-short portfolio.
one_third = 1.0 / 3.0
for asset in longs:
order_target_percent(asset, one_third)
for asset in shorts:
order_target_percent(asset, -one_third)
# Remove any assets that should no longer be in our portfolio.
portfolio_assets = longs | shorts
positions = context.portfolio.positions
for asset in viewkeys(positions) - set(portfolio_assets):
# This will fail if the asset was removed from our portfolio because it
# was delisted.
if data.can_trade(asset):
order_target_percent(asset, 0)
def initialize(context):
attach_pipeline(make_pipeline(), 'my_pipeline')
# Rebalance each day. In daily mode, this is equivalent to putting
# `rebalance` in our handle_data, but in minute mode, it's equivalent to
# running at the start of the day each day.
schedule_function(rebalance, date_rules.every_day())
# Explicitly set the commission/slippage to the "old" value until we can
# rebuild example data.
# github.com/quantopian/zipline/blob/master/tests/resources/
# rebuild_example_data#L105
context.set_commission(commission.PerShare(cost=.0075, min_trade_cost=1.0))
context.set_slippage(slippage.VolumeShareSlippage())
def before_trading_start(context, data):
context.pipeline_data = pipeline_output('my_pipeline')
def _test_args():
"""
Extra arguments to use when zipline's automated tests run this example.
Notes for testers:
Gross leverage should be roughly 2.0 on every day except the first.
Net leverage should be roughly 2.0 on every day except the first.
Longs Count should always be 3 after the first day.
Shorts Count should be 3 after the first day, except on 2013-10-30, when it
dips to 2 for a day because DELL is delisted.
"""
import pandas as pd
return {
# We run through october of 2013 because DELL is in the test data and
# it went private on 2013-10-29.
'start': pd.Timestamp('2013-10-07', tz='utc'),
'end': pd.Timestamp('2013-11-30', tz='utc'),
'capital_base': 100000,
} | zipline-live2-vk | /zipline-live2-vk-1.3.0.7.0.18.tar.gz/zipline-live2-vk-1.3.0.7.0.18/zipline/examples/momentum_pipeline.py | momentum_pipeline.py |
from importlib import import_module
import os
from toolz import merge
from trading_calendars import register_calendar, get_calendar
from zipline import run_algorithm
# These are used by test_examples.py to discover the examples to run.
EXAMPLE_MODULES = {}
for f in os.listdir(os.path.dirname(__file__)):
if not f.endswith('.py') or f == '__init__.py':
continue
modname = f[:-len('.py')]
mod = import_module('.' + modname, package=__name__)
EXAMPLE_MODULES[modname] = mod
globals()[modname] = mod
# Remove noise from loop variables.
del f, modname, mod
# Columns that we expect to be reliably deterministic.
# Doesn't include fields that have UUIDs.
_cols_to_check = [
'algo_volatility',
'algorithm_period_return',
'alpha',
'benchmark_period_return',
'benchmark_volatility',
'beta',
'capital_used',
'ending_cash',
'ending_exposure',
'ending_value',
'excess_return',
'gross_leverage',
'long_exposure',
'long_value',
'longs_count',
'max_drawdown',
'max_leverage',
'net_leverage',
'period_close',
'period_label',
'period_open',
'pnl',
'portfolio_value',
'positions',
'returns',
'short_exposure',
'short_value',
'shorts_count',
'sortino',
'starting_cash',
'starting_exposure',
'starting_value',
'trading_days',
'treasury_period_return',
]
def run_example(example_name, environ):
"""
Run an example module from zipline.examples.
"""
mod = EXAMPLE_MODULES[example_name]
register_calendar("YAHOO", get_calendar("NYSE"), force=True)
return run_algorithm(
initialize=getattr(mod, 'initialize', None),
handle_data=getattr(mod, 'handle_data', None),
before_trading_start=getattr(mod, 'before_trading_start', None),
analyze=getattr(mod, 'analyze', None),
bundle='test',
environ=environ,
# Provide a default capital base, but allow the test to override.
**merge({'capital_base': 1e7}, mod._test_args())
) | zipline-live2-vk | /zipline-live2-vk-1.3.0.7.0.18.tar.gz/zipline-live2-vk-1.3.0.7.0.18/zipline/examples/__init__.py | __init__.py |
from zipline.api import order_target, record, symbol
from zipline.finance import commission, slippage
def initialize(context):
context.sym = symbol('GOOG')
context.i = 0
# Explicitly set the commission/slippage to the "old" value until we can
# rebuild example data.
# github.com/quantopian/zipline/blob/master/tests/resources/
# rebuild_example_data#L105
context.set_commission(commission.PerShare(cost=.0075, min_trade_cost=1.0))
context.set_slippage(slippage.VolumeShareSlippage())
def handle_data(context, data):
# Skip first 300 days to get full windows
context.i += 1
if context.i < 300:
return
# Compute averages
# history() has to be called with the same params
# from above and returns a pandas dataframe.
short_mavg = data.history(context.sym, 'price', 100, '1d').mean()
long_mavg = data.history(context.sym, 'price', 300, '1d').mean()
# Trading logic
if short_mavg > long_mavg:
# order_target orders as many shares as needed to
# achieve the desired number of shares.
order_target(context.sym, 100)
elif short_mavg < long_mavg:
order_target(context.sym, 0)
# Save values for later inspection
record(GOOG=data.current(context.sym, "price"),
short_mavg=short_mavg,
long_mavg=long_mavg)
# Note: this function can be removed if running
# this algorithm on quantopian.com
def analyze(context=None, results=None):
import matplotlib.pyplot as plt
import logbook
logbook.StderrHandler().push_application()
log = logbook.Logger('Algorithm')
fig = plt.figure()
ax1 = fig.add_subplot(211)
results.portfolio_value.plot(ax=ax1)
ax1.set_ylabel('Portfolio value (USD)')
ax2 = fig.add_subplot(212)
ax2.set_ylabel('Price (USD)')
# If data has been record()ed, then plot it.
# Otherwise, log the fact that no data has been recorded.
if ('GOOG' in results and 'short_mavg' in results and
'long_mavg' in results):
results['GOOG'].plot(ax=ax2)
results[['short_mavg', 'long_mavg']].plot(ax=ax2)
trans = results.ix[[t != [] for t in results.transactions]]
buys = trans.ix[[t[0]['amount'] > 0 for t in
trans.transactions]]
sells = trans.ix[
[t[0]['amount'] < 0 for t in trans.transactions]]
ax2.plot(buys.index, results.short_mavg.ix[buys.index],
'^', markersize=10, color='m')
ax2.plot(sells.index, results.short_mavg.ix[sells.index],
'v', markersize=10, color='k')
plt.legend(loc=0)
else:
msg = 'GOOG, short_mavg & long_mavg data not captured using record().'
ax2.annotate(msg, xy=(0.1, 0.5))
log.info(msg)
plt.show()
def _test_args():
"""Extra arguments to use when zipline's automated tests run this example.
"""
import pandas as pd
return {
'start': pd.Timestamp('2011', tz='utc'),
'end': pd.Timestamp('2013', tz='utc'),
} | zipline-live2-vk | /zipline-live2-vk-1.3.0.7.0.18.tar.gz/zipline-live2-vk-1.3.0.7.0.18/zipline/examples/dual_moving_average.py | dual_moving_average.py |
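# A hedged sketch of running this example directly through zipline's
# run_algorithm entry point instead of the test harness; the bundle
# name and the date range below are assumptions, not requirements of
# the algorithm itself:
#
#   >>> import pandas as pd
#   >>> from zipline import run_algorithm
#   >>> perf = run_algorithm(
#   ...     start=pd.Timestamp('2011', tz='utc'),
#   ...     end=pd.Timestamp('2013', tz='utc'),
#   ...     initialize=initialize,
#   ...     handle_data=handle_data,
#   ...     analyze=analyze,
#   ...     capital_base=1e7,
#   ...     bundle='quantopian-quandl',
#   ... )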
import pandas as pd
from zipline.errors import (
InvalidBenchmarkAsset,
BenchmarkAssetNotAvailableTooEarly,
BenchmarkAssetNotAvailableTooLate
)
class BenchmarkSource(object):
def __init__(self,
benchmark_asset,
trading_calendar,
sessions,
data_portal,
emission_rate="daily",
benchmark_returns=None):
self.benchmark_asset = benchmark_asset
self.sessions = sessions
self.emission_rate = emission_rate
self.data_portal = data_portal
if len(sessions) == 0:
self._precalculated_series = pd.Series()
elif benchmark_asset is not None:
self._validate_benchmark(benchmark_asset)
(self._precalculated_series,
self._daily_returns) = self._initialize_precalculated_series(
benchmark_asset,
trading_calendar,
sessions,
data_portal
)
elif benchmark_returns is not None:
self._daily_returns = daily_series = benchmark_returns.reindex(
sessions,
).fillna(0)
if self.emission_rate == "minute":
# we need to take the env's benchmark returns, which are daily,
# and resample them to minute
minutes = trading_calendar.minutes_for_sessions_in_range(
sessions[0],
sessions[-1]
)
minute_series = daily_series.reindex(
index=minutes,
method="ffill"
)
self._precalculated_series = minute_series
else:
self._precalculated_series = daily_series
else:
raise Exception("Must provide either benchmark_asset or "
"benchmark_returns.")
def get_value(self, dt):
"""Look up the returns for a given dt.
Parameters
----------
dt : datetime
The label to look up.
Returns
-------
returns : float
The returns at the given dt or session.
See Also
--------
:class:`zipline.sources.benchmark_source.BenchmarkSource.daily_returns`
.. warning::
This method expects minute inputs if ``emission_rate == 'minute'``
            and session labels when ``emission_rate == 'daily'``.
"""
return self._precalculated_series.loc[dt]
def get_range(self, start_dt, end_dt):
"""Look up the returns for a given period.
Parameters
----------
start_dt : datetime
The inclusive start label.
end_dt : datetime
The inclusive end label.
Returns
-------
returns : pd.Series
The series of returns.
See Also
--------
:class:`zipline.sources.benchmark_source.BenchmarkSource.daily_returns`
.. warning::
This method expects minute inputs if ``emission_rate == 'minute'``
            and session labels when ``emission_rate == 'daily'``.
"""
return self._precalculated_series.loc[start_dt:end_dt]
def daily_returns(self, start, end=None):
"""Returns the daily returns for the given period.
Parameters
----------
start : datetime
The inclusive starting session label.
end : datetime, optional
The inclusive ending session label. If not provided, treat
``start`` as a scalar key.
Returns
-------
returns : pd.Series or float
The returns in the given period. The index will be the trading
calendar in the range [start, end]. If just ``start`` is provided,
return the scalar value on that day.
"""
if end is None:
return self._daily_returns[start]
return self._daily_returns[start:end]
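    # Illustrative usage, assuming ``source`` is a constructed
    # BenchmarkSource and the labels below are valid session labels on
    # its trading calendar (all three names are hypothetical):
    #
    #   >>> source.daily_returns(start_label)             # scalar float
    #   >>> source.daily_returns(start_label, end_label)  # pd.Series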
def _validate_benchmark(self, benchmark_asset):
# check if this security has a stock dividend. if so, raise an
# error suggesting that the user pick a different asset to use
# as benchmark.
stock_dividends = \
self.data_portal.get_stock_dividends(self.benchmark_asset,
self.sessions)
if len(stock_dividends) > 0:
raise InvalidBenchmarkAsset(
sid=str(self.benchmark_asset),
dt=stock_dividends[0]["ex_date"]
)
if benchmark_asset.start_date > self.sessions[0]:
# the asset started trading after the first simulation day
raise BenchmarkAssetNotAvailableTooEarly(
sid=str(self.benchmark_asset),
dt=self.sessions[0],
start_dt=benchmark_asset.start_date
)
if benchmark_asset.end_date < self.sessions[-1]:
# the asset stopped trading before the last simulation day
raise BenchmarkAssetNotAvailableTooLate(
sid=str(self.benchmark_asset),
dt=self.sessions[-1],
end_dt=benchmark_asset.end_date
)
@staticmethod
def _compute_daily_returns(g):
return (g[-1] - g[0]) / g[0]
@classmethod
def downsample_minute_return_series(cls,
trading_calendar,
minutely_returns):
sessions = trading_calendar.minute_index_to_session_labels(
minutely_returns.index,
)
closes = trading_calendar.session_closes_in_range(
sessions[0],
sessions[-1],
)
daily_returns = minutely_returns[closes].pct_change()
daily_returns.index = closes.index
return daily_returns.iloc[1:]
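    # Note that in the minute emission path this method receives a
    # minutely *price* series despite the parameter name; selecting the
    # close-minute prices and taking session-over-session percent
    # changes yields daily returns. A rough sketch (names illustrative):
    #
    #   >>> closes = trading_calendar.session_closes_in_range(s0, s1)
    #   >>> minute_prices[closes].pct_change().iloc[1:]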
def _initialize_precalculated_series(self,
asset,
trading_calendar,
trading_days,
data_portal):
"""
Internal method that pre-calculates the benchmark return series for
use in the simulation.
Parameters
----------
asset: Asset to use
trading_calendar: TradingCalendar
trading_days: pd.DateTimeIndex
data_portal: DataPortal
Notes
-----
If the benchmark asset started trading after the simulation start,
or finished trading before the simulation end, exceptions are raised.
        If the benchmark asset started trading the same day as the simulation
        start, the open price of that first session is used in place of the
        previous close, so that a return can still be computed for day one.
We use history to get an adjusted price history for each day's close,
as of the look-back date (the last day of the simulation). Prices are
fully adjusted for dividends, splits, and mergers.
Returns
-------
returns : pd.Series
indexed by trading day, whose values represent the %
change from close to close.
daily_returns : pd.Series
the partial daily returns for each minute
"""
if self.emission_rate == "minute":
minutes = trading_calendar.minutes_for_sessions_in_range(
self.sessions[0], self.sessions[-1]
)
benchmark_series = data_portal.get_history_window(
[asset],
minutes[-1],
bar_count=len(minutes) + 1,
frequency="1m",
field="price",
data_frequency=self.emission_rate,
ffill=True
)[asset]
return (
benchmark_series.pct_change()[1:],
self.downsample_minute_return_series(
trading_calendar,
benchmark_series,
),
)
start_date = asset.start_date
if start_date < trading_days[0]:
# get the window of close prices for benchmark_asset from the
# last trading day of the simulation, going up to one day
# before the simulation start day (so that we can get the %
# change on day 1)
benchmark_series = data_portal.get_history_window(
[asset],
trading_days[-1],
bar_count=len(trading_days) + 1,
frequency="1d",
field="price",
data_frequency=self.emission_rate,
ffill=True
)[asset]
returns = benchmark_series.pct_change()[1:]
return returns, returns
elif start_date == trading_days[0]:
            # Attempt to handle the case where stock data starts on the
            # first day; in this case, use the open-to-close return.
benchmark_series = data_portal.get_history_window(
[asset],
trading_days[-1],
bar_count=len(trading_days),
frequency="1d",
field="price",
data_frequency=self.emission_rate,
ffill=True
)[asset]
# get a minute history window of the first day
first_open = data_portal.get_spot_value(
asset,
'open',
trading_days[0],
'daily',
)
first_close = data_portal.get_spot_value(
asset,
'close',
trading_days[0],
'daily',
)
first_day_return = (first_close - first_open) / first_open
returns = benchmark_series.pct_change()[:]
returns[0] = first_day_return
return returns, returns
else:
raise ValueError(
'cannot set benchmark to asset that does not exist during'
' the simulation period (asset start date=%r)' % start_date
) | zipline-live2-vk | /zipline-live2-vk-1.3.0.7.0.18.tar.gz/zipline-live2-vk-1.3.0.7.0.18/zipline/sources/benchmark_source.py | benchmark_source.py |
from abc import ABCMeta, abstractmethod
from collections import namedtuple
import hashlib
from textwrap import dedent
import warnings
from logbook import Logger
import numpy
import pandas as pd
from pandas import read_csv
import pytz
import requests
from six import StringIO, iteritems, with_metaclass
from zipline.errors import (
MultipleSymbolsFound,
SymbolNotFound,
ZiplineError
)
from zipline.protocol import (
DATASOURCE_TYPE,
Event
)
from zipline.assets import Equity
logger = Logger('Requests Source Logger')
def roll_dts_to_midnight(dts, trading_day):
if len(dts) == 0:
return dts
return pd.DatetimeIndex(
(dts.tz_convert('US/Eastern') - pd.Timedelta(hours=16)).date,
tz='UTC',
) + trading_day
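# A sketch of the rolling logic above: 2016-01-19 18:00 UTC is 13:00 in
# US/Eastern (before the close), so subtracting 16 hours yields the
# prior calendar date, and adding ``trading_day`` (a business-day
# offset) lands back on the 2016-01-19 00:00 UTC session label; a
# post-close timestamp such as 22:00 UTC would instead roll forward to
# 2016-01-20, e.g.:
#
#   >>> dts = pd.DatetimeIndex(['2016-01-19 18:00'], tz='UTC')
#   >>> roll_dts_to_midnight(dts, trading_day)  # some business-day offset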
class FetcherEvent(Event):
pass
class FetcherCSVRedirectError(ZiplineError):
msg = dedent(
"""\
Attempt to fetch_csv from a redirected url. {url}
must be changed to {new_url}
"""
)
def __init__(self, *args, **kwargs):
self.url = kwargs["url"]
self.new_url = kwargs["new_url"]
self.extra = kwargs["extra"]
super(FetcherCSVRedirectError, self).__init__(*args, **kwargs)
# The following optional arguments are supported for
# requests backed data sources.
# see http://docs.python-requests.org/en/latest/api/#main-interface
# for a full list.
ALLOWED_REQUESTS_KWARGS = {
'params',
'headers',
'auth',
'cert'
}
# The following optional arguments are supported for pandas' read_csv
# function, and may be passed as kwargs to the datasource below.
# see http://pandas.pydata.org/
# pandas-docs/stable/generated/pandas.io.parsers.read_csv.html
ALLOWED_READ_CSV_KWARGS = {
'sep',
'dialect',
'doublequote',
'escapechar',
'quotechar',
'quoting',
'skipinitialspace',
'lineterminator',
'header',
'index_col',
'names',
'prefix',
'skiprows',
'skipfooter',
'skip_footer',
'na_values',
'true_values',
'false_values',
'delimiter',
'converters',
'dtype',
'delim_whitespace',
'as_recarray',
'na_filter',
'compact_ints',
'use_unsigned',
'buffer_lines',
'warn_bad_lines',
'error_bad_lines',
'keep_default_na',
'thousands',
'comment',
'decimal',
'keep_date_col',
'nrows',
'chunksize',
'encoding',
'usecols'
}
SHARED_REQUESTS_KWARGS = {
'stream': True,
'allow_redirects': False,
}
def mask_requests_args(url, validating=False, params_checker=None, **kwargs):
requests_kwargs = {key: val for (key, val) in iteritems(kwargs)
if key in ALLOWED_REQUESTS_KWARGS}
if params_checker is not None:
url, s_params = params_checker(url)
if s_params:
if 'params' in requests_kwargs:
requests_kwargs['params'].update(s_params)
else:
requests_kwargs['params'] = s_params
    # Give the connection 30 seconds (only 1 second when merely
    # validating the URL). This timeout does not apply to the download
    # of the response body.
    # (Note that Quandl links can take >10 seconds to return their
    # first byte on occasion)
requests_kwargs['timeout'] = 1.0 if validating else 30.0
requests_kwargs.update(SHARED_REQUESTS_KWARGS)
request_pair = namedtuple("RequestPair", ("requests_kwargs", "url"))
return request_pair(requests_kwargs, url)
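# Example of the kwarg filtering above: the extra ``foo`` kwarg is
# dropped because it is not in ALLOWED_REQUESTS_KWARGS, ``headers`` is
# kept, and the shared stream/redirect settings plus the timeout are
# merged in:
#
#   >>> kwargs, url = mask_requests_args(
#   ...     'https://example.com/data.csv',
#   ...     headers={'User-Agent': 'zipline'},
#   ...     foo='ignored',
#   ... )
#   >>> sorted(kwargs)
#   ['allow_redirects', 'headers', 'stream', 'timeout']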
class PandasCSV(with_metaclass(ABCMeta, object)):
def __init__(self,
pre_func,
post_func,
asset_finder,
trading_day,
start_date,
end_date,
date_column,
date_format,
timezone,
symbol,
mask,
symbol_column,
data_frequency,
**kwargs):
self.start_date = start_date
self.end_date = end_date
self.date_column = date_column
self.date_format = date_format
self.timezone = timezone
self.mask = mask
self.symbol_column = symbol_column or "symbol"
self.data_frequency = data_frequency
invalid_kwargs = set(kwargs) - ALLOWED_READ_CSV_KWARGS
if invalid_kwargs:
raise TypeError(
"Unexpected keyword arguments: %s" % invalid_kwargs,
)
self.pandas_kwargs = self.mask_pandas_args(kwargs)
self.symbol = symbol
self.finder = asset_finder
self.trading_day = trading_day
self.pre_func = pre_func
self.post_func = post_func
@property
def fields(self):
return self.df.columns.tolist()
def get_hash(self):
return self.namestring
@abstractmethod
def fetch_data(self):
return
@staticmethod
def parse_date_str_series(format_str, tz, date_str_series, data_frequency,
trading_day):
"""
Efficient parsing for a 1d Pandas/numpy object containing string
representations of dates.
Note: pd.to_datetime is significantly faster when no format string is
passed, and in pandas 0.12.0 the %p strptime directive is not correctly
handled if a format string is explicitly passed, but AM/PM is handled
properly if format=None.
Moreover, we were previously ignoring this parameter unintentionally
because we were incorrectly passing it as a positional. For all these
reasons, we ignore the format_str parameter when parsing datetimes.
"""
# Explicitly ignoring this parameter. See note above.
if format_str is not None:
logger.warn(
"The 'format_str' parameter to fetch_csv is deprecated. "
"Ignoring and defaulting to pandas default date parsing."
)
format_str = None
tz_str = str(tz)
if tz_str == pytz.utc.zone:
parsed = pd.to_datetime(
date_str_series.values,
format=format_str,
utc=True,
errors='coerce',
)
else:
parsed = pd.to_datetime(
date_str_series.values,
format=format_str,
errors='coerce',
).tz_localize(tz_str).tz_convert('UTC')
if data_frequency == 'daily':
parsed = roll_dts_to_midnight(parsed, trading_day)
return parsed
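    # Sketch of the parsing behavior above for a non-UTC timezone; the
    # result is localized to US/Eastern and then converted to UTC, and
    # no format string is honored (values illustrative; trading_day is
    # unused for 'minute' data, so None is passed here):
    #
    #   >>> s = pd.Series(['2016-01-19 09:30 AM'])
    #   >>> PandasCSV.parse_date_str_series(
    #   ...     None, pytz.timezone('US/Eastern'), s, 'minute', None)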
def mask_pandas_args(self, kwargs):
pandas_kwargs = {key: val for (key, val) in iteritems(kwargs)
if key in ALLOWED_READ_CSV_KWARGS}
if 'usecols' in pandas_kwargs:
usecols = pandas_kwargs['usecols']
if usecols and self.date_column not in usecols:
# make a new list so we don't modify user's,
# and to ensure it is mutable
with_date = list(usecols)
with_date.append(self.date_column)
pandas_kwargs['usecols'] = with_date
# No strings in the 'symbol' column should be interpreted as NaNs
pandas_kwargs.setdefault('keep_default_na', False)
pandas_kwargs.setdefault('na_values', {'symbol': []})
return pandas_kwargs
def _lookup_unconflicted_symbol(self, symbol):
"""
Attempt to find a unique asset whose symbol is the given string.
If multiple assets have held the given symbol, return a 0.
If no asset has held the given symbol, return a NaN.
"""
try:
uppered = symbol.upper()
except AttributeError:
# The mapping fails because symbol was a non-string
return numpy.nan
try:
return self.finder.lookup_symbol(uppered, as_of_date=None)
except MultipleSymbolsFound:
# Fill conflicted entries with zeros to mark that they need to be
# resolved by date.
return 0
except SymbolNotFound:
# Fill not found entries with nans.
return numpy.nan
def load_df(self):
df = self.fetch_data()
if self.pre_func:
df = self.pre_func(df)
        # Batch-convert the user-specified date column into timestamps.
df['dt'] = self.parse_date_str_series(
self.date_format,
self.timezone,
df[self.date_column],
self.data_frequency,
self.trading_day,
).values
# ignore rows whose dates we couldn't parse
df = df[df['dt'].notnull()]
if self.symbol is not None:
df['sid'] = self.symbol
elif self.finder:
df.sort_values(by=self.symbol_column, inplace=True)
# Pop the 'sid' column off of the DataFrame, just in case the user
# has assigned it, and throw a warning
try:
df.pop('sid')
warnings.warn(
"Assignment of the 'sid' column of a DataFrame is "
"not supported by Fetcher. The 'sid' column has been "
"overwritten.",
category=UserWarning,
stacklevel=2,
)
except KeyError:
# There was no 'sid' column, so no warning is necessary
pass
# Fill entries for any symbols that don't require a date to
# uniquely identify. Entries for which multiple securities exist
# are replaced with zeroes, while entries for which no asset
# exists are replaced with NaNs.
unique_symbols = df[self.symbol_column].unique()
sid_series = pd.Series(
data=map(self._lookup_unconflicted_symbol, unique_symbols),
index=unique_symbols,
name='sid',
)
df = df.join(sid_series, on=self.symbol_column)
# Fill any zero entries left in our sid column by doing a lookup
# using both symbol and the row date.
conflict_rows = df[df['sid'] == 0]
for row_idx, row in conflict_rows.iterrows():
try:
asset = self.finder.lookup_symbol(
row[self.symbol_column],
# Replacing tzinfo here is necessary because of the
# timezone metadata bug described below.
row['dt'].replace(tzinfo=pytz.utc),
# It's possible that no asset comes back here if our
# lookup date is from before any asset held the
# requested symbol. Mark such cases as NaN so that
# they get dropped in the next step.
) or numpy.nan
except SymbolNotFound:
asset = numpy.nan
# Assign the resolved asset to the cell
df.ix[row_idx, 'sid'] = asset
# Filter out rows containing symbols that we failed to find.
length_before_drop = len(df)
df = df[df['sid'].notnull()]
no_sid_count = length_before_drop - len(df)
if no_sid_count:
                logger.warn(
                    "Dropped {} rows from fetched csv.".format(no_sid_count),
                    extra={'syslog': True},
                )
else:
df['sid'] = df['symbol']
# Dates are localized to UTC when they come out of
# parse_date_str_series, but we need to re-localize them here because
# of a bug that wasn't fixed until
# https://github.com/pydata/pandas/pull/7092.
# We should be able to remove the call to tz_localize once we're on
# pandas 0.14.0
# We don't set 'dt' as the index until here because the Symbol parsing
# operations above depend on having a unique index for the dataframe,
# and the 'dt' column can contain multiple dates for the same entry.
        # drop_duplicates returns a new DataFrame rather than mutating in
        # place, so the result must be reassigned for it to take effect.
        df = df.drop_duplicates(["sid", "dt"])
df.set_index(['dt'], inplace=True)
df = df.tz_localize('UTC')
df.sort_index(inplace=True)
cols_to_drop = [self.date_column]
if self.symbol is None:
cols_to_drop.append(self.symbol_column)
df = df[df.columns.drop(cols_to_drop)]
if self.post_func:
df = self.post_func(df)
return df
def __iter__(self):
asset_cache = {}
for dt, series in self.df.iterrows():
if dt < self.start_date:
continue
if dt > self.end_date:
return
event = FetcherEvent()
# when dt column is converted to be the dataframe's index
# the dt column is dropped. So, we need to manually copy
# dt into the event.
event.dt = dt
for k, v in series.iteritems():
# convert numpy integer types to
# int. This assumes we are on a 64bit
# platform that will not lose information
# by casting.
# TODO: this is only necessary on the
# amazon qexec instances. would be good
# to figure out how to use the numpy dtypes
# without this check and casting.
if isinstance(v, numpy.integer):
v = int(v)
setattr(event, k, v)
# If it has start_date, then it's already an Asset
# object from asset_for_symbol, and we don't have to
# transform it any further. Checking for start_date is
# faster than isinstance.
if event.sid in asset_cache:
event.sid = asset_cache[event.sid]
elif hasattr(event.sid, 'start_date'):
# Clone for user algo code, if we haven't already.
asset_cache[event.sid] = event.sid
elif self.finder and isinstance(event.sid, int):
asset = self.finder.retrieve_asset(event.sid,
default_none=True)
if asset:
# Clone for user algo code.
event.sid = asset_cache[asset] = asset
elif self.mask:
# When masking drop all non-mappable values.
continue
elif self.symbol is None:
# If the event's sid property is an int we coerce
# it into an Equity.
event.sid = asset_cache[event.sid] = Equity(event.sid)
event.type = DATASOURCE_TYPE.CUSTOM
event.source_id = self.namestring
yield event
class PandasRequestsCSV(PandasCSV):
# maximum 100 megs to prevent DDoS
MAX_DOCUMENT_SIZE = (1024 * 1024) * 100
# maximum number of bytes to read in at a time
CONTENT_CHUNK_SIZE = 4096
def __init__(self,
url,
pre_func,
post_func,
asset_finder,
trading_day,
start_date,
end_date,
date_column,
date_format,
timezone,
symbol,
mask,
symbol_column,
data_frequency,
special_params_checker=None,
**kwargs):
# Peel off extra requests kwargs, forwarding the remaining kwargs to
# the superclass.
# Also returns possible https updated url if sent to http quandl ds
# If url hasn't changed, will just return the original.
self._requests_kwargs, self.url =\
mask_requests_args(url,
params_checker=special_params_checker,
**kwargs)
remaining_kwargs = {
k: v for k, v in iteritems(kwargs)
if k not in self.requests_kwargs
}
self.namestring = type(self).__name__
super(PandasRequestsCSV, self).__init__(
pre_func,
post_func,
asset_finder,
trading_day,
start_date,
end_date,
date_column,
date_format,
timezone,
symbol,
mask,
symbol_column,
data_frequency,
**remaining_kwargs
)
self.fetch_size = None
self.fetch_hash = None
self.df = self.load_df()
self.special_params_checker = special_params_checker
@property
def requests_kwargs(self):
return self._requests_kwargs
def fetch_url(self, url):
info = "checking {url} with {params}"
logger.info(info.format(url=url, params=self.requests_kwargs))
# setting decode_unicode=True sometimes results in a
# UnicodeEncodeError exception, so instead we'll use
# pandas logic for decoding content
try:
response = requests.get(url, **self.requests_kwargs)
except requests.exceptions.ConnectionError:
raise Exception('Could not connect to %s' % url)
if not response.ok:
raise Exception('Problem reaching %s' % url)
elif response.is_redirect:
            # On the off chance we don't catch a redirect URL
# in validation, this will catch it.
new_url = response.headers['location']
raise FetcherCSVRedirectError(
url=url,
new_url=new_url,
extra={
'old_url': url,
'new_url': new_url
}
)
content_length = 0
logger.info('{} connection established in {:.1f} seconds'.format(
url, response.elapsed.total_seconds()))
# use the decode_unicode flag to ensure that the output of this is
# a string, and not bytes.
for chunk in response.iter_content(self.CONTENT_CHUNK_SIZE,
decode_unicode=True):
if content_length > self.MAX_DOCUMENT_SIZE:
raise Exception('Document size too big.')
if chunk:
content_length += len(chunk)
yield chunk
return
def fetch_data(self):
# create a data frame directly from the full text of
# the response from the returned file-descriptor.
data = self.fetch_url(self.url)
fd = StringIO()
if isinstance(data, str):
fd.write(data)
else:
for chunk in data:
fd.write(chunk)
self.fetch_size = fd.tell()
fd.seek(0)
try:
# see if pandas can parse csv data
frames = read_csv(fd, **self.pandas_kwargs)
frames_hash = hashlib.md5(str(fd.getvalue()).encode('utf-8'))
self.fetch_hash = frames_hash.hexdigest()
except pd.parser.CParserError:
# could not parse the data, raise exception
raise Exception('Error parsing remote CSV data.')
finally:
fd.close()
return frames | zipline-live2-vk | /zipline-live2-vk-1.3.0.7.0.18.tar.gz/zipline-live2-vk-1.3.0.7.0.18/zipline/sources/requests_csv.py | requests_csv.py |
from abc import (
ABCMeta,
abstractmethod,
abstractproperty,
)
from numpy import concatenate
from lru import LRU
from pandas import isnull
from toolz import sliding_window
from six import with_metaclass
from zipline.assets import Equity, Future
from zipline.assets.continuous_futures import ContinuousFuture
from zipline.lib._int64window import AdjustedArrayWindow as Int64Window
from zipline.lib._float64window import AdjustedArrayWindow as Float64Window
from zipline.lib.adjustment import Float64Multiply, Float64Add
from zipline.utils.cache import ExpiringCache
from zipline.utils.math_utils import number_of_decimal_places
from zipline.utils.memoize import lazyval
from zipline.utils.numpy_utils import float64_dtype
from zipline.utils.pandas_utils import find_in_sorted_index, normalize_date
# Default number of decimal places used for rounding asset prices.
DEFAULT_ASSET_PRICE_DECIMALS = 3
class HistoryCompatibleUSEquityAdjustmentReader(object):
def __init__(self, adjustment_reader):
self._adjustments_reader = adjustment_reader
def load_adjustments(self, columns, dts, assets):
"""
Returns
-------
adjustments : list[dict[int -> Adjustment]]
A list, where each element corresponds to the `columns`, of
mappings from index to adjustment objects to apply at that index.
"""
out = [None] * len(columns)
for i, column in enumerate(columns):
adjs = {}
for asset in assets:
adjs.update(self._get_adjustments_in_range(
asset, dts, column))
out[i] = adjs
return out
def _get_adjustments_in_range(self, asset, dts, field):
"""
Get the Float64Multiply objects to pass to an AdjustedArrayWindow.
        For use by AdjustedArrayWindow in the loader, which looks back
        from the current simulation time to a window of data, the
        dictionary is structured with:
- the key into the dictionary for adjustments is the location of the
day from which the window is being viewed.
- the start of all multiply objects is always 0 (in each window all
adjustments are overlapping)
- the end of the multiply object is the location before the calendar
location of the adjustment action, making all days before the event
adjusted.
Parameters
----------
asset : Asset
            The asset for which to get adjustments.
dts : iterable of datetime64-like
The dts for which adjustment data is needed.
field : str
OHLCV field for which to get the adjustments.
Returns
-------
out : dict[loc -> Float64Multiply]
The adjustments as a dict of loc -> Float64Multiply
"""
sid = int(asset)
start = normalize_date(dts[0])
end = normalize_date(dts[-1])
adjs = {}
if field != 'volume':
mergers = self._adjustments_reader.get_adjustments_for_sid(
'mergers', sid)
for m in mergers:
dt = m[0]
if start < dt <= end:
end_loc = dts.searchsorted(dt)
adj_loc = end_loc
mult = Float64Multiply(0,
end_loc - 1,
0,
0,
m[1])
try:
adjs[adj_loc].append(mult)
except KeyError:
adjs[adj_loc] = [mult]
divs = self._adjustments_reader.get_adjustments_for_sid(
'dividends', sid)
for d in divs:
dt = d[0]
if start < dt <= end:
end_loc = dts.searchsorted(dt)
adj_loc = end_loc
mult = Float64Multiply(0,
end_loc - 1,
0,
0,
d[1])
try:
adjs[adj_loc].append(mult)
except KeyError:
adjs[adj_loc] = [mult]
splits = self._adjustments_reader.get_adjustments_for_sid(
'splits', sid)
for s in splits:
dt = s[0]
if start < dt <= end:
if field == 'volume':
ratio = 1.0 / s[1]
else:
ratio = s[1]
end_loc = dts.searchsorted(dt)
adj_loc = end_loc
mult = Float64Multiply(0,
end_loc - 1,
0,
0,
ratio)
try:
adjs[adj_loc].append(mult)
except KeyError:
adjs[adj_loc] = [mult]
return adjs
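    # The structure produced above, sketched for a single 2:1 split
    # dated at dts[3] (ratio 0.5, numbers illustrative): searchsorted
    # gives end_loc == 3, so every window whose viewpoint is at or past
    # the event applies one multiplier over rows 0..2:
    #
    #   {3: [Float64Multiply(0, 2, 0, 0, 0.5)]}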
class ContinuousFutureAdjustmentReader(object):
"""
    Calculates adjustments for continuous futures, based on the
    close and open of the contracts on either side of each roll.
"""
def __init__(self,
trading_calendar,
asset_finder,
bar_reader,
roll_finders,
frequency):
self._trading_calendar = trading_calendar
self._asset_finder = asset_finder
self._bar_reader = bar_reader
self._roll_finders = roll_finders
self._frequency = frequency
def load_adjustments(self, columns, dts, assets):
"""
Returns
-------
adjustments : list[dict[int -> Adjustment]]
A list, where each element corresponds to the `columns`, of
mappings from index to adjustment objects to apply at that index.
"""
out = [None] * len(columns)
for i, column in enumerate(columns):
adjs = {}
for asset in assets:
adjs.update(self._get_adjustments_in_range(
asset, dts, column))
out[i] = adjs
return out
def _make_adjustment(self,
adjustment_type,
front_close,
back_close,
end_loc):
adj_base = back_close - front_close
if adjustment_type == 'mul':
adj_value = 1.0 + adj_base / front_close
adj_class = Float64Multiply
elif adjustment_type == 'add':
adj_value = adj_base
adj_class = Float64Add
return adj_class(0,
end_loc,
0,
0,
adj_value)
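    # Worked example of the 'mul' branch above: with front_close=10.0
    # and back_close=10.5, adj_base is 0.5 and the multiplier applied to
    # all rows up through end_loc is 1.0 + 0.5 / 10.0 == 1.05, scaling
    # history onto the back contract's price level.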
def _get_adjustments_in_range(self, cf, dts, field):
if field == 'volume' or field == 'sid':
return {}
if cf.adjustment is None:
return {}
rf = self._roll_finders[cf.roll_style]
partitions = []
rolls = rf.get_rolls(cf.root_symbol, dts[0], dts[-1],
cf.offset)
tc = self._trading_calendar
adjs = {}
for front, back in sliding_window(2, rolls):
front_sid, roll_dt = front
back_sid = back[0]
dt = tc.previous_session_label(roll_dt)
if self._frequency == 'minute':
dt = tc.open_and_close_for_session(dt)[1]
roll_dt = tc.open_and_close_for_session(roll_dt)[0]
partitions.append((front_sid,
back_sid,
dt,
roll_dt))
for partition in partitions:
front_sid, back_sid, dt, roll_dt = partition
last_front_dt = self._bar_reader.get_last_traded_dt(
self._asset_finder.retrieve_asset(front_sid), dt)
last_back_dt = self._bar_reader.get_last_traded_dt(
self._asset_finder.retrieve_asset(back_sid), dt)
if isnull(last_front_dt) or isnull(last_back_dt):
continue
front_close = self._bar_reader.get_value(
front_sid, last_front_dt, 'close')
back_close = self._bar_reader.get_value(
back_sid, last_back_dt, 'close')
adj_loc = dts.searchsorted(roll_dt)
end_loc = adj_loc - 1
adj = self._make_adjustment(cf.adjustment,
front_close,
back_close,
end_loc)
try:
adjs[adj_loc].append(adj)
except KeyError:
adjs[adj_loc] = [adj]
return adjs
class SlidingWindow(object):
"""
Wrapper around an AdjustedArrayWindow which supports monotonically
increasing (by datetime) requests for a sized window of data.
Parameters
----------
window : AdjustedArrayWindow
Window of pricing data with prefetched values beyond the current
simulation dt.
cal_start : int
Index in the overall calendar at which the window starts.
"""
def __init__(self, window, size, cal_start, offset):
self.window = window
self.cal_start = cal_start
self.current = next(window)
self.offset = offset
self.most_recent_ix = self.cal_start + size
def get(self, end_ix):
"""
Returns
-------
out : A np.ndarray of the equity pricing up to end_ix after adjustments
and rounding have been applied.
"""
if self.most_recent_ix == end_ix:
return self.current
target = end_ix - self.cal_start - self.offset + 1
self.current = self.window.seek(target)
self.most_recent_ix = end_ix
return self.current
class HistoryLoader(with_metaclass(ABCMeta)):
"""
Loader for sliding history windows, with support for adjustments.
Parameters
----------
trading_calendar: TradingCalendar
Contains the grouping logic needed to assign minutes to periods.
reader : DailyBarReader, MinuteBarReader
Reader for pricing bars.
adjustment_reader : SQLiteAdjustmentReader
Reader for adjustment data.
"""
FIELDS = ('open', 'high', 'low', 'close', 'volume', 'sid')
def __init__(self, trading_calendar, reader, equity_adjustment_reader,
asset_finder,
roll_finders=None,
sid_cache_size=1000,
prefetch_length=0):
self.trading_calendar = trading_calendar
self._asset_finder = asset_finder
self._reader = reader
self._adjustment_readers = {}
if equity_adjustment_reader is not None:
self._adjustment_readers[Equity] = \
HistoryCompatibleUSEquityAdjustmentReader(
equity_adjustment_reader)
if roll_finders:
self._adjustment_readers[ContinuousFuture] =\
ContinuousFutureAdjustmentReader(trading_calendar,
asset_finder,
reader,
roll_finders,
self._frequency)
self._window_blocks = {
field: ExpiringCache(LRU(sid_cache_size))
for field in self.FIELDS
}
self._prefetch_length = prefetch_length
@abstractproperty
def _frequency(self):
pass
@abstractproperty
def _calendar(self):
pass
@abstractmethod
def _array(self, start, end, assets, field):
pass
def _decimal_places_for_asset(self, asset, reference_date):
if isinstance(asset, Future) and asset.tick_size:
return number_of_decimal_places(asset.tick_size)
elif isinstance(asset, ContinuousFuture):
# Tick size should be the same for all contracts of a continuous
            # future, so arbitrarily get the contract with the next upcoming
# close date.
oc = self._asset_finder.get_ordered_contracts(asset.root_symbol)
contract_sid = oc.contract_before_auto_close(reference_date.value)
if contract_sid is not None:
contract = self._asset_finder.retrieve_asset(contract_sid)
if contract.tick_size:
return number_of_decimal_places(contract.tick_size)
return DEFAULT_ASSET_PRICE_DECIMALS
def _ensure_sliding_windows(self, assets, dts, field,
is_perspective_after):
"""
        Ensure that there is a sliding data window for each asset that can
provide data for the given parameters.
If the corresponding window for the (assets, len(dts), field) does not
exist, then create a new one.
If a corresponding window does exist for (assets, len(dts), field), but
can not provide data for the current dts range, then create a new
one and replace the expired window.
Parameters
----------
assets : iterable of Assets
The assets in the window
dts : iterable of datetime64-like
The datetimes for which to fetch data.
            Makes an assumption that all dts are present and contiguous
            in the calendar.
field : str
The OHLCV field for which to retrieve data.
is_perspective_after : bool
see: `PricingHistoryLoader.history`
Returns
-------
out : list of Float64Window with sufficient data so that each asset's
window can provide `get` for the index corresponding with the last
value in `dts`
"""
end = dts[-1]
size = len(dts)
asset_windows = {}
needed_assets = []
cal = self._calendar
assets = self._asset_finder.retrieve_all(assets)
end_ix = find_in_sorted_index(cal, end)
for asset in assets:
try:
window = self._window_blocks[field].get(
(asset, size, is_perspective_after), end)
except KeyError:
needed_assets.append(asset)
else:
if end_ix < window.most_recent_ix:
# Window needs reset. Requested end index occurs before the
# end index from the previous history call for this window.
# Grab new window instead of rewinding adjustments.
needed_assets.append(asset)
else:
asset_windows[asset] = window
if needed_assets:
offset = 0
start_ix = find_in_sorted_index(cal, dts[0])
prefetch_end_ix = min(end_ix + self._prefetch_length, len(cal) - 1)
prefetch_end = cal[prefetch_end_ix]
prefetch_dts = cal[start_ix:prefetch_end_ix + 1]
if is_perspective_after:
adj_end_ix = min(prefetch_end_ix + 1, len(cal) - 1)
adj_dts = cal[start_ix:adj_end_ix + 1]
else:
adj_dts = prefetch_dts
prefetch_len = len(prefetch_dts)
array = self._array(prefetch_dts, needed_assets, field)
if field == 'sid':
window_type = Int64Window
else:
window_type = Float64Window
view_kwargs = {}
if field == 'volume':
array = array.astype(float64_dtype)
for i, asset in enumerate(needed_assets):
                try:
                    adj_reader = self._adjustment_readers[type(asset)]
                except KeyError:
                    adj_reader = None
if adj_reader is not None:
adjs = adj_reader.load_adjustments(
[field], adj_dts, [asset])[0]
else:
adjs = {}
window = window_type(
array[:, i].reshape(prefetch_len, 1),
view_kwargs,
adjs,
offset,
size,
int(is_perspective_after),
self._decimal_places_for_asset(asset, dts[-1]),
)
sliding_window = SlidingWindow(window, size, start_ix, offset)
asset_windows[asset] = sliding_window
self._window_blocks[field].set(
(asset, size, is_perspective_after),
sliding_window,
prefetch_end)
return [asset_windows[asset] for asset in assets]
def history(self, assets, dts, field, is_perspective_after):
"""
A window of pricing data with adjustments applied assuming that the
end of the window is the day before the current simulation time.
Parameters
----------
assets : iterable of Assets
The assets in the window.
dts : iterable of datetime64-like
The datetimes for which to fetch data.
            Makes an assumption that all dts are present and contiguous
            in the calendar.
field : str
The OHLCV field for which to retrieve data.
is_perspective_after : bool
True, if the window is being viewed immediately after the last dt
in the sliding window.
False, if the window is viewed on the last dt.
This flag is used for handling the case where the last dt in the
requested window immediately precedes a corporate action, e.g.:
- is_perspective_after is True
When the viewpoint is after the last dt in the window, as when a
daily history window is accessed from a simulation that uses a
minute data frequency, the history call to this loader will not
include the current simulation dt. At that point in time, the raw
data for the last day in the window will require adjustment, so the
most recent adjustment with respect to the simulation time is
applied to the last dt in the requested window.
An example equity which has a 0.5 split ratio dated for 05-27,
with the dts for a history call of 5 bars with a '1d' frequency at
05-27 9:31. Simulation frequency is 'minute'.
(In this case this function is called with 4 daily dts, and the
calling function is responsible for stitching back on the
'current' dt)
| | | | | last dt | <-- viewer is here |
| | 05-23 | 05-24 | 05-25 | 05-26 | 05-27 9:31 |
| raw | 10.10 | 10.20 | 10.30 | 10.40 | |
            | adj | 5.05 | 5.10 | 5.15 | 5.20 | |
The adjustment is applied to the last dt, 05-26, and all previous
dts.
- is_perspective_after is False, daily
When the viewpoint is the same point in time as the last dt in the
window, as when a daily history window is accessed from a
simulation that uses a daily data frequency, the history call will
include the current dt. At that point in time, the raw data for the
last day in the window will be post-adjustment, so no adjustment
is applied to the last dt.
An example equity which has a 0.5 split ratio dated for 05-27,
with the dts for a history call of 5 bars with a '1d' frequency at
05-27 0:00. Simulation frequency is 'daily'.
| | | | | | <-- viewer is here |
| | | | | | last dt |
| | 05-23 | 05-24 | 05-25 | 05-26 | 05-27 |
| raw | 10.10 | 10.20 | 10.30 | 10.40 | 5.25 |
| adj | 5.05 | 5.10 | 5.15 | 5.20 | 5.25 |
Adjustments are applied 05-23 through 05-26 but not to the last dt,
05-27
Returns
-------
out : np.ndarray with shape(len(days between start, end), len(assets))
"""
block = self._ensure_sliding_windows(assets,
dts,
field,
is_perspective_after)
end_ix = self._calendar.searchsorted(dts[-1])
return concatenate(
[window.get(end_ix) for window in block],
axis=1,
)
class DailyHistoryLoader(HistoryLoader):
@property
def _frequency(self):
return 'daily'
@property
def _calendar(self):
return self._reader.sessions
def _array(self, dts, assets, field):
return self._reader.load_raw_arrays(
[field],
dts[0],
dts[-1],
assets,
)[0]
class MinuteHistoryLoader(HistoryLoader):
@property
def _frequency(self):
return 'minute'
@lazyval
def _calendar(self):
mm = self.trading_calendar.all_minutes
start = mm.searchsorted(self._reader.first_trading_day)
end = mm.searchsorted(self._reader.last_available_dt, side='right')
return mm[start:end]
def _array(self, dts, assets, field):
return self._reader.load_raw_arrays(
[field],
dts[0],
dts[-1],
assets,
)[0] | zipline-live2-vk | /zipline-live2-vk-1.3.0.7.0.18.tar.gz/zipline-live2-vk-1.3.0.7.0.18/zipline/data/history_loader.py | history_loader.py |
from abc import ABCMeta, abstractmethod
from numpy import (
full,
nan,
int64,
zeros
)
from six import iteritems, with_metaclass
from zipline.utils.memoize import lazyval
class AssetDispatchBarReader(with_metaclass(ABCMeta)):
"""
Parameters
----------
- trading_calendar : zipline.utils.trading_calendar.TradingCalendar
- asset_finder : zipline.assets.AssetFinder
- readers : dict
A dict mapping Asset type to the corresponding
[Minute|Session]BarReader
- last_available_dt : pd.Timestamp or None, optional
        If not provided, infers it by using the max of the
        last_available_dt values of the underlying readers.
"""
def __init__(
self,
trading_calendar,
asset_finder,
readers,
last_available_dt=None,
):
self._trading_calendar = trading_calendar
self._asset_finder = asset_finder
self._readers = readers
self._last_available_dt = last_available_dt
for t, r in iteritems(self._readers):
assert trading_calendar == r.trading_calendar, \
"All readers must share target trading_calendar. " \
"Reader={0} for type={1} uses calendar={2} which does not " \
"match the desired shared calendar={3} ".format(
r, t, r.trading_calendar, trading_calendar)
@abstractmethod
def _dt_window_size(self, start_dt, end_dt):
pass
@property
def _asset_types(self):
return self._readers.keys()
def _make_raw_array_shape(self, start_dt, end_dt, num_sids):
return self._dt_window_size(start_dt, end_dt), num_sids
def _make_raw_array_out(self, field, shape):
if field != 'volume' and field != 'sid':
out = full(shape, nan)
else:
out = zeros(shape, dtype=int64)
return out
@property
def trading_calendar(self):
return self._trading_calendar
@lazyval
def last_available_dt(self):
if self._last_available_dt is not None:
return self._last_available_dt
else:
return max(r.last_available_dt for r in self._readers.values())
@lazyval
def first_trading_day(self):
return min(r.first_trading_day for r in self._readers.values())
def get_value(self, sid, dt, field):
asset = self._asset_finder.retrieve_asset(sid)
r = self._readers[type(asset)]
return r.get_value(asset, dt, field)
def get_last_traded_dt(self, asset, dt):
r = self._readers[type(asset)]
return r.get_last_traded_dt(asset, dt)
def load_raw_arrays(self, fields, start_dt, end_dt, sids):
asset_types = self._asset_types
sid_groups = {t: [] for t in asset_types}
out_pos = {t: [] for t in asset_types}
assets = self._asset_finder.retrieve_all(sids)
for i, asset in enumerate(assets):
t = type(asset)
sid_groups[t].append(asset)
out_pos[t].append(i)
batched_arrays = {
t: self._readers[t].load_raw_arrays(fields,
start_dt,
end_dt,
sid_groups[t])
for t in asset_types if sid_groups[t]}
results = []
shape = self._make_raw_array_shape(start_dt, end_dt, len(sids))
for i, field in enumerate(fields):
out = self._make_raw_array_out(field, shape)
for t, arrays in iteritems(batched_arrays):
out[:, out_pos[t]] = arrays[i]
results.append(out)
return results
class AssetDispatchMinuteBarReader(AssetDispatchBarReader):
def _dt_window_size(self, start_dt, end_dt):
return len(self.trading_calendar.minutes_in_range(start_dt, end_dt))
class AssetDispatchSessionBarReader(AssetDispatchBarReader):
def _dt_window_size(self, start_dt, end_dt):
return len(self.trading_calendar.sessions_in_range(start_dt, end_dt))
@lazyval
def sessions(self):
return self.trading_calendar.sessions_in_range(
self.first_trading_day,
self.last_available_dt) | zipline-live2-vk | /zipline-live2-vk-1.3.0.7.0.18.tar.gz/zipline-live2-vk-1.3.0.7.0.18/zipline/data/dispatch_bar_reader.py | dispatch_bar_reader.py |
from abc import ABCMeta, abstractmethod
import json
import os
from glob import glob
from os.path import join
from textwrap import dedent
from lru import LRU
import bcolz
from bcolz import ctable
from intervaltree import IntervalTree
import logbook
import numpy as np
import pandas as pd
from pandas import HDFStore
import tables
from six import with_metaclass
from toolz import keymap, valmap
from trading_calendars import get_calendar
from zipline.data._minute_bar_internal import (
minute_value,
find_position_of_minute,
find_last_traded_position_internal
)
from zipline.gens.sim_engine import NANOS_IN_MINUTE
from zipline.data.bar_reader import BarReader, NoDataForSid, NoDataOnDate
from zipline.data.us_equity_pricing import check_uint32_safe
from zipline.utils.cli import maybe_show_progress
from zipline.utils.compat import mappingproxy
from zipline.utils.memoize import lazyval
logger = logbook.Logger('MinuteBars')
US_EQUITIES_MINUTES_PER_DAY = 390
FUTURES_MINUTES_PER_DAY = 1440
DEFAULT_EXPECTEDLEN = US_EQUITIES_MINUTES_PER_DAY * 252 * 15
OHLC_RATIO = 1000
class BcolzMinuteOverlappingData(Exception):
pass
class BcolzMinuteWriterColumnMismatch(Exception):
pass
class MinuteBarReader(BarReader):
@property
def data_frequency(self):
return "minute"
def _calc_minute_index(market_opens, minutes_per_day):
minutes = np.zeros(len(market_opens) * minutes_per_day,
dtype='datetime64[ns]')
deltas = np.arange(0, minutes_per_day, dtype='timedelta64[m]')
for i, market_open in enumerate(market_opens):
start = market_open.asm8
minute_values = start + deltas
start_ix = minutes_per_day * i
end_ix = start_ix + minutes_per_day
minutes[start_ix:end_ix] = minute_values
return pd.to_datetime(minutes, utc=True, box=True)
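# Sketch of the index construction above: each market open contributes
# ``minutes_per_day`` consecutive minute labels, so two opens with
# minutes_per_day=3 yield six labels (opens below are illustrative):
#
#   >>> opens = pd.DatetimeIndex(
#   ...     ['2016-01-19 14:31', '2016-01-20 14:31'], tz='UTC')
#   >>> _calc_minute_index(opens, 3)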
def _sid_subdir_path(sid):
"""
Format subdir path to limit the number directories in any given
subdirectory to 100.
The number in each directory is designed to support at least 100000
equities.
Parameters
----------
sid : int
Asset identifier.
Returns
-------
out : string
A path for the bcolz rootdir, including subdirectory prefixes based on
the padded string representation of the given sid.
e.g. 1 is formatted as 00/00/000001.bcolz
"""
padded_sid = format(sid, '06')
return os.path.join(
# subdir 1 00/XX
padded_sid[0:2],
# subdir 2 XX/00
padded_sid[2:4],
"{0}.bcolz".format(str(padded_sid))
)
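# The padding scheme above is easiest to see by example:
#
#   >>> _sid_subdir_path(1)
#   '00/00/000001.bcolz'
#   >>> _sid_subdir_path(123456)
#   '12/34/123456.bcolz'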
def convert_cols(cols, scale_factor, sid, invalid_data_behavior):
"""Adapt OHLCV columns into uint32 columns.
Parameters
----------
cols : dict
A dict mapping each column name (open, high, low, close, volume)
to a float column to convert to uint32.
scale_factor : int
Factor to use to scale float values before converting to uint32.
sid : int
Sid of the relevant asset, for logging.
invalid_data_behavior : str
Specifies behavior when data cannot be converted to uint32.
If 'raise', raises an exception.
If 'warn', logs a warning and filters out incompatible values.
If 'ignore', silently filters out incompatible values.
"""
scaled_opens = np.nan_to_num(cols['open']) * scale_factor
scaled_highs = np.nan_to_num(cols['high']) * scale_factor
scaled_lows = np.nan_to_num(cols['low']) * scale_factor
scaled_closes = np.nan_to_num(cols['close']) * scale_factor
exclude_mask = np.zeros_like(scaled_opens, dtype=bool)
for col_name, scaled_col in [
('open', scaled_opens),
('high', scaled_highs),
('low', scaled_lows),
('close', scaled_closes),
]:
max_val = scaled_col.max()
try:
check_uint32_safe(max_val, col_name)
except ValueError:
if invalid_data_behavior == 'raise':
raise
if invalid_data_behavior == 'warn':
logger.warn(
'Values for sid={}, col={} contain some too large for '
'uint32 (max={}), filtering them out',
sid, col_name, max_val,
)
# We want to exclude all rows that have an unsafe value in
# this column.
            exclude_mask |= (scaled_col >= np.iinfo(np.uint32).max)
# Convert all cols to uint32.
opens = scaled_opens.astype(np.uint32)
highs = scaled_highs.astype(np.uint32)
lows = scaled_lows.astype(np.uint32)
closes = scaled_closes.astype(np.uint32)
volumes = cols['volume'].astype(np.uint32)
# Exclude rows with unsafe values by setting to zero.
opens[exclude_mask] = 0
highs[exclude_mask] = 0
lows[exclude_mask] = 0
closes[exclude_mask] = 0
volumes[exclude_mask] = 0
return opens, highs, lows, closes, volumes
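# Worked example of the scaling above: with scale_factor=1000 (the
# default OHLC_RATIO), a close of 10.001 is stored as the uint32 value
# 10001, and reading code divides by the same ratio to recover the
# price. Prices above np.iinfo(np.uint32).max / 1000 (about 4.29
# million) cannot fit and trip check_uint32_safe.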
class BcolzMinuteBarMetadata(object):
"""
Parameters
----------
ohlc_ratio : int
The factor by which the pricing data is multiplied so that the
float data can be stored as an integer.
calendar : trading_calendars.trading_calendar.TradingCalendar
The TradingCalendar on which the minute bars are based.
start_session : datetime
The first trading session in the data set.
end_session : datetime
The last trading session in the data set.
minutes_per_day : int
The number of minutes per each period.
"""
FORMAT_VERSION = 3
METADATA_FILENAME = 'metadata.json'
@classmethod
def metadata_path(cls, rootdir):
return os.path.join(rootdir, cls.METADATA_FILENAME)
@classmethod
def read(cls, rootdir):
path = cls.metadata_path(rootdir)
with open(path) as fp:
raw_data = json.load(fp)
try:
version = raw_data['version']
except KeyError:
# Version was first written with version 1, assume 0,
# if version does not match.
version = 0
default_ohlc_ratio = raw_data['ohlc_ratio']
if version >= 1:
minutes_per_day = raw_data['minutes_per_day']
else:
# version 0 always assumed US equities.
minutes_per_day = US_EQUITIES_MINUTES_PER_DAY
if version >= 2:
calendar = get_calendar(raw_data['calendar_name'])
start_session = pd.Timestamp(
raw_data['start_session'], tz='UTC')
end_session = pd.Timestamp(raw_data['end_session'], tz='UTC')
else:
# No calendar info included in older versions, so
# default to NYSE.
calendar = get_calendar('NYSE')
start_session = pd.Timestamp(
raw_data['first_trading_day'], tz='UTC')
end_session = calendar.minute_to_session_label(
pd.Timestamp(
raw_data['market_closes'][-1], unit='m', tz='UTC')
)
if version >= 3:
ohlc_ratios_per_sid = raw_data['ohlc_ratios_per_sid']
if ohlc_ratios_per_sid is not None:
ohlc_ratios_per_sid = keymap(int, ohlc_ratios_per_sid)
else:
ohlc_ratios_per_sid = None
return cls(
default_ohlc_ratio,
ohlc_ratios_per_sid,
calendar,
start_session,
end_session,
minutes_per_day,
version=version,
)
def __init__(
self,
default_ohlc_ratio,
ohlc_ratios_per_sid,
calendar,
start_session,
end_session,
minutes_per_day,
version=FORMAT_VERSION,
):
self.calendar = calendar
self.start_session = start_session
self.end_session = end_session
self.default_ohlc_ratio = default_ohlc_ratio
self.ohlc_ratios_per_sid = ohlc_ratios_per_sid
self.minutes_per_day = minutes_per_day
self.version = version
def write(self, rootdir):
"""
Write the metadata to a JSON file in the rootdir.
Values contained in the metadata are:
version : int
The value of FORMAT_VERSION of this class.
ohlc_ratio : int
The default ratio by which to multiply the pricing data to
convert the floats from floats to an integer to fit within
the np.uint32. If ohlc_ratios_per_sid is None or does not
contain a mapping for a given sid, this ratio is used.
ohlc_ratios_per_sid : dict
A dict mapping each sid in the output to the factor by
which the pricing data is multiplied so that the float data
can be stored as an integer.
minutes_per_day : int
The number of minutes per each period.
calendar_name : str
The name of the TradingCalendar on which the minute bars are
based.
start_session : datetime
'YYYY-MM-DD' formatted representation of the first trading
session in the data set.
end_session : datetime
'YYYY-MM-DD' formatted representation of the last trading
session in the data set.
Deprecated, but included for backwards compatibility:
first_trading_day : string
'YYYY-MM-DD' formatted representation of the first trading day
available in the dataset.
market_opens : list
List of int64 values representing UTC market opens as
minutes since epoch.
market_closes : list
List of int64 values representing UTC market closes as
minutes since epoch.
"""
calendar = self.calendar
slicer = calendar.schedule.index.slice_indexer(
self.start_session,
self.end_session,
)
schedule = calendar.schedule[slicer]
market_opens = schedule.market_open
market_closes = schedule.market_close
metadata = {
'version': self.version,
'ohlc_ratio': self.default_ohlc_ratio,
'ohlc_ratios_per_sid': self.ohlc_ratios_per_sid,
'minutes_per_day': self.minutes_per_day,
'calendar_name': self.calendar.name,
'start_session': str(self.start_session.date()),
'end_session': str(self.end_session.date()),
# Write these values for backwards compatibility
'first_trading_day': str(self.start_session.date()),
'market_opens': (
market_opens.values.astype('datetime64[m]').
astype(np.int64).tolist()),
'market_closes': (
market_closes.values.astype('datetime64[m]').
astype(np.int64).tolist()),
}
with open(self.metadata_path(rootdir), 'w+') as fp:
json.dump(metadata, fp)
class BcolzMinuteBarWriter(object):
"""
Class capable of writing minute OHLCV data to disk into bcolz format.
Parameters
----------
rootdir : string
Path to the root directory into which to write the metadata and
bcolz subdirectories.
calendar : trading_calendars.trading_calendar.TradingCalendar
The trading calendar on which to base the minute bars. Used to
get the market opens used as a starting point for each periodic
span of minutes in the index, and the market closes that
correspond with the market opens.
minutes_per_day : int
The number of minutes per each period. Defaults to 390, the mode
of minutes in NYSE trading days.
start_session : datetime
The first trading session in the data set.
end_session : datetime
The last trading session in the data set.
default_ohlc_ratio : int, optional
The default ratio by which to multiply the pricing data to
convert from floats to integers that fit within np.uint32. If
ohlc_ratios_per_sid is None or does not contain a mapping for a
given sid, this ratio is used. Default is OHLC_RATIO (1000).
ohlc_ratios_per_sid : dict, optional
A dict mapping each sid in the output to the ratio by which to
multiply the pricing data to convert the floats from floats to
an integer to fit within the np.uint32.
expectedlen : int, optional
The expected length of the dataset, used when creating the initial
bcolz ctable.
If the expectedlen is not used, the chunksize and corresponding
compression ratios are not ideal.
Defaults to supporting 15 years of NYSE equity market data.
see: http://bcolz.blosc.org/opt-tips.html#informing-about-the-length-of-your-carrays # noqa
write_metadata : bool, optional
If True, writes the minute bar metadata (on init of the writer).
If False, no metadata is written (existing metadata is
retained). Default is True.
Notes
-----
Writes a bcolz directory for each individual sid, all contained within
a root directory which also contains metadata about the entire dataset.
Each individual asset's data is stored as a bcolz table with a column for
each pricing field: (open, high, low, close, volume)
The open, high, low, and close columns are integers which are 1000 times
    the quoted price, so that the data can be represented and stored as an
np.uint32, supporting market prices quoted up to the thousands place.
volume is a np.uint32 with no mutation of the tens place.
The 'index' for each individual asset are a repeating period of minutes of
length `minutes_per_day` starting from each market open.
The file format does not account for half-days.
e.g.:
2016-01-19 14:31
2016-01-19 14:32
...
2016-01-19 20:59
2016-01-19 21:00
2016-01-20 14:31
2016-01-20 14:32
...
2016-01-20 20:59
2016-01-20 21:00
All assets are written with a common 'index', sharing a common first
trading day. Assets that do not begin trading until after the first trading
    day will have zeros for all pricing data up until data is traded.
'index' is in quotations, because bcolz does not provide an index. The
format allows index-like behavior by writing each minute's data into the
corresponding position of the enumeration of the aforementioned datetime
index.
The datetimes which correspond to each position are written in the metadata
as integer nanoseconds since the epoch into the `minute_index` key.
See Also
--------
zipline.data.minute_bars.BcolzMinuteBarReader
"""
COL_NAMES = ('open', 'high', 'low', 'close', 'volume')
def __init__(self,
rootdir,
calendar,
start_session,
end_session,
minutes_per_day,
default_ohlc_ratio=OHLC_RATIO,
ohlc_ratios_per_sid=None,
expectedlen=DEFAULT_EXPECTEDLEN,
write_metadata=True):
self._rootdir = rootdir
self._start_session = start_session
self._end_session = end_session
self._calendar = calendar
slicer = (
calendar.schedule.index.slice_indexer(start_session, end_session))
self._schedule = calendar.schedule[slicer]
self._session_labels = self._schedule.index
self._minutes_per_day = minutes_per_day
self._expectedlen = expectedlen
self._default_ohlc_ratio = default_ohlc_ratio
self._ohlc_ratios_per_sid = ohlc_ratios_per_sid
self._minute_index = _calc_minute_index(
self._schedule.market_open, self._minutes_per_day)
if write_metadata:
metadata = BcolzMinuteBarMetadata(
self._default_ohlc_ratio,
self._ohlc_ratios_per_sid,
self._calendar,
self._start_session,
self._end_session,
self._minutes_per_day,
)
metadata.write(self._rootdir)
@classmethod
def open(cls, rootdir, end_session=None):
"""
Open an existing ``rootdir`` for writing.
Parameters
----------
end_session : Timestamp (optional)
When appending, the intended new ``end_session``.
"""
metadata = BcolzMinuteBarMetadata.read(rootdir)
return BcolzMinuteBarWriter(
rootdir,
metadata.calendar,
metadata.start_session,
end_session if end_session is not None else metadata.end_session,
metadata.minutes_per_day,
metadata.default_ohlc_ratio,
metadata.ohlc_ratios_per_sid,
write_metadata=end_session is not None
)
@property
def first_trading_day(self):
return self._start_session
def ohlc_ratio_for_sid(self, sid):
if self._ohlc_ratios_per_sid is not None:
try:
return self._ohlc_ratios_per_sid[sid]
except KeyError:
pass
# If no ohlc_ratios_per_sid dict is passed, or if the specified
# sid is not in the dict, fallback to the general ohlc_ratio.
return self._default_ohlc_ratio
def sidpath(self, sid):
"""
Parameters
----------
sid : int
Asset identifier.
Returns
-------
out : string
Full path to the bcolz rootdir for the given sid.
"""
sid_subdir = _sid_subdir_path(sid)
return join(self._rootdir, sid_subdir)
def last_date_in_output_for_sid(self, sid):
"""
Parameters
----------
sid : int
Asset identifier.
Returns
-------
out : pd.Timestamp
The midnight of the last date written in to the output for the
given sid.
"""
sizes_path = "{0}/close/meta/sizes".format(self.sidpath(sid))
if not os.path.exists(sizes_path):
return pd.NaT
with open(sizes_path, mode='r') as f:
sizes = f.read()
data = json.loads(sizes)
# use integer division so that the result is an int
# for pandas index later https://github.com/pandas-dev/pandas/blob/master/pandas/tseries/base.py#L247 # noqa
num_days = data['shape'][0] // self._minutes_per_day
if num_days == 0:
# empty container
return pd.NaT
return self._session_labels[num_days - 1]
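    # Worked example of the arithmetic above: with minutes_per_day=390
    # and a stored shape of [780], num_days is 780 // 390 == 2, so the
    # method returns the second session label in the writer's calendar.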
def _init_ctable(self, path):
"""
Create empty ctable for given path.
Parameters
----------
path : string
The path to rootdir of the new ctable.
"""
# Only create the containing subdir on creation.
# This is not to be confused with the `.bcolz` directory, but is the
# directory up one level from the `.bcolz` directories.
sid_containing_dirname = os.path.dirname(path)
if not os.path.exists(sid_containing_dirname):
# Other sids may have already created the containing directory.
os.makedirs(sid_containing_dirname)
initial_array = np.empty(0, np.uint32)
table = ctable(
rootdir=path,
columns=[
initial_array,
initial_array,
initial_array,
initial_array,
initial_array,
],
names=[
'open',
'high',
'low',
'close',
'volume'
],
expectedlen=self._expectedlen,
mode='w',
)
table.flush()
return table
def _ensure_ctable(self, sid):
"""Ensure that a ctable exists for ``sid``, then return it."""
sidpath = self.sidpath(sid)
if not os.path.exists(sidpath):
return self._init_ctable(sidpath)
return bcolz.ctable(rootdir=sidpath, mode='a')
def _zerofill(self, table, numdays):
# Compute the number of minutes to be filled, accounting for the
# possibility of a partial day's worth of minutes existing for
# the previous day.
minute_offset = len(table) % self._minutes_per_day
num_to_prepend = numdays * self._minutes_per_day - minute_offset
prepend_array = np.zeros(num_to_prepend, np.uint32)
# Fill all OHLCV with zeros.
table.append([prepend_array] * 5)
table.flush()
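    # Worked example (illustrative, assuming minutes_per_day == 390): if the
    # table currently holds 500 rows, then minute_offset == 500 % 390 == 110,
    # so zerofilling numdays == 2 appends 2 * 390 - 110 == 670 zero rows:
    # 280 to complete the partial day plus 390 for one further empty session.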
def pad(self, sid, date):
"""
Fill sid container with empty data through the specified date.
If the last recorded trade is not at the close, then that day will be
padded with zeros until its close. Any day after that (up to and
        including the specified date) will be padded with `minutes_per_day`
        worth of zeros.
Parameters
----------
sid : int
The asset identifier for the data being written.
date : datetime-like
            The date used to calculate how many slots to pad.
            The padding is done through the date, i.e. after the padding is
            done, `last_date_in_output_for_sid` will be equal to `date`.
"""
table = self._ensure_ctable(sid)
last_date = self.last_date_in_output_for_sid(sid)
tds = self._session_labels
if date <= last_date or date < tds[0]:
# No need to pad.
return
        if last_date is pd.NaT:
# If there is no data, determine how many days to add so that
# desired days are written to the correct slots.
days_to_zerofill = tds[tds.slice_indexer(end=date)]
else:
days_to_zerofill = tds[tds.slice_indexer(
start=last_date + tds.freq,
end=date)]
self._zerofill(table, len(days_to_zerofill))
new_last_date = self.last_date_in_output_for_sid(sid)
assert new_last_date == date, "new_last_date={0} != date={1}".format(
new_last_date, date)
def set_sid_attrs(self, sid, **kwargs):
"""Write all the supplied kwargs as attributes of the sid's file.
"""
table = self._ensure_ctable(sid)
for k, v in kwargs.items():
table.attrs[k] = v
def write(self, data, show_progress=False, invalid_data_behavior='warn'):
"""Write a stream of minute data.
Parameters
----------
data : iterable[(int, pd.DataFrame)]
The data to write. Each element should be a tuple of sid, data
where data has the following format:
columns : ('open', 'high', 'low', 'close', 'volume')
open : float64
high : float64
low : float64
close : float64
volume : float64|int64
index : DatetimeIndex of market minutes.
A given sid may appear more than once in ``data``; however,
the dates must be strictly increasing.
show_progress : bool, optional
Whether or not to show a progress bar while writing.
"""
ctx = maybe_show_progress(
data,
show_progress=show_progress,
item_show_func=lambda e: e if e is None else str(e[0]),
label="Merging minute equity files:",
)
write_sid = self.write_sid
with ctx as it:
for e in it:
write_sid(*e, invalid_data_behavior=invalid_data_behavior)
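    # Illustrative usage sketch (hypothetical rootdir and sid; the calendar
    # and session bounds are assumed to be defined elsewhere):
    #
    #   writer = BcolzMinuteBarWriter(
    #       '/tmp/minute_bars', calendar, start_session, end_session,
    #       minutes_per_day=390)
    #   frame = pd.DataFrame(
    #       {'open': [10.0], 'high': [10.5], 'low': [9.9],
    #        'close': [10.2], 'volume': [1200.0]},
    #       index=pd.DatetimeIndex(['2016-01-04 14:31'], tz='UTC'))
    #   writer.write([(1, frame)], show_progress=False)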
def write_sid(self, sid, df, invalid_data_behavior='warn'):
"""
Write the OHLCV data for the given sid.
If there is no bcolz ctable yet created for the sid, create it.
        If the bcolz ctable does not yet extend through the date before the
        first day provided, fill the ctable with 0s up to that date.
Parameters
----------
sid : int
            The asset identifier for the data being written.
df : pd.DataFrame
DataFrame of market data with the following characteristics.
columns : ('open', 'high', 'low', 'close', 'volume')
open : float64
high : float64
low : float64
close : float64
volume : float64|int64
index : DatetimeIndex of market minutes.
"""
cols = {
'open': df.open.values,
'high': df.high.values,
'low': df.low.values,
'close': df.close.values,
'volume': df.volume.values,
}
dts = df.index.values
# Call internal method, since DataFrame has already ensured matching
# index and value lengths.
self._write_cols(sid, dts, cols, invalid_data_behavior)
def write_cols(self, sid, dts, cols, invalid_data_behavior='warn'):
"""
Write the OHLCV data for the given sid.
If there is no bcolz ctable yet created for the sid, create it.
        If the bcolz ctable does not yet extend through the date before the
        first day provided, fill the ctable with 0s up to that date.
Parameters
----------
sid : int
The asset identifier for the data being written.
dts : datetime64 array
The dts corresponding to values in cols.
cols : dict of str -> np.array
dict of market data with the following characteristics.
keys are ('open', 'high', 'low', 'close', 'volume')
open : float64
high : float64
low : float64
close : float64
volume : float64|int64
"""
if not all(len(dts) == len(cols[name]) for name in self.COL_NAMES):
raise BcolzMinuteWriterColumnMismatch(
"Length of dts={0} should match cols: {1}".format(
len(dts),
" ".join("{0}={1}".format(name, len(cols[name]))
for name in self.COL_NAMES)))
self._write_cols(sid, dts, cols, invalid_data_behavior)
def _write_cols(self, sid, dts, cols, invalid_data_behavior):
"""
Internal method for `write_cols` and `write`.
Parameters
----------
sid : int
The asset identifier for the data being written.
dts : datetime64 array
The dts corresponding to values in cols.
cols : dict of str -> np.array
dict of market data with the following characteristics.
keys are ('open', 'high', 'low', 'close', 'volume')
open : float64
high : float64
low : float64
close : float64
volume : float64|int64
"""
table = self._ensure_ctable(sid)
tds = self._session_labels
input_first_day = self._calendar.minute_to_session_label(
pd.Timestamp(dts[0]), direction='previous')
last_date = self.last_date_in_output_for_sid(sid)
day_before_input = input_first_day - tds.freq
self.pad(sid, day_before_input)
table = self._ensure_ctable(sid)
# Get the number of minutes already recorded in this sid's ctable
num_rec_mins = table.size
all_minutes = self._minute_index
# Get the latest minute we wish to write to the ctable
last_minute_to_write = pd.Timestamp(dts[-1], tz='UTC')
# In the event that we've already written some minutely data to the
# ctable, guard against overwriting that data.
if num_rec_mins > 0:
last_recorded_minute = all_minutes[num_rec_mins - 1]
if last_minute_to_write <= last_recorded_minute:
raise BcolzMinuteOverlappingData(dedent("""
Data with last_date={0} already includes input start={1} for
sid={2}""".strip()).format(last_date, input_first_day, sid))
latest_min_count = all_minutes.get_loc(last_minute_to_write)
# Get all the minutes we wish to write (all market minutes after the
# latest currently written, up to and including last_minute_to_write)
all_minutes_in_window = all_minutes[num_rec_mins:latest_min_count + 1]
minutes_count = all_minutes_in_window.size
open_col = np.zeros(minutes_count, dtype=np.uint32)
high_col = np.zeros(minutes_count, dtype=np.uint32)
low_col = np.zeros(minutes_count, dtype=np.uint32)
close_col = np.zeros(minutes_count, dtype=np.uint32)
vol_col = np.zeros(minutes_count, dtype=np.uint32)
dt_ixs = np.searchsorted(all_minutes_in_window.values,
dts.astype('datetime64[ns]'))
ohlc_ratio = self.ohlc_ratio_for_sid(sid)
(
open_col[dt_ixs],
high_col[dt_ixs],
low_col[dt_ixs],
close_col[dt_ixs],
vol_col[dt_ixs],
) = convert_cols(cols, ohlc_ratio, sid, invalid_data_behavior)
table.append([
open_col,
high_col,
low_col,
close_col,
vol_col
])
table.flush()
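    # Worked example of the fixed-point encoding above (illustrative,
    # assuming an ohlc_ratio of 1000): a close of 10.237 is stored as the
    # uint32 value round(10.237 * 1000) == 10237; the reader multiplies by
    # the inverse ratio (1 / 1000) to recover 10.237. Volume is stored
    # unscaled.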
def data_len_for_day(self, day):
"""
Return the number of data points up to and including the
provided day.
"""
day_ix = self._session_labels.get_loc(day)
# Add one to the 0-indexed day_ix to get the number of days.
num_days = day_ix + 1
return num_days * self._minutes_per_day
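    # Worked example (illustrative, assuming minutes_per_day == 390): for the
    # session at positional index 4, data_len_for_day returns
    # (4 + 1) * 390 == 1950, i.e. the number of rows truncate() would retain.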
def truncate(self, date):
"""Truncate data beyond this date in all ctables."""
truncate_slice_end = self.data_len_for_day(date)
glob_path = os.path.join(self._rootdir, "*", "*", "*.bcolz")
sid_paths = sorted(glob(glob_path))
for sid_path in sid_paths:
file_name = os.path.basename(sid_path)
try:
table = bcolz.open(rootdir=sid_path)
except IOError:
continue
if table.len <= truncate_slice_end:
logger.info("{0} not past truncate date={1}.", file_name, date)
continue
logger.info(
"Truncating {0} at end_date={1}", file_name, date.date()
)
table.resize(truncate_slice_end)
# Update end session in metadata.
metadata = BcolzMinuteBarMetadata.read(self._rootdir)
metadata.end_session = date
metadata.write(self._rootdir)
class BcolzMinuteBarReader(MinuteBarReader):
"""
Reader for data written by BcolzMinuteBarWriter
Parameters
----------
rootdir : string
The root directory containing the metadata and asset bcolz
directories.
See Also
--------
zipline.data.minute_bars.BcolzMinuteBarWriter
"""
FIELDS = ('open', 'high', 'low', 'close', 'volume')
DEFAULT_MINUTELY_SID_CACHE_SIZES = {
'close': 3000,
'open': 1550,
'high': 1550,
'low': 1550,
'volume': 1550,
}
assert set(FIELDS) == set(DEFAULT_MINUTELY_SID_CACHE_SIZES), \
"FIELDS should match DEFAULT_MINUTELY_SID_CACHE_SIZES keys"
# Wrap the defaults in proxy so that we don't accidentally mutate them in
# place in the constructor. If a user wants to change the defaults, they
# can do so by mutating DEFAULT_MINUTELY_SID_CACHE_SIZES.
_default_proxy = mappingproxy(DEFAULT_MINUTELY_SID_CACHE_SIZES)
def __init__(self, rootdir, sid_cache_sizes=_default_proxy):
self._rootdir = rootdir
metadata = self._get_metadata()
self._start_session = metadata.start_session
self._end_session = metadata.end_session
self.calendar = metadata.calendar
slicer = self.calendar.schedule.index.slice_indexer(
self._start_session,
self._end_session,
)
self._schedule = self.calendar.schedule[slicer]
self._market_opens = self._schedule.market_open
self._market_open_values = self._market_opens.values.\
astype('datetime64[m]').astype(np.int64)
self._market_closes = self._schedule.market_close
self._market_close_values = self._market_closes.values.\
astype('datetime64[m]').astype(np.int64)
self._default_ohlc_inverse = 1.0 / metadata.default_ohlc_ratio
ohlc_ratios = metadata.ohlc_ratios_per_sid
if ohlc_ratios:
self._ohlc_inverses_per_sid = (
valmap(lambda x: 1.0 / x, ohlc_ratios))
else:
self._ohlc_inverses_per_sid = None
self._minutes_per_day = metadata.minutes_per_day
self._carrays = {
field: LRU(sid_cache_sizes[field])
for field in self.FIELDS
}
self._last_get_value_dt_position = None
self._last_get_value_dt_value = None
# This is to avoid any bad data or other performance-killing situation
        # where there is a consecutive streak of 0 (no volume) values
        # starting at an asset's start date.
        # If asset 1 started on 2015-01-03 but its first trade is 2015-01-06
        # 10:31 AM US/Eastern, this dict would store {1: 23675971},
        # which is the minute epoch of that trade.
self._known_zero_volume_dict = {}
def _get_metadata(self):
return BcolzMinuteBarMetadata.read(self._rootdir)
@property
def trading_calendar(self):
return self.calendar
@lazyval
def last_available_dt(self):
_, close = self.calendar.open_and_close_for_session(self._end_session)
return close
@property
def first_trading_day(self):
return self._start_session
def _ohlc_ratio_inverse_for_sid(self, sid):
if self._ohlc_inverses_per_sid is not None:
try:
return self._ohlc_inverses_per_sid[sid]
except KeyError:
pass
# If we can not get a sid-specific OHLC inverse for this sid,
# fallback to the default.
return self._default_ohlc_inverse
def _minutes_to_exclude(self):
"""
Calculate the minutes which should be excluded when a window
occurs on days which had an early close, i.e. days where the close
based on the regular period of minutes per day and the market close
do not match.
Returns
-------
        List of (market_open, early_close) Timestamp pairs for sessions
        with early closes.
"""
market_opens = self._market_opens.values.astype('datetime64[m]')
market_closes = self._market_closes.values.astype('datetime64[m]')
minutes_per_day = (market_closes - market_opens).astype(np.int64)
early_indices = np.where(
minutes_per_day != self._minutes_per_day - 1)[0]
early_opens = self._market_opens[early_indices]
early_closes = self._market_closes[early_indices]
minutes = [(market_open, early_close)
for market_open, early_close
in zip(early_opens, early_closes)]
return minutes
@lazyval
def _minute_exclusion_tree(self):
"""
        Build an interval tree keyed by the start and end of each range of
        positions that should be dropped from windows. (These are the minutes
        between an early close and the minute which would be the close based
        on the regular period if there were no early close.)
        The value of each node is the same start and end position stored as
        a tuple.
        The data is stored this way to support quickly answering the question:
        does a given start and end position overlap any of the exclusion spans?
Returns
-------
IntervalTree containing nodes which represent the minutes to exclude
because of early closes.
"""
itree = IntervalTree()
for market_open, early_close in self._minutes_to_exclude():
start_pos = self._find_position_of_minute(early_close) + 1
end_pos = (
self._find_position_of_minute(market_open)
+
self._minutes_per_day
-
1
)
data = (start_pos, end_pos)
itree[start_pos:end_pos + 1] = data
return itree
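    # Worked example (illustrative, assuming a 390-minute regular session and
    # a 210-minute early-close session): if the early session opens at
    # position P, then positions P + 210 through P + 389 are excluded, i.e.
    # the slots between the actual early close and where the regular close
    # would have fallen.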
def _exclusion_indices_for_range(self, start_idx, end_idx):
"""
Returns
-------
List of tuples of (start, stop) which represent the ranges of minutes
which should be excluded when a market minute window is requested.
"""
itree = self._minute_exclusion_tree
if itree.overlaps(start_idx, end_idx):
ranges = []
intervals = itree[start_idx:end_idx]
for interval in intervals:
ranges.append(interval.data)
return sorted(ranges)
else:
return None
def _get_carray_path(self, sid, field):
sid_subdir = _sid_subdir_path(sid)
# carrays are subdirectories of the sid's rootdir
return os.path.join(self._rootdir, sid_subdir, field)
def _open_minute_file(self, field, sid):
sid = int(sid)
try:
carray = self._carrays[field][sid]
except KeyError:
try:
carray = self._carrays[field][sid] = bcolz.carray(
rootdir=self._get_carray_path(sid, field),
mode='r',
)
except IOError:
raise NoDataForSid('No minute data for sid {}.'.format(sid))
return carray
def table_len(self, sid):
"""Returns the length of the underlying table for this sid."""
return len(self._open_minute_file('close', sid))
def get_sid_attr(self, sid, name):
sid_subdir = _sid_subdir_path(sid)
sid_path = os.path.join(self._rootdir, sid_subdir)
attrs = bcolz.attrs.attrs(sid_path, 'r')
try:
return attrs[name]
except KeyError:
return None
def get_value(self, sid, dt, field):
"""
Retrieve the pricing info for the given sid, dt, and field.
Parameters
----------
sid : int
Asset identifier.
dt : datetime-like
The datetime at which the trade occurred.
field : string
The type of pricing data to retrieve.
('open', 'high', 'low', 'close', 'volume')
Returns
-------
out : float|int
The market data for the given sid, dt, and field coordinates.
For OHLC:
Returns a float if a trade occurred at the given dt.
If no trade occurred, a np.nan is returned.
For volume:
Returns the integer value of the volume.
(A volume of 0 signifies no trades for the given dt.)
"""
if self._last_get_value_dt_value == dt.value:
minute_pos = self._last_get_value_dt_position
else:
try:
minute_pos = self._find_position_of_minute(dt)
except ValueError:
raise NoDataOnDate()
self._last_get_value_dt_value = dt.value
self._last_get_value_dt_position = minute_pos
try:
value = self._open_minute_file(field, sid)[minute_pos]
except IndexError:
value = 0
if value == 0:
if field == 'volume':
return 0
else:
return np.nan
if field != 'volume':
value *= self._ohlc_ratio_inverse_for_sid(sid)
return value
def get_last_traded_dt(self, asset, dt):
minute_pos = self._find_last_traded_position(asset, dt)
if minute_pos == -1:
return pd.NaT
return self._pos_to_minute(minute_pos)
def _find_last_traded_position(self, asset, dt):
volumes = self._open_minute_file('volume', asset)
start_date_minute = asset.start_date.value / NANOS_IN_MINUTE
dt_minute = dt.value / NANOS_IN_MINUTE
try:
# if we know of a dt before which this asset has no volume,
# don't look before that dt
earliest_dt_to_search = self._known_zero_volume_dict[asset.sid]
except KeyError:
earliest_dt_to_search = start_date_minute
if dt_minute < earliest_dt_to_search:
return -1
pos = find_last_traded_position_internal(
self._market_open_values,
self._market_close_values,
dt_minute,
earliest_dt_to_search,
volumes,
self._minutes_per_day,
)
if pos == -1:
# if we didn't find any volume before this dt, save it to avoid
# work in the future.
try:
self._known_zero_volume_dict[asset.sid] = max(
dt_minute,
self._known_zero_volume_dict[asset.sid]
)
except KeyError:
self._known_zero_volume_dict[asset.sid] = dt_minute
return pos
def _pos_to_minute(self, pos):
minute_epoch = minute_value(
self._market_open_values,
pos,
self._minutes_per_day
)
return pd.Timestamp(minute_epoch, tz='UTC', unit="m")
def _find_position_of_minute(self, minute_dt):
"""
Internal method that returns the position of the given minute in the
list of every trading minute since market open of the first trading
day. Adjusts non market minutes to the last close.
ex. this method would return 1 for 2002-01-02 9:32 AM Eastern, if
2002-01-02 is the first trading day of the dataset.
Parameters
----------
minute_dt: pd.Timestamp
The minute whose position should be calculated.
Returns
-------
int: The position of the given minute in the list of all trading
minutes since market open on the first trading day.
"""
return find_position_of_minute(
self._market_open_values,
self._market_close_values,
minute_dt.value / NANOS_IN_MINUTE,
self._minutes_per_day,
False,
)
def load_raw_arrays(self, fields, start_dt, end_dt, sids):
"""
Parameters
----------
fields : list of str
'open', 'high', 'low', 'close', or 'volume'
start_dt: Timestamp
Beginning of the window range.
end_dt: Timestamp
End of the window range.
sids : list of int
The asset identifiers in the window.
Returns
-------
list of np.ndarray
A list with an entry per field of ndarrays with shape
(minutes in range, sids) with a dtype of float64, containing the
values for the respective field over start and end dt range.
"""
start_idx = self._find_position_of_minute(start_dt)
end_idx = self._find_position_of_minute(end_dt)
num_minutes = (end_idx - start_idx + 1)
results = []
indices_to_exclude = self._exclusion_indices_for_range(
start_idx, end_idx)
if indices_to_exclude is not None:
for excl_start, excl_stop in indices_to_exclude:
length = excl_stop - excl_start + 1
num_minutes -= length
shape = num_minutes, len(sids)
for field in fields:
if field != 'volume':
out = np.full(shape, np.nan)
else:
out = np.zeros(shape, dtype=np.uint32)
for i, sid in enumerate(sids):
carray = self._open_minute_file(field, sid)
values = carray[start_idx:end_idx + 1]
if indices_to_exclude is not None:
for excl_start, excl_stop in indices_to_exclude[::-1]:
excl_slice = np.s_[
excl_start - start_idx:excl_stop - start_idx + 1]
values = np.delete(values, excl_slice)
where = values != 0
# first slice down to len(where) because we might not have
# written data for all the minutes requested
if field != 'volume':
out[:len(where), i][where] = (
values[where] * self._ohlc_ratio_inverse_for_sid(sid))
else:
out[:len(where), i][where] = values[where]
results.append(out)
return results
class MinuteBarUpdateReader(with_metaclass(ABCMeta, object)):
"""
Abstract base class for minute update readers.
"""
@abstractmethod
def read(self, dts, sids):
"""
Read and return pricing update data.
Parameters
----------
dts : DatetimeIndex
The minutes for which to read the pricing updates.
sids : iter[int]
The sids for which to read the pricing updates.
Returns
-------
data : iter[(int, DataFrame)]
            An iterable of (sid, DataFrame) pairs of the corresponding
            OHLCV data.
"""
raise NotImplementedError()
class H5MinuteBarUpdateWriter(object):
"""
Writer for files containing minute bar updates for consumption by a writer
for a ``MinuteBarReader`` format.
Parameters
----------
path : str
The destination path.
complevel : int, optional
The HDF5 complevel, defaults to ``5``.
complib : str, optional
The HDF5 complib, defaults to ``zlib``.
"""
FORMAT_VERSION = 0
_COMPLEVEL = 5
_COMPLIB = 'zlib'
def __init__(self, path, complevel=None, complib=None):
self._complevel = complevel if complevel \
is not None else self._COMPLEVEL
self._complib = complib if complib \
is not None else self._COMPLIB
self._path = path
def write(self, frames):
"""
Write the frames to the target HDF5 file, using the format used by
``pd.Panel.to_hdf``
Parameters
----------
frames : iter[(int, DataFrame)] or dict[int -> DataFrame]
            An iterable of (sid, DataFrame) pairs or a mapping of sid to the
            corresponding OHLCV pricing data.
"""
with HDFStore(self._path, 'w',
complevel=self._complevel, complib=self._complib) \
as store:
panel = pd.Panel.from_dict(dict(frames))
panel.to_hdf(store, 'updates')
with tables.open_file(self._path, mode='r+') as h5file:
            h5file.set_node_attr('/', 'version', self.FORMAT_VERSION)
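    # Illustrative usage sketch (hypothetical path; `frame` is an OHLCV
    # DataFrame indexed by market minutes, as elsewhere in this module):
    #
    #   H5MinuteBarUpdateWriter('/tmp/updates.h5').write({1: frame})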
class H5MinuteBarUpdateReader(MinuteBarUpdateReader):
"""
Reader for minute bar updates stored in HDF5 files.
Parameters
----------
path : str
The path of the HDF5 file from which to source data.
"""
def __init__(self, path):
self._panel = pd.read_hdf(path)
def read(self, dts, sids):
panel = self._panel[sids, dts, :]
        return panel.iteritems()
from operator import itemgetter
import re
import numpy as np
import pandas as pd
get_unit_and_periods = itemgetter('unit', 'periods')
def parse_treasury_csv_column(column):
"""
Parse a treasury CSV column into a more human-readable format.
Columns start with 'RIFLGFC', followed by Y or M (year or month), followed
by a two-digit number signifying number of years/months, followed by _N.B.
We only care about the middle two entries, which we turn into a string like
3month or 30year.
"""
column_re = re.compile(
r"^(?P<prefix>RIFLGFC)"
"(?P<unit>[YM])"
"(?P<periods>[0-9]{2})"
"(?P<suffix>_N.B)$"
)
match = column_re.match(column)
if match is None:
raise ValueError("Couldn't parse CSV column %r." % column)
unit, periods = get_unit_and_periods(match.groupdict())
# Roundtrip through int to coerce '06' into '6'.
return str(int(periods)) + ('year' if unit == 'Y' else 'month')
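# Illustrative examples (not exercised anywhere in this module):
#
#   parse_treasury_csv_column('RIFLGFCY10_N.B')  # -> '10year'
#   parse_treasury_csv_column('RIFLGFCM03_N.B')  # -> '3month'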
def earliest_possible_date():
"""
The earliest date for which we can load data from this module.
"""
# The US Treasury actually has data going back further than this, but it's
# pretty rare to find pricing data going back that far, and there's no
# reason to make people download benchmarks back to 1950 that they'll never
# be able to use.
return pd.Timestamp('1980', tz='UTC')
def get_treasury_data(start_date, end_date):
return pd.read_csv(
"https://www.federalreserve.gov/datadownload/Output.aspx"
"?rel=H15"
"&series=bf17364827e38702b42a58cf8eaa3f78"
"&lastObs="
"&from=" # An unbounded query is ~2x faster than specifying dates.
"&to="
"&filetype=csv"
"&label=include"
"&layout=seriescolumn"
"&type=package",
skiprows=5, # First 5 rows are useless headers.
parse_dates=['Time Period'],
na_values=['ND'], # Presumably this stands for "No Data".
index_col=0,
).loc[
start_date:end_date
].dropna(
how='all'
).rename(
columns=parse_treasury_csv_column
).tz_localize('UTC') * 0.01 # Convert from 2.57% to 0.0257.
def dataconverter(s):
try:
return float(s) / 100
    except (TypeError, ValueError):
return np.nan
def get_daily_10yr_treasury_data():
"""Download daily 10 year treasury rates from the Federal Reserve and
return a pandas.Series."""
url = "https://www.federalreserve.gov/datadownload/Output.aspx?rel=H15" \
"&series=bcb44e57fb57efbe90002369321bfb3f&lastObs=&from=&to=" \
"&filetype=csv&label=include&layout=seriescolumn"
return pd.read_csv(url, header=5, index_col=0, names=['DATE', 'BC_10YEAR'],
parse_dates=True, converters={1: dataconverter},
                       squeeze=True)
from operator import mul
from logbook import Logger
import numpy as np
from numpy import float64, int64, nan
import pandas as pd
from pandas import isnull
from six import iteritems
from six.moves import reduce
from zipline.assets import (
Asset,
AssetConvertible,
Equity,
Future,
PricingDataAssociable,
)
from zipline.assets.continuous_futures import ContinuousFuture
from zipline.data.continuous_future_reader import (
ContinuousFutureSessionBarReader,
ContinuousFutureMinuteBarReader
)
from zipline.assets.roll_finder import (
CalendarRollFinder,
VolumeRollFinder
)
from zipline.data.dispatch_bar_reader import (
AssetDispatchMinuteBarReader,
AssetDispatchSessionBarReader
)
from zipline.data.resample import (
DailyHistoryAggregator,
ReindexMinuteBarReader,
ReindexSessionBarReader,
)
from zipline.data.history_loader import (
DailyHistoryLoader,
MinuteHistoryLoader,
)
from zipline.data.us_equity_pricing import NoDataOnDate
from zipline.utils.math_utils import (
nansum,
nanmean,
nanstd
)
from zipline.utils.memoize import remember_last, weak_lru_cache
from zipline.utils.pandas_utils import (
normalize_date,
timedelta_to_integral_minutes,
)
from zipline.errors import HistoryWindowStartsBeforeData
log = Logger('DataPortal')
BASE_FIELDS = frozenset([
"open",
"high",
"low",
"close",
"volume",
"price",
"contract",
"sid",
"last_traded",
])
OHLCV_FIELDS = frozenset([
"open", "high", "low", "close", "volume"
])
OHLCVP_FIELDS = frozenset([
"open", "high", "low", "close", "volume", "price"
])
HISTORY_FREQUENCIES = set(["1m", "1d"])
DEFAULT_MINUTE_HISTORY_PREFETCH = 1560
DEFAULT_DAILY_HISTORY_PREFETCH = 40
_DEF_M_HIST_PREFETCH = DEFAULT_MINUTE_HISTORY_PREFETCH
_DEF_D_HIST_PREFETCH = DEFAULT_DAILY_HISTORY_PREFETCH
class DataPortal(object):
"""Interface to all of the data that a zipline simulation needs.
This is used by the simulation runner to answer questions about the data,
like getting the prices of assets on a given day or to service history
calls.
Parameters
----------
asset_finder : zipline.assets.assets.AssetFinder
The AssetFinder instance used to resolve assets.
trading_calendar: zipline.utils.calendar.exchange_calendar.TradingCalendar
The calendar instance used to provide minute->session information.
first_trading_day : pd.Timestamp
The first trading day for the simulation.
equity_daily_reader : BcolzDailyBarReader, optional
The daily bar reader for equities. This will be used to service
        daily data backtests or daily history calls in a minute backtest.
If a daily bar reader is not provided but a minute bar reader is,
the minutes will be rolled up to serve the daily requests.
equity_minute_reader : BcolzMinuteBarReader, optional
The minute bar reader for equities. This will be used to service
minute data backtests or minute history calls. This can be used
to serve daily calls if no daily bar reader is provided.
future_daily_reader : BcolzDailyBarReader, optional
        The daily bar reader for futures. This will be used to service
        daily data backtests or daily history calls in a minute backtest.
If a daily bar reader is not provided but a minute bar reader is,
the minutes will be rolled up to serve the daily requests.
future_minute_reader : BcolzFutureMinuteBarReader, optional
The minute bar reader for futures. This will be used to service
minute data backtests or minute history calls. This can be used
to serve daily calls if no daily bar reader is provided.
    adjustment_reader : SQLiteAdjustmentReader, optional
The adjustment reader. This is used to apply splits, dividends, and
other adjustment data to the raw data from the readers.
last_available_session : pd.Timestamp, optional
The last session to make available in session-level data.
last_available_minute : pd.Timestamp, optional
The last minute to make available in minute-level data.
"""
def __init__(self,
asset_finder,
trading_calendar,
first_trading_day,
equity_daily_reader=None,
equity_minute_reader=None,
future_daily_reader=None,
future_minute_reader=None,
adjustment_reader=None,
last_available_session=None,
last_available_minute=None,
minute_history_prefetch_length=_DEF_M_HIST_PREFETCH,
daily_history_prefetch_length=_DEF_D_HIST_PREFETCH):
self.trading_calendar = trading_calendar
self.asset_finder = asset_finder
self._adjustment_reader = adjustment_reader
# caches of sid -> adjustment list
self._splits_dict = {}
self._mergers_dict = {}
self._dividends_dict = {}
# Handle extra sources, like Fetcher.
self._augmented_sources_map = {}
self._extra_source_df = None
self._first_available_session = first_trading_day
if last_available_session:
self._last_available_session = last_available_session
else:
# Infer the last session from the provided readers.
last_sessions = [
reader.last_available_dt
for reader in [equity_daily_reader, future_daily_reader]
if reader is not None
]
if last_sessions:
self._last_available_session = min(last_sessions)
else:
self._last_available_session = None
if last_available_minute:
self._last_available_minute = last_available_minute
else:
# Infer the last minute from the provided readers.
last_minutes = [
reader.last_available_dt
for reader in [equity_minute_reader, future_minute_reader]
if reader is not None
]
if last_minutes:
self._last_available_minute = max(last_minutes)
else:
self._last_available_minute = None
aligned_equity_minute_reader = self._ensure_reader_aligned(
equity_minute_reader)
aligned_equity_session_reader = self._ensure_reader_aligned(
equity_daily_reader)
aligned_future_minute_reader = self._ensure_reader_aligned(
future_minute_reader)
aligned_future_session_reader = self._ensure_reader_aligned(
future_daily_reader)
self._roll_finders = {
'calendar': CalendarRollFinder(self.trading_calendar,
self.asset_finder),
}
aligned_minute_readers = {}
aligned_session_readers = {}
if aligned_equity_minute_reader is not None:
aligned_minute_readers[Equity] = aligned_equity_minute_reader
if aligned_equity_session_reader is not None:
aligned_session_readers[Equity] = aligned_equity_session_reader
if aligned_future_minute_reader is not None:
aligned_minute_readers[Future] = aligned_future_minute_reader
aligned_minute_readers[ContinuousFuture] = \
ContinuousFutureMinuteBarReader(
aligned_future_minute_reader,
self._roll_finders,
)
if aligned_future_session_reader is not None:
aligned_session_readers[Future] = aligned_future_session_reader
self._roll_finders['volume'] = VolumeRollFinder(
self.trading_calendar,
self.asset_finder,
aligned_future_session_reader,
)
aligned_session_readers[ContinuousFuture] = \
ContinuousFutureSessionBarReader(
aligned_future_session_reader,
self._roll_finders,
)
_dispatch_minute_reader = AssetDispatchMinuteBarReader(
self.trading_calendar,
self.asset_finder,
aligned_minute_readers,
self._last_available_minute,
)
_dispatch_session_reader = AssetDispatchSessionBarReader(
self.trading_calendar,
self.asset_finder,
aligned_session_readers,
self._last_available_session,
)
self._pricing_readers = {
'minute': _dispatch_minute_reader,
'daily': _dispatch_session_reader,
}
self._daily_aggregator = DailyHistoryAggregator(
self.trading_calendar.schedule.market_open,
_dispatch_minute_reader,
self.trading_calendar
)
self._history_loader = DailyHistoryLoader(
self.trading_calendar,
_dispatch_session_reader,
self._adjustment_reader,
self.asset_finder,
self._roll_finders,
prefetch_length=daily_history_prefetch_length,
)
self._minute_history_loader = MinuteHistoryLoader(
self.trading_calendar,
_dispatch_minute_reader,
self._adjustment_reader,
self.asset_finder,
self._roll_finders,
prefetch_length=minute_history_prefetch_length,
)
self._first_trading_day = first_trading_day
# Get the first trading minute
self._first_trading_minute, _ = (
self.trading_calendar.open_and_close_for_session(
self._first_trading_day
)
if self._first_trading_day is not None else (None, None)
)
        # Store the loc of the first trading day.
self._first_trading_day_loc = (
self.trading_calendar.all_sessions.get_loc(self._first_trading_day)
if self._first_trading_day is not None else None
)
def _ensure_reader_aligned(self, reader):
if reader is None:
return
if reader.trading_calendar.name == self.trading_calendar.name:
return reader
elif reader.data_frequency == 'minute':
return ReindexMinuteBarReader(
self.trading_calendar,
reader,
self._first_available_session,
self._last_available_session
)
elif reader.data_frequency == 'session':
return ReindexSessionBarReader(
self.trading_calendar,
reader,
self._first_available_session,
self._last_available_session
)
def _reindex_extra_source(self, df, source_date_index):
return df.reindex(index=source_date_index, method='ffill')
def handle_extra_source(self, source_df, sim_params):
"""
Extra sources always have a sid column.
We expand the given data (by forward filling) to the full range of
the simulation dates, so that lookup is fast during simulation.
"""
if source_df is None:
return
# Normalize all the dates in the df
source_df.index = source_df.index.normalize()
# source_df's sid column can either consist of assets we know about
# (such as sid(24)) or of assets we don't know about (such as
# palladium).
#
# In both cases, we break up the dataframe into individual dfs
# that only contain a single asset's information. ie, if source_df
# has data for PALLADIUM and GOLD, we split source_df into two
# dataframes, one for each. (same applies if source_df has data for
# AAPL and IBM).
#
# We then take each child df and reindex it to the simulation's date
# range by forward-filling missing values. this makes reads simpler.
#
# Finally, we store the data. For each column, we store a mapping in
# self.augmented_sources_map from the column to a dictionary of
# asset -> df. In other words,
# self.augmented_sources_map['days_to_cover']['AAPL'] gives us the df
# holding that data.
source_date_index = self.trading_calendar.sessions_in_range(
sim_params.start_session,
sim_params.end_session
)
# Break the source_df up into one dataframe per sid. This lets
# us (more easily) calculate accurate start/end dates for each sid,
# de-dup data, and expand the data to fit the backtest start/end date.
grouped_by_sid = source_df.groupby(["sid"])
group_names = grouped_by_sid.groups.keys()
group_dict = {}
for group_name in group_names:
group_dict[group_name] = grouped_by_sid.get_group(group_name)
# This will be the dataframe which we query to get fetcher assets at
        # any given time. Gets overwritten every time there's a new fetcher
        # call.
extra_source_df = pd.DataFrame()
for identifier, df in iteritems(group_dict):
# Since we know this df only contains a single sid, we can safely
# de-dupe by the index (dt). If minute granularity, will take the
# last data point on any given day
df = df.groupby(level=0).last()
# Reindex the dataframe based on the backtest start/end date.
# This makes reads easier during the backtest.
df = self._reindex_extra_source(df, source_date_index)
for col_name in df.columns.difference(['sid']):
if col_name not in self._augmented_sources_map:
self._augmented_sources_map[col_name] = {}
self._augmented_sources_map[col_name][identifier] = df
# Append to extra_source_df the reindexed dataframe for the single
# sid
extra_source_df = extra_source_df.append(df)
self._extra_source_df = extra_source_df
def _get_pricing_reader(self, data_frequency):
return self._pricing_readers[data_frequency]
def get_last_traded_dt(self, asset, dt, data_frequency):
"""
Given an asset and dt, returns the last traded dt from the viewpoint
of the given dt.
        If there is a trade on the dt, the answer is the dt provided.
"""
return self._get_pricing_reader(data_frequency).get_last_traded_dt(
asset, dt)
@staticmethod
    def _is_extra_source(asset, field, source_map):
"""
Internal method that determines if this asset/field combination
represents a fetcher value or a regular OHLCVP lookup.
"""
# If we have an extra source with a column called "price", only look
# at it if it's on something like palladium and not AAPL (since our
# own price data always wins when dealing with assets).
return not (field in BASE_FIELDS and
(isinstance(asset, (Asset, ContinuousFuture))))
def _get_fetcher_value(self, asset, field, dt):
day = normalize_date(dt)
try:
return \
self._augmented_sources_map[field][asset].loc[day, field]
except KeyError:
return np.NaN
def _get_single_asset_value(self,
session_label,
asset,
field,
dt,
data_frequency):
if self._is_extra_source(
asset, field, self._augmented_sources_map):
return self._get_fetcher_value(asset, field, dt)
if field not in BASE_FIELDS:
raise KeyError("Invalid column: " + str(field))
        # The daily and minute cases reduce to the same check on the session.
        if dt < asset.start_date or session_label > asset.end_date:
if field == "volume":
return 0
elif field == "contract":
return None
elif field != "last_traded":
return np.NaN
if data_frequency == "daily":
if field == "contract":
return self._get_current_contract(asset, session_label)
else:
return self._get_daily_spot_value(
asset, field, session_label,
)
else:
if field == "last_traded":
return self.get_last_traded_dt(asset, dt, 'minute')
elif field == "price":
return self._get_minute_spot_value(
asset, "close", dt, ffill=True,
)
elif field == "contract":
return self._get_current_contract(asset, dt)
else:
return self._get_minute_spot_value(asset, field, dt)
def get_spot_value(self, assets, field, dt, data_frequency):
"""
        Public API method that returns a scalar value representing the value
        of the desired asset's field at the given dt.
Parameters
----------
assets : Asset, ContinuousFuture, or iterable of same.
The asset or assets whose data is desired.
field : {'open', 'high', 'low', 'close', 'volume',
'price', 'last_traded'}
The desired field of the asset.
dt : pd.Timestamp
The timestamp for the desired value.
data_frequency : str
The frequency of the data to query; i.e. whether the data is
'daily' or 'minute' bars
Returns
-------
value : float, int, or pd.Timestamp
            The spot value of ``field`` for ``asset``. The return type is
            based on the ``field`` requested. If the field is one of 'open',
            'high', 'low', 'close', or 'price', the value will be a float. If
            the ``field`` is 'volume' the value will be an int. If the
            ``field`` is 'last_traded' the value will be a Timestamp.
"""
assets_is_scalar = False
if isinstance(assets, (AssetConvertible, PricingDataAssociable)):
assets_is_scalar = True
else:
# If 'assets' was not one of the expected types then it should be
# an iterable.
try:
iter(assets)
except TypeError:
raise TypeError(
"Unexpected 'assets' value of type {}."
.format(type(assets))
)
session_label = self.trading_calendar.minute_to_session_label(dt)
if assets_is_scalar:
return self._get_single_asset_value(
session_label,
assets,
field,
dt,
data_frequency,
)
else:
get_single_asset_value = self._get_single_asset_value
return [
get_single_asset_value(
session_label,
asset,
field,
dt,
data_frequency,
)
for asset in assets
]
def get_scalar_asset_spot_value(self, asset, field, dt, data_frequency):
"""
        Public API method that returns a scalar value representing the value
        of the desired asset's field at the given dt.
Parameters
----------
        asset : Asset
            The asset whose data is desired. This cannot be
            an arbitrary AssetConvertible.
field : {'open', 'high', 'low', 'close', 'volume',
'price', 'last_traded'}
The desired field of the asset.
dt : pd.Timestamp
The timestamp for the desired value.
data_frequency : str
The frequency of the data to query; i.e. whether the data is
'daily' or 'minute' bars
Returns
-------
value : float, int, or pd.Timestamp
            The spot value of ``field`` for ``asset``. The return type is
            based on the ``field`` requested. If the field is one of 'open',
            'high', 'low', 'close', or 'price', the value will be a float. If
            the ``field`` is 'volume' the value will be an int. If the
            ``field`` is 'last_traded' the value will be a Timestamp.
"""
return self._get_single_asset_value(
self.trading_calendar.minute_to_session_label(dt),
asset,
field,
dt,
data_frequency,
)
def get_adjustments(self, assets, field, dt, perspective_dt):
"""
        Returns a list of adjustments between the dt and perspective_dt for
        the given field and list of assets.
Parameters
----------
assets : list of type Asset, or Asset
The asset, or assets whose adjustments are desired.
field : {'open', 'high', 'low', 'close', 'volume', \
'price', 'last_traded'}
The desired field of the asset.
dt : pd.Timestamp
The timestamp for the desired value.
perspective_dt : pd.Timestamp
The timestamp from which the data is being viewed back from.
Returns
-------
adjustments : list[Adjustment]
The adjustments to that field.
"""
if isinstance(assets, Asset):
assets = [assets]
adjustment_ratios_per_asset = []
def split_adj_factor(x):
return x if field != 'volume' else 1.0 / x
for asset in assets:
adjustments_for_asset = []
split_adjustments = self._get_adjustment_list(
asset, self._splits_dict, "SPLITS"
)
for adj_dt, adj in split_adjustments:
if dt < adj_dt <= perspective_dt:
adjustments_for_asset.append(split_adj_factor(adj))
elif adj_dt > perspective_dt:
break
if field != 'volume':
merger_adjustments = self._get_adjustment_list(
asset, self._mergers_dict, "MERGERS"
)
for adj_dt, adj in merger_adjustments:
if dt < adj_dt <= perspective_dt:
adjustments_for_asset.append(adj)
elif adj_dt > perspective_dt:
break
dividend_adjustments = self._get_adjustment_list(
asset, self._dividends_dict, "DIVIDENDS",
)
for adj_dt, adj in dividend_adjustments:
if dt < adj_dt <= perspective_dt:
adjustments_for_asset.append(adj)
elif adj_dt > perspective_dt:
break
ratio = reduce(mul, adjustments_for_asset, 1.0)
adjustment_ratios_per_asset.append(ratio)
return adjustment_ratios_per_asset
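    # Worked example (illustrative): a single 2-for-1 split between ``dt``
    # and ``perspective_dt`` is stored with ratio 0.5, so get_adjustments
    # returns [0.5] for price fields; for field == 'volume',
    # split_adj_factor inverts the ratio and the result is [2.0]. Mergers
    # and dividends contribute their ratios multiplicatively for non-volume
    # fields.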
def get_adjusted_value(self, asset, field, dt,
perspective_dt,
data_frequency,
spot_value=None):
"""
Returns a scalar value representing the value
of the desired asset's field at the given dt with adjustments applied.
Parameters
----------
asset : Asset
The asset whose data is desired.
field : {'open', 'high', 'low', 'close', 'volume', \
'price', 'last_traded'}
The desired field of the asset.
dt : pd.Timestamp
The timestamp for the desired value.
perspective_dt : pd.Timestamp
The timestamp from which the data is being viewed back from.
data_frequency : str
The frequency of the data to query; i.e. whether the data is
'daily' or 'minute' bars
Returns
-------
value : float, int, or pd.Timestamp
The value of the given ``field`` for ``asset`` at ``dt`` with any
adjustments known by ``perspective_dt`` applied. The return type is
based on the ``field`` requested. If the field is one of 'open',
'high', 'low', 'close', or 'price', the value will be a float. If
            the ``field`` is 'volume' the value will be an int. If the ``field``
is 'last_traded' the value will be a Timestamp.
"""
if spot_value is None:
# if this a fetcher field, we want to use perspective_dt (not dt)
# because we want the new value as of midnight (fetcher only works
# on a daily basis, all timestamps are on midnight)
if self._is_extra_source(asset, field,
self._augmented_sources_map):
spot_value = self.get_spot_value(asset, field, perspective_dt,
data_frequency)
else:
spot_value = self.get_spot_value(asset, field, dt,
data_frequency)
if isinstance(asset, Equity):
ratio = self.get_adjustments(asset, field, dt, perspective_dt)[0]
spot_value *= ratio
return spot_value
def _get_minute_spot_value(self, asset, column, dt, ffill=False):
reader = self._get_pricing_reader('minute')
if not ffill:
try:
return reader.get_value(asset.sid, dt, column)
except NoDataOnDate:
if column != 'volume':
return np.nan
else:
return 0
# At this point the pairing of column='close' and ffill=True is
# assumed.
try:
# Optimize the best case scenario of a liquid asset
# returning a valid price.
result = reader.get_value(asset.sid, dt, column)
if not pd.isnull(result):
return result
except NoDataOnDate:
# Handling of no data for the desired date is done by the
# forward filling logic.
# The last trade may occur on a previous day.
pass
# If forward filling, we want the last minute with values (up to
# and including dt).
query_dt = reader.get_last_traded_dt(asset, dt)
if pd.isnull(query_dt):
# no last traded dt, bail
return np.nan
result = reader.get_value(asset.sid, query_dt, column)
if (dt == query_dt) or (dt.date() == query_dt.date()):
return result
# the value we found came from a different day, so we have to
# adjust the data if there are any adjustments on that day barrier
return self.get_adjusted_value(
asset, column, query_dt,
dt, "minute", spot_value=result
)
def _get_daily_spot_value(self, asset, column, dt):
reader = self._get_pricing_reader('daily')
if column == "last_traded":
last_traded_dt = reader.get_last_traded_dt(asset, dt)
if isnull(last_traded_dt):
return pd.NaT
else:
return last_traded_dt
elif column in OHLCV_FIELDS:
# don't forward fill
try:
return reader.get_value(asset, dt, column)
except NoDataOnDate:
return np.nan
elif column == "price":
found_dt = dt
while True:
try:
value = reader.get_value(
asset, found_dt, "close"
)
if not isnull(value):
if dt == found_dt:
return value
else:
# adjust if needed
return self.get_adjusted_value(
asset, column, found_dt, dt, "minute",
spot_value=value
)
else:
found_dt -= self.trading_calendar.day
except NoDataOnDate:
return np.nan
@remember_last
def _get_days_for_window(self, end_date, bar_count):
tds = self.trading_calendar.all_sessions
end_loc = tds.get_loc(end_date)
start_loc = end_loc - bar_count + 1
if start_loc < self._first_trading_day_loc:
raise HistoryWindowStartsBeforeData(
first_trading_day=self._first_trading_day.date(),
bar_count=bar_count,
suggested_start_day=tds[
self._first_trading_day_loc + bar_count
].date(),
)
return tds[start_loc:end_loc + 1]
def _get_history_daily_window(self,
assets,
end_dt,
bar_count,
field_to_use,
data_frequency):
"""
Internal method that returns a dataframe containing history bars
of daily frequency for the given sids.
"""
session = self.trading_calendar.minute_to_session_label(end_dt)
days_for_window = self._get_days_for_window(session, bar_count)
if len(assets) == 0:
return pd.DataFrame(None,
index=days_for_window,
columns=None)
data = self._get_history_daily_window_data(
assets, days_for_window, end_dt, field_to_use, data_frequency
)
return pd.DataFrame(
data,
index=days_for_window,
columns=assets
)
def _get_history_daily_window_data(self,
assets,
days_for_window,
end_dt,
field_to_use,
data_frequency):
if data_frequency == 'daily':
            # Two cases where we use daily data for the whole range:
            # 1) the history window ends at midnight utc.
            # 2) the last desired day of the window is after the
            #    last trading day.
return self._get_daily_window_data(
assets,
field_to_use,
days_for_window,
extra_slot=False
)
else:
# minute mode, requesting '1d'
daily_data = self._get_daily_window_data(
assets,
field_to_use,
days_for_window[0:-1]
)
if field_to_use == 'open':
minute_value = self._daily_aggregator.opens(
assets, end_dt)
elif field_to_use == 'high':
minute_value = self._daily_aggregator.highs(
assets, end_dt)
elif field_to_use == 'low':
minute_value = self._daily_aggregator.lows(
assets, end_dt)
elif field_to_use == 'close':
minute_value = self._daily_aggregator.closes(
assets, end_dt)
elif field_to_use == 'volume':
minute_value = self._daily_aggregator.volumes(
assets, end_dt)
elif field_to_use == 'sid':
minute_value = [
int(self._get_current_contract(asset, end_dt))
for asset in assets]
# append the partial day.
daily_data[-1] = minute_value
return daily_data
def _handle_minute_history_out_of_bounds(self, bar_count):
cal = self.trading_calendar
first_trading_minute_loc = (
cal.all_minutes.get_loc(
self._first_trading_minute
)
if self._first_trading_minute is not None else None
)
suggested_start_day = cal.minute_to_session_label(
cal.all_minutes[
first_trading_minute_loc + bar_count
] + cal.day
)
raise HistoryWindowStartsBeforeData(
first_trading_day=self._first_trading_day.date(),
bar_count=bar_count,
suggested_start_day=suggested_start_day.date(),
)
def _get_history_minute_window(self, assets, end_dt, bar_count,
field_to_use):
"""
Internal method that returns a dataframe containing history bars
of minute frequency for the given sids.
"""
# get all the minutes for this window
try:
minutes_for_window = self.trading_calendar.minutes_window(
end_dt, -bar_count
)
except KeyError:
self._handle_minute_history_out_of_bounds(bar_count)
if minutes_for_window[0] < self._first_trading_minute:
self._handle_minute_history_out_of_bounds(bar_count)
asset_minute_data = self._get_minute_window_data(
assets,
field_to_use,
minutes_for_window,
)
return pd.DataFrame(
asset_minute_data,
index=minutes_for_window,
columns=assets
)
def get_history_window(self,
assets,
end_dt,
bar_count,
frequency,
field,
data_frequency,
ffill=True):
"""
Public API method that returns a dataframe containing the requested
history window. Data is fully adjusted.
Parameters
----------
assets : list of zipline.data.Asset objects
The assets whose data is desired.
bar_count: int
The number of bars desired.
frequency: string
"1d" or "1m"
field: string
The desired field of the asset.
data_frequency: string
The frequency of the data to query; i.e. whether the data is
'daily' or 'minute' bars.
ffill: boolean
Forward-fill missing values. Only has effect if field
is 'price'.
Returns
-------
A dataframe containing the requested data.
"""
if field not in OHLCVP_FIELDS and field != 'sid':
raise ValueError("Invalid field: {0}".format(field))
if bar_count < 1:
raise ValueError(
"bar_count must be >= 1, but got {}".format(bar_count)
)
if frequency == "1d":
if field == "price":
df = self._get_history_daily_window(assets, end_dt, bar_count,
"close", data_frequency)
else:
df = self._get_history_daily_window(assets, end_dt, bar_count,
field, data_frequency)
elif frequency == "1m":
if field == "price":
df = self._get_history_minute_window(assets, end_dt, bar_count,
"close")
else:
df = self._get_history_minute_window(assets, end_dt, bar_count,
field)
else:
raise ValueError("Invalid frequency: {0}".format(frequency))
# forward-fill price
if ffill and field == "price":
if frequency == "1m":
ffill_data_frequency = 'minute'
elif frequency == "1d":
ffill_data_frequency = 'daily'
else:
raise Exception(
"Only 1d and 1m are supported for forward-filling.")
assets_with_leading_nan = np.where(isnull(df.iloc[0]))[0]
history_start, history_end = df.index[[0, -1]]
if ffill_data_frequency == 'daily' and data_frequency == 'minute':
# When we're looking for a daily value, but we haven't seen any
# volume in today's minute bars yet, we need to use the
# previous day's ffilled daily price. Using today's daily price
# could yield a value from later today.
history_start -= self.trading_calendar.day
initial_values = []
for asset in df.columns[assets_with_leading_nan]:
last_traded = self.get_last_traded_dt(
asset,
history_start,
ffill_data_frequency,
)
if isnull(last_traded):
initial_values.append(nan)
else:
initial_values.append(
self.get_adjusted_value(
asset,
field,
dt=last_traded,
perspective_dt=history_end,
data_frequency=ffill_data_frequency,
)
)
# Set leading values for assets that were missing data, then ffill.
            df.iloc[0, assets_with_leading_nan] = np.array(
initial_values,
dtype=np.float64
)
df.fillna(method='ffill', inplace=True)
# forward-filling will incorrectly produce values after the end of
# an asset's lifetime, so write NaNs back over the asset's
# end_date.
normed_index = df.index.normalize()
for asset in df.columns:
if history_end >= asset.end_date:
# if the window extends past the asset's end date, set
# all post-end-date values to NaN in that asset's series
df.loc[normed_index > asset.end_date, asset] = nan
return df
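    # Illustrative usage sketch (hypothetical portal and asset): fetch the
    # last 20 fully adjusted daily closes as a DataFrame indexed by session,
    # with one column per asset.
    #
    #   prices = data_portal.get_history_window(
    #       [asset], end_dt, 20, '1d', 'price', 'daily')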
def _get_minute_window_data(self, assets, field, minutes_for_window):
"""
Internal method that gets a window of adjusted minute data for an asset
and specified date range. Used to support the history API method for
minute bars.
Missing bars are filled with NaN.
Parameters
----------
assets : iterable[Asset]
The assets whose data is desired.
field: string
            The specific field to return. "open", "high", "close", etc.
minutes_for_window: pd.DateTimeIndex
The list of minutes representing the desired window. Each minute
is a pd.Timestamp.
Returns
-------
A numpy array with requested values.
"""
return self._minute_history_loader.history(assets,
minutes_for_window,
field,
False)
def _get_daily_window_data(self,
assets,
field,
days_in_window,
extra_slot=True):
"""
Internal method that gets a window of adjusted daily data for a sid
and specified date range. Used to support the history API method for
daily bars.
Parameters
----------
        assets : iterable[Asset]
            The assets whose data is desired.
        field: string
            The specific field to return. "open", "high", "close", etc.
        days_in_window: pd.DatetimeIndex
            The list of days representing the desired window. Each day
            is a pd.Timestamp.
extra_slot: boolean
Whether to allocate an extra slot in the returned numpy array.
This extra slot will hold the data for the last partial day. It's
much better to create it here than to create a copy of the array
later just to add a slot.
Returns
-------
A numpy array with requested values. Any missing slots filled with
nan.
"""
bar_count = len(days_in_window)
# create an np.array of size bar_count
dtype = float64 if field != 'sid' else int64
if extra_slot:
return_array = np.zeros((bar_count + 1, len(assets)), dtype=dtype)
else:
return_array = np.zeros((bar_count, len(assets)), dtype=dtype)
if field != "volume":
# volumes default to 0, so we don't need to put NaNs in the array
return_array[:] = np.NAN
if bar_count != 0:
data = self._history_loader.history(assets,
days_in_window,
field,
extra_slot)
if extra_slot:
return_array[:len(return_array) - 1, :] = data
else:
return_array[:len(data)] = data
return return_array
def _get_adjustment_list(self, asset, adjustments_dict, table_name):
"""
Internal method that returns a list of adjustments for the given sid.
Parameters
----------
asset : Asset
The asset for which to return adjustments.
adjustments_dict: dict
A dictionary of sid -> list that is used as a cache.
table_name: string
The table that contains this data in the adjustments db.
Returns
-------
adjustments: list
            A list of (pd.Timestamp, multiplier) pairs, earliest first.
"""
if self._adjustment_reader is None:
return []
sid = int(asset)
try:
adjustments = adjustments_dict[sid]
except KeyError:
adjustments = adjustments_dict[sid] = self._adjustment_reader.\
get_adjustments_for_sid(table_name, sid)
return adjustments
def get_splits(self, assets, dt):
"""
Returns any splits for the given sids and the given dt.
Parameters
----------
assets : container
Assets for which we want splits.
dt : pd.Timestamp
The date for which we are checking for splits. Note: this is
expected to be midnight UTC.
Returns
-------
splits : list[(asset, float)]
            List of splits, where each split is an (asset, ratio) tuple.
"""
if self._adjustment_reader is None or not assets:
return []
# convert dt to # of seconds since epoch, because that's what we use
# in the adjustments db
seconds = int(dt.value / 1e9)
splits = self._adjustment_reader.conn.execute(
"SELECT sid, ratio FROM SPLITS WHERE effective_date = ?",
(seconds,)).fetchall()
splits = [split for split in splits if split[0] in assets]
splits = [(self.asset_finder.retrieve_asset(split[0]), split[1])
for split in splits]
return splits
def get_stock_dividends(self, sid, trading_days):
"""
Returns all the stock dividends for a specific sid that occur
in the given trading range.
Parameters
----------
sid: int
The asset whose stock dividends should be returned.
trading_days: pd.DatetimeIndex
The trading range.
Returns
-------
        list: A list of dicts with all relevant attributes populated.
All timestamp fields are converted to pd.Timestamps.
"""
if self._adjustment_reader is None:
return []
if len(trading_days) == 0:
return []
        start_dt = int(trading_days[0].value / 1e9)
        end_dt = int(trading_days[-1].value / 1e9)
dividends = self._adjustment_reader.conn.execute(
"SELECT * FROM stock_dividend_payouts WHERE sid = ? AND "
"ex_date > ? AND pay_date < ?", (int(sid), start_dt, end_dt,)).\
fetchall()
dividend_info = []
for dividend_tuple in dividends:
dividend_info.append({
"declared_date": dividend_tuple[1],
"ex_date": pd.Timestamp(dividend_tuple[2], unit="s"),
"pay_date": pd.Timestamp(dividend_tuple[3], unit="s"),
"payment_sid": dividend_tuple[4],
"ratio": dividend_tuple[5],
"record_date": pd.Timestamp(dividend_tuple[6], unit="s"),
"sid": dividend_tuple[7]
})
return dividend_info
def contains(self, asset, field):
return field in BASE_FIELDS or \
(field in self._augmented_sources_map and
asset in self._augmented_sources_map[field])
def get_fetcher_assets(self, dt):
"""
Returns a list of assets for the current date, as defined by the
fetcher data.
Returns
-------
list: a list of Asset objects.
"""
# return a list of assets for the current date, as defined by the
# fetcher source
if self._extra_source_df is None:
return []
day = normalize_date(dt)
if day in self._extra_source_df.index:
assets = self._extra_source_df.loc[day]['sid']
else:
return []
if isinstance(assets, pd.Series):
return [x for x in assets if isinstance(x, Asset)]
else:
return [assets] if isinstance(assets, Asset) else []
# cache size picked somewhat loosely. this code exists purely to
# handle deprecated API.
@weak_lru_cache(20)
def _get_minute_count_for_transform(self, ending_minute, days_count):
# This function works in three steps.
# Step 1. Count the minutes from ``ending_minute`` to the start of its
# session.
# Step 2. Count the minutes from the prior ``days_count - 1`` sessions.
# Step 3. Return the sum of the results from steps (1) and (2).
# Example (NYSE Calendar)
        # ending_minute = 2015-12-28 9:40 AM US/Eastern
# days_count = 3
# Step 1. Calculate that there are 10 minutes in the ending session.
# Step 2. Calculate that there are 390 + 210 = 600 minutes in the prior
# two sessions. (Prior sessions are 2015-12-23 and 2015-12-24.)
# 2015-12-24 is a half day.
# Step 3. Return 600 + 10 = 610.
cal = self.trading_calendar
ending_session = cal.minute_to_session_label(
ending_minute,
direction="none", # It's an error to pass a non-trading minute.
)
# Assume that calendar days are always full of contiguous minutes,
# which means we can just take 1 + (number of minutes between the last
# minute and the start of the session). We add one so that we include
# the ending minute in the total.
ending_session_minute_count = timedelta_to_integral_minutes(
ending_minute - cal.open_and_close_for_session(ending_session)[0]
) + 1
if days_count == 1:
# We just need sessions for the active day.
return ending_session_minute_count
# XXX: We're subtracting 2 here to account for two offsets:
# 1. We only want ``days_count - 1`` sessions, since we've already
# accounted for the ending session above.
# 2. The API of ``sessions_window`` is to return one more session than
# the requested number. I don't think any consumers actually want
# that behavior, but it's the tested and documented behavior right
# now, so we have to request one less session than we actually want.
completed_sessions = cal.sessions_window(
cal.previous_session_label(ending_session),
2 - days_count,
)
completed_sessions_minute_count = (
self.trading_calendar.minutes_count_for_sessions_in_range(
completed_sessions[0],
completed_sessions[-1]
)
)
return ending_session_minute_count + completed_sessions_minute_count
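    # Worked example of the arithmetic above, using the values from the
    # example in the comments (illustrative only):
    #
    #     ending_session_minute_count = 10          # 9:31 through 9:40
    #     completed_sessions_minute_count = 390 + 210
    #     total = ending_session_minute_count + completed_sessions_minute_count
    #     assert total == 610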
def get_simple_transform(self, asset, transform_name, dt, data_frequency,
bars=None):
if transform_name == "returns":
# returns is always calculated over the last 2 days, regardless
# of the simulation's data frequency.
hst = self.get_history_window(
[asset],
dt,
2,
"1d",
"price",
data_frequency,
ffill=True,
)[asset]
return (hst.iloc[-1] - hst.iloc[0]) / hst.iloc[0]
if bars is None:
raise ValueError("bars cannot be None!")
if data_frequency == "minute":
freq_str = "1m"
calculated_bar_count = int(self._get_minute_count_for_transform(
dt, bars
))
else:
freq_str = "1d"
calculated_bar_count = bars
price_arr = self.get_history_window(
[asset],
dt,
calculated_bar_count,
freq_str,
"price",
data_frequency,
ffill=True,
)[asset]
if transform_name == "mavg":
return nanmean(price_arr)
elif transform_name == "stddev":
return nanstd(price_arr, ddof=1)
elif transform_name == "vwap":
volume_arr = self.get_history_window(
[asset],
dt,
calculated_bar_count,
freq_str,
"volume",
data_frequency,
ffill=True,
)[asset]
            vol_sum = nansum(volume_arr)
            # NumPy float division by zero does not raise ZeroDivisionError;
            # it returns inf/nan with a warning, so guard explicitly.
            if vol_sum == 0:
                return np.nan
            return nansum(price_arr * volume_arr) / vol_sum
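    # Illustrative sketch (not part of the API): the "vwap" branch above is
    # the classic volume-weighted average price. Assuming aligned numpy
    # arrays ``prices`` and ``volumes``:
    #
    #     vol_sum = np.nansum(volumes)
    #     vwap = (np.nansum(prices * volumes) / vol_sum
    #             if vol_sum != 0 else np.nan)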
def get_current_future_chain(self, continuous_future, dt):
"""
        Retrieves the future chain for the contract at the given `dt` according
        to the `continuous_future` specification.
Returns
-------
future_chain : list[Future]
A list of active futures, where the first index is the current
contract specified by the continuous future definition, the second
is the next upcoming contract and so on.
"""
rf = self._roll_finders[continuous_future.roll_style]
session = self.trading_calendar.minute_to_session_label(dt)
contract_center = rf.get_contract_center(
continuous_future.root_symbol, session,
continuous_future.offset)
oc = self.asset_finder.get_ordered_contracts(
continuous_future.root_symbol)
chain = oc.active_chain(contract_center, session.value)
return self.asset_finder.retrieve_all(chain)
def _get_current_contract(self, continuous_future, dt):
rf = self._roll_finders[continuous_future.roll_style]
contract_sid = rf.get_contract_center(continuous_future.root_symbol,
dt,
continuous_future.offset)
if contract_sid is None:
return None
return self.asset_finder.retrieve_asset(contract_sid)
@property
def adjustment_reader(self):
return self._adjustment_reader | zipline-live2-vk | /zipline-live2-vk-1.3.0.7.0.18.tar.gz/zipline-live2-vk-1.3.0.7.0.18/zipline/data/data_portal.py | data_portal.py |
from collections import OrderedDict
from abc import ABCMeta, abstractmethod
import numpy as np
import pandas as pd
from six import with_metaclass
from zipline.data._resample import (
_minute_to_session_open,
_minute_to_session_high,
_minute_to_session_low,
_minute_to_session_close,
_minute_to_session_volume,
)
from zipline.data.bar_reader import NoDataOnDate
from zipline.data.minute_bars import MinuteBarReader
from zipline.data.session_bars import SessionBarReader
from zipline.utils.memoize import lazyval
_MINUTE_TO_SESSION_OHCLV_HOW = OrderedDict((
('open', 'first'),
('high', 'max'),
('low', 'min'),
('close', 'last'),
('volume', 'sum'),
))
def minute_frame_to_session_frame(minute_frame, calendar):
"""
Resample a DataFrame with minute data into the frame expected by a
BcolzDailyBarWriter.
Parameters
----------
minute_frame : pd.DataFrame
A DataFrame with the columns `open`, `high`, `low`, `close`, `volume`,
and `dt` (minute dts)
calendar : trading_calendars.trading_calendar.TradingCalendar
        The trading calendar whose session labels are used to resample the
        minute data into sessions.
    Returns
    -------
session_frame : pd.DataFrame
A DataFrame with the columns `open`, `high`, `low`, `close`, `volume`,
and `day` (datetime-like).
"""
how = OrderedDict((c, _MINUTE_TO_SESSION_OHCLV_HOW[c])
for c in minute_frame.columns)
labels = calendar.minute_index_to_session_labels(minute_frame.index)
return minute_frame.groupby(labels).agg(how)
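# A minimal usage sketch of ``minute_frame_to_session_frame``. The NYSE
# calendar and the constant sample values below are illustrative assumptions,
# not part of this module's API.
def _example_minute_frame_to_session_frame():
    from trading_calendars import get_calendar
    cal = get_calendar('NYSE')
    minutes = cal.minutes_for_session(pd.Timestamp('2016-03-17', tz='UTC'))
    minute_frame = pd.DataFrame(
        {'open': 1.0, 'high': 2.0, 'low': 0.5, 'close': 1.5, 'volume': 100},
        index=minutes,
    )
    # Produces one row for the session, aggregated per column
    # (first/max/min/last/sum).
    return minute_frame_to_session_frame(minute_frame, cal)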
def minute_to_session(column, close_locs, data, out):
"""
Resample an array with minute data into an array with session data.
This function assumes that the minute data is the exact length of all
minutes in the sessions in the output.
Parameters
----------
column : str
The `open`, `high`, `low`, `close`, or `volume` column.
close_locs : array[intp]
The locations in `data` which are the market close minutes.
data : array[float64|uint32]
The minute data to be sampled into session data.
        The first value should align with the market open of the first
        session and the last value with the market close of the last
        session, with values for every minute of every session in between.
out : array[float64|uint32]
The output array into which to write the sampled sessions.
"""
if column == 'open':
_minute_to_session_open(close_locs, data, out)
elif column == 'high':
_minute_to_session_high(close_locs, data, out)
elif column == 'low':
_minute_to_session_low(close_locs, data, out)
elif column == 'close':
_minute_to_session_close(close_locs, data, out)
elif column == 'volume':
_minute_to_session_volume(close_locs, data, out)
return out
class DailyHistoryAggregator(object):
"""
Converts minute pricing data into a daily summary, to be used for the
last slot in a call to history with a frequency of `1d`.
This summary is the same as a daily bar rollup of minute data, with the
distinction that the summary is truncated to the `dt` requested.
    i.e. the aggregation slides forward over the course of the simulation day.
    Provides aggregation for `open`, `high`, `low`, `close`, and `volume`.
    The aggregation rules for each price type are documented in their
    respective methods.
    """
def __init__(self, market_opens, minute_reader, trading_calendar):
self._market_opens = market_opens
self._minute_reader = minute_reader
self._trading_calendar = trading_calendar
# The caches are structured as (date, market_open, entries), where
# entries is a dict of asset -> (last_visited_dt, value)
#
# Whenever an aggregation method determines the current value,
# the entry for the respective asset should be overwritten with a new
# entry for the current dt.value (int) and aggregation value.
#
        # When the requested dt's date differs from the cached date, the
        # cache is flushed so that the cache entries do not grow unbounded.
#
# Example cache:
# cache = (date(2016, 3, 17),
# pd.Timestamp('2016-03-17 13:31', tz='UTC'),
# {
# 1: (1458221460000000000, np.nan),
# 2: (1458221460000000000, 42.0),
# })
self._caches = {
'open': None,
'high': None,
'low': None,
'close': None,
'volume': None
}
# The int value is used for deltas to avoid extra computation from
# creating new Timestamps.
self._one_min = pd.Timedelta('1 min').value
def _prelude(self, dt, field):
session = self._trading_calendar.minute_to_session_label(dt)
dt_value = dt.value
cache = self._caches[field]
if cache is None or cache[0] != session:
market_open = self._market_opens.loc[session]
cache = self._caches[field] = (session, market_open, {})
_, market_open, entries = cache
market_open = market_open.tz_localize('UTC')
if dt != market_open:
prev_dt = dt_value - self._one_min
else:
prev_dt = None
return market_open, prev_dt, dt_value, entries
def opens(self, assets, dt):
"""
        The open field's aggregation returns the first value that occurs
        for the day. If there has been no data on or before the `dt`, the
        open is `nan`.
Once the first non-nan open is seen, that value remains constant per
asset for the remainder of the day.
Returns
-------
np.array with dtype=float64, in order of assets parameter.
"""
market_open, prev_dt, dt_value, entries = self._prelude(dt, 'open')
opens = []
session_label = self._trading_calendar.minute_to_session_label(dt)
for asset in assets:
if not asset.is_alive_for_session(session_label):
opens.append(np.NaN)
continue
if prev_dt is None:
val = self._minute_reader.get_value(asset, dt, 'open')
entries[asset] = (dt_value, val)
opens.append(val)
continue
else:
try:
last_visited_dt, first_open = entries[asset]
if last_visited_dt == dt_value:
opens.append(first_open)
continue
elif not pd.isnull(first_open):
opens.append(first_open)
entries[asset] = (dt_value, first_open)
continue
else:
after_last = pd.Timestamp(
last_visited_dt + self._one_min, tz='UTC')
window = self._minute_reader.load_raw_arrays(
['open'],
after_last,
dt,
[asset],
)[0]
nonnan = window[~pd.isnull(window)]
if len(nonnan):
val = nonnan[0]
else:
val = np.nan
entries[asset] = (dt_value, val)
opens.append(val)
continue
except KeyError:
window = self._minute_reader.load_raw_arrays(
['open'],
market_open,
dt,
[asset],
)[0]
nonnan = window[~pd.isnull(window)]
if len(nonnan):
val = nonnan[0]
else:
val = np.nan
entries[asset] = (dt_value, val)
opens.append(val)
continue
return np.array(opens)
def highs(self, assets, dt):
"""
The high field's aggregation returns the largest high seen between
the market open and the current dt.
        If there has been no data on or before the `dt`, the high is `nan`.
Returns
-------
np.array with dtype=float64, in order of assets parameter.
"""
market_open, prev_dt, dt_value, entries = self._prelude(dt, 'high')
highs = []
session_label = self._trading_calendar.minute_to_session_label(dt)
for asset in assets:
if not asset.is_alive_for_session(session_label):
highs.append(np.NaN)
continue
if prev_dt is None:
val = self._minute_reader.get_value(asset, dt, 'high')
entries[asset] = (dt_value, val)
highs.append(val)
continue
else:
try:
last_visited_dt, last_max = entries[asset]
if last_visited_dt == dt_value:
highs.append(last_max)
continue
elif last_visited_dt == prev_dt:
curr_val = self._minute_reader.get_value(
asset, dt, 'high')
if pd.isnull(curr_val):
val = last_max
elif pd.isnull(last_max):
val = curr_val
else:
val = max(last_max, curr_val)
entries[asset] = (dt_value, val)
highs.append(val)
continue
else:
after_last = pd.Timestamp(
last_visited_dt + self._one_min, tz='UTC')
window = self._minute_reader.load_raw_arrays(
['high'],
after_last,
dt,
[asset],
)[0].T
val = np.nanmax(np.append(window, last_max))
entries[asset] = (dt_value, val)
highs.append(val)
continue
except KeyError:
window = self._minute_reader.load_raw_arrays(
['high'],
market_open,
dt,
[asset],
)[0].T
val = np.nanmax(window)
entries[asset] = (dt_value, val)
highs.append(val)
continue
return np.array(highs)
def lows(self, assets, dt):
"""
The low field's aggregation returns the smallest low seen between
the market open and the current dt.
        If there has been no data on or before the `dt`, the low is `nan`.
Returns
-------
np.array with dtype=float64, in order of assets parameter.
"""
market_open, prev_dt, dt_value, entries = self._prelude(dt, 'low')
lows = []
session_label = self._trading_calendar.minute_to_session_label(dt)
for asset in assets:
if not asset.is_alive_for_session(session_label):
lows.append(np.NaN)
continue
if prev_dt is None:
val = self._minute_reader.get_value(asset, dt, 'low')
entries[asset] = (dt_value, val)
lows.append(val)
continue
else:
try:
last_visited_dt, last_min = entries[asset]
if last_visited_dt == dt_value:
lows.append(last_min)
continue
elif last_visited_dt == prev_dt:
curr_val = self._minute_reader.get_value(
asset, dt, 'low')
val = np.nanmin([last_min, curr_val])
entries[asset] = (dt_value, val)
lows.append(val)
continue
else:
after_last = pd.Timestamp(
last_visited_dt + self._one_min, tz='UTC')
window = self._minute_reader.load_raw_arrays(
['low'],
after_last,
dt,
[asset],
)[0].T
val = np.nanmin(np.append(window, last_min))
entries[asset] = (dt_value, val)
lows.append(val)
continue
except KeyError:
window = self._minute_reader.load_raw_arrays(
['low'],
market_open,
dt,
[asset],
)[0].T
val = np.nanmin(window)
entries[asset] = (dt_value, val)
lows.append(val)
continue
return np.array(lows)
def closes(self, assets, dt):
"""
The close field's aggregation returns the latest close at the given
dt.
If the close for the given dt is `nan`, the most recent non-nan
`close` is used.
        If there has been no data on or before the `dt`, the close is `nan`.
Returns
-------
np.array with dtype=float64, in order of assets parameter.
"""
market_open, prev_dt, dt_value, entries = self._prelude(dt, 'close')
closes = []
session_label = self._trading_calendar.minute_to_session_label(dt)
def _get_filled_close(asset):
"""
            Returns the most recent non-nan close for the asset in this
            session. If there has been no data in this session on or before
            the `dt`, returns `nan`.
"""
window = self._minute_reader.load_raw_arrays(
['close'],
market_open,
dt,
[asset],
)[0]
try:
return window[~np.isnan(window)][-1]
except IndexError:
return np.NaN
for asset in assets:
if not asset.is_alive_for_session(session_label):
closes.append(np.NaN)
continue
if prev_dt is None:
val = self._minute_reader.get_value(asset, dt, 'close')
entries[asset] = (dt_value, val)
closes.append(val)
continue
else:
try:
last_visited_dt, last_close = entries[asset]
if last_visited_dt == dt_value:
closes.append(last_close)
continue
elif last_visited_dt == prev_dt:
val = self._minute_reader.get_value(
asset, dt, 'close')
if pd.isnull(val):
val = last_close
entries[asset] = (dt_value, val)
closes.append(val)
continue
else:
val = self._minute_reader.get_value(
asset, dt, 'close')
if pd.isnull(val):
val = _get_filled_close(asset)
entries[asset] = (dt_value, val)
closes.append(val)
continue
except KeyError:
val = self._minute_reader.get_value(
asset, dt, 'close')
if pd.isnull(val):
val = _get_filled_close(asset)
entries[asset] = (dt_value, val)
closes.append(val)
continue
return np.array(closes)
def volumes(self, assets, dt):
"""
        The volume field's aggregation returns the sum of all volumes
        between the market open and the `dt`.
        If there has been no data on or before the `dt`, the volume is 0.
Returns
-------
np.array with dtype=int64, in order of assets parameter.
"""
market_open, prev_dt, dt_value, entries = self._prelude(dt, 'volume')
volumes = []
session_label = self._trading_calendar.minute_to_session_label(dt)
for asset in assets:
if not asset.is_alive_for_session(session_label):
volumes.append(0)
continue
if prev_dt is None:
val = self._minute_reader.get_value(asset, dt, 'volume')
entries[asset] = (dt_value, val)
volumes.append(val)
continue
else:
try:
last_visited_dt, last_total = entries[asset]
if last_visited_dt == dt_value:
volumes.append(last_total)
continue
elif last_visited_dt == prev_dt:
val = self._minute_reader.get_value(
asset, dt, 'volume')
val += last_total
entries[asset] = (dt_value, val)
volumes.append(val)
continue
else:
after_last = pd.Timestamp(
last_visited_dt + self._one_min, tz='UTC')
window = self._minute_reader.load_raw_arrays(
['volume'],
after_last,
dt,
[asset],
)[0]
val = np.nansum(window) + last_total
entries[asset] = (dt_value, val)
volumes.append(val)
continue
except KeyError:
window = self._minute_reader.load_raw_arrays(
['volume'],
market_open,
dt,
[asset],
)[0]
val = np.nansum(window)
entries[asset] = (dt_value, val)
volumes.append(val)
continue
return np.array(volumes)
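# A minimal usage sketch of ``DailyHistoryAggregator``. ``market_opens``,
# ``minute_reader``, and ``assets`` are assumed to be supplied by the caller;
# the NYSE calendar is an illustrative choice.
def _example_daily_history_aggregator(market_opens, minute_reader, assets, dt):
    from trading_calendars import get_calendar
    aggregator = DailyHistoryAggregator(
        market_opens, minute_reader, get_calendar('NYSE'),
    )
    # Each call returns one value per asset, aggregated from the session's
    # market open through ``dt``.
    return {
        'open': aggregator.opens(assets, dt),
        'high': aggregator.highs(assets, dt),
        'low': aggregator.lows(assets, dt),
        'close': aggregator.closes(assets, dt),
        'volume': aggregator.volumes(assets, dt),
    }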
class MinuteResampleSessionBarReader(SessionBarReader):
def __init__(self, calendar, minute_bar_reader):
self._calendar = calendar
self._minute_bar_reader = minute_bar_reader
def _get_resampled(self, columns, start_session, end_session, assets):
range_open = self._calendar.session_open(start_session)
range_close = self._calendar.session_close(end_session)
minute_data = self._minute_bar_reader.load_raw_arrays(
columns,
range_open,
range_close,
assets,
)
# Get the index of the close minute for each session in the range.
# If the range contains only one session, the only close in the range
# is the last minute in the data. Otherwise, we need to get all the
# session closes and find their indices in the range of minutes.
if start_session == end_session:
close_ilocs = np.array([len(minute_data[0]) - 1], dtype=np.int64)
else:
minutes = self._calendar.minutes_in_range(
range_open,
range_close,
)
session_closes = self._calendar.session_closes_in_range(
start_session,
end_session,
)
close_ilocs = minutes.searchsorted(session_closes.values)
results = []
shape = (len(close_ilocs), len(assets))
for col in columns:
if col != 'volume':
out = np.full(shape, np.nan)
else:
out = np.zeros(shape, dtype=np.uint32)
results.append(out)
for i in range(len(assets)):
for j, column in enumerate(columns):
data = minute_data[j][:, i]
minute_to_session(column, close_ilocs, data, results[j][:, i])
return results
@property
def trading_calendar(self):
return self._calendar
def load_raw_arrays(self, columns, start_dt, end_dt, sids):
return self._get_resampled(columns, start_dt, end_dt, sids)
def get_value(self, sid, session, colname):
# WARNING: This will need caching or other optimization if used in a
# tight loop.
        # This was developed to complete the interface, but has not been
        # tuned for real world use.
return self._get_resampled([colname], session, session, [sid])[0][0][0]
@lazyval
def sessions(self):
cal = self._calendar
first = self._minute_bar_reader.first_trading_day
last = cal.minute_to_session_label(
self._minute_bar_reader.last_available_dt)
return cal.sessions_in_range(first, last)
@lazyval
def last_available_dt(self):
return self.trading_calendar.minute_to_session_label(
self._minute_bar_reader.last_available_dt
)
@property
def first_trading_day(self):
return self._minute_bar_reader.first_trading_day
def get_last_traded_dt(self, asset, dt):
return self.trading_calendar.minute_to_session_label(
self._minute_bar_reader.get_last_traded_dt(asset, dt))
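# A minimal usage sketch of ``MinuteResampleSessionBarReader``; assumes a
# minute bar reader and its calendar are already available.
def _example_minute_resample_session_reader(minute_reader, calendar, sids,
                                            start_session, end_session):
    reader = MinuteResampleSessionBarReader(calendar, minute_reader)
    # Returns one ndarray per column, shaped (sessions in range, sids).
    return reader.load_raw_arrays(
        ['open', 'high', 'low', 'close', 'volume'],
        start_session,
        end_session,
        sids,
    )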
class ReindexBarReader(with_metaclass(ABCMeta)):
"""
    A base class for readers which reindex results, filling in the additional
    indices with empty data.
    Used to align reads across assets which trade on different calendars.
Currently only supports a ``trading_calendar`` which is a superset of the
``reader``'s calendar.
Parameters
----------
    trading_calendar : zipline.utils.trading_calendar.TradingCalendar
        The calendar to use when indexing results from the reader.
    reader : MinuteBarReader|SessionBarReader
        The reader which has a calendar that is a subset of the desired
        ``trading_calendar``.
    first_trading_session : pd.Timestamp
        The first trading session the reader should provide. Must be
        specified, since the ``reader``'s first session may not exactly align
        with the desired calendar; e.g. the first session on the target
        calendar may be a holiday on the ``reader``'s calendar.
    last_trading_session : pd.Timestamp
        The last trading session the reader should provide. Must be
        specified, since the ``reader``'s last session may not exactly align
        with the desired calendar; e.g. the last session on the target
        calendar may be a holiday on the ``reader``'s calendar.
"""
def __init__(self,
trading_calendar,
reader,
first_trading_session,
last_trading_session):
self._trading_calendar = trading_calendar
self._reader = reader
self._first_trading_session = first_trading_session
self._last_trading_session = last_trading_session
@property
def last_available_dt(self):
return self._reader.last_available_dt
def get_last_traded_dt(self, sid, dt):
return self._reader.get_last_traded_dt(sid, dt)
@property
def first_trading_day(self):
return self._reader.first_trading_day
def get_value(self, sid, dt, field):
# Give an empty result if no data is present.
try:
return self._reader.get_value(sid, dt, field)
except NoDataOnDate:
if field == 'volume':
return 0
else:
return np.nan
@abstractmethod
def _outer_dts(self, start_dt, end_dt):
raise NotImplementedError
@abstractmethod
def _inner_dts(self, start_dt, end_dt):
raise NotImplementedError
@property
def trading_calendar(self):
return self._trading_calendar
@lazyval
def sessions(self):
return self.trading_calendar.sessions_in_range(
self._first_trading_session,
self._last_trading_session
)
def load_raw_arrays(self, fields, start_dt, end_dt, sids):
outer_dts = self._outer_dts(start_dt, end_dt)
inner_dts = self._inner_dts(start_dt, end_dt)
indices = outer_dts.searchsorted(inner_dts)
shape = len(outer_dts), len(sids)
outer_results = []
if len(inner_dts) > 0:
inner_results = self._reader.load_raw_arrays(
fields, inner_dts[0], inner_dts[-1], sids)
else:
inner_results = None
for i, field in enumerate(fields):
if field != 'volume':
out = np.full(shape, np.nan)
else:
out = np.zeros(shape, dtype=np.uint32)
if inner_results is not None:
out[indices] = inner_results[i]
outer_results.append(out)
return outer_results
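# The fill pattern used by ``load_raw_arrays`` above, in isolation: values
# read on the inner (subset) calendar are scattered into the outer index,
# leaving nan at outer-only dts. A self-contained sketch with made-up dates:
def _example_reindex_fill():
    outer_dts = pd.DatetimeIndex(['2016-01-04', '2016-01-05', '2016-01-06'])
    inner_dts = pd.DatetimeIndex(['2016-01-04', '2016-01-06'])
    inner_values = np.array([[1.0], [2.0]])  # (len(inner_dts), 1 sid)
    out = np.full((len(outer_dts), 1), np.nan)
    out[outer_dts.searchsorted(inner_dts)] = inner_values
    return out  # [[1.0], [nan], [2.0]]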
class ReindexMinuteBarReader(ReindexBarReader, MinuteBarReader):
"""
See: ``ReindexBarReader``
"""
def _outer_dts(self, start_dt, end_dt):
return self._trading_calendar.minutes_in_range(start_dt, end_dt)
def _inner_dts(self, start_dt, end_dt):
return self._reader.calendar.minutes_in_range(start_dt, end_dt)
class ReindexSessionBarReader(ReindexBarReader, SessionBarReader):
"""
See: ``ReindexBarReader``
"""
def _outer_dts(self, start_dt, end_dt):
return self.trading_calendar.sessions_in_range(start_dt, end_dt)
def _inner_dts(self, start_dt, end_dt):
return self._reader.trading_calendar.sessions_in_range(
start_dt, end_dt) | zipline-live2-vk | /zipline-live2-vk-1.3.0.7.0.18.tar.gz/zipline-live2-vk-1.3.0.7.0.18/zipline/data/resample.py | resample.py |
import os
import logbook
import pandas as pd
from six.moves.urllib_error import HTTPError
from trading_calendars import get_calendar
from .benchmarks import get_benchmark_returns
from . import treasuries, treasuries_can
from ..utils.paths import (
cache_root,
data_root,
)
logger = logbook.Logger('Loader')
# Mapping from index symbol to appropriate bond data
INDEX_MAPPING = {
'SPY':
(treasuries, 'treasury_curves.csv', 'www.federalreserve.gov'),
'^GSPTSE':
(treasuries_can, 'treasury_curves_can.csv', 'bankofcanada.ca'),
'^FTSE': # use US treasuries until UK bonds implemented
(treasuries, 'treasury_curves.csv', 'www.federalreserve.gov'),
}
ONE_HOUR = pd.Timedelta(hours=1)
def last_modified_time(path):
"""
Get the last modified time of path as a Timestamp.
"""
return pd.Timestamp(os.path.getmtime(path), unit='s', tz='UTC')
def get_data_filepath(name, environ=None):
"""
Returns a handle to data file.
Creates containing directory, if needed.
"""
dr = data_root(environ)
if not os.path.exists(dr):
os.makedirs(dr)
return os.path.join(dr, name)
def get_cache_filepath(name):
cr = cache_root()
if not os.path.exists(cr):
os.makedirs(cr)
return os.path.join(cr, name)
def get_benchmark_filename(symbol):
return "%s_benchmark.csv" % symbol
def has_data_for_dates(series_or_df, first_date, last_date):
"""
Does `series_or_df` have data on or before first_date and on or after
last_date?
"""
dts = series_or_df.index
if not isinstance(dts, pd.DatetimeIndex):
raise TypeError("Expected a DatetimeIndex, but got %s." % type(dts))
first, last = dts[[0, -1]]
return (first <= first_date) and (last >= last_date)
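# For example (illustrative dates), a series indexed from 2014 through 2016
# covers any range inside 2015:
#
#     >>> idx = pd.date_range('2014-01-02', '2016-12-30')
#     >>> has_data_for_dates(pd.Series(0.0, index=idx),
#     ...                    pd.Timestamp('2015-01-02'),
#     ...                    pd.Timestamp('2015-12-31'))
#     True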
def load_market_data(trading_day=None, trading_days=None, bm_symbol='SPY',
environ=None):
"""
Load benchmark returns and treasury yield curves for the given calendar and
benchmark symbol.
Benchmarks are downloaded as a Series from IEX Trading. Treasury curves
are US Treasury Bond rates and are downloaded from 'www.federalreserve.gov'
by default. For Canadian exchanges, a loader for Canadian bonds from the
Bank of Canada is also available.
Results downloaded from the internet are cached in
~/.zipline/data. Subsequent loads will attempt to read from the cached
files before falling back to redownload.
Parameters
----------
trading_day : pandas.CustomBusinessDay, optional
A trading_day used to determine the latest day for which we
expect to have data. Defaults to an NYSE trading day.
trading_days : pd.DatetimeIndex, optional
A calendar of trading days. Also used for determining what cached
dates we should expect to have cached. Defaults to the NYSE calendar.
bm_symbol : str, optional
Symbol for the benchmark index to load. Defaults to 'SPY', the ticker
for the S&P 500, provided by IEX Trading.
Returns
-------
(benchmark_returns, treasury_curves) : (pd.Series, pd.DataFrame)
Notes
-----
Both return values are DatetimeIndexed with values dated to midnight in UTC
of each stored date. The columns of `treasury_curves` are:
    '1month', '3month', '6month', '1year', '2year', '3year', '5year',
    '7year', '10year', '20year', '30year'
"""
if trading_day is None:
trading_day = get_calendar('NYSE').day
if trading_days is None:
trading_days = get_calendar('NYSE').all_sessions
first_date = trading_days[0]
now = pd.Timestamp.utcnow()
# we will fill missing benchmark data through latest trading date
last_date = trading_days[trading_days.get_loc(now, method='ffill')]
br = ensure_benchmark_data(
bm_symbol,
first_date,
last_date,
now,
# We need the trading_day to figure out the close prior to the first
# date so that we can compute returns for the first date.
trading_day,
environ,
)
tc = ensure_treasury_data(
bm_symbol,
first_date,
last_date,
now,
environ,
)
# combine dt indices and reindex using ffill then bfill
all_dt = br.index.union(tc.index)
br = br.reindex(all_dt, method='ffill').fillna(method='bfill')
tc = tc.reindex(all_dt, method='ffill').fillna(method='bfill')
benchmark_returns = br[br.index.slice_indexer(first_date, last_date)]
treasury_curves = tc[tc.index.slice_indexer(first_date, last_date)]
return benchmark_returns, treasury_curves
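# A minimal usage sketch: load the default SPY benchmark and US treasury
# curves on the NYSE calendar. The variable names are illustrative only.
def _example_load_market_data():
    benchmark_returns, treasury_curves = load_market_data(bm_symbol='SPY')
    # benchmark_returns is a date-indexed pd.Series of daily returns;
    # treasury_curves is a pd.DataFrame with one column per tenor,
    # e.g. '10year'.
    return benchmark_returns, treasury_curves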
def ensure_benchmark_data(symbol, first_date, last_date, now, trading_day,
environ=None):
"""
Ensure we have benchmark data for `symbol` from `first_date` to `last_date`
Parameters
----------
symbol : str
The symbol for the benchmark to load.
first_date : pd.Timestamp
First required date for the cache.
last_date : pd.Timestamp
Last required date for the cache.
now : pd.Timestamp
The current time. This is used to prevent repeated attempts to
re-download data that isn't available due to scheduling quirks or other
failures.
trading_day : pd.CustomBusinessDay
A trading day delta. Used to find the day before first_date so we can
get the close of the day prior to first_date.
    We attempt to download data unless we already have data stored in the data
    cache for `symbol` whose first entry is before or on `first_date` and whose
    last entry is on or after `last_date`.
"""
filename = get_benchmark_filename(symbol)
data = _load_cached_data(filename, first_date, last_date, now, 'benchmark',
environ)
if data is not None:
return data
# If no cached data was found or it was missing any dates then download the
# necessary data.
logger.info(
('Downloading benchmark data for {symbol!r} '
'from {first_date} to {last_date}'),
symbol=symbol,
first_date=first_date - trading_day,
last_date=last_date
)
try:
data = get_benchmark_returns(symbol, first_date, last_date)
data.to_csv(get_data_filepath(filename, environ))
except (OSError, IOError, HTTPError):
logger.exception('Failed to cache the new benchmark returns')
raise
if not has_data_for_dates(data, first_date, last_date):
logger.warn(
("Still don't have expected benchmark data for {symbol!r} "
"from {first_date} to {last_date} after redownload!"),
symbol=symbol,
first_date=first_date - trading_day,
last_date=last_date
)
return data
def ensure_treasury_data(symbol, first_date, last_date, now, environ=None):
"""
Ensure we have treasury data from treasury module associated with
`symbol`.
Parameters
----------
symbol : str
Benchmark symbol for which we're loading associated treasury curves.
first_date : pd.Timestamp
First date required to be in the cache.
last_date : pd.Timestamp
Last date required to be in the cache.
now : pd.Timestamp
The current time. This is used to prevent repeated attempts to
re-download data that isn't available due to scheduling quirks or other
failures.
We attempt to download data unless we already have data stored in the cache
for `module_name` whose first entry is before or on `first_date` and whose
last entry is on or after `last_date`.
If we perform a download and the cache criteria are not satisfied, we wait
at least one hour before attempting a redownload. This is determined by
comparing the current time to the result of os.path.getmtime on the cache
path.
"""
loader_module, filename, source = INDEX_MAPPING.get(
symbol, INDEX_MAPPING['SPY'],
)
first_date = max(first_date, loader_module.earliest_possible_date())
data = _load_cached_data(filename, first_date, last_date, now, 'treasury',
environ)
if data is not None:
return data
# If no cached data was found or it was missing any dates then download the
# necessary data.
logger.info(
('Downloading treasury data for {symbol!r} '
'from {first_date} to {last_date}'),
symbol=symbol,
first_date=first_date,
last_date=last_date
)
try:
data = loader_module.get_treasury_data(first_date, last_date)
data.to_csv(get_data_filepath(filename, environ))
    except (OSError, IOError, HTTPError):
        # Re-raise so a failed fetch doesn't fall through to the check below
        # with `data` unbound.
        logger.exception('Failed to cache treasury data')
        raise
if not has_data_for_dates(data, first_date, last_date):
logger.warn(
("Still don't have expected treasury data for {symbol!r} "
"from {first_date} to {last_date} after redownload!"),
symbol=symbol,
first_date=first_date,
last_date=last_date
)
return data
def _load_cached_data(filename, first_date, last_date, now, resource_name,
environ=None):
if resource_name == 'benchmark':
def from_csv(path):
return pd.read_csv(
path,
parse_dates=[0],
index_col=0,
header=None,
# Pass squeeze=True so that we get a series instead of a frame.
squeeze=True,
).tz_localize('UTC')
else:
def from_csv(path):
return pd.read_csv(
path,
parse_dates=[0],
index_col=0,
).tz_localize('UTC')
# Path for the cache.
path = get_data_filepath(filename, environ)
# If the path does not exist, it means the first download has not happened
# yet, so don't try to read from 'path'.
if os.path.exists(path):
try:
data = from_csv(path)
if has_data_for_dates(data, first_date, last_date):
return data
# Don't re-download if we've successfully downloaded and written a
# file in the last hour.
last_download_time = last_modified_time(path)
if (now - last_download_time) <= ONE_HOUR:
logger.warn(
"Refusing to download new {resource} data because a "
"download succeeded at {time}.",
resource=resource_name,
time=last_download_time,
)
return data
except (OSError, IOError, ValueError) as e:
# These can all be raised by various versions of pandas on various
# classes of malformed input. Treat them all as cache misses.
logger.info(
"Loading data for {path} failed with error [{error}].",
path=path,
error=e,
)
logger.info(
"Cache at {path} does not have data from {start} to {end}.\n",
start=first_date,
end=last_date,
path=path,
)
return None
def load_prices_from_csv(filepath, identifier_col, tz='UTC'):
data = pd.read_csv(filepath, index_col=identifier_col)
data.index = pd.DatetimeIndex(data.index, tz=tz)
data.sort_index(inplace=True)
return data
def load_prices_from_csv_folder(folderpath, identifier_col, tz='UTC'):
data = None
for file in os.listdir(folderpath):
        if not file.endswith('.csv'):
            continue
raw = load_prices_from_csv(os.path.join(folderpath, file),
identifier_col, tz)
if data is None:
data = raw
else:
data = pd.concat([data, raw], axis=1)
return data | zipline-live2-vk | /zipline-live2-vk-1.3.0.7.0.18.tar.gz/zipline-live2-vk-1.3.0.7.0.18/zipline/data/loader.py | loader.py |
import numpy as np
import pandas as pd
from zipline.data.session_bars import SessionBarReader
class ContinuousFutureSessionBarReader(SessionBarReader):
def __init__(self, bar_reader, roll_finders):
self._bar_reader = bar_reader
self._roll_finders = roll_finders
def load_raw_arrays(self, columns, start_date, end_date, assets):
"""
Parameters
----------
        columns : list of str
            'open', 'high', 'low', 'close', 'volume', or 'sid'
        start_date : Timestamp
            Beginning of the window range.
        end_date : Timestamp
            End of the window range.
        assets : list of ContinuousFuture
            The continuous futures in the window.
Returns
-------
list of np.ndarray
            A list with an entry per column of ndarrays with shape
            (sessions in range, assets) with a dtype of float64, containing
            the values for the respective column over the start and end dt
            range.
"""
rolls_by_asset = {}
for asset in assets:
rf = self._roll_finders[asset.roll_style]
rolls_by_asset[asset] = rf.get_rolls(
asset.root_symbol,
start_date,
end_date,
asset.offset
)
num_sessions = len(
self.trading_calendar.sessions_in_range(start_date, end_date)
)
shape = num_sessions, len(assets)
results = []
tc = self._bar_reader.trading_calendar
sessions = tc.sessions_in_range(start_date, end_date)
# Get partitions
partitions_by_asset = {}
for asset in assets:
partitions = []
partitions_by_asset[asset] = partitions
rolls = rolls_by_asset[asset]
start = start_date
for roll in rolls:
sid, roll_date = roll
start_loc = sessions.get_loc(start)
if roll_date is not None:
end = roll_date - sessions.freq
end_loc = sessions.get_loc(end)
else:
end = end_date
end_loc = len(sessions) - 1
partitions.append((sid, start, end, start_loc, end_loc))
if roll_date is not None:
start = sessions[end_loc + 1]
for column in columns:
if column != 'volume' and column != 'sid':
out = np.full(shape, np.nan)
else:
out = np.zeros(shape, dtype=np.int64)
for i, asset in enumerate(assets):
partitions = partitions_by_asset[asset]
for sid, start, end, start_loc, end_loc in partitions:
if column != 'sid':
result = self._bar_reader.load_raw_arrays(
[column], start, end, [sid])[0][:, 0]
else:
result = int(sid)
out[start_loc:end_loc + 1, i] = result
results.append(out)
return results
@property
def last_available_dt(self):
"""
Returns
-------
dt : pd.Timestamp
The last session for which the reader can provide data.
"""
return self._bar_reader.last_available_dt
@property
def trading_calendar(self):
"""
Returns the zipline.utils.calendar.trading_calendar used to read
the data. Can be None (if the writer didn't specify it).
"""
return self._bar_reader.trading_calendar
@property
def first_trading_day(self):
"""
Returns
-------
dt : pd.Timestamp
The first trading day (session) for which the reader can provide
data.
"""
return self._bar_reader.first_trading_day
def get_value(self, continuous_future, dt, field):
"""
Retrieve the value at the given coordinates.
Parameters
----------
sid : int
The asset identifier.
dt : pd.Timestamp
The timestamp for the desired data point.
        field : string
            The OHLCV name for the desired data point.
Returns
-------
value : float|int
The value at the given coordinates, ``float`` for OHLC, ``int``
for 'volume'.
Raises
------
NoDataOnDate
            If the given dt is not a valid market minute (in minute mode) or
            session (in daily mode) according to this reader's trading
            calendar.
"""
rf = self._roll_finders[continuous_future.roll_style]
sid = (rf.get_contract_center(continuous_future.root_symbol,
dt,
continuous_future.offset))
return self._bar_reader.get_value(sid, dt, field)
def get_last_traded_dt(self, asset, dt):
"""
Get the latest minute on or before ``dt`` in which ``asset`` traded.
If there are no trades on or before ``dt``, returns ``pd.NaT``.
Parameters
----------
asset : zipline.asset.Asset
The asset for which to get the last traded minute.
dt : pd.Timestamp
The minute at which to start searching for the last traded minute.
Returns
-------
last_traded : pd.Timestamp
The dt of the last trade for the given asset, using the input
dt as a vantage point.
"""
rf = self._roll_finders[asset.roll_style]
sid = (rf.get_contract_center(asset.root_symbol,
dt,
asset.offset))
if sid is None:
return pd.NaT
contract = rf.asset_finder.retrieve_asset(sid)
return self._bar_reader.get_last_traded_dt(contract, dt)
@property
def sessions(self):
"""
Returns
-------
sessions : DatetimeIndex
            All session labels (unioning the ranges for all assets) which the
            reader can provide.
"""
return self._bar_reader.sessions
class ContinuousFutureMinuteBarReader(SessionBarReader):
def __init__(self, bar_reader, roll_finders):
self._bar_reader = bar_reader
self._roll_finders = roll_finders
def load_raw_arrays(self, columns, start_date, end_date, assets):
"""
Parameters
----------
        columns : list of str
            'open', 'high', 'low', 'close', or 'volume'
        start_date : Timestamp
            Beginning of the window range.
        end_date : Timestamp
            End of the window range.
        assets : list of ContinuousFuture
            The continuous futures in the window.
Returns
-------
list of np.ndarray
A list with an entry per field of ndarrays with shape
(minutes in range, sids) with a dtype of float64, containing the
values for the respective field over start and end dt range.
"""
rolls_by_asset = {}
tc = self.trading_calendar
start_session = tc.minute_to_session_label(start_date)
end_session = tc.minute_to_session_label(end_date)
for asset in assets:
rf = self._roll_finders[asset.roll_style]
rolls_by_asset[asset] = rf.get_rolls(
asset.root_symbol,
start_session,
end_session, asset.offset)
sessions = tc.sessions_in_range(start_date, end_date)
minutes = tc.minutes_in_range(start_date, end_date)
num_minutes = len(minutes)
shape = num_minutes, len(assets)
results = []
# Get partitions
partitions_by_asset = {}
for asset in assets:
partitions = []
partitions_by_asset[asset] = partitions
rolls = rolls_by_asset[asset]
start = start_date
for roll in rolls:
sid, roll_date = roll
start_loc = minutes.searchsorted(start)
if roll_date is not None:
_, end = tc.open_and_close_for_session(
roll_date - sessions.freq)
end_loc = minutes.searchsorted(end)
else:
end = end_date
end_loc = len(minutes) - 1
partitions.append((sid, start, end, start_loc, end_loc))
                if roll_date is not None:
start, _ = tc.open_and_close_for_session(
tc.minute_to_session_label(minutes[end_loc + 1]))
for column in columns:
if column != 'volume':
out = np.full(shape, np.nan)
else:
out = np.zeros(shape, dtype=np.uint32)
for i, asset in enumerate(assets):
partitions = partitions_by_asset[asset]
for sid, start, end, start_loc, end_loc in partitions:
if column != 'sid':
result = self._bar_reader.load_raw_arrays(
[column], start, end, [sid])[0][:, 0]
else:
result = int(sid)
out[start_loc:end_loc + 1, i] = result
results.append(out)
return results
@property
def last_available_dt(self):
"""
Returns
-------
dt : pd.Timestamp
The last session for which the reader can provide data.
"""
return self._bar_reader.last_available_dt
@property
def trading_calendar(self):
"""
Returns the zipline.utils.calendar.trading_calendar used to read
the data. Can be None (if the writer didn't specify it).
"""
return self._bar_reader.trading_calendar
@property
def first_trading_day(self):
"""
Returns
-------
dt : pd.Timestamp
The first trading day (session) for which the reader can provide
data.
"""
return self._bar_reader.first_trading_day
def get_value(self, continuous_future, dt, field):
"""
Retrieve the value at the given coordinates.
Parameters
----------
sid : int
The asset identifier.
dt : pd.Timestamp
The timestamp for the desired data point.
        field : string
            The OHLCV name for the desired data point.
Returns
-------
value : float|int
The value at the given coordinates, ``float`` for OHLC, ``int``
for 'volume'.
Raises
------
NoDataOnDate
            If the given dt is not a valid market minute (in minute mode) or
            session (in daily mode) according to this reader's trading
            calendar.
"""
rf = self._roll_finders[continuous_future.roll_style]
sid = (rf.get_contract_center(continuous_future.root_symbol,
dt,
continuous_future.offset))
return self._bar_reader.get_value(sid, dt, field)
def get_last_traded_dt(self, asset, dt):
"""
Get the latest minute on or before ``dt`` in which ``asset`` traded.
If there are no trades on or before ``dt``, returns ``pd.NaT``.
Parameters
----------
asset : zipline.asset.Asset
The asset for which to get the last traded minute.
dt : pd.Timestamp
The minute at which to start searching for the last traded minute.
Returns
-------
last_traded : pd.Timestamp
The dt of the last trade for the given asset, using the input
dt as a vantage point.
"""
rf = self._roll_finders[asset.roll_style]
sid = (rf.get_contract_center(asset.root_symbol,
dt,
asset.offset))
if sid is None:
return pd.NaT
contract = rf.asset_finder.retrieve_asset(sid)
return self._bar_reader.get_last_traded_dt(contract, dt)
@property
def sessions(self):
return self._bar_reader.sessions | zipline-live2-vk | /zipline-live2-vk-1.3.0.7.0.18.tar.gz/zipline-live2-vk-1.3.0.7.0.18/zipline/data/continuous_future_reader.py | continuous_future_reader.py |
from errno import ENOENT
from functools import partial
from os import remove
import sqlite3
import warnings
from bcolz import (
carray,
ctable,
)
from collections import namedtuple
import logbook
import numpy as np
from numpy import (
array,
int64,
float64,
full,
iinfo,
integer,
issubdtype,
nan,
uint32,
)
from pandas import (
DataFrame,
DatetimeIndex,
isnull,
NaT,
read_csv,
read_sql,
to_datetime,
Timestamp,
)
from six import (
iteritems,
string_types,
viewkeys,
)
from toolz import compose
from trading_calendars import get_calendar
from zipline.data.session_bars import SessionBarReader
from zipline.data.bar_reader import (
NoDataAfterDate,
NoDataBeforeDate,
NoDataOnDate,
)
from zipline.utils.functional import apply
from zipline.utils.input_validation import (
expect_element,
preprocess,
)
from zipline.utils.numpy_utils import iNaT
from zipline.utils.sqlite_utils import group_into_chunks, coerce_string_to_conn
from zipline.utils.memoize import lazyval
from zipline.utils.cli import maybe_show_progress
from ._equities import _compute_row_slices, _read_bcolz_data
from ._adjustments import load_adjustments_from_sqlite
logger = logbook.Logger('UsEquityPricing')
OHLC = frozenset(['open', 'high', 'low', 'close'])
US_EQUITY_PRICING_BCOLZ_COLUMNS = (
'open', 'high', 'low', 'close', 'volume', 'day', 'id'
)
SQLITE_ADJUSTMENT_COLUMN_DTYPES = {
'effective_date': integer,
'ratio': float,
'sid': integer,
}
SQLITE_ADJUSTMENT_TABLENAMES = frozenset(['splits', 'dividends', 'mergers'])
SQLITE_DIVIDEND_PAYOUT_COLUMN_DTYPES = {
'sid': integer,
'ex_date': integer,
'declared_date': integer,
'record_date': integer,
'pay_date': integer,
'amount': float,
}
SQLITE_STOCK_DIVIDEND_PAYOUT_COLUMN_DTYPES = {
'sid': integer,
'ex_date': integer,
'declared_date': integer,
'record_date': integer,
'pay_date': integer,
'payment_sid': integer,
'ratio': float,
}
UINT32_MAX = iinfo(uint32).max
def check_uint32_safe(value, colname):
if value >= UINT32_MAX:
raise ValueError(
"Value %s from column '%s' is too large" % (value, colname)
)
@expect_element(invalid_data_behavior={'warn', 'raise', 'ignore'})
def winsorise_uint32(df, invalid_data_behavior, column, *columns):
"""Drops any record where a value would not fit into a uint32.
Parameters
----------
df : pd.DataFrame
The dataframe to winsorise.
invalid_data_behavior : {'warn', 'raise', 'ignore'}
What to do when data is outside the bounds of a uint32.
*columns : iterable[str]
The names of the columns to check.
Returns
-------
truncated : pd.DataFrame
``df`` with values that do not fit into a uint32 zeroed out.
"""
columns = list((column,) + columns)
mask = df[columns] > UINT32_MAX
if invalid_data_behavior != 'ignore':
mask |= df[columns].isnull()
else:
# we are not going to generate a warning or error for this so just use
# nan_to_num
df[columns] = np.nan_to_num(df[columns])
mv = mask.values
if mv.any():
if invalid_data_behavior == 'raise':
raise ValueError(
'%d values out of bounds for uint32: %r' % (
mv.sum(), df[mask.any(axis=1)],
),
)
if invalid_data_behavior == 'warn':
warnings.warn(
'Ignoring %d values because they are out of bounds for'
' uint32: %r' % (
mv.sum(), df[mask.any(axis=1)],
),
stacklevel=3, # one extra frame for `expect_element`
)
df[mask] = 0
return df
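# A self-contained sketch of the zeroing behavior (illustrative values):
def _example_winsorise_uint32():
    df = DataFrame({'volume': [100.0, float(UINT32_MAX) + 1]})
    # The second value exceeds UINT32_MAX, so it is zeroed out, and a
    # warning is emitted because of the 'warn' behavior.
    return winsorise_uint32(df, 'warn', 'volume')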
class BcolzDailyBarWriter(object):
"""
Class capable of writing daily OHLCV data to disk in a format that can
be read efficiently by BcolzDailyOHLCVReader.
Parameters
----------
filename : str
The location at which we should write our output.
calendar : zipline.utils.calendar.trading_calendar
Calendar to use to compute asset calendar offsets.
start_session: pd.Timestamp
Midnight UTC session label.
end_session: pd.Timestamp
Midnight UTC session label.
See Also
--------
zipline.data.us_equity_pricing.BcolzDailyBarReader
"""
_csv_dtypes = {
'open': float64,
'high': float64,
'low': float64,
'close': float64,
'volume': float64,
}
def __init__(self, filename, calendar, start_session, end_session):
self._filename = filename
if start_session != end_session:
if not calendar.is_session(start_session):
raise ValueError(
"Start session %s is invalid!" % start_session
)
if not calendar.is_session(end_session):
raise ValueError(
"End session %s is invalid!" % end_session
)
self._start_session = start_session
self._end_session = end_session
self._calendar = calendar
@property
def progress_bar_message(self):
return "Merging daily equity files:"
def progress_bar_item_show_func(self, value):
return value if value is None else str(value[0])
def write(self,
data,
assets=None,
show_progress=False,
invalid_data_behavior='warn'):
"""
Parameters
----------
data : iterable[tuple[int, pandas.DataFrame or bcolz.ctable]]
The data chunks to write. Each chunk should be a tuple of sid
and the data for that asset.
assets : set[int], optional
The assets that should be in ``data``. If this is provided
we will check ``data`` against the assets and provide better
progress information.
show_progress : bool, optional
Whether or not to show a progress bar while writing.
invalid_data_behavior : {'warn', 'raise', 'ignore'}, optional
What to do when data is encountered that is outside the range of
a uint32.
Returns
-------
table : bcolz.ctable
The newly-written table.
"""
ctx = maybe_show_progress(
(
(sid, self.to_ctable(df, invalid_data_behavior))
for sid, df in data
),
show_progress=show_progress,
item_show_func=self.progress_bar_item_show_func,
label=self.progress_bar_message,
length=len(assets) if assets is not None else None,
)
with ctx as it:
return self._write_internal(it, assets)
def write_csvs(self,
asset_map,
show_progress=False,
invalid_data_behavior='warn'):
"""Read CSVs as DataFrames from our asset map.
Parameters
----------
asset_map : dict[int -> str]
A mapping from asset id to file path with the CSV data for that
asset
show_progress : bool
Whether or not to show a progress bar while writing.
invalid_data_behavior : {'warn', 'raise', 'ignore'}
What to do when data is encountered that is outside the range of
a uint32.
"""
read = partial(
read_csv,
parse_dates=['day'],
index_col='day',
dtype=self._csv_dtypes,
)
return self.write(
((asset, read(path)) for asset, path in iteritems(asset_map)),
assets=viewkeys(asset_map),
show_progress=show_progress,
invalid_data_behavior=invalid_data_behavior,
)
def _write_internal(self, iterator, assets):
"""
Internal implementation of write.
`iterator` should be an iterator yielding pairs of (asset, ctable).
"""
total_rows = 0
first_row = {}
last_row = {}
calendar_offset = {}
# Maps column name -> output carray.
columns = {
k: carray(array([], dtype=uint32))
for k in US_EQUITY_PRICING_BCOLZ_COLUMNS
}
earliest_date = None
sessions = self._calendar.sessions_in_range(
self._start_session, self._end_session
)
if assets is not None:
@apply
def iterator(iterator=iterator, assets=set(assets)):
for asset_id, table in iterator:
if asset_id not in assets:
raise ValueError('unknown asset id %r' % asset_id)
yield asset_id, table
for asset_id, table in iterator:
nrows = len(table)
for column_name in columns:
if column_name == 'id':
# We know what the content of this column is, so don't
# bother reading it.
columns['id'].append(
full((nrows,), asset_id, dtype='uint32'),
)
continue
columns[column_name].append(table[column_name])
if earliest_date is None:
earliest_date = table["day"][0]
else:
earliest_date = min(earliest_date, table["day"][0])
# Bcolz doesn't support ints as keys in `attrs`, so convert
# assets to strings for use as attr keys.
asset_key = str(asset_id)
# Calculate the index into the array of the first and last row
# for this asset. This allows us to efficiently load single
# assets when querying the data back out of the table.
first_row[asset_key] = total_rows
last_row[asset_key] = total_rows + nrows - 1
total_rows += nrows
table_day_to_session = compose(
self._calendar.minute_to_session_label,
partial(Timestamp, unit='s', tz='UTC'),
)
asset_first_day = table_day_to_session(table['day'][0])
asset_last_day = table_day_to_session(table['day'][-1])
asset_sessions = sessions[
sessions.slice_indexer(asset_first_day, asset_last_day)
]
assert len(table) == len(asset_sessions), (
'Got {} rows for daily bars table with first day={}, last '
'day={}, expected {} rows.\n'
'Missing sessions: {}\n'
'Extra sessions: {}'.format(
len(table),
asset_first_day.date(),
asset_last_day.date(),
len(asset_sessions),
asset_sessions.difference(
to_datetime(
np.array(table['day']),
unit='s',
utc=True,
)
).tolist(),
to_datetime(
np.array(table['day']),
unit='s',
utc=True,
).difference(asset_sessions).tolist(),
)
)
# Calculate the number of trading days between the first date
# in the stored data and the first date of **this** asset. This
# offset used for output alignment by the reader.
calendar_offset[asset_key] = sessions.get_loc(asset_first_day)
# This writes the table to disk.
full_table = ctable(
columns=[
columns[colname]
for colname in US_EQUITY_PRICING_BCOLZ_COLUMNS
],
names=US_EQUITY_PRICING_BCOLZ_COLUMNS,
rootdir=self._filename,
mode='w',
)
full_table.attrs['first_trading_day'] = (
earliest_date if earliest_date is not None else iNaT
)
full_table.attrs['first_row'] = first_row
full_table.attrs['last_row'] = last_row
full_table.attrs['calendar_offset'] = calendar_offset
full_table.attrs['calendar_name'] = self._calendar.name
full_table.attrs['start_session_ns'] = self._start_session.value
full_table.attrs['end_session_ns'] = self._end_session.value
full_table.flush()
return full_table
@expect_element(invalid_data_behavior={'warn', 'raise', 'ignore'})
def to_ctable(self, raw_data, invalid_data_behavior):
if isinstance(raw_data, ctable):
# we already have a ctable so do nothing
return raw_data
winsorise_uint32(raw_data, invalid_data_behavior, 'volume', *OHLC)
processed = (raw_data[list(OHLC)] * 1000).astype('uint32')
dates = raw_data.index.values.astype('datetime64[s]')
check_uint32_safe(dates.max().view(np.int64), 'day')
processed['day'] = dates.astype('uint32')
processed['volume'] = raw_data.volume.astype('uint32')
return ctable.fromdataframe(processed)
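# A minimal round-trip sketch for the writer/reader pair. ``make_daily_frame``
# is a hypothetical helper that returns a day-indexed OHLCV DataFrame for the
# given session range; it is not part of this module.
def _example_daily_bar_round_trip(rootdir, calendar, start_session,
                                  end_session, make_daily_frame):
    writer = BcolzDailyBarWriter(rootdir, calendar, start_session, end_session)
    table = writer.write([(1, make_daily_frame())], assets=[1])
    reader = BcolzDailyBarReader(table)
    return reader.get_value(1, start_session, 'close')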
class BcolzDailyBarReader(SessionBarReader):
"""
Reader for raw pricing data written by BcolzDailyOHLCVWriter.
Parameters
----------
    table : bcolz.ctable
        The ctable containing the pricing data, with attrs corresponding to
        the Attributes list below.
    read_all_threshold : int
        The number of equities at or below which data is read by slicing the
        carray per asset; above this threshold, the data for all assets is
        read into memory at once and then indexed per day and asset pair.
        Used to tune performance of reads when using a small or large number
        of equities.
Attributes
----------
The table with which this loader interacts contains the following
attributes:
first_row : dict
Map from asset_id -> index of first row in the dataset with that id.
last_row : dict
Map from asset_id -> index of last row in the dataset with that id.
calendar_offset : dict
Map from asset_id -> calendar index of first row.
start_session_ns: int
Epoch ns of the first session used in this dataset.
end_session_ns: int
Epoch ns of the last session used in this dataset.
    calendar_name: str
        String identifier of the trading calendar used (e.g., "NYSE").
We use first_row and last_row together to quickly find ranges of rows to
load when reading an asset's data into memory.
We use calendar_offset and calendar to orient loaded blocks within a
range of queried dates.
Notes
------
A Bcolz CTable is comprised of Columns and Attributes.
The table with which this loader interacts contains the following columns:
['open', 'high', 'low', 'close', 'volume', 'day', 'id'].
The data in these columns is interpreted as follows:
- Price columns ('open', 'high', 'low', 'close') are interpreted as 1000 *
as-traded dollar value.
- Volume is interpreted as as-traded volume.
- Day is interpreted as seconds since midnight UTC, Jan 1, 1970.
- Id is the asset id of the row.
The data in each column is grouped by asset and then sorted by day within
each asset block.
    The table is built to represent a long time range of data, e.g. ten years
    of equity data, so the lengths of the asset blocks are generally not equal
    to one another. The blocks are clipped to the known start and end date of
    each asset
to cut down on the number of empty values that would need to be included to
make a regular/cubic dataset.
    When read across, the open, high, low, close, and volume entries at the
    same index represent the same asset and day.
See Also
--------
zipline.data.us_equity_pricing.BcolzDailyBarWriter
"""
def __init__(self, table, read_all_threshold=3000):
self._maybe_table_rootdir = table
# Cache of fully read np.array for the carrays in the daily bar table.
# raw_array does not use the same cache, but it could.
# Need to test keeping the entire array in memory for the course of a
# process first.
self._spot_cols = {}
self.PRICE_ADJUSTMENT_FACTOR = 0.001
self._read_all_threshold = read_all_threshold
@lazyval
def _table(self):
maybe_table_rootdir = self._maybe_table_rootdir
if isinstance(maybe_table_rootdir, ctable):
return maybe_table_rootdir
return ctable(rootdir=maybe_table_rootdir, mode='r')
@lazyval
def sessions(self):
if 'calendar' in self._table.attrs.attrs:
# backwards compatibility with old formats, will remove
return DatetimeIndex(self._table.attrs['calendar'], tz='UTC')
else:
cal = get_calendar(self._table.attrs['calendar_name'])
start_session_ns = self._table.attrs['start_session_ns']
start_session = Timestamp(start_session_ns, tz='UTC')
end_session_ns = self._table.attrs['end_session_ns']
end_session = Timestamp(end_session_ns, tz='UTC')
sessions = cal.sessions_in_range(start_session, end_session)
return sessions
@lazyval
def _first_rows(self):
return {
int(asset_id): start_index
for asset_id, start_index in iteritems(
self._table.attrs['first_row'],
)
}
@lazyval
def _last_rows(self):
return {
int(asset_id): end_index
for asset_id, end_index in iteritems(
self._table.attrs['last_row'],
)
}
@lazyval
def _calendar_offsets(self):
return {
int(id_): offset
for id_, offset in iteritems(
self._table.attrs['calendar_offset'],
)
}
@lazyval
def first_trading_day(self):
try:
return Timestamp(
self._table.attrs['first_trading_day'],
unit='s',
tz='UTC'
)
except KeyError:
return None
@lazyval
def trading_calendar(self):
if 'calendar_name' in self._table.attrs.attrs:
return get_calendar(self._table.attrs['calendar_name'])
else:
return None
@property
def last_available_dt(self):
return self.sessions[-1]
def _compute_slices(self, start_idx, end_idx, assets):
"""
Compute the raw row indices to load for each asset on a query for the
given dates after applying a shift.
Parameters
----------
start_idx : int
Index of first date for which we want data.
end_idx : int
Index of last date for which we want data.
assets : pandas.Int64Index
Assets for which we want to compute row indices
Returns
-------
A 3-tuple of (first_rows, last_rows, offsets):
first_rows : np.array[intp]
Array with length == len(assets) containing the index of the first
row to load for each asset in `assets`.
last_rows : np.array[intp]
Array with length == len(assets) containing the index of the last
row to load for each asset in `assets`.
offset : np.array[intp]
            Array with length == len(assets) containing the index in a buffer
of length `dates` corresponding to the first row of each asset.
The value of offset[i] will be 0 if asset[i] existed at the start
of a query. Otherwise, offset[i] will be equal to the number of
entries in `dates` for which the asset did not yet exist.
"""
# The core implementation of the logic here is implemented in Cython
# for efficiency.
return _compute_row_slices(
self._first_rows,
self._last_rows,
self._calendar_offsets,
start_idx,
end_idx,
assets,
)
def load_raw_arrays(self, columns, start_date, end_date, assets):
# Assumes that the given dates are actually in calendar.
start_idx = self.sessions.get_loc(start_date)
end_idx = self.sessions.get_loc(end_date)
first_rows, last_rows, offsets = self._compute_slices(
start_idx,
end_idx,
assets,
)
read_all = len(assets) > self._read_all_threshold
return _read_bcolz_data(
self._table,
(end_idx - start_idx + 1, len(assets)),
list(columns),
first_rows,
last_rows,
offsets,
read_all,
)
def _spot_col(self, colname):
"""
Get the colname from daily_bar_table and read all of it into memory,
caching the result.
Parameters
----------
colname : string
            The name of an OHLCV carray in the daily_bar_table.
Returns
-------
array (uint32)
Full read array of the carray in the daily_bar_table with the
given colname.
"""
try:
col = self._spot_cols[colname]
except KeyError:
col = self._spot_cols[colname] = self._table[colname]
return col
def get_last_traded_dt(self, asset, day):
volumes = self._spot_col('volume')
search_day = day
while True:
try:
ix = self.sid_day_index(asset, search_day)
except NoDataBeforeDate:
return NaT
except NoDataAfterDate:
prev_day_ix = self.sessions.get_loc(search_day) - 1
if prev_day_ix > -1:
search_day = self.sessions[prev_day_ix]
continue
except NoDataOnDate:
return NaT
if volumes[ix] != 0:
return search_day
prev_day_ix = self.sessions.get_loc(search_day) - 1
if prev_day_ix > -1:
search_day = self.sessions[prev_day_ix]
else:
return NaT
def sid_day_index(self, sid, day):
"""
Parameters
----------
sid : int
The asset identifier.
day : datetime64-like
Midnight of the day for which data is requested.
Returns
-------
int
Index into the data tape for the given sid and day.
            Raises a NoDataOnDate exception if the given day for the sid is
            outside the date range of the equity.
"""
try:
day_loc = self.sessions.get_loc(day)
        except KeyError:
raise NoDataOnDate("day={0} is outside of calendar={1}".format(
day, self.sessions))
offset = day_loc - self._calendar_offsets[sid]
if offset < 0:
raise NoDataBeforeDate(
"No data on or before day={0} for sid={1}".format(
day, sid))
ix = self._first_rows[sid] + offset
if ix > self._last_rows[sid]:
raise NoDataAfterDate(
"No data on or after day={0} for sid={1}".format(
day, sid))
return ix
def get_value(self, sid, dt, field):
"""
Parameters
----------
sid : int
The asset identifier.
        dt : datetime64-like
            Midnight of the day for which data is requested.
        field : string
            The price field. e.g. ('open', 'high', 'low', 'close', 'volume')
        Returns
        -------
        float
            The spot price for the given field of the given sid on the
            given day.
            Raises a NoDataOnDate exception if the given day for the sid is
            outside the date range of the equity.
            Returns NaN if the day is within the date range, but the price
            is 0.
"""
ix = self.sid_day_index(sid, dt)
price = self._spot_col(field)[ix]
if field != 'volume':
if price == 0:
return nan
else:
return price * 0.001
else:
return price
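def _example_price_decoding():  # pragma: no cover
    """Illustrative sketch, not part of the public API: prices are stored
    in the bcolz table as uint32 values scaled by 1000, so a close of
    $12.34 is stored as 12340 and decoded by ``get_value`` through
    ``PRICE_ADJUSTMENT_FACTOR`` (0.001). The rootdir here is hypothetical.
    """
    reader = BcolzDailyBarReader('/path/to/daily_equities.bcolz')
    sid = next(iter(reader._first_rows))  # any known asset id
    # The asset's first session with data sits at its calendar offset.
    session = reader.sessions[reader._calendar_offsets[sid]]
    raw = reader._spot_col('close')[reader.sid_day_index(sid, session)]
    decoded = reader.get_value(sid, session, 'close')
    # A stored 0 decodes to NaN; otherwise the scaled values agree.
    assert raw == 0 or decoded == raw * reader.PRICE_ADJUSTMENT_FACTOR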
class SQLiteAdjustmentWriter(object):
"""
Writer for data to be read by SQLiteAdjustmentReader
Parameters
----------
conn_or_path : str or sqlite3.Connection
A handle to the target sqlite database.
    equity_daily_bar_reader : BcolzDailyBarReader
        Daily bar reader to use for dividend writes.
    calendar : pd.DatetimeIndex
        Calendar of sessions used when computing dividend ratios.
overwrite : bool, optional, default=False
If True and conn_or_path is a string, remove any existing files at the
given path before connecting.
See Also
--------
zipline.data.us_equity_pricing.SQLiteAdjustmentReader
"""
def __init__(self,
conn_or_path,
equity_daily_bar_reader,
calendar,
overwrite=False):
if isinstance(conn_or_path, sqlite3.Connection):
self.conn = conn_or_path
elif isinstance(conn_or_path, string_types):
if overwrite:
try:
remove(conn_or_path)
except OSError as e:
if e.errno != ENOENT:
raise
self.conn = sqlite3.connect(conn_or_path)
self.uri = conn_or_path
else:
raise TypeError("Unknown connection type %s" % type(conn_or_path))
self._equity_daily_bar_reader = equity_daily_bar_reader
self._calendar = calendar
def _write(self, tablename, expected_dtypes, frame):
if frame is None or frame.empty:
# keeping the dtypes correct for empty frames is not easy
frame = DataFrame(
np.array([], dtype=list(expected_dtypes.items())),
)
else:
if frozenset(frame.columns) != frozenset(expected_dtypes):
raise ValueError(
"Unexpected frame columns:\n"
"Expected Columns: %s\n"
"Received Columns: %s" % (
set(expected_dtypes),
frame.columns.tolist(),
)
)
actual_dtypes = frame.dtypes
for colname, expected in iteritems(expected_dtypes):
actual = actual_dtypes[colname]
if not issubdtype(actual, expected):
raise TypeError(
"Expected data of type {expected} for column"
" '{colname}', but got '{actual}'.".format(
expected=expected,
colname=colname,
actual=actual,
),
)
frame.to_sql(
tablename,
self.conn,
if_exists='append',
chunksize=50000,
)
def write_frame(self, tablename, frame):
if tablename not in SQLITE_ADJUSTMENT_TABLENAMES:
raise ValueError(
"Adjustment table %s not in %s" % (
tablename,
SQLITE_ADJUSTMENT_TABLENAMES,
)
)
if not (frame is None or frame.empty):
frame = frame.copy()
frame['effective_date'] = frame['effective_date'].values.astype(
'datetime64[s]',
).astype('int64')
return self._write(
tablename,
SQLITE_ADJUSTMENT_COLUMN_DTYPES,
frame,
)
def write_dividend_payouts(self, frame):
"""
Write dividend payout data to SQLite table `dividend_payouts`.
"""
return self._write(
'dividend_payouts',
SQLITE_DIVIDEND_PAYOUT_COLUMN_DTYPES,
frame,
)
def write_stock_dividend_payouts(self, frame):
return self._write(
'stock_dividend_payouts',
SQLITE_STOCK_DIVIDEND_PAYOUT_COLUMN_DTYPES,
frame,
)
def calc_dividend_ratios(self, dividends):
"""
Calculate the ratios to apply to equities when looking back at pricing
history so that the price is smoothed over the ex_date, when the market
        adjusts to the change in equity value due to the upcoming dividend.
Returns
-------
DataFrame
A frame in the same format as splits and mergers, with keys
- sid, the id of the equity
- effective_date, the date in seconds on which to apply the ratio.
- ratio, the ratio to apply to backwards looking pricing data.
"""
if dividends is None or dividends.empty:
return DataFrame(np.array(
[],
dtype=[
('sid', uint32),
('effective_date', uint32),
('ratio', float64),
],
))
ex_dates = dividends.ex_date.values
sids = dividends.sid.values
amounts = dividends.amount.values
ratios = full(len(amounts), nan)
equity_daily_bar_reader = self._equity_daily_bar_reader
effective_dates = full(len(amounts), -1, dtype=int64)
calendar = self._calendar
        # Calculate locs against a tz-naive calendar, as the ex_dates are
        # tz-naive.
#
# TODO: A better approach here would be to localize ex_date to
# the tz of the calendar, but currently get_indexer does not
# preserve tz of the target when method='bfill', which throws
# off the comparison.
tz_naive_calendar = calendar.tz_localize(None)
day_locs = tz_naive_calendar.get_indexer(ex_dates, method='bfill')
for i, amount in enumerate(amounts):
sid = sids[i]
ex_date = ex_dates[i]
day_loc = day_locs[i]
prev_close_date = calendar[day_loc - 1]
try:
prev_close = equity_daily_bar_reader.get_value(
sid, prev_close_date, 'close')
if not isnull(prev_close):
ratio = 1.0 - amount / prev_close
ratios[i] = ratio
# only assign effective_date when data is found
effective_dates[i] = ex_date
except NoDataOnDate:
logger.warn("Couldn't compute ratio for dividend %s" % {
'sid': sid,
'ex_date': ex_date,
'amount': amount,
})
continue
# Create a mask to filter out indices in the effective_date, sid, and
# ratio vectors for which a ratio was not calculable.
effective_mask = effective_dates != -1
effective_dates = effective_dates[effective_mask]
effective_dates = effective_dates.astype('datetime64[ns]').\
astype('datetime64[s]').astype(uint32)
sids = sids[effective_mask]
ratios = ratios[effective_mask]
return DataFrame({
'sid': sids,
'effective_date': effective_dates,
'ratio': ratios,
})
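    # Worked example of the ratio math above (numbers are made up): a $0.50
    # dividend on a stock that closed at $50.00 the session before its
    # ex_date gives ratio = 1.0 - 0.50 / 50.00 = 0.99, i.e. prices before
    # the ex_date are scaled down by 1% when adjusting history backwards.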
def _write_dividends(self, dividends):
if dividends is None:
dividend_payouts = None
else:
dividend_payouts = dividends.copy()
dividend_payouts['ex_date'] = dividend_payouts['ex_date'].values.\
astype('datetime64[s]').astype(integer)
dividend_payouts['record_date'] = \
dividend_payouts['record_date'].values.\
astype('datetime64[s]').astype(integer)
dividend_payouts['declared_date'] = \
dividend_payouts['declared_date'].values.\
astype('datetime64[s]').astype(integer)
dividend_payouts['pay_date'] = \
dividend_payouts['pay_date'].values.astype('datetime64[s]').\
astype(integer)
self.write_dividend_payouts(dividend_payouts)
def _write_stock_dividends(self, stock_dividends):
if stock_dividends is None:
stock_dividend_payouts = None
else:
stock_dividend_payouts = stock_dividends.copy()
stock_dividend_payouts['ex_date'] = \
stock_dividend_payouts['ex_date'].values.\
astype('datetime64[s]').astype(integer)
stock_dividend_payouts['record_date'] = \
stock_dividend_payouts['record_date'].values.\
astype('datetime64[s]').astype(integer)
stock_dividend_payouts['declared_date'] = \
stock_dividend_payouts['declared_date'].\
values.astype('datetime64[s]').astype(integer)
stock_dividend_payouts['pay_date'] = \
stock_dividend_payouts['pay_date'].\
values.astype('datetime64[s]').astype(integer)
self.write_stock_dividend_payouts(stock_dividend_payouts)
def write_dividend_data(self, dividends, stock_dividends=None):
"""
Write both dividend payouts and the derived price adjustment ratios.
"""
# First write the dividend payouts.
self._write_dividends(dividends)
self._write_stock_dividends(stock_dividends)
# Second from the dividend payouts, calculate ratios.
dividend_ratios = self.calc_dividend_ratios(dividends)
self.write_frame('dividends', dividend_ratios)
def __enter__(self):
return self
def __exit__(self, *exc_info):
self.close()
def write(self,
splits=None,
mergers=None,
dividends=None,
stock_dividends=None):
"""
Writes data to a SQLite file to be read by SQLiteAdjustmentReader.
Parameters
----------
splits : pandas.DataFrame, optional
Dataframe containing split data. The format of this dataframe is:
effective_date : int
The date, represented as seconds since Unix epoch, on which
the adjustment should be applied.
ratio : float
A value to apply to all data earlier than the effective date.
For open, high, low, and close those values are multiplied by
the ratio. Volume is divided by this value.
sid : int
The asset id associated with this adjustment.
mergers : pandas.DataFrame, optional
DataFrame containing merger data. The format of this dataframe is:
effective_date : int
The date, represented as seconds since Unix epoch, on which
the adjustment should be applied.
ratio : float
A value to apply to all data earlier than the effective date.
For open, high, low, and close those values are multiplied by
the ratio. Volume is unaffected.
sid : int
The asset id associated with this adjustment.
dividends : pandas.DataFrame, optional
DataFrame containing dividend data. The format of the dataframe is:
sid : int
The asset id associated with this adjustment.
ex_date : datetime64
The date on which an equity must be held to be eligible to
receive payment.
declared_date : datetime64
The date on which the dividend is announced to the public.
pay_date : datetime64
The date on which the dividend is distributed.
record_date : datetime64
The date on which the stock ownership is checked to determine
distribution of dividends.
amount : float
The cash amount paid for each share.
Dividend ratios are calculated as:
``1.0 - (dividend_value / "close on day prior to ex_date")``
stock_dividends : pandas.DataFrame, optional
DataFrame containing stock dividend data. The format of the
dataframe is:
sid : int
The asset id associated with this adjustment.
ex_date : datetime64
The date on which an equity must be held to be eligible to
receive payment.
declared_date : datetime64
The date on which the dividend is announced to the public.
pay_date : datetime64
The date on which the dividend is distributed.
record_date : datetime64
The date on which the stock ownership is checked to determine
distribution of dividends.
payment_sid : int
The asset id of the shares that should be paid instead of
cash.
ratio : float
The ratio of currently held shares in the held sid that
should be paid with new shares of the payment_sid.
See Also
--------
zipline.data.us_equity_pricing.SQLiteAdjustmentReader
"""
self.write_frame('splits', splits)
self.write_frame('mergers', mergers)
self.write_dividend_data(dividends, stock_dividends)
self.conn.execute(
"CREATE INDEX splits_sids "
"ON splits(sid)"
)
self.conn.execute(
"CREATE INDEX splits_effective_date "
"ON splits(effective_date)"
)
self.conn.execute(
"CREATE INDEX mergers_sids "
"ON mergers(sid)"
)
self.conn.execute(
"CREATE INDEX mergers_effective_date "
"ON mergers(effective_date)"
)
self.conn.execute(
"CREATE INDEX dividends_sid "
"ON dividends(sid)"
)
self.conn.execute(
"CREATE INDEX dividends_effective_date "
"ON dividends(effective_date)"
)
self.conn.execute(
"CREATE INDEX dividend_payouts_sid "
"ON dividend_payouts(sid)"
)
self.conn.execute(
"CREATE INDEX dividends_payouts_ex_date "
"ON dividend_payouts(ex_date)"
)
self.conn.execute(
"CREATE INDEX stock_dividend_payouts_sid "
"ON stock_dividend_payouts(sid)"
)
self.conn.execute(
"CREATE INDEX stock_dividends_payouts_ex_date "
"ON stock_dividend_payouts(ex_date)"
)
def close(self):
self.conn.close()
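def _example_write_splits():  # pragma: no cover
    """Illustrative sketch, not part of the public API: writing a single
    2-for-1 split with SQLiteAdjustmentWriter. The paths, sid, and
    effective_date below are hypothetical.
    """
    daily_bar_reader = BcolzDailyBarReader('/path/to/daily_equities.bcolz')
    splits = DataFrame({
        'sid': np.array([1], dtype=int64),
        # Seconds since the Unix epoch (here 2014-01-01).
        'effective_date': np.array([1388534400], dtype=int64),
        # Prices before the effective date are multiplied by the ratio;
        # a 2-for-1 split halves them.
        'ratio': np.array([0.5], dtype=float64),
    })
    with SQLiteAdjustmentWriter('/path/to/adjustments.sqlite',
                                daily_bar_reader,
                                daily_bar_reader.sessions,
                                overwrite=True) as writer:
        writer.write(splits=splits)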
UNPAID_QUERY_TEMPLATE = """
SELECT sid, amount, pay_date from dividend_payouts
WHERE ex_date=? AND sid IN ({0})
"""
Dividend = namedtuple('Dividend', ['asset', 'amount', 'pay_date'])
UNPAID_STOCK_DIVIDEND_QUERY_TEMPLATE = """
SELECT sid, payment_sid, ratio, pay_date from stock_dividend_payouts
WHERE ex_date=? AND sid IN ({0})
"""
StockDividend = namedtuple(
'StockDividend',
['asset', 'payment_asset', 'ratio', 'pay_date'])
class SQLiteAdjustmentReader(object):
"""
Loads adjustments based on corporate actions from a SQLite database.
Expects data written in the format output by `SQLiteAdjustmentWriter`.
Parameters
----------
conn : str or sqlite3.Connection
Connection from which to load data.
See Also
--------
:class:`zipline.data.us_equity_pricing.SQLiteAdjustmentWriter`
"""
@preprocess(conn=coerce_string_to_conn(require_exists=True))
def __init__(self, conn):
self.conn = conn
        # Mapping from the tables in the adjustments db to the column
        # names that contain dates coerced into ints.
self._datetime_int_cols = {
'dividend_payouts': ('declared_date', 'ex_date', 'pay_date',
'record_date'),
'dividends': ('effective_date',),
'mergers': ('effective_date',),
'splits': ('effective_date',),
'stock_dividend_payouts': ('declared_date', 'ex_date', 'pay_date',
'record_date')
}
def load_adjustments(self, columns, dates, assets):
return load_adjustments_from_sqlite(
self.conn,
list(columns),
dates,
assets,
)
def get_adjustments_for_sid(self, table_name, sid):
t = (sid,)
c = self.conn.cursor()
adjustments_for_sid = c.execute(
"SELECT effective_date, ratio FROM %s WHERE sid = ?" %
table_name, t).fetchall()
c.close()
return [[Timestamp(adjustment[0], unit='s', tz='UTC'), adjustment[1]]
for adjustment in
adjustments_for_sid]
def get_dividends_with_ex_date(self, assets, date, asset_finder):
seconds = date.value / int(1e9)
c = self.conn.cursor()
divs = []
for chunk in group_into_chunks(assets):
query = UNPAID_QUERY_TEMPLATE.format(
",".join(['?' for _ in chunk]))
t = (seconds,) + tuple(map(lambda x: int(x), chunk))
c.execute(query, t)
rows = c.fetchall()
for row in rows:
div = Dividend(
asset_finder.retrieve_asset(row[0]),
row[1], Timestamp(row[2], unit='s', tz='UTC'))
divs.append(div)
c.close()
return divs
def get_stock_dividends_with_ex_date(self, assets, date, asset_finder):
seconds = date.value / int(1e9)
c = self.conn.cursor()
stock_divs = []
for chunk in group_into_chunks(assets):
query = UNPAID_STOCK_DIVIDEND_QUERY_TEMPLATE.format(
",".join(['?' for _ in chunk]))
t = (seconds,) + tuple(map(lambda x: int(x), chunk))
c.execute(query, t)
rows = c.fetchall()
for row in rows:
stock_div = StockDividend(
asset_finder.retrieve_asset(row[0]), # asset
asset_finder.retrieve_asset(row[1]), # payment_asset
row[2],
Timestamp(row[3], unit='s', tz='UTC'))
stock_divs.append(stock_div)
c.close()
return stock_divs
def unpack_db_to_component_dfs(self, convert_dates=False):
"""Returns the set of known tables in the adjustments file in DataFrame
form.
Parameters
----------
        convert_dates : bool, optional
            By default, dates are returned in seconds since the Unix epoch.
            If convert_dates is True, all ints in date columns will be
            converted to datetimes.
Returns
-------
dfs : dict{str->DataFrame}
            Dictionary which maps table name to the corresponding DataFrame
            version of the table, with date columns coerced back from int
            to datetime when convert_dates is True.
"""
def _get_df_from_table(table_name, date_cols):
# Dates are stored in second resolution as ints in adj.db tables.
# Need to specifically convert them as UTC, not local time.
kwargs = (
{'parse_dates': {col: {'unit': 's', 'utc': True}
for col in date_cols}
}
if convert_dates
else {}
)
return read_sql(
'select * from "{}"'.format(table_name),
self.conn,
index_col='index',
**kwargs
).rename_axis(None)
return {
t_name: _get_df_from_table(
t_name,
date_cols
)
for t_name, date_cols in self._datetime_int_cols.items()
} | zipline-live2-vk | /zipline-live2-vk-1.3.0.7.0.18.tar.gz/zipline-live2-vk-1.3.0.7.0.18/zipline/data/us_equity_pricing.py | us_equity_pricing.py |
import pandas as pd
from zipline.data.data_portal import DataPortal
from logbook import Logger
log = Logger('DataPortalLive')
class DataPortalLive(DataPortal):
def __init__(self, broker, *args, **kwargs):
self.broker = broker
super(DataPortalLive, self).__init__(*args, **kwargs)
def get_last_traded_dt(self, asset, dt, data_frequency):
return self.broker.get_last_traded_dt(asset)
def get_spot_value(self, assets, field, dt, data_frequency):
return self.broker.get_spot_value(assets, field, dt, data_frequency)
def get_history_window(self,
assets,
end_dt,
bar_count,
frequency,
field,
data_frequency,
ffill=True):
# This method is responsible for merging the ingested historical data
# with the real-time collected data through the Broker.
# DataPortal.get_history_window() is called with ffill=False to mark
# the missing fields with NaNs. After merge on the historical and
# real-time data the missing values (NaNs) are filled based on their
# next available values in the requested time window.
#
        # Warning: setting ffill=True in the DataPortal.get_history_window()
        # call results in wrong behavior: the last available value reported
        # by get_spot_value() would be used to fill the missing data - which
        # always represents the current spot price presented by the Broker.
historical_bars = super(DataPortalLive, self).get_history_window(
assets, end_dt, bar_count, frequency, field, data_frequency,
ffill=False)
realtime_bars = self.broker.get_realtime_bars(
assets, frequency)
        # Broker.get_realtime_bars() returns the asset as the level 0
        # column, with open, high, low, close, volume as level 1 columns.
        # To filter for field, the levels need to be swapped.
realtime_bars = realtime_bars.swaplevel(0, 1, axis=1)
ohlcv_field = 'close' if field == 'price' else field
# TODO: end_dt is ignored when historical & realtime bars are merged.
# Should not cause issues as end_dt is set to current time in live
# trading, but would be more proper if merge would make use of it.
combined_bars = historical_bars.combine_first(
realtime_bars[ohlcv_field])
if ffill and field == 'price':
            # Simple forward fill is not enough here as the last ingested
            # value might be outside of the requested time window. In that
            # case the time series starts with NaN and forward filling won't
            # help. To provide values for such cases we backward fill too.
            # The backward fill as a second operation will have no effect if
            # the forward fill was successful.
combined_bars.fillna(method='ffill', inplace=True)
combined_bars.fillna(method='bfill', inplace=True)
return combined_bars[-bar_count:]
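    # Sketch of the merge above (timestamps and values are made up):
    # if ingested history ends at 10:31 and the broker supplies
    # 10:32-10:34, then
    #     historical_bars: 10:30 -> 10.0, 10:31 -> 10.1, 10:32 -> NaN, ...
    #     realtime_bars:   10:32 -> 10.2, 10:33 -> 10.3, 10:34 -> 10.4
    # combine_first keeps historical values where present and fills the
    # NaNs from the realtime frame; the ffill/bfill pass then patches any
    # bars missing from both sources.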
def get_scalar_asset_spot_value(self, asset, field, dt, data_frequency):
"""
Public API method that returns a scalar value representing the value
        of the desired asset's field at the given dt.
Parameters
----------
        asset : Asset
            The asset whose data is desired. This cannot be an arbitrary
            AssetConvertible.
field : {'open', 'high', 'low', 'close', 'volume',
'price', 'last_traded'}
The desired field of the asset.
dt : pd.Timestamp
The timestamp for the desired value.
data_frequency : str
The frequency of the data to query; i.e. whether the data is
'daily' or 'minute' bars
Returns
-------
value : float, int, or pd.Timestamp
            The spot value of ``field`` for ``asset``. The return type is
            based on the ``field`` requested. If the field is one of 'open',
            'high', 'low', 'close', or 'price', the value will be a float.
            If the ``field`` is 'volume' the value will be an int. If the
            ``field`` is 'last_traded' the value will be a Timestamp.
"""
if data_frequency == 'minute':
data_frequency = '1m'
elif data_frequency == 'daily':
data_frequency = '1d'
prices = self.broker.get_realtime_bars([asset], data_frequency)
if field == 'last_traded':
return pd.Timestamp(prices[asset][-1:].index.get_values()[0])
elif field == 'volume':
return prices[asset][field][-1] * 100
elif field == 'price':
return prices[asset]['close'][-1]
else:
return prices[asset][field][-1] | zipline-live2-vk | /zipline-live2-vk-1.3.0.7.0.18.tar.gz/zipline-live2-vk-1.3.0.7.0.18/zipline/data/data_portal_live.py | data_portal_live.py |
from abc import ABCMeta, abstractmethod, abstractproperty
from six import with_metaclass
class NoDataOnDate(Exception):
"""
Raised when a spot price cannot be found for the sid and date.
"""
pass
class NoDataBeforeDate(NoDataOnDate):
pass
class NoDataAfterDate(NoDataOnDate):
pass
class NoDataForSid(Exception):
"""
Raised when the requested sid is missing from the pricing data.
"""
pass
class BarReader(with_metaclass(ABCMeta, object)):
@abstractproperty
def data_frequency(self):
pass
@abstractmethod
def load_raw_arrays(self, columns, start_date, end_date, assets):
"""
        Parameters
        ----------
        columns : list of str
            'open', 'high', 'low', 'close', or 'volume'
        start_date : Timestamp
            Beginning of the window range.
        end_date : Timestamp
            End of the window range.
        assets : list of int
            The asset identifiers in the window.
Returns
-------
        list of np.ndarray
            A list with an entry per column of ndarrays with shape
            (bars in range, assets) with a dtype of float64, containing the
            values for the respective column over the start and end dt range.
"""
pass
@abstractproperty
def last_available_dt(self):
"""
Returns
-------
dt : pd.Timestamp
The last session for which the reader can provide data.
"""
pass
@abstractproperty
def trading_calendar(self):
"""
        Returns the trading_calendars.TradingCalendar used to read
the data. Can be None (if the writer didn't specify it).
"""
pass
@abstractproperty
def first_trading_day(self):
"""
Returns
-------
dt : pd.Timestamp
The first trading day (session) for which the reader can provide
data.
"""
pass
@abstractmethod
def get_value(self, sid, dt, field):
"""
Retrieve the value at the given coordinates.
Parameters
----------
sid : int
The asset identifier.
dt : pd.Timestamp
The timestamp for the desired data point.
field : string
            The OHLCV name for the desired data point.
Returns
-------
value : float|int
The value at the given coordinates, ``float`` for OHLC, ``int``
for 'volume'.
Raises
------
NoDataOnDate
If the given dt is not a valid market minute (in minute mode) or
            session (in daily mode) according to this reader's trading calendar.
"""
pass
@abstractmethod
def get_last_traded_dt(self, asset, dt):
"""
Get the latest minute on or before ``dt`` in which ``asset`` traded.
If there are no trades on or before ``dt``, returns ``pd.NaT``.
Parameters
----------
asset : zipline.asset.Asset
The asset for which to get the last traded minute.
dt : pd.Timestamp
The minute at which to start searching for the last traded minute.
Returns
-------
last_traded : pd.Timestamp
The dt of the last trade for the given asset, using the input
dt as a vantage point.
"""
pass | zipline-live2-vk | /zipline-live2-vk-1.3.0.7.0.18.tar.gz/zipline-live2-vk-1.3.0.7.0.18/zipline/data/bar_reader.py | bar_reader.py |
import pandas as pd
import six
from toolz import curry
from toolz.curried.operator import add as prepend
COLUMN_NAMES = {
"V39063": '1month',
"V39065": '3month',
"V39066": '6month',
"V39067": '1year',
"V39051": '2year',
"V39052": '3year',
"V39053": '5year',
"V39054": '7year',
"V39055": '10year',
# Bank of Canada refers to this as 'Long' Rate, approximately 30 years.
"V39056": '30year',
}
BILL_IDS = ['V39063', 'V39065', 'V39066', 'V39067']
BOND_IDS = ['V39051', 'V39052', 'V39053', 'V39054', 'V39055', 'V39056']
@curry
def _format_url(instrument_type,
instrument_ids,
start_date,
end_date,
earliest_allowed_date):
"""
Format a URL for loading data from Bank of Canada.
"""
return (
"http://www.bankofcanada.ca/stats/results/csv"
"?lP=lookup_{instrument_type}_yields.php"
"&sR={restrict}"
"&se={instrument_ids}"
"&dF={start}"
"&dT={end}".format(
instrument_type=instrument_type,
instrument_ids='-'.join(map(prepend("L_"), instrument_ids)),
restrict=earliest_allowed_date.strftime("%Y-%m-%d"),
start=start_date.strftime("%Y-%m-%d"),
end=end_date.strftime("%Y-%m-%d"),
)
)
format_bill_url = _format_url('tbill', BILL_IDS)
format_bond_url = _format_url('bond', BOND_IDS)
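def _example_format_urls():  # pragma: no cover
    """Illustrative sketch, not part of the public API: because
    ``_format_url`` is curried, ``format_bill_url`` and ``format_bond_url``
    only need the three date arguments. The dates below are arbitrary.
    """
    start = pd.Timestamp('2014-01-02', tz='UTC')
    end = pd.Timestamp('2014-12-31', tz='UTC')
    url = format_bill_url(start, end, earliest_possible_date())
    assert url.startswith("http://www.bankofcanada.ca/stats/results/csv")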
def load_frame(url, skiprows):
"""
Load a DataFrame of data from a Bank of Canada site.
"""
return pd.read_csv(
url,
skiprows=skiprows,
skipinitialspace=True,
na_values=["Bank holiday", "Not available"],
parse_dates=["Date"],
index_col="Date",
).dropna(how='all') \
.tz_localize('UTC') \
.rename(columns=COLUMN_NAMES)
def check_known_inconsistencies(bill_data, bond_data):
"""
    There are a couple of quirks in the data provided by Bank of Canada.
Check that no new quirks have been introduced in the latest download.
"""
inconsistent_dates = bill_data.index.sym_diff(bond_data.index)
known_inconsistencies = [
# bill_data has an entry for 2010-02-15, which bond_data doesn't.
# bond_data has an entry for 2006-09-04, which bill_data doesn't.
        # Both of these dates are bank holidays (Family Day and Labor Day,
        # respectively).
pd.Timestamp('2006-09-04', tz='UTC'),
pd.Timestamp('2010-02-15', tz='UTC'),
# 2013-07-25 comes back as "Not available" from the bills endpoint.
# This date doesn't seem to be a bank holiday, but the previous
# calendar implementation dropped this entry, so we drop it as well.
# If someone cares deeply about the integrity of the Canadian trading
# calendar, they may want to consider forward-filling here rather than
# dropping the row.
pd.Timestamp('2013-07-25', tz='UTC'),
]
    unexpected_inconsistencies = inconsistent_dates.drop(known_inconsistencies)
    if len(unexpected_inconsistencies):
in_bills = bill_data.index.difference(bond_data.index).difference(
known_inconsistencies
)
in_bonds = bond_data.index.difference(bill_data.index).difference(
known_inconsistencies
)
raise ValueError(
"Inconsistent dates for Canadian treasury bills vs bonds. \n"
"Dates with bills but not bonds: {in_bills}.\n"
"Dates with bonds but not bills: {in_bonds}.".format(
in_bills=in_bills,
in_bonds=in_bonds,
)
)
def earliest_possible_date():
"""
The earliest date for which we can load data from this module.
"""
today = pd.Timestamp('now', tz='UTC').normalize()
# Bank of Canada only has the last 10 years of data at any given time.
return today.replace(year=today.year - 10)
def get_treasury_data(start_date, end_date):
bill_data = load_frame(
format_bill_url(start_date, end_date, start_date),
# We skip fewer rows here because we query for fewer bill fields,
# which makes the header smaller.
skiprows=18,
)
bond_data = load_frame(
format_bond_url(start_date, end_date, start_date),
skiprows=22,
)
check_known_inconsistencies(bill_data, bond_data)
# dropna('any') removes the rows for which we only had data for one of
# bills/bonds.
out = pd.concat([bond_data, bill_data], axis=1).dropna(how='any')
assert set(out.columns) == set(six.itervalues(COLUMN_NAMES))
# Multiply by 0.01 to convert from percentages to expected output format.
return out * 0.01 | zipline-live2-vk | /zipline-live2-vk-1.3.0.7.0.18.tar.gz/zipline-live2-vk-1.3.0.7.0.18/zipline/data/treasuries_can.py | treasuries_can.py |
import datetime
import quandl
import os
from zipline.data.bundles.core import load
import pandas as pd
import numpy as np
from zipline.utils.paths import zipline_root
from logbook import Logger, StderrHandler
from zipline.pipeline.factors import CustomFactor
from os import environ
import sys
import pickle
KERNEL_BUNDLE = 'sharadar-prices' # fundamentals are downloaded for symbols from this bundle
DATA_FILE = zipline_root() + '/data/SF1.npy' # the file name to be used when storing this in ~/.zipline/data
FUNDAMENTAL_FIELDS_FILE = zipline_root() + '/data/SF1.pkl'
log = Logger('quandl_fundamentals.py')
def set_api_key():
"""read QUANDL_API_KEY env variable, and set it."""
try:
api_key = environ["QUANDL_API_KEY"]
except KeyError:
print("could not read the env variable: QUANDL_API_KEY")
sys.exit()
quandl.ApiConfig.api_key = api_key
class SparseDataFactor(CustomFactor):
"""Abstract Base Class to be used for computing sparse data.
The data is packed and persisted into a NumPy binary data file
in a previous step.
This class must be subclassed with class variable 'outputs' set. The fields
in 'outputs' should match those persisted."""
inputs = []
window_length = 1
def __init__(self, *args, **kwargs):
self.time_index = None
self.curr_date = None # date for which time_index is accurate
self.last_date_seen = 0 # earliest date possible
self.data = None
self.data_path = "please_specify_.npy_file"
def bs(self, arr):
"""Binary Search"""
if len(arr) == 1:
if self.curr_date < arr[0]:
return 0
else: return 1
mid = int(len(arr) / 2)
if self.curr_date < arr[mid]:
return self.bs(arr[:mid])
else:
return mid + self.bs(arr[mid:])
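    # Worked example of bs() above (made-up dates): with arr = [10, 20, 30]
    # and self.curr_date = 25, the search evaluates 1 + bs([20, 30])
    # -> 1 + 1 = 2, so bs_sparse_time() below returns 2 - 1 = 1: the index
    # of the latest date (20) on or before curr_date.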
def bs_sparse_time(self, sid):
"""For each security find the best range in the sparse data."""
dates_for_sid = self.data.date[sid]
if np.isnan(dates_for_sid[0]):
return 0
# do a binary search of the dates array finding the index
# where self.curr_date will lie.
non_nan_dates = dates_for_sid[~np.isnan(dates_for_sid)]
return self.bs(non_nan_dates) - 1
def cold_start(self, today, assets):
if self.data is None:
            # allow_pickle=True is required because np.load's default for
            # allow_pickle changed to False in newer NumPy versions.
self.data = np.load(self.data_path, allow_pickle=True)
self.M = self.data.date.shape[1]
# for each sid, do binary search of date array to find current index
# the results can be shared across all factors that inherit from SparseDataFactor
# this sets an array of ints: time_index
self.time_index = np.full(self.N, -1, np.dtype('int64'))
self.curr_date = today.value
for asset in assets: # asset is numpy.int64
self.time_index[asset] = self.bs_sparse_time(asset)
def update_time_index(self, today, assets):
"""Ratchet update.
for each asset check if today >= dates[self.time_index]
if so then increment self.time_index[asset.sid] += 1"""
ind_p1 = self.time_index.copy()
np.add.at(ind_p1, ind_p1 != (self.M - 1), 1)
sids_to_increment = today.value >= self.data.date[np.arange(self.N), ind_p1]
sids_not_max = self.time_index != (self.M - 1) # create mask of non-maxed
self.time_index[sids_to_increment & sids_not_max] += 1
self.curr_date = today.value
def compute(self, today, assets, out, *arrays, **params):
# for each asset in assets determine index from date (today)
if self.time_index is None or today < self.last_date_seen:
self.cold_start(today, assets)
else:
self.update_time_index(today, assets)
self.last_date_seen = today
ti_used_today = self.time_index[assets]
for field in self.__class__.outputs:
out[field][:] = self.data[field][assets, ti_used_today]
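class _ExampleAltData(SparseDataFactor):  # pragma: no cover
    """Minimal sketch of subclassing SparseDataFactor; the fields, file
    name, and universe size are hypothetical and this class is not used
    anywhere. A subclass declares ``outputs`` matching the fields packed
    into its .npy file, points ``data_path`` at that file, and sets ``N``
    to the number of sids covered by the packed array.
    """
    outputs = ['field_a', 'field_b']
    def __init__(self, *args, **kwargs):
        super(_ExampleAltData, self).__init__(*args, **kwargs)
        self.N = 3000  # hypothetical universe size
        self.data_path = zipline_root() + '/data/alt_data.npy'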
class Fundamentals(SparseDataFactor):
params = ('algo_bundle',)
try:
with open(FUNDAMENTAL_FIELDS_FILE, 'rb') as f:
outputs = pickle.load(f)
    except (IOError, pickle.UnpicklingError):
outputs = []
def __init__(self, *args, **kwargs):
super(Fundamentals, self).__init__(*args, **kwargs)
self.data_path = DATA_FILE
bundle_data = load(KERNEL_BUNDLE)
self.kernel_sids = bundle_data.asset_finder.sids
self.kernel_assets = bundle_data.asset_finder.retrieve_all(self.kernel_sids)
self.N = len(self.kernel_assets)
self.algo_bundle = kwargs['algo_bundle'] if 'algo_bundle' in kwargs else KERNEL_BUNDLE
self.alternative_bundle = self.algo_bundle != KERNEL_BUNDLE
self.kernel_sid_symbol_map = {}
self.algo_to_kernel_sids = {}
if self.alternative_bundle:
for asset in self.kernel_assets:
self.kernel_sid_symbol_map[asset.symbol] = asset.sid
bundle_data = load(self.algo_bundle)
self.algo_sids = bundle_data.asset_finder.sids
self.algo_assets = bundle_data.asset_finder.retrieve_all(self.algo_sids)
for asset in self.algo_assets:
if asset.symbol in self.kernel_sid_symbol_map:
self.algo_to_kernel_sids[asset.sid] = self.kernel_sid_symbol_map[asset.symbol]
def algo_to_kernel_assets(self, assets):
kernel_assets = [self.algo_to_kernel_sids[asset] for asset in assets if asset in self.algo_to_kernel_sids]
return pd.Int64Index(kernel_assets)
def compute(self, today, assets, out, *arrays, **params):
if self.alternative_bundle:
            algo_to_kernel_assets = self.algo_to_kernel_assets(assets)
if self.time_index is None or today < self.last_date_seen:
self.cold_start(today, algo_to_kernel_assets)
else:
self.update_time_index(today, algo_to_kernel_assets)
self.last_date_seen = today
ti_used_today = self.time_index[algo_to_kernel_assets]
for field in self.__class__.outputs:
out[field][algo_to_kernel_assets] = self.data[field][algo_to_kernel_assets, ti_used_today]
else:
super(Fundamentals, self).compute(today, assets, out, *arrays, **params)
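def _example_pipeline_usage():  # pragma: no cover
    """Illustrative sketch, not part of the public API: once SF1.npy has
    been built by ``download()`` below, each packed field is exposed as a
    factor output named ``<FIELD>_<DIMENSION>``. The ``MARKETCAP_ARQ``
    output assumed here exists only if 'marketcap'/'ARQ' were downloaded.
    """
    from zipline.pipeline import Pipeline
    fund = Fundamentals(algo_bundle=KERNEL_BUNDLE)
    return Pipeline(columns={'marketcap': fund.MARKETCAP_ARQ})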
def get_tickers_from_bundle(bundle_name):
"""Gets a list of tickers from a given bundle"""
bundle_data = load(bundle_name, os.environ, None)
# we can request equities or futures separately changing the filters parameter
all_sids = bundle_data.asset_finder.sids
# retreive all assets in the bundle
all_assets = bundle_data.asset_finder.retrieve_all(all_sids)
return [a.symbol for a in all_assets]
def download_fundamentals_data(bundle,
                               start_date='2007-01-01',
                               end_date=datetime.datetime.today().strftime('%Y-%m-%d'),
                               tickers=None,
                               dataset='SHARADAR/SF1',
                               fields=None,
                               dimensions=None,
                               drop_dimensions=('MRT', 'MRQ', 'MRY'),
                               data_file=DATA_FILE,
                               ):
tickers_universe = get_tickers_from_bundle(bundle)
    N = len(tickers_universe)
    tickers = tickers if tickers else tickers_universe
    log.info(f"Downloading data for {len(tickers)} tickers")
header_columns = ['ticker',
'dimension',
'datekey',
'reportperiod',
'lastupdated',
'calendardate']
df = quandl.get_table(dataset,
calendardate={'gte': start_date, 'lte': end_date},
ticker=tickers,
qopts={'columns': header_columns + fields} if fields else None,
paginate=True)
df = df.rename(columns={'datekey': 'Date'}).set_index('Date')
dfs = [None] * N
fields = [f.upper() for f in df.columns if f not in header_columns]
dimensions = dimensions if dimensions \
else [d for d in df['dimension'].unique() if not d in drop_dimensions]
max_len = -1
    for i, ticker in enumerate(tickers):
        log.info(f"Pre-processing {ticker} ({i + 1}/{len(tickers)})...")
ticker_df = df[df.ticker==ticker]
ticker_series = []
for field in fields:
for dim in dimensions:
field_dim_series = ticker_df[ticker_df.dimension==dim][field.lower()]
field_dim_series.name = field + '_' + dim
                ticker_series.append(field_dim_series)
ticker_processed_df = pd.concat(ticker_series, axis=1)
max_len = max(max_len, ticker_processed_df.shape[0])
dfs[tickers_universe.index(ticker)] = ticker_processed_df
log.info ("Packing data...")
dtypes = [('date', '<f8')]
fundamental_fields = [f'{f}_{d}' for f in fields for d in dimensions]
with open(FUNDAMENTAL_FIELDS_FILE, 'wb') as f:
pickle.dump(fundamental_fields, f)
buff = np.full((len(fundamental_fields)+1, N, max_len), np.nan)
for field in fundamental_fields:
dtypes.append((field, '<f8'))
data = np.recarray(shape=(N, max_len), buf=buff, dtype=dtypes)
for i, df in enumerate(dfs):
if df is None:
continue
else:
df = pd.DataFrame(df)
ind_len = df.index.shape[0]
data.date[i, :ind_len] = df.index
for field in fundamental_fields:
data[field][i, :ind_len] = df[field]
log.info (f"Saving to {data_file}...")
data.dump(data_file) # can be read back with np.load()
log.info ("Done!")
return data
def download(bundle=KERNEL_BUNDLE,
             start_date='2013-01-01',
             tickers=None,
             fields=None,
             dimensions=None,
             ):
"""
this method is a top-level executor of the download
download volume could be reduced by setting start_date, tickers, fields, dimensions parameters
with all parameters set as default will need couple of hours to complete the task
for each field it gets each dimension available - thus returns fields X dimension values
:param bundle: bundle which to be used to get the universe of tickers, sharadar-prices by default
:param start_date: first date of the set
:param tickers: list of tickers, all tickers by default
:param fields: list of fields, all fields by default
:param dimensions: list of dimensions, all dimensions by default (skipping MRs)
"""
log.info(f"Downloading fundamentals data since {start_date}")
set_api_key()
    data = download_fundamentals_data(bundle=bundle,
                                      start_date=start_date,
                                      tickers=tickers,
                                      fields=fields,
                                      dimensions=dimensions,
                                      )
return data
def test():
start_date = '2015-01-01'
fields = [
'netinc',
'marketcap',
]
dimensions = [
'ARQ',
]
tickers = [
'AAPL',
'MSFT',
'PAAS',
]
    data = download(tickers=tickers,
                    fields=fields,
                    start_date=start_date,
                    dimensions=dimensions,
                    )
return data
def download_all(start_date='2013-01-01'):
    """
    Top-level executor of the fundamentals download - downloads everything
    since the given start_date (2013-01-01 by default).
    You may want to schedule download_all to be executed daily during
    out-of-market hours.
    """
data = download(start_date=start_date)
return data
if __name__ == '__main__':
download_all() | zipline-live2-vk | /zipline-live2-vk-1.3.0.7.0.18.tar.gz/zipline-live2-vk-1.3.0.7.0.18/zipline/data/bundles/quandl_fundamentals.py | quandl_fundamentals.py |
from io import BytesIO
from zipfile import ZipFile
from click import progressbar
from logbook import Logger
import pandas as pd
import requests
from six.moves.urllib.parse import urlencode
from six import iteritems
from trading_calendars import register_calendar_alias
from zipline.data.bundles import core as bundles # looking in .zipline/extensions.py
import numpy as np
from zipline.utils.paths import zipline_root
import pickle
# Code from:
# Quantopian Zipline Issues:
# "Cannot find data bundle during ingest #2275"
# https://github.com/quantopian/zipline/issues/2275
log = Logger(__name__)
ONE_MEGABYTE = 1024 * 1024
QUANDL_DATA_URL = (
'https://www.quandl.com/api/v3/datatables/SHARADAR/SEP.csv?'
)
QUANDL_EQUITIES_DATA_URL = (
'https://www.quandl.com/api/v3/datatables/SHARADAR/SEP.csv?'
)
QUANDL_FUNDS_DATA_URL = (
'https://www.quandl.com/api/v3/datatables/SHARADAR/SFP.csv?'
)
QUANDL_URLS = [dict(url=QUANDL_EQUITIES_DATA_URL, desc='Equities'),
dict(url=QUANDL_FUNDS_DATA_URL, desc='Funds'),
]
EXCLUSIONS_FILE = zipline_root() + '/data/exclusions.pkl'
@bundles.register('sharadar-ext')
def sharadar_prices_bundle(environ,
asset_db_writer,
minute_bar_writer,
daily_bar_writer,
adjustment_writer,
calendar,
start_session,
end_session,
cache,
show_progress,
output_dir):
api_key = environ.get('QUANDL_API_KEY')
if api_key is None:
raise ValueError(
"Please set your QUANDL_API_KEY environment variable and retry."
)
###ticker2sid_map = {}
raw_data = fetch_data_table(
api_key,
show_progress,
environ.get('QUANDL_DOWNLOAD_ATTEMPTS', 5)
)
asset_metadata = gen_asset_metadata(
raw_data[['symbol', 'date']],
show_progress
)
asset_db_writer.write(asset_metadata)
symbol_map = asset_metadata.symbol
sessions = calendar.sessions_in_range(start_session, end_session)
raw_data.set_index(['date', 'symbol'], inplace=True)
daily_bar_writer.write(
parse_pricing_and_vol(
raw_data,
sessions,
symbol_map
),
show_progress=show_progress
)
raw_data.reset_index(inplace=True)
# raw_data.index = pd.DatetimeIndex(raw_data.date)
###ajjc changes
raw_data['symbol'] = raw_data['symbol'].astype('category')
raw_data['sid'] = raw_data.symbol.cat.codes
# read in Dividend History
# ajjc pharrin----------------------
###uv = raw_data.symbol.unique() # get unique m_tickers (Zacks primary key)
# iterate over all the unique securities and pack data, and metadata
# for writing
# counter of valid securites, this will be our primary key
###sec_counter = 0
###for tkr in uv:
### #df_tkr = raw_data[raw_data['symbol'] == tkr]
### ticker2sid_map[tkr] = sec_counter # record the sid for use later
### sec_counter += 1
### dfd = pd.read_csv(file_name, index_col='date',
### parse_dates=['date'], na_values=['NA'])
# drop rows where dividends == 0.0
raw_data = raw_data[raw_data["dividends"] != 0.0]
raw_data.set_index(['date', 'sid'], inplace=True)
# raw_data.loc[:, 'ex_date'] = raw_data.loc[:, 'record_date'] = raw_data.date
# raw_data.loc[:, 'declared_date'] = raw_data.loc[:, 'pay_date'] = raw_data.date
raw_data.loc[:, 'ex_date'] = raw_data.loc[:, 'record_date'] = raw_data.index.get_level_values('date')
raw_data.loc[:, 'declared_date'] = raw_data.loc[:, 'pay_date'] = raw_data.index.get_level_values('date')
# raw_data.loc[:, 'sid'] = raw_data.loc[:, 'symbol'].apply(lambda x: ticker2sid_map[x])
raw_data = raw_data.rename(columns={'dividends': 'amount'})
# raw_data = raw_data.drop(['open', 'high', 'low', 'close', 'volume','symbol'], axis=1)
raw_data.reset_index(inplace=True)
raw_data = raw_data.drop(['open', 'high', 'low', 'close', 'volume', 'symbol', 'date'], axis=1)
# raw_data = raw_data.drop(['open', 'high', 'low', 'close', 'volume', 'lastupdated', 'ticker', 'closeunadj'], axis=1)
# # format dfd to have sid
adjustment_writer.write(dividends=raw_data)
# ajjc ----------------------------------
def format_metadata_url(api_key, url=QUANDL_DATA_URL):
""" Build the query URL for Quandl Prices metadata.
"""
query_params = [('api_key', api_key), ('qopts.export', 'true')]
return (
url + urlencode(query_params)
)
def load_data_table(file,
index_col,
show_progress=False,
check_exclusions=True):
""" Load data table from zip file provided by Quandl.
"""
with ZipFile(file) as zip_file:
file_names = zip_file.namelist()
assert len(file_names) == 1, "Expected a single file from Quandl."
wiki_prices = file_names.pop()
with zip_file.open(wiki_prices) as table_file:
if show_progress:
log.info('Parsing raw data.')
data_table = pd.read_csv(
table_file,
parse_dates=['date'],
index_col=index_col,
usecols=[
'ticker',
'date',
'open',
'high',
'low',
'close',
'volume',
'dividends',
##'closeunadj',
##'lastupdated' #prune last two columns for zipline bundle load
],
)
data_table.rename(
columns={
'ticker': 'symbol'
},
inplace=True,
copy=False,
)
initial_size = data_table.size
if check_exclusions:
data_table = data_table[~data_table.symbol.isin(load_exclusions())]
    if show_progress:
        excluded = initial_size - data_table.size
        log.info(f"Excluding {excluded} "
                 f"({excluded / initial_size * 100:.1f}%) data points "
                 "based on exclusions.pkl")
return data_table
def fetch_data_table(api_key,
show_progress,
retries,
check_exclusions=True):
for _ in range(retries):
data_tables = []
try:
for url in QUANDL_URLS:
if show_progress:
log.info(f'Downloading Sharadar Price metadata for: {url["desc"]}.')
metadata = pd.read_csv(
format_metadata_url(api_key, url=url["url"])
)
# Extract link from metadata and download zip file.
table_url = metadata.loc[0, 'file.link']
if show_progress:
raw_file = download_with_progress(
table_url,
chunk_size=ONE_MEGABYTE,
label="Downloading Prices table from Quandl Sharadar"
)
else:
raw_file = download_without_progress(table_url)
data_table = load_data_table(
file=raw_file,
index_col=None,
show_progress=show_progress,
check_exclusions=check_exclusions,
)
data_tables.append(data_table)
return pd.concat(data_tables)
except Exception:
log.exception("Exception raised reading Quandl data. Retrying.")
else:
raise ValueError(
"Failed to download Quandl data after %d attempts." % (retries)
)
def gen_asset_metadata(data, show_progress):
if show_progress:
log.info('Generating asset metadata.')
data = data.groupby(
by='symbol'
).agg(
{'date': [np.min, np.max]}
)
data.reset_index(inplace=True)
data['start_date'] = data.date.amin
data['end_date'] = data.date.amax
del data['date']
data.columns = data.columns.get_level_values(0)
data['exchange'] = 'QUANDL'
data['auto_close_date'] = data['end_date'].values + pd.Timedelta(days=1)
return data
def parse_pricing_and_vol(data,
sessions,
symbol_map):
for asset_id, symbol in iteritems(symbol_map):
asset_data = data.xs(
symbol,
level=1
).reindex(
sessions.tz_localize(None)
).fillna(0.0)
yield asset_id, asset_data
def download_with_progress(url, chunk_size, **progress_kwargs):
"""
Download streaming data from a URL, printing progress information to the
terminal.
Parameters
----------
url : str
A URL that can be understood by ``requests.get``.
chunk_size : int
Number of bytes to read at a time from requests.
**progress_kwargs
Forwarded to click.progressbar.
Returns
-------
data : BytesIO
A BytesIO containing the downloaded data.
"""
resp = requests.get(url, stream=True)
resp.raise_for_status()
total_size = int(resp.headers['content-length'])
data = BytesIO()
with progressbar(length=total_size, **progress_kwargs) as pbar:
for chunk in resp.iter_content(chunk_size=chunk_size):
data.write(chunk)
pbar.update(len(chunk))
data.seek(0)
return data
def download_without_progress(url):
"""
Download data from a URL, returning a BytesIO containing the loaded data.
Parameters
----------
url : str
A URL that can be understood by ``requests.get``.
Returns
-------
data : BytesIO
A BytesIO containing the downloaded data.
"""
resp = requests.get(url)
resp.raise_for_status()
return BytesIO(resp.content)
def load_exclusions():
    # EXCLUSIONS_FILE is defined at module level above.
    try:
        with open(EXCLUSIONS_FILE, 'rb') as f:
            exclusions = pickle.load(f)
    except (IOError, pickle.UnpicklingError):
        exclusions = []
    return exclusions
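def _example_write_exclusions():  # pragma: no cover
    """Illustrative sketch, not part of the public API: the file read by
    load_exclusions() is simply a pickled list of ticker symbols to drop
    during ingestion. The tickers below are arbitrary.
    """
    with open(EXCLUSIONS_FILE, 'wb') as f:
        pickle.dump(['ZTEST', 'ZXYZ'], f)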
register_calendar_alias("sharadar-ext", "NYSE") | zipline-live2-vk | /zipline-live2-vk-1.3.0.7.0.18.tar.gz/zipline-live2-vk-1.3.0.7.0.18/zipline/data/bundles/sharadar_ext.py | sharadar_ext.py |
import os
import sys
from logbook import Logger, StreamHandler
from numpy import empty
from pandas import DataFrame, read_csv, Index, Timedelta, NaT
from trading_calendars import register_calendar_alias
from zipline.utils.cli import maybe_show_progress
from . import core as bundles
handler = StreamHandler(sys.stdout, format_string=" | {record.message}")
logger = Logger(__name__)
logger.handlers.append(handler)
def csvdir_equities(tframes=None, csvdir=None):
"""
Generate an ingest function for custom data bundle
This function can be used in ~/.zipline/extension.py
to register bundle with custom parameters, e.g. with
a custom trading calendar.
Parameters
----------
tframes: tuple, optional
The data time frames, supported timeframes: 'daily' and 'minute'
csvdir : string, optional, default: CSVDIR environment variable
The path to the directory of this structure:
<directory>/<timeframe1>/<symbol1>.csv
<directory>/<timeframe1>/<symbol2>.csv
<directory>/<timeframe1>/<symbol3>.csv
<directory>/<timeframe2>/<symbol1>.csv
<directory>/<timeframe2>/<symbol2>.csv
<directory>/<timeframe2>/<symbol3>.csv
Returns
-------
ingest : callable
The bundle ingest function
Examples
--------
This code should be added to ~/.zipline/extension.py
.. code-block:: python
from zipline.data.bundles import csvdir_equities, register
register('custom-csvdir-bundle',
csvdir_equities(["daily", "minute"],
'/full/path/to/the/csvdir/directory'))
"""
return CSVDIRBundle(tframes, csvdir).ingest
class CSVDIRBundle:
"""
Wrapper class to call csvdir_bundle with provided
list of time frames and a path to the csvdir directory
"""
def __init__(self, tframes=None, csvdir=None):
self.tframes = tframes
self.csvdir = csvdir
def ingest(self,
environ,
asset_db_writer,
minute_bar_writer,
daily_bar_writer,
adjustment_writer,
calendar,
start_session,
end_session,
cache,
show_progress,
output_dir):
csvdir_bundle(environ,
asset_db_writer,
minute_bar_writer,
daily_bar_writer,
adjustment_writer,
calendar,
start_session,
end_session,
cache,
show_progress,
output_dir,
self.tframes,
self.csvdir)
@bundles.register("csvdir")
def csvdir_bundle(environ,
asset_db_writer,
minute_bar_writer,
daily_bar_writer,
adjustment_writer,
calendar,
start_session,
end_session,
cache,
show_progress,
output_dir,
tframes=None,
csvdir=None):
"""
Build a zipline data bundle from the directory with csv files.
"""
if not csvdir:
csvdir = environ.get('CSVDIR')
if not csvdir:
raise ValueError("CSVDIR environment variable is not set")
if not os.path.isdir(csvdir):
raise ValueError("%s is not a directory" % csvdir)
if not tframes:
tframes = set(["daily", "minute"]).intersection(os.listdir(csvdir))
if not tframes:
raise ValueError("'daily' and 'minute' directories "
"not found in '%s'" % csvdir)
divs_splits = {'divs': DataFrame(columns=['sid', 'amount',
'ex_date', 'record_date',
'declared_date', 'pay_date']),
'splits': DataFrame(columns=['sid', 'ratio',
'effective_date'])}
for tframe in tframes:
ddir = os.path.join(csvdir, tframe)
symbols = sorted(item.split('.csv')[0]
for item in os.listdir(ddir)
if '.csv' in item)
if not symbols:
raise ValueError("no <symbol>.csv* files found in %s" % ddir)
dtype = [('start_date', 'datetime64[ns]'),
('end_date', 'datetime64[ns]'),
('auto_close_date', 'datetime64[ns]'),
('symbol', 'object')]
metadata = DataFrame(empty(len(symbols), dtype=dtype))
if tframe == 'minute':
writer = minute_bar_writer
else:
writer = daily_bar_writer
writer.write(_pricing_iter(ddir, symbols, metadata,
divs_splits, show_progress),
show_progress=show_progress)
# Hardcode the exchange to "CSVDIR" for all assets and (elsewhere)
# register "CSVDIR" to resolve to the NYSE calendar, because these
# are all equities and thus can use the NYSE calendar.
metadata['exchange'] = "CSVDIR"
asset_db_writer.write(equities=metadata)
divs_splits['divs']['sid'] = divs_splits['divs']['sid'].astype(int)
divs_splits['splits']['sid'] = divs_splits['splits']['sid'].astype(int)
adjustment_writer.write(splits=divs_splits['splits'],
dividends=divs_splits['divs'])
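# Expected CSV layout for _pricing_iter below (a sketch; the values are
# made up): the first column is parsed as the date index, OHLCV columns
# are required, and 'split'/'dividend' columns are optional.
#
#     date,open,high,low,close,volume,dividend,split
#     2014-01-02,10.0,10.5,9.9,10.2,100000,0.0,1.0
#     2014-01-03,10.2,10.4,10.0,10.1,90000,0.05,1.0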
def _pricing_iter(csvdir, symbols, metadata, divs_splits, show_progress):
with maybe_show_progress(symbols, show_progress,
label='Loading custom pricing data: ') as it:
files = os.listdir(csvdir)
for sid, symbol in enumerate(it):
logger.debug('%s: sid %s' % (symbol, sid))
try:
fname = [fname for fname in files
if '%s.csv' % symbol in fname][0]
except IndexError:
raise ValueError("%s.csv file is not in %s" % (symbol, csvdir))
dfr = read_csv(os.path.join(csvdir, fname),
parse_dates=[0],
infer_datetime_format=True,
index_col=0).sort_index()
start_date = dfr.index[0]
end_date = dfr.index[-1]
# The auto_close date is the day after the last trade.
ac_date = end_date + Timedelta(days=1)
metadata.iloc[sid] = start_date, end_date, ac_date, symbol
if 'split' in dfr.columns:
tmp = 1. / dfr[dfr['split'] != 1.0]['split']
split = DataFrame(data=tmp.index.tolist(),
columns=['effective_date'])
split['ratio'] = tmp.tolist()
split['sid'] = sid
splits = divs_splits['splits']
index = Index(range(splits.shape[0],
splits.shape[0] + split.shape[0]))
split.set_index(index, inplace=True)
divs_splits['splits'] = splits.append(split)
if 'dividend' in dfr.columns:
# ex_date amount sid record_date declared_date pay_date
tmp = dfr[dfr['dividend'] != 0.0]['dividend']
div = DataFrame(data=tmp.index.tolist(), columns=['ex_date'])
div['record_date'] = NaT
div['declared_date'] = NaT
div['pay_date'] = NaT
div['amount'] = tmp.tolist()
div['sid'] = sid
divs = divs_splits['divs']
ind = Index(range(divs.shape[0], divs.shape[0] + div.shape[0]))
div.set_index(ind, inplace=True)
divs_splits['divs'] = divs.append(div)
yield sid, dfr
register_calendar_alias("CSVDIR", "NYSE") | zipline-live2-vk | /zipline-live2-vk-1.3.0.7.0.18.tar.gz/zipline-live2-vk-1.3.0.7.0.18/zipline/data/bundles/csvdir.py | csvdir.py |
from collections import namedtuple
import errno
import os
import shutil
import warnings
from contextlib2 import ExitStack
import click
import pandas as pd
from trading_calendars import get_calendar
from toolz import curry, complement, take
from ..us_equity_pricing import (
BcolzDailyBarReader,
BcolzDailyBarWriter,
SQLiteAdjustmentReader,
SQLiteAdjustmentWriter,
)
from ..minute_bars import (
BcolzMinuteBarReader,
BcolzMinuteBarWriter,
)
from zipline.assets import AssetDBWriter, AssetFinder, ASSET_DB_VERSION
from zipline.assets.asset_db_migrations import downgrade
from zipline.utils.cache import (
dataframe_cache,
working_dir,
working_file,
)
from zipline.utils.compat import mappingproxy
from zipline.utils.input_validation import ensure_timestamp, optionally
import zipline.utils.paths as pth
from zipline.utils.preprocess import preprocess
def asset_db_path(bundle_name, timestr, environ=None, db_version=None):
return pth.data_path(
asset_db_relative(bundle_name, timestr, environ, db_version),
environ=environ,
)
def minute_equity_path(bundle_name, timestr, environ=None):
return pth.data_path(
minute_equity_relative(bundle_name, timestr, environ),
environ=environ,
)
def daily_equity_path(bundle_name, timestr, environ=None):
return pth.data_path(
daily_equity_relative(bundle_name, timestr, environ),
environ=environ,
)
def adjustment_db_path(bundle_name, timestr, environ=None):
return pth.data_path(
adjustment_db_relative(bundle_name, timestr, environ),
environ=environ,
)
def cache_path(bundle_name, environ=None):
return pth.data_path(
cache_relative(bundle_name, environ),
environ=environ,
)
def adjustment_db_relative(bundle_name, timestr, environ=None):
return bundle_name, timestr, 'adjustments.sqlite'
def cache_relative(bundle_name, timestr, environ=None):
return bundle_name, '.cache'
def daily_equity_relative(bundle_name, timestr, environ=None):
return bundle_name, timestr, 'daily_equities.bcolz'
def minute_equity_relative(bundle_name, timestr, environ=None):
return bundle_name, timestr, 'minute_equities.bcolz'
def asset_db_relative(bundle_name, timestr, environ=None, db_version=None):
db_version = ASSET_DB_VERSION if db_version is None else db_version
return bundle_name, timestr, 'assets-%d.sqlite' % db_version
def to_bundle_ingest_dirname(ts):
"""Convert a pandas Timestamp into the name of the directory for the
ingestion.
Parameters
----------
ts : pandas.Timestamp
        The time of the ingestion.
Returns
-------
name : str
The name of the directory for this ingestion.
"""
return ts.isoformat().replace(':', ';')
def from_bundle_ingest_dirname(cs):
"""Read a bundle ingestion directory name into a pandas Timestamp.
Parameters
----------
cs : str
The name of the directory.
Returns
-------
ts : pandas.Timestamp
The time when this ingestion happened.
"""
return pd.Timestamp(cs.replace(';', ':'))
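# Roundtrip sketch: the ';' substitution just makes the timestamp safe to
# use as a directory name on platforms where ':' is reserved, e.g.
#     to_bundle_ingest_dirname(pd.Timestamp('2020-01-02 09:30'))
#         -> '2020-01-02T09;30;00'
#     from_bundle_ingest_dirname('2020-01-02T09;30;00')
#         -> pd.Timestamp('2020-01-02 09:30:00')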
def ingestions_for_bundle(bundle, environ=None):
return sorted(
(from_bundle_ingest_dirname(ing)
for ing in os.listdir(pth.data_path([bundle], environ))
if not pth.hidden(ing)),
reverse=True,
)
RegisteredBundle = namedtuple(
'RegisteredBundle',
['calendar_name',
'start_session',
'end_session',
'minutes_per_day',
'ingest',
'create_writers']
)
BundleData = namedtuple(
'BundleData',
'asset_finder equity_minute_bar_reader equity_daily_bar_reader '
'adjustment_reader',
)
BundleCore = namedtuple(
'BundleCore',
'bundles register unregister ingest load clean',
)
class UnknownBundle(click.ClickException, LookupError):
"""Raised if no bundle with the given name was registered.
"""
exit_code = 1
def __init__(self, name):
super(UnknownBundle, self).__init__(
'No bundle registered with the name %r' % name,
)
self.name = name
def __str__(self):
return self.message
class BadClean(click.ClickException, ValueError):
"""Exception indicating that an invalid argument set was passed to
``clean``.
Parameters
----------
before, after, keep_last : any
The bad arguments to ``clean``.
See Also
--------
clean
"""
def __init__(self, before, after, keep_last):
super(BadClean, self).__init__(
            'Cannot pass a combination of `before` and `after` with '
            '`keep_last`. Got: before=%r, after=%r, keep_last=%r\n' % (
before,
after,
keep_last,
),
)
def __str__(self):
return self.message
def _make_bundle_core():
"""Create a family of data bundle functions that read from the same
bundle mapping.
Returns
-------
bundles : mappingproxy
The mapping of bundles to bundle payloads.
register : callable
The function which registers new bundles in the ``bundles`` mapping.
unregister : callable
The function which deregisters bundles from the ``bundles`` mapping.
ingest : callable
        The function which downloads and writes data for a given data bundle.
load : callable
The function which loads the ingested bundles back into memory.
clean : callable
The function which cleans up data written with ``ingest``.
"""
_bundles = {} # the registered bundles
# Expose _bundles through a proxy so that users cannot mutate this
# accidentally. Users may go through `register` to update this which will
# warn when trampling another bundle.
bundles = mappingproxy(_bundles)
@curry
def register(name,
f,
calendar_name='NYSE',
start_session=None,
end_session=None,
minutes_per_day=390,
create_writers=True):
"""Register a data bundle ingest function.
Parameters
----------
name : str
The name of the bundle.
f : callable
The ingest function. This function will be passed:
environ : mapping
The environment this is being run with.
asset_db_writer : AssetDBWriter
The asset db writer to write into.
minute_bar_writer : BcolzMinuteBarWriter
The minute bar writer to write into.
daily_bar_writer : BcolzDailyBarWriter
The daily bar writer to write into.
adjustment_writer : SQLiteAdjustmentWriter
The adjustment db writer to write into.
calendar : trading_calendars.TradingCalendar
The trading calendar to ingest for.
start_session : pd.Timestamp
The first session of data to ingest.
end_session : pd.Timestamp
The last session of data to ingest.
cache : DataFrameCache
A mapping object to temporarily store dataframes.
This should be used to cache intermediates in case the load
fails. This will be automatically cleaned up after a
successful load.
show_progress : bool
Show the progress for the current load where possible.
calendar_name : str, optional
The name of a calendar used to align bundle data.
Default is 'NYSE'.
start_session : pd.Timestamp, optional
The first session for which we want data. If not provided,
or if the date lies outside the range supported by the
calendar, the first_session of the calendar is used.
end_session : pd.Timestamp, optional
The last session for which we want data. If not provided,
or if the date lies outside the range supported by the
calendar, the last_session of the calendar is used.
minutes_per_day : int, optional
The number of minutes in each normal trading day.
create_writers : bool, optional
Should the ingest machinery create the writers for the ingest
function. This can be disabled as an optimization for cases where
they are not needed, like the ``quantopian-quandl`` bundle.
Notes
-----
        This function may be used as a decorator, for example:
.. code-block:: python
@register('quandl')
def quandl_ingest_function(...):
...
See Also
--------
zipline.data.bundles.bundles
"""
if name in bundles:
warnings.warn(
'Overwriting bundle with name %r' % name,
stacklevel=3,
)
# NOTE: We don't eagerly compute calendar values here because
# `register` is called at module scope in zipline, and creating a
# calendar currently takes between 0.5 and 1 seconds, which causes a
# noticeable delay on the zipline CLI.
_bundles[name] = RegisteredBundle(
calendar_name=calendar_name,
start_session=start_session,
end_session=end_session,
minutes_per_day=minutes_per_day,
ingest=f,
create_writers=create_writers,
)
return f
def unregister(name):
"""Unregister a bundle.
Parameters
----------
name : str
The name of the bundle to unregister.
Raises
------
UnknownBundle
Raised when no bundle has been registered with the given name.
See Also
--------
zipline.data.bundles.bundles
"""
try:
del _bundles[name]
except KeyError:
raise UnknownBundle(name)
def ingest(name,
environ=os.environ,
timestamp=None,
assets_versions=(),
show_progress=False):
"""Ingest data for a given bundle.
Parameters
----------
name : str
The name of the bundle.
environ : mapping, optional
The environment variables. By default this is os.environ.
timestamp : datetime, optional
The timestamp to use for the load.
By default this is the current time.
assets_versions : Iterable[int], optional
Versions of the assets db to which to downgrade.
show_progress : bool, optional
Tell the ingest function to display the progress where possible.
"""
try:
bundle = bundles[name]
except KeyError:
raise UnknownBundle(name)
calendar = get_calendar(bundle.calendar_name)
start_session = bundle.start_session
end_session = bundle.end_session
if start_session is None or start_session < calendar.first_session:
start_session = calendar.first_session
if end_session is None or end_session > calendar.last_session:
end_session = calendar.last_session
if timestamp is None:
timestamp = pd.Timestamp.utcnow()
timestamp = timestamp.tz_convert('utc').tz_localize(None)
timestr = to_bundle_ingest_dirname(timestamp)
cachepath = cache_path(name, environ=environ)
pth.ensure_directory(pth.data_path([name, timestr], environ=environ))
pth.ensure_directory(cachepath)
with dataframe_cache(cachepath, clean_on_failure=False) as cache, \
ExitStack() as stack:
# we use `cleanup_on_failure=False` so that we don't purge the
# cache directory if the load fails in the middle
if bundle.create_writers:
wd = stack.enter_context(working_dir(
pth.data_path([], environ=environ))
)
daily_bars_path = wd.ensure_dir(
*daily_equity_relative(
name, timestr, environ=environ,
)
)
daily_bar_writer = BcolzDailyBarWriter(
daily_bars_path,
calendar,
start_session,
end_session,
)
# Do an empty write to ensure that the daily ctables exist
# when we create the SQLiteAdjustmentWriter below. The
# SQLiteAdjustmentWriter needs to open the daily ctables so
# that it can compute the adjustment ratios for the dividends.
daily_bar_writer.write(())
minute_bar_writer = BcolzMinuteBarWriter(
wd.ensure_dir(*minute_equity_relative(
name, timestr, environ=environ)
),
calendar,
start_session,
end_session,
minutes_per_day=bundle.minutes_per_day,
)
assets_db_path = wd.getpath(*asset_db_relative(
name, timestr, environ=environ,
))
asset_db_writer = AssetDBWriter(assets_db_path)
adjustment_db_writer = stack.enter_context(
SQLiteAdjustmentWriter(
wd.getpath(*adjustment_db_relative(
name, timestr, environ=environ)),
BcolzDailyBarReader(daily_bars_path),
calendar.all_sessions,
overwrite=True,
)
)
else:
daily_bar_writer = None
minute_bar_writer = None
asset_db_writer = None
adjustment_db_writer = None
if assets_versions:
raise ValueError('Need to ingest a bundle that creates '
'writers in order to downgrade the assets'
' db.')
bundle.ingest(
environ,
asset_db_writer,
minute_bar_writer,
daily_bar_writer,
adjustment_db_writer,
calendar,
start_session,
end_session,
cache,
show_progress,
pth.data_path([name, timestr], environ=environ),
)
for version in sorted(set(assets_versions), reverse=True):
version_path = wd.getpath(*asset_db_relative(
name, timestr, environ=environ, db_version=version,
))
with working_file(version_path) as wf:
shutil.copy2(assets_db_path, wf.path)
downgrade(wf.path, version)
def most_recent_data(bundle_name, timestamp, environ=None):
"""Get the path to the most recent data after ``date``for the
given bundle.
Parameters
----------
bundle_name : str
The name of the bundle to lookup.
timestamp : datetime
The timestamp to begin searching on or before.
environ : dict, optional
An environment dict to forward to zipline_root.
"""
if bundle_name not in bundles:
raise UnknownBundle(bundle_name)
try:
candidates = os.listdir(
pth.data_path([bundle_name], environ=environ),
)
return pth.data_path(
[bundle_name,
max(
filter(complement(pth.hidden), candidates),
key=from_bundle_ingest_dirname,
)],
environ=environ,
)
except (ValueError, OSError) as e:
if getattr(e, 'errno', errno.ENOENT) != errno.ENOENT:
raise
raise ValueError(
'no data for bundle {bundle!r} on or before {timestamp}\n'
'maybe you need to run: $ zipline ingest -b {bundle}'.format(
bundle=bundle_name,
timestamp=timestamp,
),
)
def load(name, environ=os.environ, timestamp=None):
"""Loads a previously ingested bundle.
Parameters
----------
name : str
The name of the bundle.
environ : mapping, optional
            The environment variables. Defaults to ``os.environ``.
timestamp : datetime, optional
The timestamp of the data to lookup.
Defaults to the current time.
Returns
-------
bundle_data : BundleData
The raw data readers for this bundle.
"""
if timestamp is None:
timestamp = pd.Timestamp.utcnow()
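        # Note: despite the name, ``timestr`` is the absolute ingestion path
        # returned by ``most_recent_data``. The ``*_path`` helpers below still
        # resolve correctly because ``os.path.join`` discards any components
        # that precede an absolute path.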
timestr = most_recent_data(name, timestamp, environ=environ)
return BundleData(
asset_finder=AssetFinder(
asset_db_path(name, timestr, environ=environ),
),
equity_minute_bar_reader=BcolzMinuteBarReader(
minute_equity_path(name, timestr, environ=environ),
),
equity_daily_bar_reader=BcolzDailyBarReader(
daily_equity_path(name, timestr, environ=environ),
),
adjustment_reader=SQLiteAdjustmentReader(
adjustment_db_path(name, timestr, environ=environ),
),
)
@preprocess(
before=optionally(ensure_timestamp),
after=optionally(ensure_timestamp),
)
def clean(name,
before=None,
after=None,
keep_last=None,
environ=os.environ):
"""Clean up data that was created with ``ingest`` or
``$ python -m zipline ingest``
Parameters
----------
name : str
The name of the bundle to remove data for.
before : datetime, optional
Remove data ingested before this date.
This argument is mutually exclusive with: keep_last
after : datetime, optional
Remove data ingested after this date.
This argument is mutually exclusive with: keep_last
keep_last : int, optional
Remove all but the last ``keep_last`` ingestions.
This argument is mutually exclusive with:
before
after
environ : mapping, optional
            The environment variables. Defaults to ``os.environ``.
Returns
-------
cleaned : set[str]
            The paths of the ingestion directories that were removed.
Raises
------
BadClean
            Raised when ``before`` and/or ``after`` are passed with
``keep_last``. This is a subclass of ``ValueError``.
"""
try:
all_runs = sorted(
filter(
complement(pth.hidden),
os.listdir(pth.data_path([name], environ=environ)),
),
key=from_bundle_ingest_dirname,
)
except OSError as e:
if e.errno != errno.ENOENT:
raise
raise UnknownBundle(name)
if ((before is not None or after is not None) and
keep_last is not None):
raise BadClean(before, after, keep_last)
if keep_last is None:
def should_clean(name):
dt = from_bundle_ingest_dirname(name)
return (
(before is not None and dt < before) or
(after is not None and dt > after)
)
elif keep_last >= 0:
last_n_dts = set(take(keep_last, reversed(all_runs)))
def should_clean(name):
return name not in last_n_dts
else:
raise BadClean(before, after, keep_last)
cleaned = set()
for run in all_runs:
if should_clean(run):
path = pth.data_path([name, run], environ=environ)
shutil.rmtree(path)
cleaned.add(path)
return cleaned
return BundleCore(bundles, register, unregister, ingest, load, clean)
bundles, register, unregister, ingest, load, clean = _make_bundle_core()


# === end of zipline/data/bundles/core.py ===
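
# ---------------------------------------------------------------------------
# Illustrative usage sketch for the bundle API above (not part of the original
# module). It assumes the package re-exports these names, as upstream zipline
# does; the bundle name and ingest body are hypothetical.
#
# from zipline.data.bundles import register, ingest, load, clean
#
# @register('my-csv-bundle', calendar_name='NYSE')
# def my_ingest(environ, asset_db_writer, minute_bar_writer, daily_bar_writer,
#               adjustment_writer, calendar, start_session, end_session,
#               cache, show_progress, output_dir):
#     ...  # write asset metadata, daily bars, and adjustments here
#
# ingest('my-csv-bundle')               # writes a new timestamped ingestion
# bundle_data = load('my-csv-bundle')   # readers for the most recent ingestion
# clean('my-csv-bundle', keep_last=1)   # drop all but the newest ingestion
# ---------------------------------------------------------------------------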
from zipline.data import bundles
from zipline.gens.brokers.ib_broker2 import IBBroker
import pandas as pd
from datetime import datetime
import pytz
import pickle
from zipline.data.bundles.sharadar_ext import EXCLUSIONS_FILE
import os
def exclude_from_local(bundle='sharadar-ext'):
from logbook import Logger
log = Logger(__name__)
tws_uri = 'localhost:7496:1'
broker = IBBroker(tws_uri)
bundle_data = bundles.load(
bundle,
)
all_sids = bundle_data.asset_finder.sids
all_assets = bundle_data.asset_finder.retrieve_all(all_sids)
exclusions = []
for i, asset in enumerate(all_assets):
live_today = pd.Timestamp(datetime.utcnow().date()).replace(tzinfo=pytz.UTC)
symbol = asset.symbol
if asset.to_dict()['end_date'] + pd.offsets.BDay(1) >= live_today:
            log.info(f'Checking {asset.symbol} symbol ({i+1}/{len(all_assets)})')
contracts = None
while contracts is None:
contracts = broker.reqMatchingSymbols(symbol)
if symbol not in [c.contract.symbol for c in contracts] and '^' not in symbol:
log.warning(f'!!!No IB ticker data for {symbol}!!!')
exclusions.append(symbol)
continue
ticker = broker.subscribe_to_market_data(symbol)
broker.cancelMktData(ticker.contract)
if pd.isna(ticker.last) and pd.isna(ticker.close) and '^' not in symbol:
log.warning(f'!!!No IB market data for {symbol}!!!')
exclusions.append(symbol)
else:
log.info(f'Skipping check for {asset.symbol} as it is not traded any more')
with open(EXCLUSIONS_FILE, 'wb') as f:
pickle.dump(exclusions, f)
    log.info(f'{len(exclusions)} exclusions found!')
def exclude_from_web(bundle_module='sharadar_ext',
look_for_file=False,
):
import importlib
from logbook import Logger
log = Logger(__name__)
if look_for_file and os.path.exists(EXCLUSIONS_FILE):
log.info('No need to run excluder, exclusions file has been found!')
return
full_bundle_module_name = 'zipline.data.bundles.' + bundle_module
bundle_module_ref = importlib.import_module(full_bundle_module_name)
tws_uri = 'localhost:7496:1'
broker = IBBroker(tws_uri)
exclusions = []
api_key = os.environ.get('QUANDL_API_KEY')
raw_data = bundle_module_ref.fetch_data_table(api_key=api_key,
show_progress=True,
retries=1,
check_exclusions=False,
)
asset_metadata = bundle_module_ref.gen_asset_metadata(raw_data[['symbol', 'date']],
True
)
for i, asset in asset_metadata.iterrows():
live_today = pd.Timestamp(datetime.utcnow().date()).replace(tzinfo=pytz.UTC)
asset_end_date = pd.Timestamp(asset['end_date']).replace(tzinfo=pytz.UTC)
symbol = asset['symbol']
if asset_end_date + pd.offsets.BDay(1) >= live_today:
log.info(f'Checking {symbol} symbol ({i+1}/{len(asset_metadata)})')
contracts = None
while contracts is None:
contracts = broker.reqMatchingSymbols(symbol)
if symbol not in [c.contract.symbol for c in contracts] and '^' not in symbol:
log.warning(f'!!!No IB ticker data for {symbol}!!!')
exclusions.append(symbol)
continue
ticker = broker.subscribe_to_market_data(symbol)
broker.cancelMktData(ticker.contract)
if pd.isna(ticker.last) and pd.isna(ticker.close) and '^' not in symbol:
log.warning(f'!!!No IB market data for {symbol}!!!')
exclusions.append(symbol)
else:
log.info(f'Skipping check for {symbol} as it is not traded any more: asset_end_date is {asset_end_date}')
with open(EXCLUSIONS_FILE, 'wb') as f:
pickle.dump(exclusions, f)
    log.info(f'{len(exclusions)} exclusions found!')


# === end of zipline/data/bundles/excluder.py ===
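
# Illustrative invocation sketch (not part of the original module). Both
# functions above expect a running IB TWS/Gateway at localhost:7496 (the
# hardcoded ``tws_uri``) and pickle the resulting symbol list to
# EXCLUSIONS_FILE:
#
# from zipline.data.bundles.excluder import exclude_from_web
# exclude_from_web(bundle_module='sharadar_ext', look_for_file=True)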
from io import BytesIO
import tarfile
from zipfile import ZipFile
from click import progressbar
from logbook import Logger
import pandas as pd
import requests
from six.moves.urllib.parse import urlencode
from six import iteritems
from trading_calendars import register_calendar_alias
from zipline.utils.deprecate import deprecated
from . import core as bundles
import numpy as np
log = Logger(__name__)
ONE_MEGABYTE = 1024 * 1024
QUANDL_DATA_URL = (
'https://www.quandl.com/api/v3/datatables/WIKI/PRICES.csv?'
)
def format_metadata_url(api_key):
""" Build the query URL for Quandl WIKI Prices metadata.
"""
query_params = [('api_key', api_key), ('qopts.export', 'true')]
return (
QUANDL_DATA_URL + urlencode(query_params)
)
def load_data_table(file,
index_col,
show_progress=False):
""" Load data table from zip file provided by Quandl.
"""
with ZipFile(file) as zip_file:
file_names = zip_file.namelist()
assert len(file_names) == 1, "Expected a single file from Quandl."
wiki_prices = file_names.pop()
with zip_file.open(wiki_prices) as table_file:
if show_progress:
log.info('Parsing raw data.')
data_table = pd.read_csv(
table_file,
parse_dates=['date'],
index_col=index_col,
usecols=[
'ticker',
'date',
'open',
'high',
'low',
'close',
'volume',
'ex-dividend',
'split_ratio',
],
)
data_table.rename(
columns={
'ticker': 'symbol',
'ex-dividend': 'ex_dividend',
},
inplace=True,
copy=False,
)
return data_table
def fetch_data_table(api_key,
show_progress,
retries):
""" Fetch WIKI Prices data table from Quandl
"""
for _ in range(retries):
try:
if show_progress:
log.info('Downloading WIKI metadata.')
metadata = pd.read_csv(
format_metadata_url(api_key)
)
# Extract link from metadata and download zip file.
table_url = metadata.loc[0, 'file.link']
if show_progress:
raw_file = download_with_progress(
table_url,
chunk_size=ONE_MEGABYTE,
label="Downloading WIKI Prices table from Quandl"
)
else:
raw_file = download_without_progress(table_url)
return load_data_table(
file=raw_file,
index_col=None,
show_progress=show_progress,
)
except Exception:
log.exception("Exception raised reading Quandl data. Retrying.")
else:
raise ValueError(
"Failed to download Quandl data after %d attempts." % (retries)
)
def gen_asset_metadata(data, show_progress):
if show_progress:
log.info('Generating asset metadata.')
data = data.groupby(
by='symbol'
).agg(
{'date': [np.min, np.max]}
)
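    # The groupby/agg above produces MultiIndex columns ('date', 'amin') and
    # ('date', 'amax'); pull them out as per-symbol start/end dates, then
    # flatten the column index back to a single level.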
data.reset_index(inplace=True)
data['start_date'] = data.date.amin
data['end_date'] = data.date.amax
del data['date']
data.columns = data.columns.get_level_values(0)
data['exchange'] = 'QUANDL'
data['auto_close_date'] = data['end_date'].values + pd.Timedelta(days=1)
return data
def parse_splits(data, show_progress):
if show_progress:
log.info('Parsing split data.')
data['split_ratio'] = 1.0 / data.split_ratio
data.rename(
columns={
'split_ratio': 'ratio',
'date': 'effective_date',
},
inplace=True,
copy=False,
)
return data
def parse_dividends(data, show_progress):
if show_progress:
log.info('Parsing dividend data.')
data['record_date'] = data['declared_date'] = data['pay_date'] = pd.NaT
data.rename(
columns={
'ex_dividend': 'amount',
'date': 'ex_date',
},
inplace=True,
copy=False,
)
return data
def parse_pricing_and_vol(data,
sessions,
symbol_map):
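    """Yield ``(sid, frame)`` pairs for the daily bar writer, with each
    asset's rows reindexed onto the full session calendar (missing sessions
    filled with 0.0).
    """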
for asset_id, symbol in iteritems(symbol_map):
asset_data = data.xs(
symbol,
level=1
).reindex(
sessions.tz_localize(None)
).fillna(0.0)
yield asset_id, asset_data
@bundles.register('quandl')
def quandl_bundle(environ,
asset_db_writer,
minute_bar_writer,
daily_bar_writer,
adjustment_writer,
calendar,
start_session,
end_session,
cache,
show_progress,
output_dir):
"""
quandl_bundle builds a daily dataset using Quandl's WIKI Prices dataset.
For more information on Quandl's API and how to obtain an API key,
please visit https://docs.quandl.com/docs#section-authentication
"""
api_key = environ.get('QUANDL_API_KEY')
if api_key is None:
raise ValueError(
"Please set your QUANDL_API_KEY environment variable and retry."
)
raw_data = fetch_data_table(
api_key,
show_progress,
        int(environ.get('QUANDL_DOWNLOAD_ATTEMPTS', 5))
)
asset_metadata = gen_asset_metadata(
raw_data[['symbol', 'date']],
show_progress
)
asset_db_writer.write(asset_metadata)
symbol_map = asset_metadata.symbol
sessions = calendar.sessions_in_range(start_session, end_session)
raw_data.set_index(['date', 'symbol'], inplace=True)
daily_bar_writer.write(
parse_pricing_and_vol(
raw_data,
sessions,
symbol_map
),
show_progress=show_progress
)
raw_data.reset_index(inplace=True)
raw_data['symbol'] = raw_data['symbol'].astype('category')
raw_data['sid'] = raw_data.symbol.cat.codes
adjustment_writer.write(
splits=parse_splits(
raw_data[[
'sid',
'date',
'split_ratio',
]].loc[raw_data.split_ratio != 1],
show_progress=show_progress
),
dividends=parse_dividends(
raw_data[[
'sid',
'date',
'ex_dividend',
]].loc[raw_data.ex_dividend != 0],
show_progress=show_progress
)
)
def download_with_progress(url, chunk_size, **progress_kwargs):
"""
Download streaming data from a URL, printing progress information to the
terminal.
Parameters
----------
url : str
A URL that can be understood by ``requests.get``.
chunk_size : int
Number of bytes to read at a time from requests.
**progress_kwargs
Forwarded to click.progressbar.
Returns
-------
data : BytesIO
A BytesIO containing the downloaded data.
"""
resp = requests.get(url, stream=True)
resp.raise_for_status()
total_size = int(resp.headers['content-length'])
data = BytesIO()
with progressbar(length=total_size, **progress_kwargs) as pbar:
for chunk in resp.iter_content(chunk_size=chunk_size):
data.write(chunk)
pbar.update(len(chunk))
data.seek(0)
return data
def download_without_progress(url):
"""
Download data from a URL, returning a BytesIO containing the loaded data.
Parameters
----------
url : str
A URL that can be understood by ``requests.get``.
Returns
-------
data : BytesIO
A BytesIO containing the downloaded data.
"""
resp = requests.get(url)
resp.raise_for_status()
return BytesIO(resp.content)
QUANTOPIAN_QUANDL_URL = (
'https://s3.amazonaws.com/quantopian-public-zipline-data/quandl'
)
@bundles.register('quantopian-quandl', create_writers=False)
@deprecated(
'quantopian-quandl has been deprecated and '
'will be removed in a future release.'
)
def quantopian_quandl_bundle(environ,
asset_db_writer,
minute_bar_writer,
daily_bar_writer,
adjustment_writer,
calendar,
start_session,
end_session,
cache,
show_progress,
output_dir):
if show_progress:
data = download_with_progress(
QUANTOPIAN_QUANDL_URL,
chunk_size=ONE_MEGABYTE,
label="Downloading Bundle: quantopian-quandl",
)
else:
data = download_without_progress(QUANTOPIAN_QUANDL_URL)
with tarfile.open('r', fileobj=data) as tar:
if show_progress:
log.info("Writing data to %s." % output_dir)
tar.extractall(output_dir)
register_calendar_alias("QUANDL", "NYSE") | zipline-live2-vk | /zipline-live2-vk-1.3.0.7.0.18.tar.gz/zipline-live2-vk-1.3.0.7.0.18/zipline/data/bundles/quandl.py | quandl.py |
from io import BytesIO
from zipfile import ZipFile
from click import progressbar
from logbook import Logger
import pandas as pd
import requests
from six.moves.urllib.parse import urlencode
from six import iteritems
from trading_calendars import register_calendar_alias
from zipline.data.bundles import core as bundles # looking in .zipline/extensions.py
import numpy as np
# Code from:
# Quantopian Zipline Issues:
# "Cannot find data bundle during ingest #2275"
# https://github.com/quantopian/zipline/issues/2275
log = Logger(__name__)
ONE_MEGABYTE = 1024 * 1024
QUANDL_DATA_URL = (
'https://www.quandl.com/api/v3/datatables/SHARADAR/SEP.csv?'
)
@bundles.register('sharadar-prices')
def sharadar_prices_bundle(environ,
asset_db_writer,
minute_bar_writer,
daily_bar_writer,
adjustment_writer,
calendar,
start_session,
end_session,
cache,
show_progress,
output_dir):
api_key = environ.get('QUANDL_API_KEY')
if api_key is None:
raise ValueError(
"Please set your QUANDL_API_KEY environment variable and retry."
)
###ticker2sid_map = {}
raw_data = fetch_data_table(
api_key,
show_progress,
        int(environ.get('QUANDL_DOWNLOAD_ATTEMPTS', 5))
)
asset_metadata = gen_asset_metadata(
raw_data[['symbol', 'date']],
show_progress
)
asset_db_writer.write(asset_metadata)
symbol_map = asset_metadata.symbol
sessions = calendar.sessions_in_range(start_session, end_session)
raw_data.set_index(['date', 'symbol'], inplace=True)
daily_bar_writer.write(
parse_pricing_and_vol(
raw_data,
sessions,
symbol_map
),
show_progress=show_progress
)
raw_data.reset_index(inplace=True)
# raw_data.index = pd.DatetimeIndex(raw_data.date)
###ajjc changes
raw_data['symbol'] = raw_data['symbol'].astype('category')
raw_data['sid'] = raw_data.symbol.cat.codes
# read in Dividend History
# ajjc pharrin----------------------
###uv = raw_data.symbol.unique() # get unique m_tickers (Zacks primary key)
# iterate over all the unique securities and pack data, and metadata
# for writing
# counter of valid securites, this will be our primary key
###sec_counter = 0
###for tkr in uv:
### #df_tkr = raw_data[raw_data['symbol'] == tkr]
### ticker2sid_map[tkr] = sec_counter # record the sid for use later
### sec_counter += 1
### dfd = pd.read_csv(file_name, index_col='date',
### parse_dates=['date'], na_values=['NA'])
# drop rows where dividends == 0.0
raw_data = raw_data[raw_data["dividends"] != 0.0]
raw_data.set_index(['date', 'sid'], inplace=True)
# raw_data.loc[:, 'ex_date'] = raw_data.loc[:, 'record_date'] = raw_data.date
# raw_data.loc[:, 'declared_date'] = raw_data.loc[:, 'pay_date'] = raw_data.date
raw_data.loc[:, 'ex_date'] = raw_data.loc[:, 'record_date'] = raw_data.index.get_level_values('date')
raw_data.loc[:, 'declared_date'] = raw_data.loc[:, 'pay_date'] = raw_data.index.get_level_values('date')
# raw_data.loc[:, 'sid'] = raw_data.loc[:, 'symbol'].apply(lambda x: ticker2sid_map[x])
raw_data = raw_data.rename(columns={'dividends': 'amount'})
# raw_data = raw_data.drop(['open', 'high', 'low', 'close', 'volume','symbol'], axis=1)
raw_data.reset_index(inplace=True)
raw_data = raw_data.drop(['open', 'high', 'low', 'close', 'volume', 'symbol', 'date'], axis=1)
# raw_data = raw_data.drop(['open', 'high', 'low', 'close', 'volume', 'lastupdated', 'ticker', 'closeunadj'], axis=1)
# # format dfd to have sid
adjustment_writer.write(dividends=raw_data)
# ajjc ----------------------------------
def format_metadata_url(api_key):
""" Build the query URL for Quandl Prices metadata.
"""
query_params = [('api_key', api_key), ('qopts.export', 'true')]
return (
QUANDL_DATA_URL + urlencode(query_params)
)
def load_data_table(file,
index_col,
show_progress=False):
""" Load data table from zip file provided by Quandl.
"""
with ZipFile(file) as zip_file:
file_names = zip_file.namelist()
assert len(file_names) == 1, "Expected a single file from Quandl."
        table_filename = file_names.pop()
        with zip_file.open(table_filename) as table_file:
if show_progress:
log.info('Parsing raw data.')
data_table = pd.read_csv(
table_file,
parse_dates=['date'],
index_col=index_col,
usecols=[
'ticker',
'date',
'open',
'high',
'low',
'close',
'volume',
'dividends',
##'closeunadj',
##'lastupdated' #prune last two columns for zipline bundle load
],
)
data_table.rename(
columns={
'ticker': 'symbol'
},
inplace=True,
copy=False,
)
return data_table
def fetch_data_table(api_key,
show_progress,
retries):
for _ in range(retries):
try:
if show_progress:
log.info('Downloading Sharadar Price metadata.')
metadata = pd.read_csv(
format_metadata_url(api_key)
)
# Extract link from metadata and download zip file.
table_url = metadata.loc[0, 'file.link']
if show_progress:
raw_file = download_with_progress(
table_url,
chunk_size=ONE_MEGABYTE,
label="Downloading Prices table from Quandl Sharadar"
)
else:
raw_file = download_without_progress(table_url)
return load_data_table(
file=raw_file,
index_col=None,
show_progress=show_progress,
)
except Exception:
log.exception("Exception raised reading Quandl data. Retrying.")
else:
raise ValueError(
"Failed to download Quandl data after %d attempts." % (retries)
)
def gen_asset_metadata(data, show_progress):
if show_progress:
log.info('Generating asset metadata.')
data = data.groupby(
by='symbol'
).agg(
{'date': [np.min, np.max]}
)
data.reset_index(inplace=True)
data['start_date'] = data.date.amin
data['end_date'] = data.date.amax
del data['date']
data.columns = data.columns.get_level_values(0)
data['exchange'] = 'QUANDL'
data['auto_close_date'] = data['end_date'].values + pd.Timedelta(days=1)
return data
def parse_pricing_and_vol(data,
sessions,
symbol_map):
for asset_id, symbol in iteritems(symbol_map):
asset_data = data.xs(
symbol,
level=1
).reindex(
sessions.tz_localize(None)
).fillna(0.0)
yield asset_id, asset_data
def download_with_progress(url, chunk_size, **progress_kwargs):
"""
Download streaming data from a URL, printing progress information to the
terminal.
Parameters
----------
url : str
A URL that can be understood by ``requests.get``.
chunk_size : int
Number of bytes to read at a time from requests.
**progress_kwargs
Forwarded to click.progressbar.
Returns
-------
data : BytesIO
A BytesIO containing the downloaded data.
"""
resp = requests.get(url, stream=True)
resp.raise_for_status()
total_size = int(resp.headers['content-length'])
data = BytesIO()
with progressbar(length=total_size, **progress_kwargs) as pbar:
for chunk in resp.iter_content(chunk_size=chunk_size):
data.write(chunk)
pbar.update(len(chunk))
data.seek(0)
return data
def download_without_progress(url):
"""
Download data from a URL, returning a BytesIO containing the loaded data.
Parameters
----------
url : str
A URL that can be understood by ``requests.get``.
Returns
-------
data : BytesIO
A BytesIO containing the downloaded data.
"""
resp = requests.get(url)
resp.raise_for_status()
return BytesIO(resp.content)
register_calendar_alias("sharadar-prices", "NYSE") | zipline-live2-vk | /zipline-live2-vk-1.3.0.7.0.18.tar.gz/zipline-live2-vk-1.3.0.7.0.18/zipline/data/bundles/sharadar.py | sharadar.py |
from io import BytesIO
from zipfile import ZipFile
from click import progressbar
from logbook import Logger
import pandas as pd
import requests
from six.moves.urllib.parse import urlencode
from six import iteritems
from trading_calendars import register_calendar_alias
from zipline.data.bundles import core as bundles # looking in .zipline/extensions.py
import numpy as np
# Code from:
# Quantopian Zipline Issues:
# "Cannot find data bundle during ingest #2275"
# https://github.com/quantopian/zipline/issues/2275
log = Logger(__name__)
ONE_MEGABYTE = 1024 * 1024
QUANDL_DATA_URL = (
'https://www.quandl.com/api/v3/datatables/SHARADAR/SFP.csv?'
)
@bundles.register('sharadar-funds')
def sharadar_funds_bundle(environ,
asset_db_writer,
minute_bar_writer,
daily_bar_writer,
adjustment_writer,
calendar,
start_session,
end_session,
cache,
show_progress,
output_dir):
api_key = environ.get('QUANDL_API_KEY')
if api_key is None:
raise ValueError(
"Please set your QUANDL_API_KEY environment variable and retry."
)
###ticker2sid_map = {}
raw_data = fetch_data_table(
api_key,
show_progress,
        int(environ.get('QUANDL_DOWNLOAD_ATTEMPTS', 5))
)
asset_metadata = gen_asset_metadata(
raw_data[['symbol', 'date']],
show_progress
)
asset_db_writer.write(asset_metadata)
symbol_map = asset_metadata.symbol
sessions = calendar.sessions_in_range(start_session, end_session)
raw_data.set_index(['date', 'symbol'], inplace=True)
daily_bar_writer.write(
parse_pricing_and_vol(
raw_data,
sessions,
symbol_map
),
show_progress=show_progress
)
raw_data.reset_index(inplace=True)
# raw_data.index = pd.DatetimeIndex(raw_data.date)
###ajjc changes
raw_data['symbol'] = raw_data['symbol'].astype('category')
raw_data['sid'] = raw_data.symbol.cat.codes
# read in Dividend History
# ajjc pharrin----------------------
###uv = raw_data.symbol.unique() # get unique m_tickers (Zacks primary key)
# iterate over all the unique securities and pack data, and metadata
# for writing
# counter of valid securites, this will be our primary key
###sec_counter = 0
###for tkr in uv:
### #df_tkr = raw_data[raw_data['symbol'] == tkr]
### ticker2sid_map[tkr] = sec_counter # record the sid for use later
### sec_counter += 1
### dfd = pd.read_csv(file_name, index_col='date',
### parse_dates=['date'], na_values=['NA'])
# drop rows where dividends == 0.0
raw_data = raw_data[raw_data["dividends"] != 0.0]
raw_data.set_index(['date', 'sid'], inplace=True)
# raw_data.loc[:, 'ex_date'] = raw_data.loc[:, 'record_date'] = raw_data.date
# raw_data.loc[:, 'declared_date'] = raw_data.loc[:, 'pay_date'] = raw_data.date
raw_data.loc[:, 'ex_date'] = raw_data.loc[:, 'record_date'] = raw_data.index.get_level_values('date')
raw_data.loc[:, 'declared_date'] = raw_data.loc[:, 'pay_date'] = raw_data.index.get_level_values('date')
# raw_data.loc[:, 'sid'] = raw_data.loc[:, 'symbol'].apply(lambda x: ticker2sid_map[x])
raw_data = raw_data.rename(columns={'dividends': 'amount'})
# raw_data = raw_data.drop(['open', 'high', 'low', 'close', 'volume','symbol'], axis=1)
raw_data.reset_index(inplace=True)
raw_data = raw_data.drop(['open', 'high', 'low', 'close', 'volume', 'symbol', 'date'], axis=1)
# raw_data = raw_data.drop(['open', 'high', 'low', 'close', 'volume', 'lastupdated', 'ticker', 'closeunadj'], axis=1)
# # format dfd to have sid
adjustment_writer.write(dividends=raw_data)
# ajjc ----------------------------------
def format_metadata_url(api_key):
""" Build the query URL for Quandl Prices metadata.
"""
query_params = [('api_key', api_key), ('qopts.export', 'true')]
return (
QUANDL_DATA_URL + urlencode(query_params)
)
def load_data_table(file,
index_col,
show_progress=False):
""" Load data table from zip file provided by Quandl.
"""
with ZipFile(file) as zip_file:
file_names = zip_file.namelist()
assert len(file_names) == 1, "Expected a single file from Quandl."
        table_filename = file_names.pop()
        with zip_file.open(table_filename) as table_file:
if show_progress:
log.info('Parsing raw data.')
data_table = pd.read_csv(
table_file,
parse_dates=['date'],
index_col=index_col,
usecols=[
'ticker',
'date',
'open',
'high',
'low',
'close',
'volume',
'dividends',
##'closeunadj',
##'lastupdated' #prune last two columns for zipline bundle load
],
)
data_table.rename(
columns={
'ticker': 'symbol'
},
inplace=True,
copy=False,
)
return data_table
def fetch_data_table(api_key,
show_progress,
retries):
for _ in range(retries):
try:
if show_progress:
log.info('Downloading Sharadar Price metadata.')
metadata = pd.read_csv(
format_metadata_url(api_key)
)
# Extract link from metadata and download zip file.
table_url = metadata.loc[0, 'file.link']
if show_progress:
raw_file = download_with_progress(
table_url,
chunk_size=ONE_MEGABYTE,
label="Downloading Prices table from Quandl Sharadar"
)
else:
raw_file = download_without_progress(table_url)
return load_data_table(
file=raw_file,
index_col=None,
show_progress=show_progress,
)
except Exception:
log.exception("Exception raised reading Quandl data. Retrying.")
else:
raise ValueError(
"Failed to download Quandl data after %d attempts." % (retries)
)
def gen_asset_metadata(data, show_progress):
if show_progress:
log.info('Generating asset metadata.')
data = data.groupby(
by='symbol'
).agg(
{'date': [np.min, np.max]}
)
data.reset_index(inplace=True)
data['start_date'] = data.date.amin
data['end_date'] = data.date.amax
del data['date']
data.columns = data.columns.get_level_values(0)
data['exchange'] = 'QUANDL'
data['auto_close_date'] = data['end_date'].values + pd.Timedelta(days=1)
return data
def parse_pricing_and_vol(data,
sessions,
symbol_map):
for asset_id, symbol in iteritems(symbol_map):
asset_data = data.xs(
symbol,
level=1
).reindex(
sessions.tz_localize(None)
).fillna(0.0)
yield asset_id, asset_data
def download_with_progress(url, chunk_size, **progress_kwargs):
"""
Download streaming data from a URL, printing progress information to the
terminal.
Parameters
----------
url : str
A URL that can be understood by ``requests.get``.
chunk_size : int
Number of bytes to read at a time from requests.
**progress_kwargs
Forwarded to click.progressbar.
Returns
-------
data : BytesIO
A BytesIO containing the downloaded data.
"""
resp = requests.get(url, stream=True)
resp.raise_for_status()
total_size = int(resp.headers['content-length'])
data = BytesIO()
with progressbar(length=total_size, **progress_kwargs) as pbar:
for chunk in resp.iter_content(chunk_size=chunk_size):
data.write(chunk)
pbar.update(len(chunk))
data.seek(0)
return data
def download_without_progress(url):
"""
Download data from a URL, returning a BytesIO containing the loaded data.
Parameters
----------
url : str
A URL that can be understood by ``requests.get``.
Returns
-------
data : BytesIO
A BytesIO containing the downloaded data.
"""
resp = requests.get(url)
resp.raise_for_status()
return BytesIO(resp.content)
register_calendar_alias("sharadar-funds", "NYSE") | zipline-live2-vk | /zipline-live2-vk-1.3.0.7.0.18.tar.gz/zipline-live2-vk-1.3.0.7.0.18/zipline/data/bundles/sharadar_funds.py | sharadar_funds.py |
import sys
from textwrap import dedent
class _Sentinel(object):
"""Base class for Sentinel objects.
"""
__slots__ = ('__weakref__',)
def is_sentinel(obj):
return isinstance(obj, _Sentinel)
def sentinel(name, doc=None):
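    """Create a unique sentinel object, memoized by ``name``.

    Calling ``sentinel`` again with the same name and docstring returns the
    cached instance; reusing a name with a different docstring raises
    ``ValueError``.
    """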
try:
value = sentinel._cache[name] # memoized
except KeyError:
pass
else:
if doc == value.__doc__:
return value
raise ValueError(dedent(
"""\
New sentinel value %r conflicts with an existing sentinel of the
same name.
Old sentinel docstring: %r
New sentinel docstring: %r
The old sentinel was created at: %s
Resolve this conflict by changing the name of one of the sentinels.
""",
) % (name, value.__doc__, doc, value._created_at))
try:
frame = sys._getframe(1)
except ValueError:
frame = None
if frame is None:
created_at = '<unknown>'
else:
created_at = '%s:%s' % (frame.f_code.co_filename, frame.f_lineno)
@object.__new__ # bind a single instance to the name 'Sentinel'
class Sentinel(_Sentinel):
__doc__ = doc
__name__ = name
# store created_at so that we can report this in case of a duplicate
# name violation
_created_at = created_at
def __new__(cls):
raise TypeError('cannot create %r instances' % name)
def __repr__(self):
return 'sentinel(%r)' % name
def __reduce__(self):
return sentinel, (name, doc)
def __deepcopy__(self, _memo):
return self
def __copy__(self):
return self
cls = type(Sentinel)
try:
cls.__module__ = frame.f_globals['__name__']
except (AttributeError, KeyError):
# Couldn't get the name from the calling scope, just use None.
# AttributeError is when frame is None, KeyError is when f_globals
# doesn't hold '__name__'
cls.__module__ = None
sentinel._cache[name] = Sentinel # cache result
return Sentinel
sentinel._cache = {}


# === end of zipline/utils/sentinel.py ===
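
# Illustrative usage (not part of the original module):
#
# NotSpecified = sentinel('NotSpecified', 'Sentinel for omitted arguments.')
# NotSpecified is sentinel('NotSpecified', 'Sentinel for omitted arguments.')
#                                      -> True (memoized by name)
# is_sentinel(NotSpecified)            -> True
# copy.deepcopy(NotSpecified) is NotSpecified  -> True
# type(NotSpecified)()                 -> TypeError (new instances forbidden)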
import datetime
from copy import deepcopy
import numpy as np
import pandas as pd
def _ensure_index(x):
if not isinstance(x, pd.Index):
x = pd.Index(sorted(x))
return x
class RollingPanel(object):
"""
Preallocation strategies for rolling window over expanding data set
Restrictions: major_axis can only be a DatetimeIndex for now
"""
def __init__(self,
window,
items,
sids,
cap_multiple=2,
dtype=np.float64,
initial_dates=None):
self._pos = window
self._window = window
self.items = _ensure_index(items)
self.minor_axis = _ensure_index(sids)
self.cap_multiple = cap_multiple
self.dtype = dtype
if initial_dates is None:
self.date_buf = np.empty(self.cap, dtype='M8[ns]') * pd.NaT
elif len(initial_dates) != window:
raise ValueError('initial_dates must be of length window')
else:
self.date_buf = np.hstack(
(
initial_dates,
np.empty(
window * (cap_multiple - 1),
dtype='datetime64[ns]',
),
),
)
self.buffer = self._create_buffer()
@property
def cap(self):
return self.cap_multiple * self._window
@property
def _start_index(self):
return self._pos - self._window
@property
def start_date(self):
return self.date_buf[self._start_index]
def oldest_frame(self, raw=False):
"""
Get the oldest frame in the panel.
"""
if raw:
return self.buffer.values[:, self._start_index, :]
return self.buffer.iloc[:, self._start_index, :]
def set_minor_axis(self, minor_axis):
self.minor_axis = _ensure_index(minor_axis)
self.buffer = self.buffer.reindex(minor_axis=self.minor_axis)
def set_items(self, items):
self.items = _ensure_index(items)
self.buffer = self.buffer.reindex(items=self.items)
def _create_buffer(self):
panel = pd.Panel(
items=self.items,
minor_axis=self.minor_axis,
major_axis=range(self.cap),
dtype=self.dtype,
)
return panel
def extend_back(self, missing_dts):
"""
Resizes the buffer to hold a new window with a new cap_multiple.
If cap_multiple is None, then the old cap_multiple is used.
"""
delta = len(missing_dts)
if not delta:
raise ValueError(
'missing_dts must be a non-empty index',
)
self._window += delta
self._pos += delta
self.date_buf = self.date_buf.copy()
self.date_buf.resize(self.cap)
self.date_buf = np.roll(self.date_buf, delta)
old_vals = self.buffer.values
shape = old_vals.shape
nan_arr = np.empty((shape[0], delta, shape[2]))
nan_arr.fill(np.nan)
new_vals = np.column_stack(
(nan_arr,
old_vals,
np.empty((shape[0], delta * (self.cap_multiple - 1), shape[2]))),
)
self.buffer = pd.Panel(
data=new_vals,
items=self.items,
minor_axis=self.minor_axis,
major_axis=np.arange(self.cap),
dtype=self.dtype,
)
# Fill the delta with the dates we calculated.
where = slice(self._start_index, self._start_index + delta)
self.date_buf[where] = missing_dts
def add_frame(self, tick, frame, minor_axis=None, items=None):
"""
"""
if self._pos == self.cap:
self._roll_data()
values = frame
if isinstance(frame, pd.DataFrame):
values = frame.values
self.buffer.values[:, self._pos, :] = values.astype(self.dtype)
self.date_buf[self._pos] = tick
self._pos += 1
def get_current(self, item=None, raw=False, start=None, end=None):
"""
Get a Panel that is the current data in view. It is not safe to persist
        these objects because internal data might change.
"""
item_indexer = slice(None)
if item:
item_indexer = self.items.get_loc(item)
start_index = self._start_index
end_index = self._pos
        # get initial date window
where = slice(start_index, end_index)
current_dates = self.date_buf[where]
def convert_datelike_to_long(dt):
if isinstance(dt, pd.Timestamp):
return dt.asm8
if isinstance(dt, datetime.datetime):
return np.datetime64(dt)
return dt
# constrict further by date
if start:
start = convert_datelike_to_long(start)
start_index += current_dates.searchsorted(start)
if end:
end = convert_datelike_to_long(end)
_end = current_dates.searchsorted(end, 'right')
end_index -= len(current_dates) - _end
where = slice(start_index, end_index)
values = self.buffer.values[item_indexer, where, :]
current_dates = self.date_buf[where]
if raw:
# return copy so we can change it without side effects here
return values.copy()
major_axis = pd.DatetimeIndex(deepcopy(current_dates), tz='utc')
if values.ndim == 3:
return pd.Panel(values, self.items, major_axis, self.minor_axis,
dtype=self.dtype)
elif values.ndim == 2:
return pd.DataFrame(values, major_axis, self.minor_axis,
dtype=self.dtype)
def set_current(self, panel):
"""
Set the values stored in our current in-view data to be values of the
passed panel. The passed panel must have the same indices as the panel
that would be returned by self.get_current.
"""
where = slice(self._start_index, self._pos)
self.buffer.values[:, where, :] = panel.values
def current_dates(self):
where = slice(self._start_index, self._pos)
return pd.DatetimeIndex(deepcopy(self.date_buf[where]), tz='utc')
def _roll_data(self):
"""
Roll window worth of data up to position zero.
Save the effort of having to expensively roll at each iteration
"""
self.buffer.values[:, :self._window, :] = \
self.buffer.values[:, -self._window:, :]
self.date_buf[:self._window] = self.date_buf[-self._window:]
self._pos = self._window
@property
def window_length(self):
return self._window
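
# Illustrative RollingPanel usage (not part of the original module; requires a
# pandas version that still ships ``pd.Panel``, i.e. older than 0.25):
#
# rp = RollingPanel(window=2, items=['price'], sids=[1, 2])
# rp.add_frame(pd.Timestamp('2016-01-04', tz='utc'),
#              pd.DataFrame([[10.0, 20.0]], index=['price'], columns=[1, 2]))
# rp.add_frame(pd.Timestamp('2016-01-05', tz='utc'),
#              pd.DataFrame([[11.0, 21.0]], index=['price'], columns=[1, 2]))
# panel = rp.get_current()  # items x major_axis (dates) x minor_axis (sids)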
class MutableIndexRollingPanel(object):
"""
A version of RollingPanel that exists for backwards compatibility with
batch_transform. This is a copy to allow behavior of RollingPanel to drift
away from this without breaking this class.
This code should be considered frozen, and should not be used in the
future. Instead, see RollingPanel.
"""
def __init__(self, window, items, sids, cap_multiple=2, dtype=np.float64):
self._pos = 0
self._window = window
self.items = _ensure_index(items)
self.minor_axis = _ensure_index(sids)
self.cap_multiple = cap_multiple
self.cap = cap_multiple * window
self.dtype = dtype
self.date_buf = np.empty(self.cap, dtype='M8[ns]')
self.buffer = self._create_buffer()
def _oldest_frame_idx(self):
return max(self._pos - self._window, 0)
def oldest_frame(self, raw=False):
"""
Get the oldest frame in the panel.
"""
if raw:
return self.buffer.values[:, self._oldest_frame_idx(), :]
return self.buffer.iloc[:, self._oldest_frame_idx(), :]
def set_sids(self, sids):
self.minor_axis = _ensure_index(sids)
self.buffer = self.buffer.reindex(minor_axis=self.minor_axis)
def _create_buffer(self):
panel = pd.Panel(
items=self.items,
minor_axis=self.minor_axis,
major_axis=range(self.cap),
dtype=self.dtype,
)
return panel
def get_current(self):
"""
Get a Panel that is the current data in view. It is not safe to persist
        these objects because internal data might change.
"""
where = slice(self._oldest_frame_idx(), self._pos)
major_axis = pd.DatetimeIndex(deepcopy(self.date_buf[where]), tz='utc')
return pd.Panel(self.buffer.values[:, where, :], self.items,
major_axis, self.minor_axis, dtype=self.dtype)
def set_current(self, panel):
"""
Set the values stored in our current in-view data to be values of the
passed panel. The passed panel must have the same indices as the panel
that would be returned by self.get_current.
"""
where = slice(self._oldest_frame_idx(), self._pos)
self.buffer.values[:, where, :] = panel.values
def current_dates(self):
where = slice(self._oldest_frame_idx(), self._pos)
return pd.DatetimeIndex(deepcopy(self.date_buf[where]), tz='utc')
def _roll_data(self):
"""
Roll window worth of data up to position zero.
Save the effort of having to expensively roll at each iteration
"""
self.buffer.values[:, :self._window, :] = \
self.buffer.values[:, -self._window:, :]
self.date_buf[:self._window] = self.date_buf[-self._window:]
self._pos = self._window
def add_frame(self, tick, frame, minor_axis=None, items=None):
"""
"""
if self._pos == self.cap:
self._roll_data()
if isinstance(frame, pd.DataFrame):
minor_axis = frame.columns
items = frame.index
if set(minor_axis).difference(set(self.minor_axis)) or \
set(items).difference(set(self.items)):
self._update_buffer(frame)
vals = frame.T.astype(self.dtype)
self.buffer.loc[:, self._pos, :] = vals
self.date_buf[self._pos] = tick
self._pos += 1
def _update_buffer(self, frame):
# Get current frame as we only need to care about the data that is in
# the active window
old_buffer = self.get_current()
if self._pos >= self._window:
# Don't count the last major_axis entry if we're past our window,
# since it's about to roll off the end of the panel.
old_buffer = old_buffer.iloc[:, 1:, :]
nans = pd.isnull(old_buffer)
# Find minor_axes that have only nans
# Note that minor is axis 2
non_nan_cols = set(old_buffer.minor_axis[~np.all(nans, axis=(0, 1))])
# Determine new columns to be added
new_cols = set(frame.columns).difference(non_nan_cols)
# Update internal minor axis
self.minor_axis = _ensure_index(new_cols.union(non_nan_cols))
# Same for items (fields)
# Find items axes that have only nans
# Note that items is axis 0
non_nan_items = set(old_buffer.items[~np.all(nans, axis=(1, 2))])
new_items = set(frame.index).difference(non_nan_items)
self.items = _ensure_index(new_items.union(non_nan_items))
# :NOTE:
# There is a simpler and 10x faster way to do this:
#
# Reindex buffer to update axes (automatically adds nans)
# self.buffer = self.buffer.reindex(items=self.items,
# major_axis=np.arange(self.cap),
# minor_axis=self.minor_axis)
#
# However, pandas==0.12.0, for which we remain backwards compatible,
# has a bug in .reindex() that this triggers. Using .update() as before
# seems to work fine.
new_buffer = self._create_buffer()
new_buffer.update(
self.buffer.loc[non_nan_items, :, non_nan_cols])
        self.buffer = new_buffer


# === end of zipline/utils/data.py ===
from functools import reduce
from pprint import pformat
from six import viewkeys, iteritems
from six.moves import map, zip
from toolz import curry, flip
from .sentinel import sentinel
@curry
def apply(f, *args, **kwargs):
"""Apply a function to arguments.
Parameters
----------
f : callable
The function to call.
    *args, **kwargs
        Arguments to feed to the callable.
Returns
-------
a : any
The result of ``f(*args, **kwargs)``
Examples
--------
>>> from toolz.curried.operator import add, sub
>>> fs = add(1), sub(1)
>>> tuple(map(apply, fs, (1, 2)))
(2, -1)
Class decorator
>>> instance = apply
>>> @instance
... class obj:
... def f(self):
... return 'f'
...
>>> obj.f()
'f'
>>> issubclass(obj, object)
Traceback (most recent call last):
...
TypeError: issubclass() arg 1 must be a class
>>> isinstance(obj, type)
False
See Also
--------
unpack_apply
mapply
"""
return f(*args, **kwargs)
# Alias for use as a class decorator.
instance = apply
def mapall(funcs, seq):
"""
Parameters
----------
funcs : iterable[function]
Sequence of functions to map over `seq`.
seq : iterable
Sequence over which to map funcs.
Yields
------
elem : object
Concatenated result of mapping each ``func`` over ``seq``.
Examples
--------
>>> list(mapall([lambda x: x + 1, lambda x: x - 1], [1, 2, 3]))
[2, 3, 4, 0, 1, 2]
"""
for func in funcs:
for elem in seq:
yield func(elem)
def same(*values):
"""
Check if all values in a sequence are equal.
Returns True on empty sequences.
Examples
--------
>>> same(1, 1, 1, 1)
True
>>> same(1, 2, 1)
False
>>> same()
True
"""
if not values:
return True
first, rest = values[0], values[1:]
return all(value == first for value in rest)
def _format_unequal_keys(dicts):
return pformat([sorted(d.keys()) for d in dicts])
def dzip_exact(*dicts):
"""
Parameters
----------
*dicts : iterable[dict]
A sequence of dicts all sharing the same keys.
Returns
-------
zipped : dict
A dict whose keys are the union of all keys in *dicts, and whose values
are tuples of length len(dicts) containing the result of looking up
each key in each dict.
Raises
------
ValueError
If dicts don't all have the same keys.
Examples
--------
>>> result = dzip_exact({'a': 1, 'b': 2}, {'a': 3, 'b': 4})
>>> result == {'a': (1, 3), 'b': (2, 4)}
True
"""
if not same(*map(viewkeys, dicts)):
raise ValueError(
"dict keys not all equal:\n\n%s" % _format_unequal_keys(dicts)
)
return {k: tuple(d[k] for d in dicts) for k in dicts[0]}
def _gen_unzip(it, elem_len):
"""Helper for unzip which checks the lengths of each element in it.
Parameters
----------
it : iterable[tuple]
        An iterable of tuples. ``unzip`` maps ``tuple`` over ``seq`` to
        ensure that these are already tuples.
elem_len : int or None
        The expected element length. If this is None it is inferred from the
length of the first element.
Yields
------
elem : tuple
Each element of ``it``.
Raises
------
ValueError
Raised when the lengths do not match the ``elem_len``.
"""
elem = next(it)
first_elem_len = len(elem)
if elem_len is not None and elem_len != first_elem_len:
raise ValueError(
'element at index 0 was length %d, expected %d' % (
first_elem_len,
elem_len,
)
)
else:
elem_len = first_elem_len
yield elem
for n, elem in enumerate(it, 1):
if len(elem) != elem_len:
raise ValueError(
'element at index %d was length %d, expected %d' % (
n,
len(elem),
elem_len,
),
)
yield elem
def unzip(seq, elem_len=None):
"""Unzip a length n sequence of length m sequences into m seperate length
n sequences.
Parameters
----------
seq : iterable[iterable]
The sequence to unzip.
elem_len : int, optional
The expected length of each element of ``seq``. If not provided this
        will be inferred from the length of the first element of ``seq``. This
can be used to ensure that code like: ``a, b = unzip(seq)`` does not
fail even when ``seq`` is empty.
Returns
-------
seqs : iterable[iterable]
The new sequences pulled out of the first iterable.
Raises
------
ValueError
Raised when ``seq`` is empty and ``elem_len`` is not provided.
Raised when elements of ``seq`` do not match the given ``elem_len`` or
the length of the first element of ``seq``.
Examples
--------
>>> seq = [('a', 1), ('b', 2), ('c', 3)]
>>> cs, ns = unzip(seq)
>>> cs
('a', 'b', 'c')
>>> ns
(1, 2, 3)
# checks that the elements are the same length
>>> seq = [('a', 1), ('b', 2), ('c', 3, 'extra')]
>>> cs, ns = unzip(seq)
Traceback (most recent call last):
...
ValueError: element at index 2 was length 3, expected 2
    # allows an explicit element length instead of inferring
>>> seq = [('a', 1, 'extra'), ('b', 2), ('c', 3)]
>>> cs, ns = unzip(seq, 2)
Traceback (most recent call last):
...
ValueError: element at index 0 was length 3, expected 2
# handles empty sequences when a length is given
>>> cs, ns = unzip([], elem_len=2)
>>> cs == ns == ()
True
Notes
-----
This function will force ``seq`` to completion.
"""
ret = tuple(zip(*_gen_unzip(map(tuple, seq), elem_len)))
if ret:
return ret
if elem_len is None:
raise ValueError("cannot unzip empty sequence without 'elem_len'")
return ((),) * elem_len
_no_default = sentinel('_no_default')
def getattrs(value, attrs, default=_no_default):
"""
Perform a chained application of ``getattr`` on ``value`` with the values
in ``attrs``.
If ``default`` is supplied, return it if any of the attribute lookups fail.
Parameters
----------
value : object
Root of the lookup chain.
attrs : iterable[str]
Sequence of attributes to look up.
default : object, optional
Value to return if any of the lookups fail.
Returns
-------
result : object
Result of the lookup sequence.
Examples
--------
>>> class EmptyObject(object):
... pass
...
>>> obj = EmptyObject()
>>> obj.foo = EmptyObject()
>>> obj.foo.bar = "value"
>>> getattrs(obj, ('foo', 'bar'))
'value'
>>> getattrs(obj, ('foo', 'buzz'))
Traceback (most recent call last):
...
AttributeError: 'EmptyObject' object has no attribute 'buzz'
>>> getattrs(obj, ('foo', 'buzz'), 'default')
'default'
"""
try:
for attr in attrs:
value = getattr(value, attr)
except AttributeError:
if default is _no_default:
raise
value = default
return value
@curry
def set_attribute(name, value):
"""
Decorator factory for setting attributes on a function.
Doesn't change the behavior of the wrapped function.
Examples
--------
>>> @set_attribute('__name__', 'foo')
... def bar():
... return 3
...
>>> bar()
3
>>> bar.__name__
'foo'
"""
def decorator(f):
setattr(f, name, value)
return f
return decorator
# Decorators for setting the __name__ and __doc__ properties of a decorated
# function.
# Example:
with_name = set_attribute('__name__')
with_doc = set_attribute('__doc__')
def foldr(f, seq, default=_no_default):
"""Fold a function over a sequence with right associativity.
Parameters
----------
f : callable[any, any]
The function to reduce the sequence with.
The first argument will be the element of the sequence; the second
argument will be the accumulator.
seq : iterable[any]
The sequence to reduce.
default : any, optional
The starting value to reduce with. If not provided, the sequence
cannot be empty, and the last value of the sequence will be used.
Returns
-------
folded : any
The folded value.
Notes
-----
This functions works by reducing the list in a right associative way.
For example, imagine we are folding with ``operator.add`` or ``+``:
.. code-block:: python
foldr(add, seq) -> seq[0] + (seq[1] + (seq[2] + (...seq[-1], default)))
In the more general case with an arbitrary function, ``foldr`` will expand
like so:
.. code-block:: python
foldr(f, seq) -> f(seq[0], f(seq[1], f(seq[2], ...f(seq[-1], default))))
For a more in depth discussion of left and right folds, see:
`https://en.wikipedia.org/wiki/Fold_(higher-order_function)`_
The images in that page are very good for showing the differences between
``foldr`` and ``foldl`` (``reduce``).
.. note::
        For performance reasons it is best to pass a strict (non-lazy)
        sequence, for example, a list.
for example, a list.
See Also
--------
:func:`functools.reduce`
:func:`sum`
"""
return reduce(
flip(f),
reversed(seq),
*(default,) if default is not _no_default else ()
)
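# Worked example: foldr(operator.sub, [1, 2, 3], 0) evaluates
# 1 - (2 - (3 - 0)) == 2, whereas the left fold
# functools.reduce(operator.sub, [1, 2, 3], 0) evaluates
# ((0 - 1) - 2) - 3 == -6.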
def invert(d):
"""
Invert a dictionary into a dictionary of sets.
>>> invert({'a': 1, 'b': 2, 'c': 1}) # doctest: +SKIP
{1: {'a', 'c'}, 2: {'b'}}
"""
out = {}
for k, v in iteritems(d):
try:
out[v].add(k)
except KeyError:
out[v] = {k}
    return out


# === end of zipline/utils/functional.py ===
from six.moves import map as imap
from toolz import compose, identity
class ApplyAsyncResult(object):
"""An object that boxes results for calls to
:meth:`~zipline.utils.pool.SequentialPool.apply_async`.
Parameters
----------
value : any
The result of calling the function, or any exception that was raised.
successful : bool
If ``True``, ``value`` is the return value of the function.
If ``False``, ``value`` is the exception that was raised when calling
the functions.
"""
def __init__(self, value, successful):
self._value = value
self._successful = successful
def successful(self):
"""Did the function execute without raising an exception?
"""
return self._successful
def get(self):
"""Return the result of calling the function or reraise any exceptions
that were raised.
"""
if not self._successful:
raise self._value
return self._value
def ready(self):
"""Has the function finished executing.
Notes
-----
In the :class:`~zipline.utils.pool.SequentialPool` case, this is always
``True``.
"""
return True
def wait(self):
"""Wait until the function is finished executing.
Notes
-----
In the :class:`~zipline.utils.pool.SequentialPool` case, this is a nop
because the function is computed eagerly in the same thread as the
call to :meth:`~zipline.utils.pool.SequentialPool.apply_async`.
"""
pass
class SequentialPool(object):
"""A dummy pool object that iterates sequentially in a single thread.
Methods
-------
map(f: callable[A, B], iterable: iterable[A]) -> list[B]
Apply a function to each of the elements of ``iterable``.
imap(f: callable[A, B], iterable: iterable[A]) -> iterable[B]
Lazily apply a function to each of the elements of ``iterable``.
imap_unordered(f: callable[A, B], iterable: iterable[A]) -> iterable[B]
Lazily apply a function to each of the elements of ``iterable`` but
yield values as they become available. The resulting iterable is
unordered.
Notes
-----
This object is useful for testing to mock out the ``Pool`` interface
provided by gevent or multiprocessing.
See Also
--------
:class:`multiprocessing.Pool`
"""
map = staticmethod(compose(list, imap))
imap = imap_unordered = staticmethod(imap)
@staticmethod
def apply_async(f, args=(), kwargs=None, callback=None):
"""Apply a function but emulate the API of an asynchronous call.
Parameters
----------
f : callable
The function to call.
args : tuple, optional
The positional arguments.
kwargs : dict, optional
            The keyword arguments.
        callback : callable, optional
            If provided, called with the result of ``f``; the boxed value
            will be whatever this call returns.
Returns
-------
future : ApplyAsyncResult
The result of calling the function boxed in a future-like api.
Notes
-----
This calls the function eagerly but wraps it so that ``SequentialPool``
can be used where a :class:`multiprocessing.Pool` or
:class:`gevent.pool.Pool` would be used.
"""
try:
value = (identity if callback is None else callback)(
f(*args, **kwargs or {}),
)
successful = True
except Exception as e:
value = e
successful = False
return ApplyAsyncResult(value, successful)
@staticmethod
def apply(f, args=(), kwargs=None):
"""Apply a function.
Parameters
----------
f : callable
The function to call.
args : tuple, optional
The positional arguments.
kwargs : dict, optional
The keyword arguments.
Returns
-------
result : any
f(*args, **kwargs)
"""
return f(*args, **kwargs or {})
@staticmethod
def close():
pass
@staticmethod
def join():
        pass
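
# Illustrative usage sketch (editor's addition, not part of the original
# module): SequentialPool can stand in for a multiprocessing.Pool in tests
# that don't need real concurrency.
def _demo_sequential_pool():  # pragma: no cover
    pool = SequentialPool()
    assert pool.map(lambda x: x * 2, [1, 2, 3]) == [2, 4, 6]
    ok = pool.apply_async(divmod, (7, 3))
    assert ok.ready() and ok.successful() and ok.get() == (2, 1)
    # Exceptions are boxed rather than raised at call time.
    failed = pool.apply_async(divmod, (1, 0))
    assert not failed.successful()
# end of zipline/utils/pool.py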
import re
from six import iteritems
from textwrap import dedent
from toolz import curry
PIPELINE_DOWNSAMPLING_FREQUENCY_DOC = dedent(
"""\
frequency : {'year_start', 'quarter_start', 'month_start', 'week_start'}
A string indicating desired sampling dates:
* 'year_start' -> first trading day of each year
* 'quarter_start' -> first trading day of January, April, July, October
* 'month_start' -> first trading day of each month
    * 'week_start' -> first trading day of each week
"""
)
PIPELINE_ALIAS_NAME_DOC = dedent(
"""\
name : str
The name to alias this term as.
""",
)
def pad_lines_after_first(prefix, s):
"""Apply a prefix to each line in s after the first."""
return ('\n' + prefix).join(s.splitlines())
def format_docstring(owner_name, docstring, formatters):
"""
Template ``formatters`` into ``docstring``.
Parameters
----------
owner_name : str
The name of the function or class whose docstring is being templated.
Only used for error messages.
docstring : str
The docstring to template.
formatters : dict[str -> str]
        Parameters for a str.format() call on ``docstring``.
Multi-line values in ``formatters`` will have leading whitespace padded
to match the leading whitespace of the substitution string.
"""
# Build a dict of parameters to a vanilla format() call by searching for
# each entry in **formatters and applying any leading whitespace to each
# line in the desired substitution.
format_params = {}
for target, doc_for_target in iteritems(formatters):
# Search for '{name}', with optional leading whitespace.
        regex = re.compile(r'^(\s*)' + '({' + target + '})$', re.MULTILINE)
matches = regex.findall(docstring)
if not matches:
raise ValueError(
"Couldn't find template for parameter {!r} in docstring "
"for {}."
"\nParameter name must be alone on a line surrounded by "
"braces.".format(target, owner_name),
)
elif len(matches) > 1:
raise ValueError(
"Couldn't found multiple templates for parameter {!r}"
"in docstring for {}."
"\nParameter should only appear once.".format(
target, owner_name
)
)
(leading_whitespace, _) = matches[0]
format_params[target] = pad_lines_after_first(
leading_whitespace,
doc_for_target,
)
return docstring.format(**format_params)
def templated_docstring(**docs):
"""
Decorator allowing the use of templated docstrings.
Examples
--------
>>> @templated_docstring(foo='bar')
... def my_func(self, foo):
... '''{foo}'''
...
>>> my_func.__doc__
'bar'
"""
def decorator(f):
f.__doc__ = format_docstring(f.__name__, f.__doc__, docs)
return f
return decorator
@curry
def copydoc(from_, to):
"""Copies the docstring from one function to another.
Parameters
----------
from_ : any
The object to copy the docstring from.
to : any
The object to copy the docstring to.
Returns
-------
to : any
``to`` with the docstring from ``from_``
"""
to.__doc__ = from_.__doc__
    return to
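
# Illustrative usage sketch (editor's addition, not part of the original
# module): because ``copydoc`` is curried, partially applying it yields a
# decorator.
def _demo_copydoc():  # pragma: no cover
    def original():
        """Shared docstring."""
    @copydoc(original)
    def clone():
        pass
    assert clone.__doc__ == original.__doc__
# end of zipline/utils/sharedoc.py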
from abc import ABCMeta, abstractmethod
from six import with_metaclass, iteritems
# Consistent error to be thrown in various cases regarding overriding
# `final` attributes.
_type_error = TypeError('Cannot override final attribute')
def bases_mro(bases):
"""
Yield classes in the order that methods should be looked up from the
base classes of an object.
"""
for base in bases:
for class_ in base.__mro__:
yield class_
def is_final(name, mro):
"""
Checks if `name` is a `final` object in the given `mro`.
We need to check the mro because we need to directly go into the __dict__
    of the classes. Because `final` objects are descriptors, we need to grab
them _BEFORE_ the `__call__` is invoked.
"""
return any(isinstance(getattr(c, '__dict__', {}).get(name), final)
for c in bases_mro(mro))
class FinalMeta(type):
"""A metaclass template for classes the want to prevent subclassess from
overriding a some methods or attributes.
"""
def __new__(mcls, name, bases, dict_):
for k, v in iteritems(dict_):
if is_final(k, bases):
raise _type_error
setattr_ = dict_.get('__setattr__')
if setattr_ is None:
# No `__setattr__` was explicitly defined, look up the super
# class's. `bases[0]` will have a `__setattr__` because
# `object` does so we don't need to worry about the mro.
setattr_ = bases[0].__setattr__
if not is_final('__setattr__', bases) \
and not isinstance(setattr_, final):
# implicitly make the `__setattr__` a `final` object so that
# users cannot just avoid the descriptor protocol.
dict_['__setattr__'] = final(setattr_)
return super(FinalMeta, mcls).__new__(mcls, name, bases, dict_)
def __setattr__(self, name, value):
"""This stops the `final` attributes from being reassigned on the
class object.
"""
if is_final(name, self.__mro__):
raise _type_error
super(FinalMeta, self).__setattr__(name, value)
class final(with_metaclass(ABCMeta)):
"""
An attribute that cannot be overridden.
This is like the final modifier in Java.
Example usage:
>>> from six import with_metaclass
>>> class C(with_metaclass(FinalMeta, object)):
... @final
... def f(self):
... return 'value'
...
This constructs a class with final method `f`. This cannot be overridden
on the class object or on any instance. You cannot override this by
subclassing `C`; attempting to do so will raise a `TypeError` at class
construction time.
"""
def __new__(cls, attr):
# Decide if this is a method wrapper or an attribute wrapper.
        # We are going to cache the descriptor check by creating a
# method or attribute wrapper.
if hasattr(attr, '__get__'):
return object.__new__(finaldescriptor)
else:
return object.__new__(finalvalue)
def __init__(self, attr):
self._attr = attr
def __set__(self, instance, value):
"""
        `final` objects cannot be reassigned. This is the most important concept
about `final`s.
Unlike a `property` object, this will raise a `TypeError` when you
attempt to reassign it.
"""
raise _type_error
@abstractmethod
def __get__(self, instance, owner):
raise NotImplementedError('__get__')
class finalvalue(final):
"""
A wrapper for a non-descriptor attribute.
"""
def __get__(self, instance, owner):
return self._attr
class finaldescriptor(final):
"""
A final wrapper around a descriptor.
"""
def __get__(self, instance, owner):
        return self._attr.__get__(instance, owner)
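
# Illustrative usage sketch (editor's addition, not part of the original
# module): overriding a ``final`` attribute fails at class-creation time.
def _demo_final():  # pragma: no cover
    from six import with_metaclass
    class Base(with_metaclass(FinalMeta, object)):
        @final
        def f(self):
            return 'base'
    try:
        class Child(Base):
            def f(self):
                return 'child'
    except TypeError:
        pass  # expected: 'Cannot override final attribute'
    else:
        raise AssertionError('expected a TypeError')
# end of zipline/utils/final.py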
import warnings
from datetime import datetime
from os import listdir
import os.path
import pandas as pd
import pytz
import zipline
from zipline.errors import SymbolNotFound
from zipline.finance.asset_restrictions import SecurityListRestrictions
from zipline.zipline_warnings import ZiplineDeprecationWarning
DATE_FORMAT = "%Y%m%d"
zipline_dir = os.path.dirname(zipline.__file__)
SECURITY_LISTS_DIR = os.path.join(zipline_dir, 'resources', 'security_lists')
class SecurityList(object):
def __init__(self, data, current_date_func, asset_finder):
"""
data: a nested dictionary:
            knowledge_date -> lookup_date ->
                {'add': [symbol list], 'delete': [symbol list]}
current_date_func: function taking no parameters, returning
current datetime
"""
self.data = data
self._cache = {}
self._knowledge_dates = self.make_knowledge_dates(self.data)
self.current_date = current_date_func
self.count = 0
self._current_set = set()
self.asset_finder = asset_finder
def make_knowledge_dates(self, data):
knowledge_dates = sorted(
[pd.Timestamp(k) for k in data.keys()])
return knowledge_dates
def __iter__(self):
warnings.warn(
'Iterating over security_lists is deprecated. Use '
'`for sid in <security_list>.current_securities(dt)` instead.',
category=ZiplineDeprecationWarning,
stacklevel=2
)
return iter(self.current_securities(self.current_date()))
def __contains__(self, item):
warnings.warn(
'Evaluating inclusion in security_lists is deprecated. Use '
'`sid in <security_list>.current_securities(dt)` instead.',
category=ZiplineDeprecationWarning,
stacklevel=2
)
return item in self.current_securities(self.current_date())
def current_securities(self, dt):
for kd in self._knowledge_dates:
if dt < kd:
break
if kd in self._cache:
self._current_set = self._cache[kd]
continue
for effective_date, changes in iter(self.data[kd].items()):
self.update_current(
effective_date,
changes['add'],
self._current_set.add
)
self.update_current(
effective_date,
changes['delete'],
self._current_set.remove
)
self._cache[kd] = self._current_set
return self._current_set
def update_current(self, effective_date, symbols, change_func):
for symbol in symbols:
try:
asset = self.asset_finder.lookup_symbol(
symbol,
as_of_date=effective_date
)
# Pass if no Asset exists for the symbol
except SymbolNotFound:
continue
change_func(asset.sid)
class SecurityListSet(object):
# provide a cut point to substitute other security
# list implementations.
security_list_type = SecurityList
def __init__(self, current_date_func, asset_finder):
self.current_date_func = current_date_func
self.asset_finder = asset_finder
self._leveraged_etf = None
@property
def leveraged_etf_list(self):
if self._leveraged_etf is None:
self._leveraged_etf = self.security_list_type(
load_from_directory('leveraged_etf_list'),
self.current_date_func,
asset_finder=self.asset_finder
)
return self._leveraged_etf
@property
def restrict_leveraged_etfs(self):
return SecurityListRestrictions(self.leveraged_etf_list)
def load_from_directory(list_name):
"""
To resolve the symbol in the LEVERAGED_ETF list,
the date on which the symbol was in effect is needed.
Furthermore, to maintain a point in time record of our own maintenance
of the restricted list, we need a knowledge date. Thus, restricted lists
are dictionaries of datetime->symbol lists.
    New symbols should be entered as a new knowledge date entry.
This method assumes a directory structure of:
SECURITY_LISTS_DIR/listname/knowledge_date/lookup_date/add.txt
SECURITY_LISTS_DIR/listname/knowledge_date/lookup_date/delete.txt
The return value is a dictionary with:
knowledge_date -> lookup_date ->
        {'add': [symbol list], 'delete': [symbol list]}
"""
data = {}
dir_path = os.path.join(SECURITY_LISTS_DIR, list_name)
for kd_name in listdir(dir_path):
kd = datetime.strptime(kd_name, DATE_FORMAT).replace(
tzinfo=pytz.utc)
data[kd] = {}
kd_path = os.path.join(dir_path, kd_name)
for ld_name in listdir(kd_path):
ld = datetime.strptime(ld_name, DATE_FORMAT).replace(
tzinfo=pytz.utc)
data[kd][ld] = {}
ld_path = os.path.join(kd_path, ld_name)
for fname in listdir(ld_path):
fpath = os.path.join(ld_path, fname)
with open(fpath) as f:
symbols = f.read().splitlines()
data[kd][ld][fname] = symbols
    return data
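
# Illustrative usage sketch (editor's addition, not part of the original
# module). The stub asset finder below is hypothetical; it maps every
# symbol to sid 1 so the knowledge-date bookkeeping can be exercised
# without a real asset database.
def _demo_security_list():  # pragma: no cover
    class StubAsset(object):
        sid = 1
    class StubFinder(object):
        def lookup_symbol(self, symbol, as_of_date):
            return StubAsset()
    kd = pd.Timestamp('2014-01-02', tz='UTC')
    data = {kd: {kd: {'add': ['AAPL'], 'delete': []}}}
    sl = SecurityList(data,
                      lambda: pd.Timestamp('2014-06-01', tz='UTC'),
                      StubFinder())
    assert sl.current_securities(pd.Timestamp('2014-06-01', tz='UTC')) == {1}
# end of zipline/utils/security_list.py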
import pandas as pd
import pytz
# import warnings
from datetime import datetime
from dateutil import rrule
from functools import partial
# from zipline.zipline_warnings import ZiplineDeprecationWarning
# IMPORTANT: This module is deprecated and is only here for temporary backwards
# compatibility. Look at the `trading-calendars`
# module, as well as the calendar definitions in `trading_calendars`.
# TODO: The new calendar API is currently in flux, so the deprecation
# warning for this module is currently disabled. Re-enable once
# the new API is stabilized.
#
# warnings.warn(
# "The `tradingcalendar` module is deprecated. See the "
# "`trading-calendars` module, as well as the "
# "calendar definitions in `trading-calendars`.",
# category=ZiplineDeprecationWarning,
# stacklevel=1,
# )
start = pd.Timestamp('1990-01-01', tz='UTC')
end_base = pd.Timestamp('today', tz='UTC')
# Give an aggressive buffer for logic that needs to use the next trading
# day or minute.
end = end_base + pd.Timedelta(days=365)
def canonicalize_datetime(dt):
# Strip out any HHMMSS or timezone info in the user's datetime, so that
# all the datetimes we return will be 00:00:00 UTC.
return datetime(dt.year, dt.month, dt.day, tzinfo=pytz.utc)
def get_non_trading_days(start, end):
non_trading_rules = []
start = canonicalize_datetime(start)
end = canonicalize_datetime(end)
weekends = rrule.rrule(
rrule.YEARLY,
byweekday=(rrule.SA, rrule.SU),
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(weekends)
new_years = rrule.rrule(
rrule.MONTHLY,
byyearday=1,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(new_years)
new_years_sunday = rrule.rrule(
rrule.MONTHLY,
byyearday=2,
byweekday=rrule.MO,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(new_years_sunday)
mlk_day = rrule.rrule(
rrule.MONTHLY,
bymonth=1,
byweekday=(rrule.MO(+3)),
cache=True,
dtstart=datetime(1998, 1, 1, tzinfo=pytz.utc),
until=end
)
non_trading_rules.append(mlk_day)
presidents_day = rrule.rrule(
rrule.MONTHLY,
bymonth=2,
byweekday=(rrule.MO(3)),
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(presidents_day)
good_friday = rrule.rrule(
rrule.DAILY,
byeaster=-2,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(good_friday)
memorial_day = rrule.rrule(
rrule.MONTHLY,
bymonth=5,
byweekday=(rrule.MO(-1)),
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(memorial_day)
july_4th = rrule.rrule(
rrule.MONTHLY,
bymonth=7,
bymonthday=4,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(july_4th)
july_4th_sunday = rrule.rrule(
rrule.MONTHLY,
bymonth=7,
bymonthday=5,
byweekday=rrule.MO,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(july_4th_sunday)
july_4th_saturday = rrule.rrule(
rrule.MONTHLY,
bymonth=7,
bymonthday=3,
byweekday=rrule.FR,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(july_4th_saturday)
labor_day = rrule.rrule(
rrule.MONTHLY,
bymonth=9,
byweekday=(rrule.MO(1)),
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(labor_day)
thanksgiving = rrule.rrule(
rrule.MONTHLY,
bymonth=11,
byweekday=(rrule.TH(4)),
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(thanksgiving)
christmas = rrule.rrule(
rrule.MONTHLY,
bymonth=12,
bymonthday=25,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(christmas)
christmas_sunday = rrule.rrule(
rrule.MONTHLY,
bymonth=12,
bymonthday=26,
byweekday=rrule.MO,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(christmas_sunday)
    # If Christmas falls on a Saturday, the preceding Friday (the 24th)
    # is observed.
christmas_saturday = rrule.rrule(
rrule.MONTHLY,
bymonth=12,
bymonthday=24,
byweekday=rrule.FR,
cache=True,
dtstart=start,
until=end
)
non_trading_rules.append(christmas_saturday)
non_trading_ruleset = rrule.rruleset()
for rule in non_trading_rules:
non_trading_ruleset.rrule(rule)
non_trading_days = non_trading_ruleset.between(start, end, inc=True)
# Add September 11th closings
# http://en.wikipedia.org/wiki/Aftermath_of_the_September_11_attacks
# Due to the terrorist attacks, the stock market did not open on 9/11/2001
# It did not open again until 9/17/2001.
#
# September 2001
# Su Mo Tu We Th Fr Sa
# 1
# 2 3 4 5 6 7 8
# 9 10 11 12 13 14 15
# 16 17 18 19 20 21 22
# 23 24 25 26 27 28 29
# 30
for day_num in range(11, 17):
non_trading_days.append(
datetime(2001, 9, day_num, tzinfo=pytz.utc))
# Add closings due to Hurricane Sandy in 2012
# http://en.wikipedia.org/wiki/Hurricane_sandy
#
# The stock exchange was closed due to Hurricane Sandy's
# impact on New York.
# It closed on 10/29 and 10/30, reopening on 10/31
# October 2012
# Su Mo Tu We Th Fr Sa
# 1 2 3 4 5 6
# 7 8 9 10 11 12 13
# 14 15 16 17 18 19 20
# 21 22 23 24 25 26 27
# 28 29 30 31
for day_num in range(29, 31):
non_trading_days.append(
datetime(2012, 10, day_num, tzinfo=pytz.utc))
# Misc closings from NYSE listing.
# http://www.nyse.com/pdfs/closings.pdf
#
# National Days of Mourning
# - President Richard Nixon
non_trading_days.append(datetime(1994, 4, 27, tzinfo=pytz.utc))
# - President Ronald W. Reagan - June 11, 2004
non_trading_days.append(datetime(2004, 6, 11, tzinfo=pytz.utc))
# - President Gerald R. Ford - Jan 2, 2007
non_trading_days.append(datetime(2007, 1, 2, tzinfo=pytz.utc))
non_trading_days.sort()
return pd.DatetimeIndex(non_trading_days)
non_trading_days = get_non_trading_days(start, end)
trading_day = pd.tseries.offsets.CDay(holidays=non_trading_days)
def get_trading_days(start, end, trading_day=trading_day):
return pd.date_range(start=start.date(),
end=end.date(),
freq=trading_day).tz_localize('UTC')
trading_days = get_trading_days(start, end)
def get_early_closes(start, end):
# 1:00 PM close rules based on
# http://quant.stackexchange.com/questions/4083/nyse-early-close-rules-july-4th-and-dec-25th # noqa
# and verified against http://www.nyse.com/pdfs/closings.pdf
# These rules are valid starting in 1993
start = canonicalize_datetime(start)
end = canonicalize_datetime(end)
start = max(start, datetime(1993, 1, 1, tzinfo=pytz.utc))
end = max(end, datetime(1993, 1, 1, tzinfo=pytz.utc))
# Not included here are early closes prior to 1993
# or unplanned early closes
early_close_rules = []
day_after_thanksgiving = rrule.rrule(
rrule.MONTHLY,
bymonth=11,
# 4th Friday isn't correct if month starts on Friday, so restrict to
# day range:
byweekday=(rrule.FR),
bymonthday=range(23, 30),
cache=True,
dtstart=start,
until=end
)
early_close_rules.append(day_after_thanksgiving)
christmas_eve = rrule.rrule(
rrule.MONTHLY,
bymonth=12,
bymonthday=24,
byweekday=(rrule.MO, rrule.TU, rrule.WE, rrule.TH),
cache=True,
dtstart=start,
until=end
)
early_close_rules.append(christmas_eve)
friday_after_christmas = rrule.rrule(
rrule.MONTHLY,
bymonth=12,
bymonthday=26,
byweekday=rrule.FR,
cache=True,
dtstart=start,
# valid 1993-2007
until=min(end, datetime(2007, 12, 31, tzinfo=pytz.utc))
)
early_close_rules.append(friday_after_christmas)
day_before_independence_day = rrule.rrule(
rrule.MONTHLY,
bymonth=7,
bymonthday=3,
byweekday=(rrule.MO, rrule.TU, rrule.TH),
cache=True,
dtstart=start,
until=end
)
early_close_rules.append(day_before_independence_day)
day_after_independence_day = rrule.rrule(
rrule.MONTHLY,
bymonth=7,
bymonthday=5,
byweekday=rrule.FR,
cache=True,
dtstart=start,
# starting in 2013: wednesday before independence day
until=min(end, datetime(2012, 12, 31, tzinfo=pytz.utc))
)
early_close_rules.append(day_after_independence_day)
wednesday_before_independence_day = rrule.rrule(
rrule.MONTHLY,
bymonth=7,
bymonthday=3,
byweekday=rrule.WE,
cache=True,
# starting in 2013
dtstart=max(start, datetime(2013, 1, 1, tzinfo=pytz.utc)),
until=max(end, datetime(2013, 1, 1, tzinfo=pytz.utc))
)
early_close_rules.append(wednesday_before_independence_day)
early_close_ruleset = rrule.rruleset()
for rule in early_close_rules:
early_close_ruleset.rrule(rule)
early_closes = early_close_ruleset.between(start, end, inc=True)
# Misc early closings from NYSE listing.
# http://www.nyse.com/pdfs/closings.pdf
#
# New Year's Eve
nye_1999 = datetime(1999, 12, 31, tzinfo=pytz.utc)
if start <= nye_1999 and nye_1999 <= end:
early_closes.append(nye_1999)
early_closes.sort()
return pd.DatetimeIndex(early_closes)
early_closes = get_early_closes(start, end)
def get_open_and_close(day, early_closes):
market_open = pd.Timestamp(
datetime(
year=day.year,
month=day.month,
day=day.day,
hour=9,
minute=31),
tz='US/Eastern').tz_convert('UTC')
# 1 PM if early close, 4 PM otherwise
close_hour = 13 if day in early_closes else 16
market_close = pd.Timestamp(
datetime(
year=day.year,
month=day.month,
day=day.day,
hour=close_hour),
tz='US/Eastern').tz_convert('UTC')
return market_open, market_close
def get_open_and_closes(trading_days, early_closes, get_open_and_close):
open_and_closes = pd.DataFrame(index=trading_days,
columns=('market_open', 'market_close'))
get_o_and_c = partial(get_open_and_close, early_closes=early_closes)
open_and_closes['market_open'], open_and_closes['market_close'] = \
zip(*open_and_closes.index.map(get_o_and_c))
return open_and_closes
open_and_closes = get_open_and_closes(trading_days, early_closes,
                                      get_open_and_close)
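
# Illustrative usage sketch (editor's addition, not part of the original
# module): the day after Thanksgiving 2012 (Friday, November 23) was a
# 1:00 PM Eastern early close.
def _demo_early_close():  # pragma: no cover
    day = pd.Timestamp('2012-11-23', tz='UTC')
    market_open, market_close = get_open_and_close(day, early_closes)
    assert market_open.tz_convert('US/Eastern').hour == 9
    assert market_close.tz_convert('US/Eastern').hour == 13
# end of zipline/utils/tradingcalendar.py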
from ctypes import (
Structure,
c_ubyte,
c_uint,
c_ulong,
c_ulonglong,
c_ushort,
sizeof,
)
import numpy as np
import pandas as pd
from six.moves import range
_inttypes_map = {
sizeof(t) - 1: t for t in {
c_ubyte,
c_uint,
c_ulong,
c_ulonglong,
c_ushort
}
}
_inttypes = list(
pd.Series(_inttypes_map).reindex(
range(max(_inttypes_map.keys())),
method='bfill',
),
)
def enum(option, *options):
"""
Construct a new enum object.
Parameters
----------
*options : iterable of str
The names of the fields for the enum.
Returns
-------
enum
A new enum collection.
Examples
--------
>>> e = enum('a', 'b', 'c')
>>> e
<enum: ('a', 'b', 'c')>
>>> e.a
0
>>> e.b
1
>>> e.a in e
True
>>> tuple(e)
(0, 1, 2)
Notes
-----
Identity checking is not guaranteed to work with enum members, instead
equality checks should be used. From CPython's documentation:
"The current implementation keeps an array of integer objects for all
integers between -5 and 256, when you create an int in that range you
actually just get back a reference to the existing object. So it should be
possible to change the value of 1. I suspect the behaviour of Python in
this case is undefined. :-)"
"""
options = (option,) + options
rangeob = range(len(options))
try:
inttype = _inttypes[int(np.log2(len(options) - 1)) // 8]
except IndexError:
raise OverflowError(
'Cannot store enums with more than sys.maxsize elements, got %d' %
len(options),
)
class _enum(Structure):
_fields_ = [(o, inttype) for o in options]
def __iter__(self):
return iter(rangeob)
def __contains__(self, value):
return 0 <= value < len(options)
def __repr__(self):
return '<enum: %s>' % (
('%d fields' % len(options))
if len(options) > 10 else
repr(options)
)
    return _enum(*rangeob)
# end of zipline/utils/enum.py
from operator import attrgetter
import six
def compose_types(a, *cs):
"""Compose multiple classes together.
Parameters
----------
    a : type
        The first class to compose.
    *cs : tuple[type]
        Any additional classes to compose with ``a``.
Returns
-------
cls : type
A type that subclasses all of the types in ``mcls``.
Notes
-----
A common use case for this is to build composed metaclasses, for example,
imagine you have some simple metaclass ``M`` and some instance of ``M``
named ``C`` like so:
.. code-block:: python
>>> class M(type):
... def __new__(mcls, name, bases, dict_):
... dict_['ayy'] = 'lmao'
... return super(M, mcls).__new__(mcls, name, bases, dict_)
>>> from six import with_metaclass
>>> class C(with_metaclass(M, object)):
... pass
    We now want to create a subclass of ``C`` that is also an abstract class.
We can use ``compose_types`` to create a new metaclass that is a subclass
of ``M`` and ``ABCMeta``. This is needed because a subclass of a class
with a metaclass must have a metaclass which is a subclass of the metaclass
of the superclass.
.. code-block:: python
>>> from abc import ABCMeta, abstractmethod
>>> class D(with_metaclass(compose_types(M, ABCMeta), C)):
... @abstractmethod
... def f(self):
... raise NotImplementedError('f')
We can see that this class has both metaclasses applied to it:
.. code-block:: python
>>> D.ayy
'lmao'
>>> D()
Traceback (most recent call last):
...
TypeError: Can't instantiate abstract class D with abstract methods f
An important note here is that ``M`` did not use ``type.__new__`` and
instead used ``super()``. This is to support cooperative multiple
    inheritance, which is needed for ``compose_types`` to work as intended.
After we have composed these types ``M.__new__``\'s super will actually
go to ``ABCMeta.__new__`` and not ``type.__new__``.
    Always using ``super()`` to dispatch to your superclass is best practice
    anyway, so most classes should compose without much special consideration.
"""
if not cs:
# if there are no types to compose then just return the single type
return a
mcls = (a,) + cs
return type(
'compose_types(%s)' % ', '.join(map(attrgetter('__name__'), mcls)),
mcls,
{},
)
def with_metaclasses(metaclasses, *bases):
"""Make a class inheriting from ``bases`` whose metaclass inherits from
all of ``metaclasses``.
Like :func:`six.with_metaclass`, but allows multiple metaclasses.
Parameters
----------
metaclasses : iterable[type]
A tuple of types to use as metaclasses.
*bases : tuple[type]
A tuple of types to use as bases.
Returns
-------
base : type
A subtype of ``bases`` whose metaclass is a subtype of ``metaclasses``.
Notes
-----
The metaclasses must be written to support cooperative multiple
inheritance. This means that they must delegate all calls to ``super()``
instead of inlining their super class by name.
"""
    return six.with_metaclass(compose_types(*metaclasses), *bases)
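
# Illustrative usage sketch (editor's addition, not part of the original
# module): two cooperative metaclasses composed into one.
def _demo_with_metaclasses():  # pragma: no cover
    class TagA(type):
        def __new__(mcls, name, bases, dict_):
            dict_['a'] = True
            return super(TagA, mcls).__new__(mcls, name, bases, dict_)
    class TagB(type):
        def __new__(mcls, name, bases, dict_):
            dict_['b'] = True
            return super(TagB, mcls).__new__(mcls, name, bases, dict_)
    class C(with_metaclasses((TagA, TagB), object)):
        pass
    assert C.a and C.b
# end of zipline/utils/metautils.py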
from collections import OrderedDict
from datetime import datetime
from distutils.version import StrictVersion
from warnings import (
catch_warnings,
filterwarnings,
)
import numpy as np
from numpy import (
broadcast,
busday_count,
datetime64,
diff,
dtype,
empty,
flatnonzero,
hstack,
isnan,
nan,
vectorize,
where
)
from numpy.lib.stride_tricks import as_strided
from toolz import flip
numpy_version = StrictVersion(np.__version__)
uint8_dtype = dtype('uint8')
bool_dtype = dtype('bool')
int64_dtype = dtype('int64')
float32_dtype = dtype('float32')
float64_dtype = dtype('float64')
complex128_dtype = dtype('complex128')
datetime64D_dtype = dtype('datetime64[D]')
datetime64ns_dtype = dtype('datetime64[ns]')
object_dtype = dtype('O')
# We use object arrays for strings.
categorical_dtype = object_dtype
make_datetime64ns = flip(datetime64, 'ns')
make_datetime64D = flip(datetime64, 'D')
NaTmap = {
dtype('datetime64[%s]' % unit): datetime64('NaT', unit)
for unit in ('ns', 'us', 'ms', 's', 'm', 'D')
}
def NaT_for_dtype(dtype):
"""Retrieve NaT with the same units as ``dtype``.
Parameters
----------
dtype : dtype-coercable
The dtype to lookup the NaT value for.
Returns
-------
NaT : dtype
The NaT value for the given dtype.
"""
return NaTmap[np.dtype(dtype)]
NaTns = NaT_for_dtype(datetime64ns_dtype)
NaTD = NaT_for_dtype(datetime64D_dtype)
_FILLVALUE_DEFAULTS = {
bool_dtype: False,
float32_dtype: nan,
float64_dtype: nan,
datetime64ns_dtype: NaTns,
object_dtype: None,
}
INT_DTYPES_BY_SIZE_BYTES = OrderedDict([
(1, dtype('int8')),
(2, dtype('int16')),
(4, dtype('int32')),
(8, dtype('int64')),
])
UNSIGNED_INT_DTYPES_BY_SIZE_BYTES = OrderedDict([
(1, dtype('uint8')),
(2, dtype('uint16')),
(4, dtype('uint32')),
(8, dtype('uint64')),
])
def int_dtype_with_size_in_bytes(size):
try:
return INT_DTYPES_BY_SIZE_BYTES[size]
except KeyError:
raise ValueError("No integral dtype whose size is %d bytes." % size)
def unsigned_int_dtype_with_size_in_bytes(size):
try:
return UNSIGNED_INT_DTYPES_BY_SIZE_BYTES[size]
except KeyError:
raise ValueError(
"No unsigned integral dtype whose size is %d bytes." % size
)
class NoDefaultMissingValue(Exception):
pass
def make_kind_check(python_types, numpy_kind):
"""
Make a function that checks whether a scalar or array is of a given kind
(e.g. float, int, datetime, timedelta).
"""
def check(value):
if hasattr(value, 'dtype'):
return value.dtype.kind == numpy_kind
return isinstance(value, python_types)
return check
is_float = make_kind_check(float, 'f')
is_int = make_kind_check(int, 'i')
is_datetime = make_kind_check(datetime, 'M')
is_object = make_kind_check(object, 'O')
def coerce_to_dtype(dtype, value):
"""
Make a value with the specified numpy dtype.
Only datetime64[ns] and datetime64[D] are supported for datetime dtypes.
"""
name = dtype.name
if name.startswith('datetime64'):
if name == 'datetime64[D]':
return make_datetime64D(value)
elif name == 'datetime64[ns]':
return make_datetime64ns(value)
else:
raise TypeError(
"Don't know how to coerce values of dtype %s" % dtype
)
return dtype.type(value)
def default_missing_value_for_dtype(dtype):
"""
Get the default fill value for `dtype`.
"""
try:
return _FILLVALUE_DEFAULTS[dtype]
except KeyError:
raise NoDefaultMissingValue(
"No default value registered for dtype %s." % dtype
)
def repeat_first_axis(array, count):
"""
Restride `array` to repeat `count` times along the first axis.
Parameters
----------
array : np.array
The array to restride.
count : int
Number of times to repeat `array`.
Returns
-------
result : array
Array of shape (count,) + array.shape, composed of `array` repeated
`count` times along the first axis.
Example
-------
>>> from numpy import arange
>>> a = arange(3); a
array([0, 1, 2])
>>> repeat_first_axis(a, 2)
array([[0, 1, 2],
[0, 1, 2]])
>>> repeat_first_axis(a, 4)
array([[0, 1, 2],
[0, 1, 2],
[0, 1, 2],
[0, 1, 2]])
Notes
    -----
The resulting array will share memory with `array`. If you need to assign
to the input or output, you should probably make a copy first.
See Also
--------
repeat_last_axis
"""
return as_strided(array, (count,) + array.shape, (0,) + array.strides)
def repeat_last_axis(array, count):
"""
Restride `array` to repeat `count` times along the last axis.
Parameters
----------
array : np.array
The array to restride.
count : int
Number of times to repeat `array`.
Returns
-------
result : array
Array of shape array.shape + (count,) composed of `array` repeated
`count` times along the last axis.
Example
-------
>>> from numpy import arange
>>> a = arange(3); a
array([0, 1, 2])
>>> repeat_last_axis(a, 2)
array([[0, 0],
[1, 1],
[2, 2]])
>>> repeat_last_axis(a, 4)
array([[0, 0, 0, 0],
[1, 1, 1, 1],
[2, 2, 2, 2]])
Notes
    -----
The resulting array will share memory with `array`. If you need to assign
to the input or output, you should probably make a copy first.
See Also
--------
    repeat_first_axis
"""
return as_strided(array, array.shape + (count,), array.strides + (0,))
def rolling_window(array, length):
"""
Restride an array of shape
(X_0, ... X_N)
into an array of shape
(length, X_0 - length + 1, ... X_N)
where each slice at index i along the first axis is equivalent to
result[i] = array[length * i:length * (i + 1)]
Parameters
----------
array : np.ndarray
The base array.
length : int
Length of the synthetic first axis to generate.
Returns
-------
out : np.ndarray
Example
-------
>>> from numpy import arange
>>> a = arange(25).reshape(5, 5)
>>> a
array([[ 0, 1, 2, 3, 4],
[ 5, 6, 7, 8, 9],
[10, 11, 12, 13, 14],
[15, 16, 17, 18, 19],
[20, 21, 22, 23, 24]])
>>> rolling_window(a, 2)
array([[[ 0, 1, 2, 3, 4],
[ 5, 6, 7, 8, 9]],
<BLANKLINE>
[[ 5, 6, 7, 8, 9],
[10, 11, 12, 13, 14]],
<BLANKLINE>
[[10, 11, 12, 13, 14],
[15, 16, 17, 18, 19]],
<BLANKLINE>
[[15, 16, 17, 18, 19],
[20, 21, 22, 23, 24]]])
"""
orig_shape = array.shape
if not orig_shape:
raise IndexError("Can't restride a scalar.")
elif orig_shape[0] <= length:
raise IndexError(
"Can't restride array of shape {shape} with"
" a window length of {len}".format(
shape=orig_shape,
len=length,
)
)
num_windows = (orig_shape[0] - length + 1)
new_shape = (num_windows, length) + orig_shape[1:]
new_strides = (array.strides[0],) + array.strides
return as_strided(array, new_shape, new_strides)
# Sentinel value that isn't NaT.
_notNaT = make_datetime64D(0)
iNaT = int(NaTns.view(int64_dtype))
assert iNaT == NaTD.view(int64_dtype), "iNaTns != iNaTD"
def isnat(obj):
"""
Check if a value is np.NaT.
"""
if obj.dtype.kind not in ('m', 'M'):
raise ValueError("%s is not a numpy datetime or timedelta")
return obj.view(int64_dtype) == iNaT
def is_missing(data, missing_value):
"""
Generic is_missing function that handles NaN and NaT.
"""
if is_float(data) and isnan(missing_value):
return isnan(data)
elif is_datetime(data) and isnat(missing_value):
return isnat(data)
return (data == missing_value)
def busday_count_mask_NaT(begindates, enddates, out=None):
"""
    Simple wrapper around numpy.busday_count that returns `float` arrays
    rather than int arrays, and handles `NaT`s by returning `NaN`s where the
    inputs were `NaT`.
Doesn't support custom weekdays or calendars, but probably should in the
future.
See Also
--------
np.busday_count
"""
if out is None:
out = empty(broadcast(begindates, enddates).shape, dtype=float)
beginmask = isnat(begindates)
endmask = isnat(enddates)
out = busday_count(
# Temporarily fill in non-NaT values.
where(beginmask, _notNaT, begindates),
where(endmask, _notNaT, enddates),
out=out,
)
# Fill in entries where either comparison was NaT with nan in the output.
out[beginmask | endmask] = nan
return out
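
# Illustrative usage sketch (editor's addition, not part of the original
# module): NaT inputs propagate as NaN instead of raising.
def _demo_busday_count_mask_NaT():  # pragma: no cover
    begins = np.array(['2014-01-01', 'NaT'], dtype='datetime64[D]')
    ends = np.array(['2014-01-08', '2014-01-08'], dtype='datetime64[D]')
    counts = busday_count_mask_NaT(begins, ends)
    # Five business days between Wed Jan 1 and Wed Jan 8 (exclusive).
    assert counts[0] == 5.0 and isnan(counts[1])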
class WarningContext(object):
"""
Re-usable contextmanager for contextually managing warnings.
"""
def __init__(self, *warning_specs):
self._warning_specs = warning_specs
self._catchers = []
def __enter__(self):
catcher = catch_warnings()
catcher.__enter__()
self._catchers.append(catcher)
for args, kwargs in self._warning_specs:
filterwarnings(*args, **kwargs)
return self
def __exit__(self, *exc_info):
catcher = self._catchers.pop()
return catcher.__exit__(*exc_info)
def ignore_nanwarnings():
"""
Helper for building a WarningContext that ignores warnings from numpy's
nanfunctions.
"""
return WarningContext(
(
('ignore',),
{'category': RuntimeWarning, 'module': 'numpy.lib.nanfunctions'},
)
)
def vectorized_is_element(array, choices):
"""
Check if each element of ``array`` is in choices.
Parameters
----------
array : np.ndarray
choices : object
Object implementing __contains__.
Returns
-------
was_element : np.ndarray[bool]
Array indicating whether each element of ``array`` was in ``choices``.
"""
return vectorize(choices.__contains__, otypes=[bool])(array)
def as_column(a):
"""
Convert an array of shape (N,) into an array of shape (N, 1).
This is equivalent to `a[:, np.newaxis]`.
Parameters
----------
a : np.ndarray
Example
-------
>>> import numpy as np
>>> a = np.arange(5)
>>> a
array([0, 1, 2, 3, 4])
>>> as_column(a)
array([[0],
[1],
[2],
[3],
[4]])
>>> as_column(a).shape
(5, 1)
"""
if a.ndim != 1:
raise ValueError(
"as_column expected an 1-dimensional array, "
"but got an array of shape %s" % a.shape
)
return a[:, None]
def changed_locations(a, include_first):
"""
Compute indices of values in ``a`` that differ from the previous value.
Parameters
----------
a : np.ndarray
        The array on which to compute indices of change.
include_first : bool
Whether or not to consider the first index of the array as "changed".
Example
-------
>>> import numpy as np
>>> changed_locations(np.array([0, 0, 5, 5, 1, 1]), include_first=False)
array([2, 4])
>>> changed_locations(np.array([0, 0, 5, 5, 1, 1]), include_first=True)
array([0, 2, 4])
"""
if a.ndim > 1:
raise ValueError("indices_of_changed_values only supports 1D arrays.")
indices = flatnonzero(diff(a)) + 1
if not include_first:
return indices
    return hstack([[0], indices])
# end of zipline/utils/numpy_utils.py
import functools
from operator import methodcaller
import sys
from six import PY2
if PY2:
from abc import ABCMeta
from types import DictProxyType
from ctypes import py_object, pythonapi
_new_mappingproxy = pythonapi.PyDictProxy_New
_new_mappingproxy.argtypes = [py_object]
_new_mappingproxy.restype = py_object
# Make mappingproxy a "class" so that we can use multipledispatch
# with it or do an ``isinstance(ob, mappingproxy)`` check in Python 2.
# You will never actually get an instance of this object, you will just
# get instances of ``types.DictProxyType``; however, ``mappingproxy`` is
# registered as a virtual super class so ``isinstance`` and ``issubclass``
# will work as expected. The only thing that will appear strange is that:
# ``type(mappingproxy({})) is not mappingproxy``, but you shouldn't do
# that.
class mappingproxy(object):
__metaclass__ = ABCMeta
def __new__(cls, *args, **kwargs):
return _new_mappingproxy(*args, **kwargs)
mappingproxy.register(DictProxyType)
# clear names not imported in the other branch
del DictProxyType
del ABCMeta
del py_object
del pythonapi
def exc_clear():
sys.exc_clear()
def consistent_round(val):
return round(val)
def update_wrapper(wrapper,
wrapped,
assigned=functools.WRAPPER_ASSIGNMENTS,
updated=functools.WRAPPER_UPDATES):
"""Backport of Python 3's functools.update_wrapper for __wrapped__.
"""
for attr in assigned:
try:
value = getattr(wrapped, attr)
except AttributeError:
pass
else:
setattr(wrapper, attr, value)
for attr in updated:
getattr(wrapper, attr).update(getattr(wrapped, attr, {}))
# Issue #17482: set __wrapped__ last so we don't inadvertently copy it
# from the wrapped function when updating __dict__
wrapper.__wrapped__ = wrapped
# Return the wrapper so this can be used as a decorator via partial()
return wrapper
def wraps(wrapped,
assigned=functools.WRAPPER_ASSIGNMENTS,
updated=functools.WRAPPER_UPDATES):
"""Decorator factory to apply update_wrapper() to a wrapper function
Returns a decorator that invokes update_wrapper() with the decorated
function as the wrapper argument and the arguments to wraps() as the
remaining arguments. Default arguments are as for update_wrapper().
This is a convenience function to simplify applying partial() to
update_wrapper().
"""
return functools.partial(update_wrapper, wrapped=wrapped,
assigned=assigned, updated=updated)
values_as_list = methodcaller('values')
else:
from types import MappingProxyType as mappingproxy
from math import ceil
def exc_clear():
# exc_clear was removed in Python 3. The except statement automatically
# clears the exception.
pass
def consistent_round(val):
if (val % 1) >= 0.5:
return ceil(val)
else:
return round(val)
update_wrapper = functools.update_wrapper
wraps = functools.wraps
def values_as_list(dictionary):
"""Return the dictionary values as a list without forcing a copy
in Python 2.
"""
return list(dictionary.values())
unicode = type(u'')
__all__ = [
'PY2',
'exc_clear',
'mappingproxy',
'unicode',
'update_wrapper',
'values_as_list',
'wraps',
'consistent_round',
]
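
# Illustrative usage sketch (editor's addition, not part of the original
# module): Python 3's built-in round() uses banker's rounding, so
# round(2.5) == 2; consistent_round rounds such ties upward, matching the
# Python 2 behavior for positive inputs.
def _demo_consistent_round():  # pragma: no cover
    assert consistent_round(2.5) == 3
    assert consistent_round(2.4) == 2
# end of zipline/utils/compat.py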
from collections import MutableMapping
import errno
from functools import partial
import os
import pickle
from distutils import dir_util
from shutil import rmtree, move
from tempfile import mkdtemp, NamedTemporaryFile
import pandas as pd
from .compat import PY2
from .context_tricks import nop_context
from .paths import ensure_directory
from .sentinel import sentinel
class Expired(Exception):
"""Marks that a :class:`CachedObject` has expired.
"""
ExpiredCachedObject = sentinel('ExpiredCachedObject')
AlwaysExpired = sentinel('AlwaysExpired')
class CachedObject(object):
"""
A simple struct for maintaining a cached object with an expiration date.
Parameters
----------
value : object
The object to cache.
expires : datetime-like
Expiration date of `value`. The cache is considered invalid for dates
**strictly greater** than `expires`.
Examples
--------
>>> from pandas import Timestamp, Timedelta
>>> expires = Timestamp('2014', tz='UTC')
>>> obj = CachedObject(1, expires)
>>> obj.unwrap(expires - Timedelta('1 minute'))
1
>>> obj.unwrap(expires)
1
>>> obj.unwrap(expires + Timedelta('1 minute'))
... # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
Expired: 2014-01-01 00:00:00+00:00
"""
def __init__(self, value, expires):
self._value = value
self._expires = expires
@classmethod
def expired(cls):
"""Construct a CachedObject that's expired at any time.
"""
return cls(ExpiredCachedObject, expires=AlwaysExpired)
def unwrap(self, dt):
"""
Get the cached value.
Returns
-------
value : object
The cached value.
Raises
------
Expired
Raised when `dt` is greater than self.expires.
"""
expires = self._expires
if expires is AlwaysExpired or expires < dt:
raise Expired(self._expires)
return self._value
def _unsafe_get_value(self):
"""You almost certainly shouldn't use this."""
return self._value
class ExpiringCache(object):
"""
    A cache of multiple CachedObjects, which returns the wrapped value, or
    raises (and deletes the CachedObject) if the value has expired.
Parameters
----------
cache : dict-like, optional
An instance of a dict-like object which needs to support at least:
        `__delitem__`, `__getitem__`, `__setitem__`.
        If `None`, then a dict is used as a default.
cleanup : callable, optional
A method that takes a single argument, a cached object, and is called
upon expiry of the cached object, prior to deleting the object. If not
provided, defaults to a no-op.
Examples
--------
>>> from pandas import Timestamp, Timedelta
>>> expires = Timestamp('2014', tz='UTC')
>>> value = 1
>>> cache = ExpiringCache()
>>> cache.set('foo', value, expires)
>>> cache.get('foo', expires - Timedelta('1 minute'))
1
>>> cache.get('foo', expires + Timedelta('1 minute'))
Traceback (most recent call last):
...
KeyError: 'foo'
"""
def __init__(self, cache=None, cleanup=lambda value_to_clean: None):
if cache is not None:
self._cache = cache
else:
self._cache = {}
self.cleanup = cleanup
def get(self, key, dt):
"""Get the value of a cached object.
Parameters
----------
key : any
The key to lookup.
dt : datetime
The time of the lookup.
Returns
-------
result : any
The value for ``key``.
Raises
------
KeyError
Raised if the key is not in the cache or the value for the key
has expired.
"""
try:
return self._cache[key].unwrap(dt)
except Expired:
self.cleanup(self._cache[key]._unsafe_get_value())
del self._cache[key]
raise KeyError(key)
def set(self, key, value, expiration_dt):
"""Adds a new key value pair to the cache.
Parameters
----------
key : any
The key to use for the pair.
value : any
The value to store under the name ``key``.
expiration_dt : datetime
When should this mapping expire? The cache is considered invalid
for dates **strictly greater** than ``expiration_dt``.
"""
self._cache[key] = CachedObject(value, expiration_dt)
class dataframe_cache(MutableMapping):
"""A disk-backed cache for dataframes.
``dataframe_cache`` is a mutable mapping from string names to pandas
DataFrame objects.
This object may be used as a context manager to delete the cache directory
on exit.
Parameters
----------
path : str, optional
The directory path to the cache. Files will be written as
``path/<keyname>``.
lock : Lock, optional
Thread lock for multithreaded/multiprocessed access to the cache.
If not provided no locking will be used.
clean_on_failure : bool, optional
Should the directory be cleaned up if an exception is raised in the
context manager.
    serialization : {'msgpack', 'pickle:<n>'}, optional
How should the data be serialized. If ``'pickle'`` is passed, an
optional pickle protocol can be passed like: ``'pickle:3'`` which says
to use pickle protocol 3.
Notes
-----
The syntax ``cache[:]`` will load all key:value pairs into memory as a
dictionary.
The cache uses a temporary file format that is subject to change between
versions of zipline.
"""
def __init__(self,
path=None,
lock=None,
clean_on_failure=True,
serialization='msgpack'):
self.path = path if path is not None else mkdtemp()
self.lock = lock if lock is not None else nop_context
self.clean_on_failure = clean_on_failure
if serialization == 'msgpack':
self.serialize = pd.DataFrame.to_msgpack
self.deserialize = pd.read_msgpack
self._protocol = None
else:
s = serialization.split(':', 1)
if s[0] != 'pickle':
raise ValueError(
"'serialization' must be either 'msgpack' or 'pickle[:n]'",
)
self._protocol = int(s[1]) if len(s) == 2 else None
self.serialize = self._serialize_pickle
self.deserialize = (
pickle.load if PY2 else
partial(pickle.load, encoding='latin-1')
)
ensure_directory(self.path)
def _serialize_pickle(self, df, path):
with open(path, 'wb') as f:
pickle.dump(df, f, protocol=self._protocol)
def _keypath(self, key):
return os.path.join(self.path, key)
def __enter__(self):
return self
def __exit__(self, type_, value, tb):
if not (self.clean_on_failure or value is None):
# we are not cleaning up after a failure and there was an exception
return
with self.lock:
rmtree(self.path)
def __getitem__(self, key):
if key == slice(None):
return dict(self.items())
with self.lock:
try:
with open(self._keypath(key), 'rb') as f:
return self.deserialize(f)
except IOError as e:
if e.errno != errno.ENOENT:
raise
raise KeyError(key)
def __setitem__(self, key, value):
with self.lock:
self.serialize(value, self._keypath(key))
def __delitem__(self, key):
with self.lock:
try:
os.remove(self._keypath(key))
except OSError as e:
if e.errno == errno.ENOENT:
# raise a keyerror if this directory did not exist
raise KeyError(key)
# reraise the actual oserror otherwise
raise
def __iter__(self):
return iter(os.listdir(self.path))
def __len__(self):
return len(os.listdir(self.path))
def __repr__(self):
return '<%s: keys={%s}>' % (
type(self).__name__,
', '.join(map(repr, sorted(self))),
)
class working_file(object):
"""A context manager for managing a temporary file that will be moved
to a non-temporary location if no exceptions are raised in the context.
Parameters
----------
final_path : str
The location to move the file when committing.
*args, **kwargs
Forwarded to NamedTemporaryFile.
Notes
-----
The file is moved on __exit__ if there are no exceptions.
``working_file`` uses :func:`shutil.move` to move the actual files,
meaning it has as strong of guarantees as :func:`shutil.move`.
"""
def __init__(self, final_path, *args, **kwargs):
self._tmpfile = NamedTemporaryFile(delete=False, *args, **kwargs)
self._final_path = final_path
@property
def path(self):
"""Alias for ``name`` to be consistent with
:class:`~zipline.utils.cache.working_dir`.
"""
return self._tmpfile.name
def _commit(self):
"""Sync the temporary file to the final path.
"""
move(self.path, self._final_path)
def __enter__(self):
self._tmpfile.__enter__()
return self
def __exit__(self, *exc_info):
self._tmpfile.__exit__(*exc_info)
if exc_info[0] is None:
self._commit()
class working_dir(object):
"""A context manager for managing a temporary directory that will be moved
to a non-temporary location if no exceptions are raised in the context.
Parameters
----------
final_path : str
The location to move the file when committing.
*args, **kwargs
Forwarded to tmp_dir.
Notes
-----
The file is moved on __exit__ if there are no exceptions.
``working_dir`` uses :func:`dir_util.copy_tree` to move the actual files,
meaning it has as strong of guarantees as :func:`dir_util.copy_tree`.
"""
def __init__(self, final_path, *args, **kwargs):
        self.path = mkdtemp(*args, **kwargs)
self._final_path = final_path
def ensure_dir(self, *path_parts):
"""Ensures a subdirectory of the working directory.
Parameters
----------
path_parts : iterable[str]
The parts of the path after the working directory.
"""
path = self.getpath(*path_parts)
ensure_directory(path)
return path
def getpath(self, *path_parts):
"""Get a path relative to the working directory.
Parameters
----------
path_parts : iterable[str]
The parts of the path after the working directory.
"""
return os.path.join(self.path, *path_parts)
def _commit(self):
"""Sync the temporary directory to the final path.
"""
dir_util.copy_tree(self.path, self._final_path)
def __enter__(self):
return self
def __exit__(self, *exc_info):
if exc_info[0] is None:
self._commit()
        rmtree(self.path)
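
# Illustrative usage sketch (editor's addition, not part of the original
# module): the temporary file only replaces ``final`` if the block exits
# cleanly.
def _demo_working_file():  # pragma: no cover
    final = os.path.join(mkdtemp(), 'out.txt')
    with working_file(final, mode='w') as wf:
        with open(wf.path, 'w') as f:
            f.write('committed')
    assert os.path.exists(final)
# end of zipline/utils/cache.py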
from contextlib import contextmanager
from copy import deepcopy
from itertools import product
import operator as op
import warnings
import numpy as np
import pandas as pd
from distutils.version import StrictVersion
from trading_calendars.utils.pandas_utils import days_at_time # noqa: reexport
pandas_version = StrictVersion(pd.__version__)
new_pandas = pandas_version >= StrictVersion('0.19')
skip_pipeline_new_pandas = \
'Pipeline categoricals are not yet compatible with pandas >=0.19'
if pandas_version >= StrictVersion('0.20'):
def normalize_date(dt):
"""
        Normalize a Timestamp to midnight, returning a Timestamp with its
        time fields zeroed out.
Returns
-------
normalized : datetime.datetime or Timestamp
"""
return dt.normalize()
else:
from pandas.tseries.tools import normalize_date # noqa
def july_5th_holiday_observance(datetime_index):
return datetime_index[datetime_index.year != 2013]
def explode(df):
"""
Take a DataFrame and return a triple of
(df.index, df.columns, df.values)
"""
return df.index, df.columns, df.values
def _time_to_micros(time):
"""Convert a time into microseconds since midnight.
Parameters
----------
time : datetime.time
The time to convert.
Returns
-------
us : int
The number of microseconds since midnight.
Notes
-----
This does not account for leap seconds or daylight savings.
"""
seconds = time.hour * 60 * 60 + time.minute * 60 + time.second
return 1000000 * seconds + time.microsecond
_opmap = dict(zip(
product((True, False), repeat=3),
product((op.le, op.lt), (op.le, op.lt), (op.and_, op.or_)),
))
def mask_between_time(dts, start, end, include_start=True, include_end=True):
"""Return a mask of all of the datetimes in ``dts`` that are between
``start`` and ``end``.
Parameters
----------
dts : pd.DatetimeIndex
The index to mask.
start : time
Mask away times less than the start.
end : time
Mask away times greater than the end.
include_start : bool, optional
Inclusive on ``start``.
include_end : bool, optional
Inclusive on ``end``.
Returns
-------
mask : np.ndarray[bool]
A bool array masking ``dts``.
See Also
--------
:meth:`pandas.DatetimeIndex.indexer_between_time`
"""
# This function is adapted from
    # `pandas.DatetimeIndex.indexer_between_time` which was originally
# written by Wes McKinney, Chang She, and Grant Roch.
time_micros = dts._get_time_micros()
start_micros = _time_to_micros(start)
end_micros = _time_to_micros(end)
left_op, right_op, join_op = _opmap[
bool(include_start),
bool(include_end),
start_micros <= end_micros,
]
return join_op(
left_op(start_micros, time_micros),
right_op(time_micros, end_micros),
)
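
# Illustrative usage sketch (editor's addition, not part of the original
# module): both endpoints are inclusive by default.
def _demo_mask_between_time():  # pragma: no cover
    from datetime import time
    dts = pd.date_range('2014-01-01 09:00', periods=5, freq='30T')
    mask = mask_between_time(dts, time(9, 30), time(10, 30))
    assert list(mask) == [False, True, True, True, False]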
def find_in_sorted_index(dts, dt):
"""
Find the index of ``dt`` in ``dts``.
This function should be used instead of `dts.get_loc(dt)` if the index is
large enough that we don't want to initialize a hash table in ``dts``. In
particular, this should always be used on minutely trading calendars.
Parameters
----------
dts : pd.DatetimeIndex
Index in which to look up ``dt``. **Must be sorted**.
dt : pd.Timestamp
``dt`` to be looked up.
Returns
-------
ix : int
Integer index such that dts[ix] == dt.
Raises
------
    LookupError
        If dt is not in ``dts``.
"""
ix = dts.searchsorted(dt)
if ix == len(dts) or dts[ix] != dt:
raise LookupError("{dt} is not in {dts}".format(dt=dt, dts=dts))
return ix
def nearest_unequal_elements(dts, dt):
"""
Find values in ``dts`` closest but not equal to ``dt``.
Returns a pair of (last_before, first_after).
When ``dt`` is less than any element in ``dts``, ``last_before`` is None.
    When ``dt`` is greater than any element in ``dts``, ``first_after`` is None.
``dts`` must be unique and sorted in increasing order.
Parameters
----------
dts : pd.DatetimeIndex
Dates in which to search.
dt : pd.Timestamp
Date for which to find bounds.
"""
if not dts.is_unique:
raise ValueError("dts must be unique")
if not dts.is_monotonic_increasing:
raise ValueError("dts must be sorted in increasing order")
if not len(dts):
return None, None
sortpos = dts.searchsorted(dt, side='left')
try:
sortval = dts[sortpos]
except IndexError:
# dt is greater than any value in the array.
return dts[-1], None
if dt < sortval:
lower_ix = sortpos - 1
upper_ix = sortpos
elif dt == sortval:
lower_ix = sortpos - 1
upper_ix = sortpos + 1
else:
lower_ix = sortpos
upper_ix = sortpos + 1
lower_value = dts[lower_ix] if lower_ix >= 0 else None
upper_value = dts[upper_ix] if upper_ix < len(dts) else None
return lower_value, upper_value
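
# Illustrative usage sketch (editor's addition, not part of the original
# module): an exact match is excluded from both sides of the result.
def _demo_nearest_unequal_elements():  # pragma: no cover
    dts = pd.to_datetime(['2014-01-01', '2014-01-05', '2014-01-06'])
    before, after = nearest_unequal_elements(dts, pd.Timestamp('2014-01-05'))
    assert before == pd.Timestamp('2014-01-01')
    assert after == pd.Timestamp('2014-01-06')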
def timedelta_to_integral_seconds(delta):
"""
Convert a pd.Timedelta to a number of seconds as an int.
"""
return int(delta.total_seconds())
def timedelta_to_integral_minutes(delta):
"""
Convert a pd.Timedelta to a number of minutes as an int.
"""
return timedelta_to_integral_seconds(delta) // 60
@contextmanager
def ignore_pandas_nan_categorical_warning():
with warnings.catch_warnings():
# Pandas >= 0.18 doesn't like null-ish values in categories, but
# avoiding that requires a broader change to how missing values are
# handled in pipeline, so for now just silence the warning.
warnings.filterwarnings(
'ignore',
category=FutureWarning,
)
yield
_INDEXER_NAMES = [
'_' + name for (name, _) in pd.core.indexing.get_indexers_list()
]
def clear_dataframe_indexer_caches(df):
"""
Clear cached attributes from a pandas DataFrame.
    By default pandas memoizes indexer objects (`iloc`, `loc`, `ix`, etc.) on
DataFrames, resulting in refcycles that can lead to unexpectedly long-lived
DataFrames. This function attempts to clear those cycles by deleting the
cached indexers from the frame.
Parameters
----------
df : pd.DataFrame
"""
for attr in _INDEXER_NAMES:
try:
delattr(df, attr)
except AttributeError:
pass
def categorical_df_concat(df_list, inplace=False):
"""
Prepare list of pandas DataFrames to be used as input to pd.concat.
Ensure any columns of type 'category' have the same categories across each
dataframe.
Parameters
----------
df_list : list
List of dataframes with same columns.
inplace : bool
True if input list can be modified. Default is False.
Returns
-------
    concatenated : pd.DataFrame
        Concatenation of the input dataframes.
"""
if not inplace:
df_list = deepcopy(df_list)
# Assert each dataframe has the same columns/dtypes
df = df_list[0]
if not all([(df.dtypes.equals(df_i.dtypes)) for df_i in df_list[1:]]):
raise ValueError("Input DataFrames must have the same columns/dtypes.")
categorical_columns = df.columns[df.dtypes == 'category']
for col in categorical_columns:
new_categories = sorted(
set().union(
*(frame[col].cat.categories for frame in df_list)
)
)
with ignore_pandas_nan_categorical_warning():
for df in df_list:
df[col].cat.set_categories(new_categories, inplace=True)
return pd.concat(df_list)
def empty_dataframe(*columns):
"""Create an empty dataframe with columns of particular types.
Parameters
----------
*columns
The (column_name, column_dtype) pairs.
Returns
-------
typed_dataframe : pd.DataFrame
The empty typed dataframe.
Examples
--------
>>> df = empty_dataframe(
... ('a', 'int64'),
... ('b', 'float64'),
... ('c', 'datetime64[ns]'),
... )
>>> df
Empty DataFrame
Columns: [a, b, c]
Index: []
    >>> df.dtypes
a int64
b float64
c datetime64[ns]
dtype: object
"""
    return pd.DataFrame(np.array([], dtype=list(columns)))
# end of zipline/utils/pandas_utils.py
from errno import EEXIST
import os
from os.path import exists, expanduser, join
import pandas as pd
def hidden(path):
"""Check if a path is hidden.
Parameters
----------
path : str
A filepath.
"""
return os.path.split(path)[1].startswith('.')
def ensure_directory(path):
"""
Ensure that a directory named "path" exists.
"""
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == EEXIST and os.path.isdir(path):
return
raise
def ensure_directory_containing(path):
"""
Ensure that the directory containing `path` exists.
This is just a convenience wrapper for doing::
ensure_directory(os.path.dirname(path))
"""
ensure_directory(os.path.dirname(path))
def ensure_file(path):
"""
Ensure that a file exists. This will create any parent directories needed
and create an empty file if it does not exist.
Parameters
----------
path : str
The file path to ensure exists.
"""
ensure_directory_containing(path)
open(path, 'a+').close() # touch the file
def update_modified_time(path, times=None):
"""
    Updates the modified time of an existing file. This will create any
    parent directories needed, but the file itself must already exist
    (``os.utime`` raises if it does not).
Parameters
----------
path : str
The file path to update.
times : tuple
A tuple of size two; access time and modified time
"""
ensure_directory_containing(path)
os.utime(path, times)
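def _demo_touch():
    # Hypothetical sketch (the path below is illustrative only): create a
    # sentinel file, then bump its timestamps to "now".
    sentinel = '/tmp/zipline-demo/.sentinel'
    ensure_file(sentinel)           # creates parents and an empty file
    update_modified_time(sentinel)  # times=None means "use the current time"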
def last_modified_time(path):
"""
Get the last modified time of path as a Timestamp.
"""
return pd.Timestamp(os.path.getmtime(path), unit='s', tz='UTC')
def modified_since(path, dt):
"""
Check whether `path` was modified since `dt`.
Returns False if path doesn't exist.
Parameters
----------
path : str
Path to the file to be checked.
dt : pd.Timestamp
The date against which to compare last_modified_time(path).
Returns
-------
was_modified : bool
        Will be ``False`` if path doesn't exist, or if its last modified date
        is earlier than or equal to `dt`.
"""
return exists(path) and last_modified_time(path) > dt
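def _demo_modified_since():
    # Hypothetical usage sketch (the path below is illustrative only):
    # regenerate a cached artifact when it is missing or predates a cutoff.
    cutoff = pd.Timestamp('2018-01-01', tz='UTC')
    if not modified_since('/tmp/example-cache.msgpack', cutoff):
        pass  # file is absent or stale; rebuild it here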
def zipline_root(environ=None):
"""
Get the root directory for all zipline-managed files.
For testing purposes, this accepts a dictionary to interpret as the os
environment.
Parameters
----------
environ : dict, optional
A dict to interpret as the os environment.
Returns
-------
root : string
Path to the zipline root dir.
"""
if environ is None:
environ = os.environ
root = environ.get('ZIPLINE_ROOT', None)
if root is None:
root = expanduser('~/.zipline')
return root
def zipline_path(paths, environ=None):
"""
Get a path relative to the zipline root.
Parameters
----------
paths : list[str]
List of requested path pieces.
environ : dict, optional
An environment dict to forward to zipline_root.
Returns
-------
newpath : str
The requested path joined with the zipline root.
"""
return join(zipline_root(environ=environ), *paths)
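def _demo_zipline_paths():
    # Illustrative sketch, not part of the original module. The ``environ``
    # parameter lets callers redirect all zipline-managed files without
    # touching the real process environment. (Assumes POSIX path separators.)
    fake_env = {'ZIPLINE_ROOT': '/tmp/zipline-test'}
    assert zipline_root(environ=fake_env) == '/tmp/zipline-test'
    assert zipline_path(['data', 'bundles'], environ=fake_env) == (
        '/tmp/zipline-test/data/bundles'
    )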
def default_extension(environ=None):
"""
Get the path to the default zipline extension file.
Parameters
----------
environ : dict, optional
        An environment dict to forward to zipline_root.
Returns
-------
default_extension_path : str
The file path to the default zipline extension file.
"""
return zipline_path(['extension.py'], environ=environ)
def data_root(environ=None):
"""
The root directory for zipline data files.
Parameters
----------
environ : dict, optional
An environment dict to forward to zipline_root.
Returns
-------
data_root : str
The zipline data root.
"""
return zipline_path(['data'], environ=environ)
def ensure_data_root(environ=None):
"""
Ensure that the data root exists.
"""
ensure_directory(data_root(environ=environ))
def data_path(paths, environ=None):
"""
Get a path relative to the zipline data directory.
Parameters
----------
paths : iterable[str]
List of requested path pieces.
environ : dict, optional
An environment dict to forward to zipline_root.
Returns
-------
newpath : str
The requested path joined with the zipline data root.
"""
return zipline_path(['data'] + list(paths), environ=environ)
def cache_root(environ=None):
"""
The root directory for zipline cache files.
Parameters
----------
environ : dict, optional
An environment dict to forward to zipline_root.
Returns
-------
cache_root : str
The zipline cache root.
"""
return zipline_path(['cache'], environ=environ)
def ensure_cache_root(environ=None):
"""
    Ensure that the cache root exists.
"""
ensure_directory(cache_root(environ=environ))
def cache_path(paths, environ=None):
"""
Get a path relative to the zipline cache directory.
Parameters
----------
paths : iterable[str]
List of requested path pieces.
environ : dict, optional
An environment dict to forward to zipline_root.
Returns
-------
newpath : str
The requested path joined with the zipline cache root.
"""
    return zipline_path(['cache'] + list(paths), environ=environ)


# --- end of zipline/utils/paths.py ---
from collections import namedtuple
import inspect
from itertools import chain
from six.moves import map, zip_longest
from zipline.errors import ZiplineError
Argspec = namedtuple('Argspec', ['args', 'starargs', 'kwargs'])
def singleton(cls):
instances = {}
def getinstance():
if cls not in instances:
instances[cls] = cls()
return instances[cls]
return getinstance
@singleton
class Ignore(object):
def __str__(self):
return 'Argument.ignore'
__repr__ = __str__
@singleton
class NoDefault(object):
def __str__(self):
return 'Argument.no_default'
__repr__ = __str__
@singleton
class AnyDefault(object):
def __str__(self):
return 'Argument.any_default'
__repr__ = __str__
class Argument(namedtuple('Argument', ['name', 'default'])):
"""
An argument to a function.
Argument.no_default is a value representing no default to the argument.
Argument.ignore is a value that says you should ignore the default value.
"""
no_default = NoDefault()
any_default = AnyDefault()
ignore = Ignore()
def __new__(cls, name=ignore, default=ignore):
return super(Argument, cls).__new__(cls, name, default)
def __str__(self):
if self.has_no_default(self) or self.ignore_default(self):
return str(self.name)
else:
return '='.join([str(self.name), str(self.default)])
def __repr__(self):
return 'Argument(%s, %s)' % (repr(self.name), repr(self.default))
def _defaults_match(self, arg):
return any(map(Argument.ignore_default, [self, arg])) \
or (self.default is Argument.any_default and
arg.default is not Argument.no_default) \
or (arg.default is Argument.any_default and
self.default is not Argument.no_default) \
or self.default == arg.default
def _names_match(self, arg):
return self.name == arg.name \
or self.name is Argument.ignore \
or arg.name is Argument.ignore
def matches(self, arg):
return self._names_match(arg) and self._defaults_match(arg)
__eq__ = matches
@staticmethod
def parse_argspec(callable_):
"""
Takes a callable and returns a tuple with the list of Argument objects,
the name of *args, and the name of **kwargs.
If *args or **kwargs is not present, it will be None.
This returns a namedtuple called Argspec that has three fields named:
args, starargs, and kwargs.
"""
args, varargs, keywords, defaults = inspect.getargspec(callable_)
defaults = list(defaults or [])
if getattr(callable_, '__self__', None) is not None:
# This is a bound method, drop the self param.
args = args[1:]
first_default = len(args) - len(defaults)
return Argspec(
[Argument(arg, Argument.no_default
if n < first_default else defaults[n - first_default])
for n, arg in enumerate(args)],
varargs,
keywords,
)
@staticmethod
def has_no_default(arg):
return arg.default is Argument.no_default
@staticmethod
def ignore_default(arg):
return arg.default is Argument.ignore
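def _demo_parse_argspec():
    # Illustrative sketch, not part of the original module: parse_argspec
    # maps a signature onto Argument objects plus the names of *args and
    # **kwargs (None when absent).
    def f(a, b=1, *rest, **extra):
        pass

    spec = Argument.parse_argspec(f)
    assert spec.args == [Argument('a', Argument.no_default), Argument('b', 1)]
    assert spec.starargs == 'rest'
    assert spec.kwargs == 'extra'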
def _expect_extra(expected, present, exc_unexpected, exc_missing, exc_args):
"""
    Checks for the presence of an extra (*args or **kwargs) in the argument
    list. Raises an exception if the extra is present but unexpected, or
    expected but missing.
"""
if present:
if not expected:
raise exc_unexpected(*exc_args)
elif expected and expected is not Argument.ignore:
raise exc_missing(*exc_args)
def verify_callable_argspec(callable_,
expected_args=Argument.ignore,
expect_starargs=Argument.ignore,
expect_kwargs=Argument.ignore):
"""
Checks the callable_ to make sure that it satisfies the given
expectations.
expected_args should be an iterable of Arguments in the order you expect to
receive them.
    expect_starargs says whether the callable should take a *args param;
    expect_kwargs says whether it should take a **kwargs param.
    If expected_args, expect_starargs, or expect_kwargs is Argument.ignore,
    then the checks related to that argument will not occur.
    Example usage:
    verify_callable_argspec(
f,
[Argument('a'), Argument('b', 1)],
expect_starargs=True,
expect_kwargs=Argument.ignore
)
"""
if not callable(callable_):
raise NotCallable(callable_)
expected_arg_list = list(
expected_args if expected_args is not Argument.ignore else []
)
args, starargs, kwargs = Argument.parse_argspec(callable_)
exc_args = callable_, args, starargs, kwargs
# Check the *args.
_expect_extra(
expect_starargs,
starargs,
UnexpectedStarargs,
NoStarargs,
exc_args,
)
# Check the **kwargs.
_expect_extra(
expect_kwargs,
kwargs,
UnexpectedKwargs,
NoKwargs,
exc_args,
)
if expected_args is Argument.ignore:
# Ignore the argument list checks.
return
if len(args) < len(expected_arg_list):
        # One or more arguments that we expected were not present.
raise NotEnoughArguments(
callable_,
args,
starargs,
kwargs,
[arg for arg in expected_arg_list if arg not in args],
)
elif len(args) > len(expected_arg_list):
raise TooManyArguments(
callable_, args, starargs, kwargs
)
# Empty argument that will not match with any actual arguments.
missing_arg = Argument(object(), object())
for expected, provided in zip_longest(expected_arg_list,
args,
fillvalue=missing_arg):
if not expected.matches(provided):
raise MismatchedArguments(
callable_, args, starargs, kwargs
)
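def _demo_verify_callable_argspec():
    # Illustrative sketch, not part of the original module: validate a
    # callback that must accept exactly (context, data) and take no **kwargs.
    def handle_data(context, data):
        pass

    verify_callable_argspec(
        handle_data,
        expected_args=[Argument('context'), Argument('data')],
        expect_kwargs=False,
    )
    # A callback like ``lambda context: None`` would instead raise
    # NotEnoughArguments, one of the BadCallable subclasses defined below.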
class BadCallable(TypeError, AssertionError, ZiplineError):
"""
The given callable is not structured in the expected way.
"""
_lambda_name = (lambda: None).__name__
def __init__(self, callable_, args, starargs, kwargs):
self.callable_ = callable_
self.args = args
self.starargs = starargs
self.kwargsname = kwargs
self.kwargs = {}
def format_callable(self):
if self.callable_.__name__ == self._lambda_name:
fmt = '%s %s'
name = 'lambda'
else:
fmt = '%s(%s)'
name = self.callable_.__name__
return fmt % (
name,
', '.join(
chain(
(str(arg) for arg in self.args),
('*' + sa for sa in (self.starargs,) if sa is not None),
('**' + ka for ka in (self.kwargsname,) if ka is not None),
)
)
)
@property
def msg(self):
return str(self)
class NoStarargs(BadCallable):
def __str__(self):
return '%s does not allow for *args' % self.format_callable()
class UnexpectedStarargs(BadCallable):
def __str__(self):
return '%s should not allow for *args' % self.format_callable()
class NoKwargs(BadCallable):
def __str__(self):
return '%s does not allow for **kwargs' % self.format_callable()
class UnexpectedKwargs(BadCallable):
def __str__(self):
return '%s should not allow for **kwargs' % self.format_callable()
class NotCallable(BadCallable):
"""
The provided 'callable' is not actually a callable.
"""
def __init__(self, callable_):
self.callable_ = callable_
def __str__(self):
return '%s is not callable' % self.format_callable()
def format_callable(self):
try:
return self.callable_.__name__
except AttributeError:
return str(self.callable_)
class NotEnoughArguments(BadCallable):
"""
The callback does not accept enough arguments.
"""
def __init__(self, callable_, args, starargs, kwargs, missing_args):
super(NotEnoughArguments, self).__init__(
callable_, args, starargs, kwargs
)
self.missing_args = missing_args
def __str__(self):
missing_args = list(map(str, self.missing_args))
return '%s is missing argument%s: %s' % (
self.format_callable(),
's' if len(missing_args) > 1 else '',
', '.join(missing_args),
)
class TooManyArguments(BadCallable):
"""
The callback cannot be called by passing the expected number of arguments.
"""
def __str__(self):
return '%s accepts too many arguments' % self.format_callable()
class MismatchedArguments(BadCallable):
"""
The argument lists are of the same lengths, but not in the correct order.
"""
def __str__(self):
return '%s accepts mismatched parameters' % self.format_callable() | zipline-live2-vk | /zipline-live2-vk-1.3.0.7.0.18.tar.gz/zipline-live2-vk-1.3.0.7.0.18/zipline/utils/argcheck.py | argcheck.py |