from importlib import import_module
import os
from toolz import merge
from zipline.utils.calendar_utils import register_calendar, get_calendar
from zipline import run_algorithm
# These are used by test_examples.py to discover the examples to run.
def load_example_modules():
example_modules = {}
for f in os.listdir(os.path.dirname(__file__)):
if not f.endswith(".py") or f == "__init__.py" or f == "buyapple_ide.py":
continue
modname = f[: -len(".py")]
mod = import_module("." + modname, package=__name__)
example_modules[modname] = mod
globals()[modname] = mod
# Remove noise from loop variables.
del f, modname, mod
return example_modules
# Columns that we expect to be reliably deterministic.
# Doesn't include fields that contain UUIDs.
_cols_to_check = [
"algo_volatility",
"algorithm_period_return",
"alpha",
"benchmark_period_return",
"benchmark_volatility",
"beta",
"capital_used",
"ending_cash",
"ending_exposure",
"ending_value",
"excess_return",
"gross_leverage",
"long_exposure",
"long_value",
"longs_count",
"max_drawdown",
"max_leverage",
"net_leverage",
"period_close",
"period_label",
"period_open",
"pnl",
"portfolio_value",
"positions",
"returns",
"short_exposure",
"short_value",
"shorts_count",
"sortino",
"starting_cash",
"starting_exposure",
"starting_value",
"trading_days",
"treasury_period_return",
]
def run_example(example_modules, example_name, environ, benchmark_returns=None):
"""
Run an example module from zipline.examples.
"""
mod = example_modules[example_name]
register_calendar("YAHOO", get_calendar("NYSE"), force=True)
return run_algorithm(
initialize=getattr(mod, "initialize", None),
handle_data=getattr(mod, "handle_data", None),
before_trading_start=getattr(mod, "before_trading_start", None),
analyze=getattr(mod, "analyze", None),
bundle="test",
environ=environ,
benchmark_returns=benchmark_returns,
# Provide a default capital base, but allow the test to override.
**merge({"capital_base": 1e7}, mod._test_args()),
)

# ---- zipline-tej: zipline/examples/__init__.py ----
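# Usage sketch for load_example_modules / run_example above. run_example is
# hard-coded to the "test" bundle, so this assumes such a bundle has already
# been ingested; the example name must be one of the modules discovered by
# load_example_modules.
import os

examples = load_example_modules()
perf = run_example(examples, "dual_moving_average", environ=os.environ)
print(perf["portfolio_value"].tail())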
import os
from zipline.api import order_target, record, symbol
from zipline.finance import commission, slippage
def initialize(context):
context.sym = symbol("AAPL")
context.i = 0
# Explicitly set the commission/slippage to the "old" value until we can
# rebuild example data.
# github.com/quantopian/zipline/blob/master/tests/resources/
# rebuild_example_data#L105
context.set_commission(commission.PerShare(cost=0.0075, min_trade_cost=1.0))
context.set_slippage(slippage.VolumeShareSlippage())
def handle_data(context, data):
# Skip first 300 days to get full windows
context.i += 1
if context.i < 300:
return
# Compute averages
# data.history() returns a pandas Series of trailing prices for the
# requested window; take its mean to get each moving average.
short_mavg = data.history(context.sym, "price", 100, "1d").mean()
long_mavg = data.history(context.sym, "price", 300, "1d").mean()
# Trading logic
if short_mavg > long_mavg:
# order_target orders as many shares as needed to
# achieve the desired number of shares.
order_target(context.sym, 100)
elif short_mavg < long_mavg:
order_target(context.sym, 0)
# Save values for later inspection
record(
AAPL=data.current(context.sym, "price"),
short_mavg=short_mavg,
long_mavg=long_mavg,
)
# Note: this function can be removed if running
# this algorithm on quantopian.com
def analyze(context=None, results=None):
import matplotlib.pyplot as plt
import logbook
logbook.StderrHandler().push_application()
log = logbook.Logger("Algorithm")
fig = plt.figure()
ax1 = fig.add_subplot(211)
results.portfolio_value.plot(ax=ax1)
ax1.set_ylabel("Portfolio value (USD)")
ax2 = fig.add_subplot(212)
ax2.set_ylabel("Price (USD)")
# If data has been record()ed, then plot it.
# Otherwise, log the fact that no data has been recorded.
if "AAPL" in results and "short_mavg" in results and "long_mavg" in results:
results["AAPL"].plot(ax=ax2)
results[["short_mavg", "long_mavg"]].plot(ax=ax2)
trans = results[[t != [] for t in results.transactions]]
buys = trans[[t[0]["amount"] > 0 for t in trans.transactions]]
sells = trans[[t[0]["amount"] < 0 for t in trans.transactions]]
ax2.plot(
buys.index,
results.short_mavg.loc[buys.index],
"^",
markersize=10,
color="m",
)
ax2.plot(
sells.index,
results.short_mavg.loc[sells.index],
"v",
markersize=10,
color="k",
)
plt.legend(loc=0)
else:
msg = "AAPL, short_mavg & long_mavg data not captured using record()."
ax2.annotate(msg, xy=(0.1, 0.5))
log.info(msg)
plt.show()
if "PYTEST_CURRENT_TEST" in os.environ:
plt.close("all")
def _test_args():
"""Extra arguments to use when zipline's automated tests run this example."""
import pandas as pd
return {
"start": pd.Timestamp("2011", tz="utc"),
"end": pd.Timestamp("2013", tz="utc"),
}

# ---- zipline-tej: zipline/examples/dual_moving_average.py ----
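# Usage sketch for dual_moving_average above: run it end to end with
# zipline.run_algorithm. The bundle name ("quandl") is an assumption for
# illustration; any ingested daily bundle containing AAPL pricing would do.
# The date range mirrors _test_args above so the 300-day warm-up completes.
import pandas as pd
from zipline import run_algorithm
from zipline.examples import dual_moving_average as dma

perf = run_algorithm(
    start=pd.Timestamp("2011-01-03", tz="utc"),
    end=pd.Timestamp("2013-01-03", tz="utc"),
    initialize=dma.initialize,
    handle_data=dma.handle_data,
    analyze=dma.analyze,
    capital_base=1e7,
    bundle="quandl",          # assumed bundle name
    data_frequency="daily",
)
print(perf["portfolio_value"].tail())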
import pandas as pd
from zipline.errors import (
InvalidBenchmarkAsset,
BenchmarkAssetNotAvailableTooEarly,
BenchmarkAssetNotAvailableTooLate,
)
from pytz import timezone  # 20230420 (by MRC): convert times in error messages to the Taiwan timezone
tz=timezone('Asia/Taipei')  # 20230420 (by MRC): convert times in error messages to the Taiwan timezone
class BenchmarkSource(object):
def __init__(
self,
benchmark_asset,
trading_calendar,
sessions,
data_portal,
emission_rate="daily",
benchmark_returns=None,
):
self.benchmark_asset = benchmark_asset
self.sessions = sessions
self.emission_rate = emission_rate
self.data_portal = data_portal
if len(sessions) == 0:
self._precalculated_series = pd.Series()
elif benchmark_asset is not None:
self._validate_benchmark(benchmark_asset)
(
self._precalculated_series,
self._daily_returns,
) = self._initialize_precalculated_series(
benchmark_asset, trading_calendar, sessions, data_portal
)
elif benchmark_returns is not None:
self._daily_returns = daily_series = benchmark_returns.reindex(
sessions,
).fillna(0)
if self.emission_rate == "minute":
# we need to take the env's benchmark returns, which are daily,
# and resample them to minute
minutes = trading_calendar.minutes_for_sessions_in_range(
sessions[0], sessions[-1]
)
minute_series = daily_series.reindex(index=minutes, method="ffill")
self._precalculated_series = minute_series
else:
self._precalculated_series = daily_series
else:
raise Exception(
"Must provide either benchmark_asset or " "benchmark_returns."
)
def get_value(self, dt):
"""Look up the returns for a given dt.
Parameters
----------
dt : datetime
The label to look up.
Returns
-------
returns : float
The returns at the given dt or session.
See Also
--------
:class:`zipline.sources.benchmark_source.BenchmarkSource.daily_returns`
.. warning::
This method expects minute inputs if ``emission_rate == 'minute'``
and session labels when ``emission_rate == 'daily'``.
"""
return self._precalculated_series.loc[dt]
def get_range(self, start_dt, end_dt):
"""Look up the returns for a given period.
Parameters
----------
start_dt : datetime
The inclusive start label.
end_dt : datetime
The inclusive end label.
Returns
-------
returns : pd.Series
The series of returns.
See Also
--------
:class:`zipline.sources.benchmark_source.BenchmarkSource.daily_returns`
.. warning::
This method expects minute inputs if ``emission_rate == 'minute'``
and session labels when ``emission_rate == 'daily'``.
"""
return self._precalculated_series.loc[start_dt:end_dt]
def daily_returns(self, start, end=None):
"""Returns the daily returns for the given period.
Parameters
----------
start : datetime
The inclusive starting session label.
end : datetime, optional
The inclusive ending session label. If not provided, treat
``start`` as a scalar key.
Returns
-------
returns : pd.Series or float
The returns in the given period. The index will be the trading
calendar in the range [start, end]. If just ``start`` is provided,
return the scalar value on that day.
"""
if end is None:
return self._daily_returns[start]
return self._daily_returns[start:end]
def _validate_benchmark(self, benchmark_asset):
# check if this security has a stock dividend. if so, raise an
# error suggesting that the user pick a different asset to use
# as benchmark.
stock_dividends = self.data_portal.get_stock_dividends(
self.benchmark_asset, self.sessions
)
if len(stock_dividends) > 0:
raise InvalidBenchmarkAsset(
sid=str(self.benchmark_asset), dt=stock_dividends[0]["ex_date"]
)
if benchmark_asset.start_date > self.sessions[0]:
# the asset started trading after the first simulation day
raise BenchmarkAssetNotAvailableTooEarly(
sid=str(self.benchmark_asset),
dt=self.sessions[0],
start_dt=benchmark_asset.start_date,
)
if benchmark_asset.end_date < self.sessions[-1]:
# the asset stopped trading before the last simulation day
raise BenchmarkAssetNotAvailableTooLate(
sid=str(self.benchmark_asset),
dt=self.sessions[-1],
#dt=self.sessions[-1].astimezone(tz),
end_dt=benchmark_asset.end_date, # 20230420 (by MRC): convert to the Taiwan timezone
#end_dt=benchmark_asset.end_date.astimezone(tz), # 20230420 (by MRC): convert to the Taiwan timezone
)
@staticmethod
def _compute_daily_returns(g):
return (g[-1] - g[0]) / g[0]
@classmethod
def downsample_minute_return_series(cls, trading_calendar, minutely_returns):
sessions = trading_calendar.minute_index_to_session_labels(
minutely_returns.index,
)
closes = trading_calendar.session_closes_in_range(
sessions[0],
sessions[-1],
)
daily_returns = minutely_returns[closes].pct_change()
daily_returns.index = closes.index
return daily_returns.iloc[1:]
def _initialize_precalculated_series(
self, asset, trading_calendar, trading_days, data_portal
):
"""
Internal method that pre-calculates the benchmark return series for
use in the simulation.
Parameters
----------
asset: Asset to use
trading_calendar: TradingCalendar
trading_days: pd.DateTimeIndex
data_portal: DataPortal
Notes
-----
If the benchmark asset started trading after the simulation start,
or finished trading before the simulation end, exceptions are raised.
If the benchmark asset started trading the same day as the simulation
start, the first available minute price on that day is used instead
of the previous close.
We use history to get an adjusted price history for each day's close,
as of the look-back date (the last day of the simulation). Prices are
fully adjusted for dividends, splits, and mergers.
Returns
-------
returns : pd.Series
indexed by trading day, whose values represent the %
change from close to close.
daily_returns : pd.Series
the partial daily returns for each minute
"""
if self.emission_rate == "minute":
minutes = trading_calendar.minutes_for_sessions_in_range(
self.sessions[0], self.sessions[-1]
)
benchmark_series = data_portal.get_history_window(
[asset],
minutes[-1],
bar_count=len(minutes) + 1,
frequency="1m",
field="price",
data_frequency=self.emission_rate,
ffill=True,
)[asset]
return (
benchmark_series.pct_change()[1:],
self.downsample_minute_return_series(
trading_calendar,
benchmark_series,
),
)
start_date = asset.start_date
if start_date < trading_days[0]:
# get the window of close prices for benchmark_asset from the
# last trading day of the simulation, going up to one day
# before the simulation start day (so that we can get the %
# change on day 1)
benchmark_series = data_portal.get_history_window(
[asset],
trading_days[-1],
bar_count=len(trading_days) + 1,
frequency="1d",
field="price",
data_frequency=self.emission_rate,
ffill=True,
)[asset]
returns = benchmark_series.pct_change()[1:]
return returns, returns
elif start_date == trading_days[0]:
# Attempt to handle case where stock data starts on first
# day, in this case use the open to close return.
benchmark_series = data_portal.get_history_window(
[asset],
trading_days[-1],
bar_count=len(trading_days),
frequency="1d",
field="price",
data_frequency=self.emission_rate,
ffill=True,
)[asset]
# get a minute history window of the first day
first_open = data_portal.get_spot_value(
asset,
"open",
trading_days[0],
"daily",
)
first_close = data_portal.get_spot_value(
asset,
"close",
trading_days[0],
"daily",
)
first_day_return = (first_close - first_open) / first_open
returns = benchmark_series.pct_change()[:]
returns[0] = first_day_return
return returns, returns
else:
raise ValueError(
"cannot set benchmark to asset that does not exist during"
" the simulation period (asset start date=%r)" % start_date
)

# ---- zipline-tej: zipline/sources/benchmark_source.py ----
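# Usage sketch for BenchmarkSource above, exercising the precomputed
# ``benchmark_returns`` path. On that path (with daily emission) neither the
# trading calendar nor the data portal is touched, so a plain business-day
# index and a flat 0.1% daily return stand in here purely for illustration.
import pandas as pd
from zipline.sources.benchmark_source import BenchmarkSource

sessions = pd.date_range("2013-01-02", "2013-01-31", freq="B", tz="utc")
returns = pd.Series(0.001, index=sessions)

source = BenchmarkSource(
    benchmark_asset=None,
    trading_calendar=None,   # unused for daily emission on this path
    sessions=sessions,
    data_portal=None,        # unused on this path
    emission_rate="daily",
    benchmark_returns=returns,
)
print(source.get_value(sessions[0]))
print(source.daily_returns(sessions[0], sessions[-1]).head())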
import TejToolAPI
import zipline.algorithm as algo
from zipline.utils.calendar_utils import get_calendar, TradingCalendar
from zipline.utils.input_validation import expect_types
from zipline.errors import (MultipleSymbolsFound,
SymbolNotFound,
ZiplineError,
IllegalValueException,
EmptyOutputException
)
import zipline.pipeline.domain as domain
from zipline.pipeline.pipeline import Pipeline
from zipline.pipeline.data import tejquant, EquityPricing
from zipline.pipeline.loaders.frame import DataFrameLoader
from zipline.pipeline.loaders import EquityPricingLoader
from zipline.pipeline.engine import SimplePipelineEngine
from zipline.data import bundles
import os
import pandas as pd
import numpy
import tejapi
import datetime
import pytz
import warnings  # used by PandasRequestsTEJ_API.load_df below
from logbook import Logger
logger = Logger("Requests Source Logger(TEJ API)")
tejapi.ApiConfig.page_limit=10000
try :
tejapi.ApiConfig.api_base = os.environ['TEJAPI_BASE']
except :
raise KeyError("Can't load TEJAPI_BASE to get dynamic calendar information. Please set your TEJAPI_BASE. os.environ['TEJAPI_BASE'] = <YOUR_BASE>")
try :
tejapi.ApiConfig.api_key = os.environ['TEJAPI_KEY']
except :
raise KeyError("Can't load TEJAPI_KEY to get dynamic calendar information. Please check your TEJAPI_KEY. os.environ['TEJAPI_KEY'] = <YOUR_KEY>")
# Map from liquidity risk management rules to columns name in tej tool api.
# See also:func:`algorithm.set_liquidity_risk_management_rule`.
LIQUIDITY_RISK_COLUMNS = {'Full_Delivery':'Full_Delivery_Stock_Fg',
'Attention':'Attention_Stock_Fg',
'Disposition':'Disposition_Stock_Fg',
#'Suspended':'Suspended_Trading_Stock_Fg',
'Limit_Up_or_Down_in_Opening':'Limit_Up_or_Down_in_Opening_Fg',
'Suspension_of_Buy_After_Day_Trading':'Suspension_of_Buy_After_Day_Trading_Fg'
}
# Map bundle name to DataSet and calendar_name.
# When adding a new bundle, add a corresponding entry to BUNDLE_INFO.
BUNDLE_INFO = {'tquant': {'bundle_name':'tquant',
'calendar_name':'TEJ_XTAI',
'DataSet':tejquant.TQDataSet}
}
# get_history_data: same as TejToolAPI.get_history_data, except transfer_to_chinese is fixed to False.
def get_history_data(ticker,
columns,
start=None,
end=None,
fin_type=None,
include_self_acc=None,
**kwargs
):
try:
df = TejToolAPI.get_history_data(ticker = ticker,
columns = columns,
start = start,
end = end,
transfer_to_chinese = False,
fin_type = fin_type,
include_self_acc = include_self_acc,
**kwargs)
df = df.reset_index(drop=True)
except Exception as e:
raise ValueError(f'Error occurred while downloading data via TejToolAPI.get_history_data: {e}')
return df
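# Usage sketch for the get_history_data wrapper above. It requires TEJAPI_BASE
# and TEJAPI_KEY to be set (see the module-level checks above); the tickers,
# column names and date range below are assumptions whose valid values depend
# on your TEJ subscription and the TejToolAPI column catalogue.
sample = get_history_data(
    ticker=["2330", "2317"],
    columns=["Open", "Close"],   # assumed column names
    start="2020-01-01",
    end="2020-12-31",
)
print(sample.head())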
#################
### pipeline ###
#################
@expect_types(df=pd.DataFrame,
bundle_name=(str, type(None)))
def to_dict(df, bundle_name='tquant'):
bundle = bundles.load(bundle_name)
dfs = {}
df['coid'] = df['coid'].astype(str)
df = df.set_index(['coid', 'mdate'])
for i in df.columns:
symbols = df.index.get_level_values(0).unique().tolist()
assets = bundle.asset_finder.lookup_symbols(symbols, as_of_date=None)
sids = pd.Int64Index([asset.sid for asset in assets])
symbol_map = dict(zip(symbols, sids))
dfs[i] = (df[i]
.unstack('coid')
.rename(columns=symbol_map)
.tz_localize('UTC')
)
return dfs
@expect_types(dfs=dict,
bundle_name=(str, type(None))
)
def create_custom_loader(dfs, bundle_name='tquant'):
custom_loader = {}
column_names = []
bundle = bundles.load(bundle_name)
dataset = BUNDLE_INFO.get(bundle_name).get('DataSet')
for i in dataset.columns:
if i.name in list(dfs.keys()):
custom_loader.update({i:DataFrameLoader(i,dfs[i.name])})
column_names.append(i.name)
logger.info(
"\n `DataSet` used: {data},\n `Column` used: {Column},\n column names: {column_names}",
data=str(dataset),
Column=list(custom_loader.keys()),
column_names=column_names,
)
return custom_loader
@expect_types(bundle_name=(str, type(None)),
#to_db=type(None),
#db=type(None)
)
def create_custom_loader_for_algo(ticker,
columns,
start=None,
end=None,
fin_type=None,
include_self_acc=None,
bundle_name='tquant'):
#to_db=None, # TODO: if the db does not exist, create it
#db=None): # TODO: read from the db first; fall back to get_history_data() on failure
raw = get_history_data(ticker,
columns,
start,
end,
fin_type,
include_self_acc
)
dfs = to_dict(raw, bundle_name)
custom_loader = create_custom_loader(dfs, bundle_name)
return raw, custom_loader
@expect_types(custom_loader=dict,
bundle_name=(str, type(None)),
pipeline=Pipeline
)
def run_pipeline(custom_loader, pipeline, start_date, end_date, bundle_name='tquant'):
bundle = bundles.load(bundle_name)
dataset = BUNDLE_INFO.get(bundle_name).get('DataSet')
calendar_name = BUNDLE_INFO.get(bundle_name).get('calendar_name')
pricing_loader = EquityPricingLoader.without_fx(bundle.equity_daily_bar_reader,
bundle.adjustment_reader)
def choose_loader(column):
if column.name in EquityPricing._column_names:
return pricing_loader
elif column.name in dataset._column_names:
return custom_loader[column]
else:
raise Exception('Column: {} not available'.format(str(column.name)))
engine = SimplePipelineEngine(get_loader = choose_loader,
asset_finder = bundle.asset_finder,
default_domain = algo._DEFAULT_DOMAINS.get(calendar_name))
pipeline_output = engine.run_pipeline(pipeline, start_date, end_date)
return pipeline_output
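# Workflow sketch tying the pipeline helpers above together: download raw
# data, build the custom DataFrameLoaders, and run a Pipeline against the
# "tquant" bundle. The tickers, the column name and the dates are assumptions
# for illustration; the column must exist both in the TejToolAPI output and
# on tejquant.TQDataSet for the loader matching above to pick it up.
import pandas as pd
from zipline.pipeline import Pipeline
from zipline.pipeline.data import tejquant

raw, loaders = create_custom_loader_for_algo(
    ticker=["2330", "2317"],
    columns=["Market_Cap_Dollars"],          # assumed column name
    start="2020-01-01",
    end="2020-12-31",
    bundle_name="tquant",
)
pipe = Pipeline(
    columns={"mkt_cap": tejquant.TQDataSet.Market_Cap_Dollars.latest}  # assumed column
)
result = run_pipeline(
    loaders,
    pipe,
    start_date=pd.Timestamp("2020-06-01", tz="utc"),
    end_date=pd.Timestamp("2020-06-30", tz="utc"),
    bundle_name="tquant",
)
print(result.head())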
#####################
### FETCH_TEJ_API ###
#####################
class PandasRequestsTEJ_API(object):
'''
Add a data source to the data_portal for use by run_algorithm() during backtesting.
See also: zipline.algorithm
'''
def __init__(
self,
symbol_column,
date_column,
date_format,
trading_day,
asset_finder,
columns,
symbols,
timezone,
start,
end,
fin_type,
include_self_acc,
import_data,
country_code,
pre_func,
post_func,
data_frequency,
**kwargs,
):
self.timezone = timezone
self.symbol_column = symbol_column or "coid"
self.date_column = date_column or "mdate"
self.data_frequency = data_frequency
self.country_code = country_code
self.date_format = None
#self.trading_calendar=trading_calendar
self.finder = asset_finder
self.trading_day = trading_day
#self.trading_day=self.trading_calendar.day
self.start = start
self.end = end
self.columns = columns
self.symbols = symbols
self.fin_type = fin_type
self.include_self_acc = include_self_acc
self.pre_func = pre_func
self.post_func = post_func
self.import_data=import_data
#self.df = self.load_df()
def get_history_data(self):
df = get_history_data(ticker = self.symbols,
columns = self.columns,
start = self.start,
end = self.end,
fin_type = self.fin_type,
include_self_acc = self.include_self_acc)
if len(df)==0:
raise EmptyOutputException(function = '"PandasRequestsTEJ_API.get_history_data"',
dtype = '"dataframe"')
return df
# Timezone handling; may need revisiting.
def roll_dts_to_midnight(self, dts, trading_day):
if len(dts) == 0:
return dts
return (
pd.DatetimeIndex(
(dts.tz_convert("US/Eastern") - pd.Timedelta(hours=16)).date,
tz="UTC",
)
+ trading_day
)
def parse_date_str_series(
self, format_str, tz, date_str_series, data_frequency, trading_day
):
"""
Efficient parsing for a 1d Pandas/numpy object containing string
representations of dates.
Note: pd.to_datetime is significantly faster when no format string is
passed, and in pandas 0.12.0 the %p strptime directive is not correctly
handled if a format string is explicitly passed, but AM/PM is handled
properly if format=None.
Moreover, we were previously ignoring this parameter unintentionally
because we were incorrectly passing it as a positional. For all these
reasons, we ignore the format_str parameter when parsing datetimes.
"""
# Explicitly ignoring this parameter. See note above.
if format_str is not None:
logger.warn(
"The 'format_str' parameter to fetch_csv is deprecated. "
"Ignoring and defaulting to pandas default date parsing."
)
format_str = None
tz_str = str(tz)
if tz_str == pytz.utc.zone:
parsed = pd.to_datetime(
date_str_series.values,
format=format_str,
utc=True,
errors="coerce",
)
else:
parsed = (
pd.to_datetime(
date_str_series.values,
format=format_str,
errors="coerce",
)
.tz_localize(tz_str)
.tz_convert("UTC")
)
if data_frequency == "daily":
parsed = self.roll_dts_to_midnight(parsed, trading_day)
return parsed
def _lookup_unconflicted_symbol(self, symbol):
"""
Attempt to find a unique asset whose symbol is the given string.
If multiple assets have held the given symbol, return a 0.
If no asset has held the given symbol, return a NaN.
"""
try:
uppered = symbol.upper()
except AttributeError:
# The mapping fails because symbol was a non-string
return numpy.nan
try:
return self.finder.lookup_symbol(
uppered,
as_of_date=None,
country_code=self.country_code,
)
except MultipleSymbolsFound:
# Fill conflicted entries with zeros to mark that they need to be
# resolved by date.
return 0
except SymbolNotFound:
# Fill not found entries with nans.
return numpy.nan
def load_df(self):
if self.import_data is None:
df = self.get_history_data()
df = df.sort_values(by=['mdate','coid'])
elif self.import_data is not None:
df = self.import_data
else:
pass
if self.pre_func:
df.reset_index(drop=True,inplace=True)
df = self.pre_func(df)
# Required: otherwise df.iloc[row_idx, df.columns.get_loc("sid")] = asset below can assign the wrong sid.
df.reset_index(drop=True,inplace=True)
# Batch-convert the user-specified date column into timestamps.
df["dt"] = self.parse_date_str_series(
self.date_format,
self.timezone,
df[self.date_column],
self.data_frequency,
self.trading_day,
).values
# ignore rows whose dates we couldn't parse
df = df[df["dt"].notnull()]
# if self.symbol is not None:
# df["sid"] = self.symbol
if self.finder:
df.sort_values(by=self.symbol_column, inplace=True)
# Pop the 'sid' column off of the DataFrame, just in case the user
# has assigned it, and throw a warning
try:
df.pop("sid")
warnings.warn(
"Assignment of the 'sid' column of a DataFrame is "
"not supported by Fetcher. The 'sid' column has been "
"overwritten.",
category=UserWarning,
stacklevel=2,
)
except KeyError:
# There was no 'sid' column, so no warning is necessary
pass
# Fill entries for any symbols that don't require a date to
# uniquely identify. Entries for which multiple securities exist
# are replaced with zeroes, while entries for which no asset
# exists are replaced with NaNs.
unique_symbols = df[self.symbol_column].unique()
sid_series = pd.Series(
data=map(self._lookup_unconflicted_symbol, unique_symbols),
index=unique_symbols,
name="sid",
)
df = df.join(sid_series, on=self.symbol_column)
# Fill any zero entries left in our sid column by doing a lookup
# using both symbol and the row date.
conflict_rows = df[df["sid"] == 0]
for row_idx, row in conflict_rows.iterrows():
try:
asset = (
self.finder.lookup_symbol(
row[self.symbol_column],
# Replacing tzinfo here is necessary because of the
# timezone metadata bug described below.
row["dt"].replace(tzinfo=pytz.utc),
country_code=self.country_code,
# It's possible that no asset comes back here if our
# lookup date is from before any asset held the
# requested symbol. Mark such cases as NaN so that
# they get dropped in the next step.
)
or numpy.nan
)
except SymbolNotFound:
asset = numpy.nan
# Assign the resolved asset to the cell
df.iloc[row_idx, df.columns.get_loc("sid")] = asset
# Filter out rows containing symbols that we failed to find.
length_before_drop = len(df)
df = df[df["sid"].notnull()]
no_sid_count = length_before_drop - len(df)
if no_sid_count:
logger.warn(
"Dropped {} rows from fetched tej api.".format(no_sid_count),
no_sid_count,
extra={"syslog": True},
)
else:
pass
# Dates are localized to UTC when they come out of
# parse_date_str_series, but we need to re-localize them here because
# of a bug that wasn't fixed until
# https://github.com/pydata/pandas/pull/7092.
# We should be able to remove the call to tz_localize once we're on
# pandas 0.14.0
# We don't set 'dt' as the index until here because the Symbol parsing
# operations above depend on having a unique index for the dataframe,
# and the 'dt' column can contain multiple dates for the same entry.
df.drop_duplicates(["sid", "dt"])
df.set_index(["dt"], inplace=True)
df = df.tz_localize("UTC")
df.sort_index(inplace=True)
cols_to_drop = [self.date_column]
cols_to_drop.append(self.symbol_column)
df = df[df.columns.drop(cols_to_drop)]
if self.post_func:
df = self.post_func(df)
return df
#################
### Benchmark ###
#################
@expect_types(symbol=(str, type(None)))
def get_Benchmark_Return(start,
end,
symbol='IR0001'):
'''
Used to compute benchmark_period_return, benchmark_volatility, etc.
Returns
------------
Series
'''
# TODO: check that symbol is a return index (i.e. the first two characters of coid are 'IR').
TEJ_Api_data_source=PandasRequestsTEJ_API(
symbol_column = None,
date_column = None,
date_format = None,
trading_day = None,
asset_finder= None,
columns = ['roi'], # ['coid','mdate','roi']
symbols = [symbol],
timezone = None,
start = start,
end = end,
fin_type = None,
include_self_acc = None,
import_data = None,
country_code = None,
data_frequency = None,
pre_func = None,
post_func = None)
df = TEJ_Api_data_source.get_history_data()
if len(df)==0:
raise EmptyOutputException(function = '"get_Benchmark_Return"',
dtype = '"dataframe"')
# Convert the rate from percent to a decimal fraction.
df.iloc[:,2] = df.iloc[:,2] / 100 # use iloc in case the column names change
ser = pd.Series(data = df.iloc[:,2].values, index = df['mdate']) # use iloc in case the column names change
# Reference: zipline/data/benchmarks.py
if not ser.index.tz:
ser = ser.tz_localize("utc")
return ser.sort_index(ascending=True)
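# Usage sketch for get_Benchmark_Return above: fetch the IR0001 return index
# as the benchmark series (TEJAPI_BASE/TEJAPI_KEY must be configured). The
# date range is an assumption for illustration.
benchmark = get_Benchmark_Return(start="2020-01-01", end="2020-12-31", symbol="IR0001")
print(benchmark.head())
# Typically passed on to run_algorithm(..., benchmark_returns=benchmark).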
#################
### Treasury ###
#################
def Treasury_Return_TW(start,
end,
rate_type,
term,
symbol = None):
'''
TW Treasury: used to compute the excess return, etc.
Returns
------------
Series
'''
valid_rate_type = ['Time_Deposit_Rate','TAIBOR','Gov_Bond']
Time_Deposit_Rate_terms = {'1m':'fld005',
'3m':'fld006',
'6m':'fld007',
'9m':'fld008',
'1y':'fld009'}
Benchmark_Gov_Bond_terms = {#'0y':'TY00',
'2y':'TY02',
'5y':'TY05',
'10y':'TY10',
'12y':'TY12',
'20y':'TY20',
'30y':'TY30'}
TAIBOR_terms = {'1w':'tw1',
'2w':'tw2',
'1m':'tm1',
'2m':'tm2',
'3m':'tm3',
'6m':'tm6',
'9m':'tm9',
'1y':'tm12'}
Valid_Time_Deposit_Rate = {'5844':'第一銀行'}
if rate_type not in valid_rate_type:
raise IllegalValueException(parameter = '"rate_type"',
value = valid_rate_type)
elif rate_type=='Time_Deposit_Rate' and term not in Time_Deposit_Rate_terms.keys():
raise IllegalValueException(parameter = '"terms"',
value = str(list(set(Time_Deposit_Rate_terms.keys()))))
elif rate_type=='Gov_Bond' and term not in Benchmark_Gov_Bond_terms.keys():
raise IllegalValueException(parameter = '"terms"',
value = str(list(set(Benchmark_Gov_Bond_terms.keys()))))
elif rate_type=='TAIBOR' and term not in TAIBOR_terms.keys():
raise IllegalValueException(parameter = '"terms"',
value = str(list(set(TAIBOR_terms.keys()))))
else:
pass
# Bank time-deposit rates
if rate_type=='Time_Deposit_Rate':
if symbol not in Valid_Time_Deposit_Rate:
raise IllegalValueException(parameter = '"symbol"',
value = str(list(set(Valid_Time_Deposit_Rate.keys()))))
df = tejapi.get('TWN/ARATE',
coid = '5844',
opts = {'columns':['coid','mdate',Time_Deposit_Rate_terms[term]]},
mdate = {'gte':start,'lte':end},
paginate = True)
# TAIBOR
elif rate_type=='TAIBOR':
df = tejapi.get('GLOBAL/WIBOR1',
coid = 'Z9999',
opts = {'columns':['coid','mdate',TAIBOR_terms[term]]},
mdate = {'gte':start,'lte':end},
paginate = True)
# Benchmark government bond yields
elif rate_type=='Gov_Bond':
df = tejapi.get('TWN/AGBD8A',
coid = Benchmark_Gov_Bond_terms[term],
opts = {'columns':['coid','mdate','yield']},
mdate = {'gte':start,'lte':end},
paginate = True)
return df
@expect_types(
rate_type=str,
term=str,
symbol=(str, type(None)) # can be None
)
def get_Treasury_Return(start,
end,
rate_type,
term,
symbol = None,
trading_calendar = get_calendar('TEJ_XTAI')):
'''
Used to compute the excess return, etc.
Returns
------------
Series
'''
country_code = algo._DEFAULT_FETCH_CSV_COUNTRY_CODES.get(trading_calendar.name)
'''
# query country_code
# see also : zipline.country
from iso3166 import countries_by_name
name = "TAIWAN, PROVINCE OF CHINA"
print(countries_by_name[name].alpha2)
'''
TREASURY_FUNC_NAMES = {
'TW': Treasury_Return_TW
}
if country_code not in TREASURY_FUNC_NAMES.keys():
raise IllegalValueException(parameter = '"country_code"',
value = str(list(set(TREASURY_FUNC_NAMES.keys()))))
try:
df = TREASURY_FUNC_NAMES[country_code](start = start,
end = end,
rate_type = rate_type,
term = term,
symbol = symbol)
except Exception as e:
raise ValueError(f'Error occurred while downloading data via get_history_data(): {e}')
if len(df)==0:
raise EmptyOutputException(function = '"get_Treasury_Return"',
dtype = '"dataframe"')
# Convert the annual rate from percent to a decimal, then compound it down to a daily rate (assuming 252 trading days).
df.iloc[:,2] = df.iloc[:,2].apply(lambda x: pow((1 + x / 100), (1 / 252)) - 1)
ser = pd.Series(data = df.iloc[:,2].values, index = df['mdate'])
#See also:zipline.data.benchmarks
if not ser.index.tz:
ser = ser.tz_localize("utc")
return ser.sort_index(ascending=True)

# ---- zipline-tej: zipline/sources/TEJ_Api_Data.py ----
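# Usage sketch for get_Treasury_Return above: a one-year bank time-deposit
# rate as the risk-free series (TEJAPI_BASE/TEJAPI_KEY must be configured).
# The date range is an assumption; valid (rate_type, term, symbol)
# combinations are the ones enumerated in Treasury_Return_TW above.
risk_free = get_Treasury_Return(
    start="2020-01-01",
    end="2020-12-31",
    rate_type="Time_Deposit_Rate",
    term="1y",
    symbol="5844",
)
print(risk_free.head())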
import pandas as pd
from zipline.errors import (
InvalidTreasuryAsset,
TreasuryAssetNotAvailableTooEarly,
TreasuryAssetNotAvailableTooLate,
)
class TreasurySource(object):
def __init__(
self,
treasury_asset,
trading_calendar,
sessions,
data_portal,
emission_rate="daily",
treasury_returns=None,
):
self.treasury_asset = treasury_asset
self.sessions = sessions
self.emission_rate = emission_rate
self.data_portal = data_portal
if len(sessions) == 0:
self._precalculated_series = pd.Series()
# currently unused
elif treasury_asset is not None:
self._validate_treasury(treasury_asset)
(
self._precalculated_series,
self._daily_returns,
) = self._initialize_precalculated_series(
treasury_asset, trading_calendar, sessions, data_portal
)
elif treasury_returns is not None:
self._daily_returns = daily_series = treasury_returns.reindex(
sessions,
).fillna(0)
if self.emission_rate == "minute":
# we need to take the env's treasury returns, which are daily,
# and resample them to minute
minutes = trading_calendar.minutes_for_sessions_in_range(
sessions[0], sessions[-1]
)
minute_series = daily_series.reindex(index=minutes, method="ffill")
self._precalculated_series = minute_series
else:
self._precalculated_series = daily_series
else:
raise Exception(
"Must provide either treasury_asset or " "treasury_returns."
)
def get_value(self, dt):
"""Look up the returns for a given dt.
Parameters
----------
dt : datetime
The label to look up.
Returns
-------
returns : float
The returns at the given dt or session.
See Also
--------
:class:`zipline.sources.treasury_source.TreasurySource.daily_returns`
.. warning::
This method expects minute inputs if ``emission_rate == 'minute'``
and session labels when ``emission_rate == 'daily'``.
"""
return self._precalculated_series.loc[dt]
def get_range(self, start_dt, end_dt):
"""Look up the returns for a given period.
Parameters
----------
start_dt : datetime
The inclusive start label.
end_dt : datetime
The inclusive end label.
Returns
-------
returns : pd.Series
The series of returns.
See Also
--------
:class:`zipline.sources.treasury_source.TreasurySource.daily_returns`
.. warning::
This method expects minute inputs if ``emission_rate == 'minute'``
and session labels when ``emission_rate == 'daily'``.
"""
return self._precalculated_series.loc[start_dt:end_dt]
def daily_returns(self, start, end=None):
"""Returns the daily returns for the given period.
Parameters
----------
start : datetime
The inclusive starting session label.
end : datetime, optional
The inclusive ending session label. If not provided, treat
``start`` as a scalar key.
Returns
-------
returns : pd.Series or float
The returns in the given period. The index will be the trading
calendar in the range [start, end]. If just ``start`` is provided,
return the scalar value on that day.
"""
if end is None:
return self._daily_returns[start]
return self._daily_returns[start:end]
def _validate_treasury(self, treasury_asset):
# check if this security has a stock dividend. if so, raise an
# error suggesting that the user pick a different asset to use
# as treasury.
stock_dividends = self.data_portal.get_stock_dividends(
self.treasury_asset, self.sessions
)
if len(stock_dividends) > 0:
raise InvalidTreasuryAsset(
sid=str(self.treasury_asset), dt=stock_dividends[0]["ex_date"]
)
if treasury_asset.start_date > self.sessions[0]:
# the asset started trading after the first simulation day
raise TreasuryAssetNotAvailableTooEarly(
sid=str(self.treasury_asset),
dt=self.sessions[0],
start_dt=treasury_asset.start_date,
)
if treasury_asset.end_date < self.sessions[-1]:
# the asset stopped trading before the last simulation day
raise TreasuryAssetNotAvailableTooLate(
sid=str(self.treasury_asset),
dt=self.sessions[-1],
end_dt=treasury_asset.end_date,
)
@staticmethod
def _compute_daily_returns(g):
return (g[-1] - g[0]) / g[0]
@classmethod
def downsample_minute_return_series(cls, trading_calendar, minutely_returns):
sessions = trading_calendar.minute_index_to_session_labels(
minutely_returns.index,
)
closes = trading_calendar.session_closes_in_range(
sessions[0],
sessions[-1],
)
daily_returns = minutely_returns[closes].pct_change()
daily_returns.index = closes.index
return daily_returns.iloc[1:]
def _initialize_precalculated_series(
self, asset, trading_calendar, trading_days, data_portal
):
"""
Internal method that pre-calculates the treasury return series for
use in the simulation.
Parameters
----------
asset: Asset to use
trading_calendar: TradingCalendar
trading_days: pd.DateTimeIndex
data_portal: DataPortal
Notes
-----
If the treasury asset started trading after the simulation start,
or finished trading before the simulation end, exceptions are raised.
If the treasury asset started trading the same day as the simulation
start, the first available minute price on that day is used instead
of the previous close.
We use history to get an adjusted price history for each day's close,
as of the look-back date (the last day of the simulation). Prices are
fully adjusted for dividends, splits, and mergers.
Returns
-------
returns : pd.Series
indexed by trading day, whose values represent the %
change from close to close.
daily_returns : pd.Series
the partial daily returns for each minute
"""
if self.emission_rate == "minute":
minutes = trading_calendar.minutes_for_sessions_in_range(
self.sessions[0], self.sessions[-1]
)
treasury_series = data_portal.get_history_window(
[asset],
minutes[-1],
bar_count=len(minutes) + 1,
frequency="1m",
field="price",
data_frequency=self.emission_rate,
ffill=True,
)[asset]
return (
treasury_series.pct_change()[1:],
self.downsample_minute_return_series(
trading_calendar,
treasury_series,
),
)
start_date = asset.start_date
if start_date < trading_days[0]:
# get the window of close prices for treasury_asset from the
# last trading day of the simulation, going up to one day
# before the simulation start day (so that we can get the %
# change on day 1)
treasury_series = data_portal.get_history_window(
[asset],
trading_days[-1],
bar_count=len(trading_days) + 1,
frequency="1d",
field="price",
data_frequency=self.emission_rate,
ffill=True,
)[asset]
returns = treasury_series.pct_change()[1:]
return returns, returns
elif start_date == trading_days[0]:
# Attempt to handle case where stock data starts on first
# day, in this case use the open to close return.
treasury_series = data_portal.get_history_window(
[asset],
trading_days[-1],
bar_count=len(trading_days),
frequency="1d",
field="price",
data_frequency=self.emission_rate,
ffill=True,
)[asset]
# get a minute history window of the first day
first_open = data_portal.get_spot_value(
asset,
"open",
trading_days[0],
"daily",
)
first_close = data_portal.get_spot_value(
asset,
"close",
trading_days[0],
"daily",
)
first_day_return = (first_close - first_open) / first_open
returns = treasury_series.pct_change()[:]
returns[0] = first_day_return
return returns, returns
else:
raise ValueError(
"cannot set treasury to asset that does not exist during"
" the simulation period (asset start date=%r)" % start_date
)

# ---- zipline-tej: zipline/sources/treasury_source.py ----
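# Usage sketch for TreasurySource above, mirroring BenchmarkSource: on the
# precomputed ``treasury_returns`` path only the sessions index matters, so a
# plain business-day range and an assumed flat daily rate stand in here. In
# practice the series would come from something like
# zipline.sources.TEJ_Api_Data.get_Treasury_Return.
import pandas as pd
from zipline.sources.treasury_source import TreasurySource

sessions = pd.date_range("2020-01-02", "2020-01-31", freq="B", tz="utc")
risk_free = pd.Series(0.00004, index=sessions)  # assumed flat daily rate

treasury = TreasurySource(
    treasury_asset=None,
    trading_calendar=None,   # unused for daily emission on this path
    sessions=sessions,
    data_portal=None,        # unused on this path
    emission_rate="daily",
    treasury_returns=risk_free,
)
print(treasury.get_range(sessions[0], sessions[-1]).head())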
from abc import ABCMeta, abstractmethod
from collections import namedtuple
import hashlib
from textwrap import dedent
import warnings
from logbook import Logger
import numpy
import pandas as pd
from pandas import read_csv
import pytz
import requests
from io import StringIO
from zipline.errors import MultipleSymbolsFound, SymbolNotFound, ZiplineError
from zipline.protocol import DATASOURCE_TYPE, Event
from zipline.assets import Equity
logger = Logger("Requests Source Logger")
def roll_dts_to_midnight(dts, trading_day):
if len(dts) == 0:
return dts
return (
pd.DatetimeIndex(
(dts.tz_convert("US/Eastern") - pd.Timedelta(hours=16)).date,
tz="UTC",
)
+ trading_day
)
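# Behaviour sketch for roll_dts_to_midnight above: an intraday UTC timestamp
# is shifted to US/Eastern, pushed back 16 hours, truncated to a date, and
# then advanced by one trading day. The BDay offset below is a stand-in for
# the calendar's actual trading-day offset.
_example_dts = pd.DatetimeIndex(["2014-01-06 21:00"], tz="UTC")
print(roll_dts_to_midnight(_example_dts, pd.tseries.offsets.BDay()))
# -> midnight UTC of the next business day (2014-01-07) for this input.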
class FetcherEvent(Event):
pass
class FetcherCSVRedirectError(ZiplineError):
msg = dedent(
"""\
Attempt to fetch_csv from a redirected url. {url}
must be changed to {new_url}
"""
)
def __init__(self, *args, **kwargs):
self.url = kwargs["url"]
self.new_url = kwargs["new_url"]
self.extra = kwargs["extra"]
super(FetcherCSVRedirectError, self).__init__(*args, **kwargs)
# The following optional arguments are supported for
# requests backed data sources.
# see https://requests.readthedocs.io/en/latest/api/#main-interface
# for a full list.
ALLOWED_REQUESTS_KWARGS = {"params", "headers", "auth", "cert"}
# The following optional arguments are supported for pandas' read_csv
# function, and may be passed as kwargs to the datasource below.
# see https://pandas.pydata.org/
# pandas-docs/stable/generated/pandas.io.parsers.read_csv.html
ALLOWED_READ_CSV_KWARGS = {
"sep",
"dialect",
"doublequote",
"escapechar",
"quotechar",
"quoting",
"skipinitialspace",
"lineterminator",
"header",
"index_col",
"names",
"prefix",
"skiprows",
"skipfooter",
"skip_footer",
"na_values",
"true_values",
"false_values",
"delimiter",
"converters",
"dtype",
"delim_whitespace",
"as_recarray",
"na_filter",
"compact_ints",
"use_unsigned",
"buffer_lines",
"warn_bad_lines",
"error_bad_lines",
"keep_default_na",
"thousands",
"comment",
"decimal",
"keep_date_col",
"nrows",
"chunksize",
"encoding",
"usecols",
}
SHARED_REQUESTS_KWARGS = {
"stream": True,
"allow_redirects": False,
}
def mask_requests_args(url, validating=False, params_checker=None, **kwargs):
requests_kwargs = {
key: val
for (key, val) in kwargs.items()
if key in ALLOWED_REQUESTS_KWARGS
}
if params_checker is not None:
url, s_params = params_checker(url)
if s_params:
if "params" in requests_kwargs:
requests_kwargs["params"].update(s_params)
else:
requests_kwargs["params"] = s_params
# Giving the connection 30 seconds. This timeout does not
# apply to the download of the response body.
# (Note that Quandl links can take >10 seconds to return their
# first byte on occasion)
requests_kwargs["timeout"] = 1.0 if validating else 30.0
requests_kwargs.update(SHARED_REQUESTS_KWARGS)
request_pair = namedtuple("RequestPair", ("requests_kwargs", "url"))
return request_pair(requests_kwargs, url)
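# Behaviour sketch for mask_requests_args above: only whitelisted requests
# kwargs survive, a timeout is injected, and the shared kwargs (stream,
# allow_redirects) are merged in. The URL and header values are illustrative.
_req_kwargs, _url = mask_requests_args(
    "https://example.com/data.csv",
    headers={"User-Agent": "zipline-fetcher"},
    sep=",",   # a read_csv kwarg, silently dropped here
)
print(_url)
print(_req_kwargs)  # includes 'timeout': 30.0, 'stream': True, 'allow_redirects': False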
class PandasCSV(object, metaclass=ABCMeta):
def __init__(
self,
pre_func,
post_func,
asset_finder,
trading_day,
start_date,
end_date,
date_column,
date_format,
timezone,
symbol,
mask,
symbol_column,
data_frequency,
country_code,
**kwargs,
):
self.start_date = start_date
self.end_date = end_date
self.date_column = date_column
self.date_format = date_format
self.timezone = timezone
self.mask = mask
self.symbol_column = symbol_column or "symbol"
self.data_frequency = data_frequency
self.country_code = country_code
invalid_kwargs = set(kwargs) - ALLOWED_READ_CSV_KWARGS
if invalid_kwargs:
raise TypeError(
"Unexpected keyword arguments: %s" % invalid_kwargs,
)
self.pandas_kwargs = self.mask_pandas_args(kwargs)
self.symbol = symbol
self.finder = asset_finder
self.trading_day = trading_day
self.pre_func = pre_func
self.post_func = post_func
@property
def fields(self):
return self.df.columns.tolist()
def get_hash(self):
return self.namestring
@abstractmethod
def fetch_data(self):
return
@staticmethod
def parse_date_str_series(
format_str, tz, date_str_series, data_frequency, trading_day
):
"""
Efficient parsing for a 1d Pandas/numpy object containing string
representations of dates.
Note: pd.to_datetime is significantly faster when no format string is
passed, and in pandas 0.12.0 the %p strptime directive is not correctly
handled if a format string is explicitly passed, but AM/PM is handled
properly if format=None.
Moreover, we were previously ignoring this parameter unintentionally
because we were incorrectly passing it as a positional. For all these
reasons, we ignore the format_str parameter when parsing datetimes.
"""
# Explicitly ignoring this parameter. See note above.
if format_str is not None:
logger.warn(
"The 'format_str' parameter to fetch_csv is deprecated. "
"Ignoring and defaulting to pandas default date parsing."
)
format_str = None
tz_str = str(tz)
if tz_str == pytz.utc.zone:
parsed = pd.to_datetime(
date_str_series.values,
format=format_str,
utc=True,
errors="coerce",
)
else:
parsed = (
pd.to_datetime(
date_str_series.values,
format=format_str,
errors="coerce",
)
.tz_localize(tz_str)
.tz_convert("UTC")
)
if data_frequency == "daily":
parsed = roll_dts_to_midnight(parsed, trading_day)
return parsed
def mask_pandas_args(self, kwargs):
pandas_kwargs = {
key: val
for (key, val) in kwargs.items()
if key in ALLOWED_READ_CSV_KWARGS
}
if "usecols" in pandas_kwargs:
usecols = pandas_kwargs["usecols"]
if usecols and self.date_column not in usecols:
# make a new list so we don't modify user's,
# and to ensure it is mutable
with_date = list(usecols)
with_date.append(self.date_column)
pandas_kwargs["usecols"] = with_date
# No strings in the 'symbol' column should be interpreted as NaNs
pandas_kwargs.setdefault("keep_default_na", False)
pandas_kwargs.setdefault("na_values", {"symbol": []})
return pandas_kwargs
def _lookup_unconflicted_symbol(self, symbol):
"""
Attempt to find a unique asset whose symbol is the given string.
If multiple assets have held the given symbol, return a 0.
If no asset has held the given symbol, return a NaN.
"""
try:
uppered = symbol.upper()
except AttributeError:
# The mapping fails because symbol was a non-string
return numpy.nan
try:
return self.finder.lookup_symbol(
uppered,
as_of_date=None,
country_code=self.country_code,
)
except MultipleSymbolsFound:
# Fill conflicted entries with zeros to mark that they need to be
# resolved by date.
return 0
except SymbolNotFound:
# Fill not found entries with nans.
return numpy.nan
def load_df(self):
df = self.fetch_data()
if self.pre_func:
df = self.pre_func(df)
# Batch-convert the user-specified date column into timestamps.
df["dt"] = self.parse_date_str_series(
self.date_format,
self.timezone,
df[self.date_column],
self.data_frequency,
self.trading_day,
).values
# ignore rows whose dates we couldn't parse
df = df[df["dt"].notnull()]
if self.symbol is not None:
df["sid"] = self.symbol
elif self.finder:
df.sort_values(by=self.symbol_column, inplace=True)
# Pop the 'sid' column off of the DataFrame, just in case the user
# has assigned it, and throw a warning
try:
df.pop("sid")
warnings.warn(
"Assignment of the 'sid' column of a DataFrame is "
"not supported by Fetcher. The 'sid' column has been "
"overwritten.",
category=UserWarning,
stacklevel=2,
)
except KeyError:
# There was no 'sid' column, so no warning is necessary
pass
# Fill entries for any symbols that don't require a date to
# uniquely identify. Entries for which multiple securities exist
# are replaced with zeroes, while entries for which no asset
# exists are replaced with NaNs.
unique_symbols = df[self.symbol_column].unique()
sid_series = pd.Series(
data=map(self._lookup_unconflicted_symbol, unique_symbols),
index=unique_symbols,
name="sid",
)
df = df.join(sid_series, on=self.symbol_column)
# Fill any zero entries left in our sid column by doing a lookup
# using both symbol and the row date.
conflict_rows = df[df["sid"] == 0]
for row_idx, row in conflict_rows.iterrows():
try:
asset = (
self.finder.lookup_symbol(
row[self.symbol_column],
# Replacing tzinfo here is necessary because of the
# timezone metadata bug described below.
row["dt"].replace(tzinfo=pytz.utc),
country_code=self.country_code,
# It's possible that no asset comes back here if our
# lookup date is from before any asset held the
# requested symbol. Mark such cases as NaN so that
# they get dropped in the next step.
)
or numpy.nan
)
except SymbolNotFound:
asset = numpy.nan
# Assign the resolved asset to the cell
df.iloc[row_idx, df.columns.get_loc("sid")] = asset
# Filter out rows containing symbols that we failed to find.
length_before_drop = len(df)
df = df[df["sid"].notnull()]
no_sid_count = length_before_drop - len(df)
if no_sid_count:
logger.warn(
"Dropped {} rows from fetched csv.".format(no_sid_count),
no_sid_count,
extra={"syslog": True},
)
else:
df["sid"] = df["symbol"]
# Dates are localized to UTC when they come out of
# parse_date_str_series, but we need to re-localize them here because
# of a bug that wasn't fixed until
# https://github.com/pydata/pandas/pull/7092.
# We should be able to remove the call to tz_localize once we're on
# pandas 0.14.0
# We don't set 'dt' as the index until here because the Symbol parsing
# operations above depend on having a unique index for the dataframe,
# and the 'dt' column can contain multiple dates for the same entry.
df.drop_duplicates(["sid", "dt"])
df.set_index(["dt"], inplace=True)
df = df.tz_localize("UTC")
df.sort_index(inplace=True)
cols_to_drop = [self.date_column]
if self.symbol is None:
cols_to_drop.append(self.symbol_column)
df = df[df.columns.drop(cols_to_drop)]
if self.post_func:
df = self.post_func(df)
return df
def __iter__(self):
asset_cache = {}
for dt, series in self.df.iterrows():
if dt < self.start_date:
continue
if dt > self.end_date:
return
event = FetcherEvent()
# when dt column is converted to be the dataframe's index
# the dt column is dropped. So, we need to manually copy
# dt into the event.
event.dt = dt
for k, v in series.iteritems():
# convert numpy integer types to
# int. This assumes we are on a 64bit
# platform that will not lose information
# by casting.
# TODO: this is only necessary on the
# amazon qexec instances. would be good
# to figure out how to use the numpy dtypes
# without this check and casting.
if isinstance(v, numpy.integer):
v = int(v)
setattr(event, k, v)
# If it has start_date, then it's already an Asset
# object from asset_for_symbol, and we don't have to
# transform it any further. Checking for start_date is
# faster than isinstance.
if event.sid in asset_cache:
event.sid = asset_cache[event.sid]
elif hasattr(event.sid, "start_date"):
# Clone for user algo code, if we haven't already.
asset_cache[event.sid] = event.sid
elif self.finder and isinstance(event.sid, int):
asset = self.finder.retrieve_asset(event.sid, default_none=True)
if asset:
# Clone for user algo code.
event.sid = asset_cache[asset] = asset
elif self.mask:
# When masking drop all non-mappable values.
continue
elif self.symbol is None:
# If the event's sid property is an int we coerce
# it into an Equity.
event.sid = asset_cache[event.sid] = Equity(event.sid)
event.type = DATASOURCE_TYPE.CUSTOM
event.source_id = self.namestring
yield event
class PandasRequestsCSV(PandasCSV):
# maximum 100 megs to prevent DDoS
MAX_DOCUMENT_SIZE = (1024 * 1024) * 100
# maximum number of bytes to read in at a time
CONTENT_CHUNK_SIZE = 4096
def __init__(
self,
url,
pre_func,
post_func,
asset_finder,
trading_day,
start_date,
end_date,
date_column,
date_format,
timezone,
symbol,
mask,
symbol_column,
data_frequency,
country_code,
special_params_checker=None,
**kwargs,
):
# Peel off extra requests kwargs, forwarding the remaining kwargs to
# the superclass.
# Also returns possible https updated url if sent to http quandl ds
# If url hasn't changed, will just return the original.
self._requests_kwargs, self.url = mask_requests_args(
url, params_checker=special_params_checker, **kwargs
)
remaining_kwargs = {
k: v for k, v in kwargs.items() if k not in self.requests_kwargs
}
self.namestring = type(self).__name__
super(PandasRequestsCSV, self).__init__(
pre_func,
post_func,
asset_finder,
trading_day,
start_date,
end_date,
date_column,
date_format,
timezone,
symbol,
mask,
symbol_column,
data_frequency,
country_code=country_code,
**remaining_kwargs,
)
self.fetch_size = None
self.fetch_hash = None
self.df = self.load_df()
self.special_params_checker = special_params_checker
@property
def requests_kwargs(self):
return self._requests_kwargs
def fetch_url(self, url):
info = "checking {url} with {params}"
logger.info(info.format(url=url, params=self.requests_kwargs))
# setting decode_unicode=True sometimes results in a
# UnicodeEncodeError exception, so instead we'll use
# pandas logic for decoding content
try:
response = requests.get(url, **self.requests_kwargs)
except requests.exceptions.ConnectionError:
raise Exception("Could not connect to %s" % url)
if not response.ok:
raise Exception("Problem reaching %s" % url)
elif response.is_redirect:
# On the offchance we don't catch a redirect URL
# in validation, this will catch it.
new_url = response.headers["location"]
raise FetcherCSVRedirectError(
url=url,
new_url=new_url,
extra={"old_url": url, "new_url": new_url},
)
content_length = 0
logger.info(
"{} connection established in {:.1f} seconds".format(
url, response.elapsed.total_seconds()
)
)
# use the decode_unicode flag to ensure that the output of this is
# a string, and not bytes.
for chunk in response.iter_content(
self.CONTENT_CHUNK_SIZE, decode_unicode=True
):
if content_length > self.MAX_DOCUMENT_SIZE:
raise Exception("Document size too big.")
if chunk:
#---------------------------------------------------------------------------------------------------------------------
#20221224 (by MRC)
#When there is no header (as in some CBOE files) this function will crash your algo when reading
#a headerless csv file. Although it's a known problem in Requests, it hasn't been solved for years, so we need
#to patch this file here. See also the description in ref: quantopian#1837
#https://github.com/peterfabakker/zipline/commit/07c9315359f59805994b8c29c3934d5d679210a6#diff-3e615ad9208f4f7263524485c17d39022ab58ce6a028ae45900a628bd47c30be
if isinstance(chunk, bytes):
if content_length == 0:
import codecs
if chunk[:3] == codecs.BOM_UTF8:
encoding = 'utf-8-sig'
elif chunk[:2] == codecs.BOM_UTF16_LE or chunk[:2] == codecs.BOM_UTF16_BE:
encoding = 'utf-16'
elif chunk[:4] == codecs.BOM_UTF32_LE or chunk[:4] == codecs.BOM_UTF32_BE:
encoding = 'utf-32'
else:
encoding = 'utf-8'
decoder = codecs.getincrementaldecoder(encoding)(errors='replace')
chunk = decoder.decode(chunk)
#---------------------------------------------------------------------------------------------------------------------
content_length += len(chunk)
yield chunk
return
def fetch_data(self):
# create a data frame directly from the full text of
# the response from the returned file-descriptor.
#data = self.fetch_url(self.url)
data = self.fetch_url(self.url)
fd = StringIO()
#fd = BytesIO()
if isinstance(data, str):
fd.write(data)
else:
for chunk in data:
fd.write(chunk)
self.fetch_size = fd.tell()
fd.seek(0)
try:
# see if pandas can parse csv data
frames = read_csv(fd, **self.pandas_kwargs)
frames_hash = hashlib.md5(str(fd.getvalue()).encode("utf-8"))
self.fetch_hash = frames_hash.hexdigest()
except pd.errors.ParserError:  # pd.parser.CParserError no longer exists in current pandas
# could not parse the data, raise exception
raise Exception("Error parsing remote CSV data.")
finally:
fd.close()
return frames

# ---- zipline-tej: zipline/sources/requests_csv.py ----
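# Usage sketch: PandasRequestsCSV above is normally reached through the
# fetch_csv algorithm API rather than constructed directly. The URL, the
# column names ("date", "symbol", "signal") and the renaming in _pre_func are
# assumptions for illustration only.
from zipline.api import fetch_csv, record, symbol


def _pre_func(df):
    # normalize the raw CSV headers to the names assumed below
    return df.rename(columns={"Date": "date", "Ticker": "symbol"})


def initialize(context):
    fetch_csv(
        "https://example.com/signals.csv",   # hypothetical URL
        date_column="date",
        symbol_column="symbol",
        pre_func=_pre_func,
    )
    context.asset = symbol("AAPL")


def handle_data(context, data):
    # columns from the fetched CSV become fields on data.current()
    record(signal=data.current(context.asset, "signal"))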
from abc import (
ABCMeta,
abstractmethod,
abstractproperty,
)
from numpy import concatenate
from lru import LRU
from pandas import isnull
from toolz import sliding_window
from zipline.assets import Equity, Future
from zipline.assets.continuous_futures import ContinuousFuture
from zipline.lib._int64window import AdjustedArrayWindow as Int64Window
from zipline.lib._float64window import AdjustedArrayWindow as Float64Window
from zipline.lib.adjustment import Float64Multiply, Float64Add
from zipline.utils.cache import ExpiringCache
from zipline.utils.math_utils import number_of_decimal_places
from zipline.utils.memoize import lazyval
from zipline.utils.numpy_utils import float64_dtype
from zipline.utils.pandas_utils import find_in_sorted_index, normalize_date
# Default number of decimal places used for rounding asset prices.
DEFAULT_ASSET_PRICE_DECIMALS = 3
class HistoryCompatibleUSEquityAdjustmentReader(object):
def __init__(self, adjustment_reader):
self._adjustments_reader = adjustment_reader
def load_pricing_adjustments(self, columns, dts, assets):
"""
Returns
-------
adjustments : list[dict[int -> Adjustment]]
A list, where each element corresponds to the `columns`, of
mappings from index to adjustment objects to apply at that index.
"""
out = [None] * len(columns)
for i, column in enumerate(columns):
adjs = {}
for asset in assets:
adjs.update(self._get_adjustments_in_range(asset, dts, column))
out[i] = adjs
return out
def _get_adjustments_in_range(self, asset, dts, field):
"""
Get the Float64Multiply objects to pass to an AdjustedArrayWindow.
For the use of AdjustedArrayWindow in the loader, which looks back
from current simulation time back to a window of data the dictionary is
structured with:
- the key into the dictionary for adjustments is the location of the
day from which the window is being viewed.
- the start of all multiply objects is always 0 (in each window all
adjustments are overlapping)
- the end of the multiply object is the location before the calendar
location of the adjustment action, making all days before the event
adjusted.
Parameters
----------
asset : Asset
The assets for which to get adjustments.
dts : iterable of datetime64-like
The dts for which adjustment data is needed.
field : str
OHLCV field for which to get the adjustments.
Returns
-------
out : dict[loc -> Float64Multiply]
The adjustments as a dict of loc -> Float64Multiply
"""
sid = int(asset)
start = normalize_date(dts[0])
end = normalize_date(dts[-1])
adjs = {}
if field != "volume":
mergers = self._adjustments_reader.get_adjustments_for_sid(
"mergers", sid
)
for m in mergers:
dt = m[0]
if start < dt <= end:
end_loc = dts.searchsorted(dt)
adj_loc = end_loc
mult = Float64Multiply(0, end_loc - 1, 0, 0, m[1])
try:
adjs[adj_loc].append(mult)
except KeyError:
adjs[adj_loc] = [mult]
divs = self._adjustments_reader.get_adjustments_for_sid(
"dividends", sid
)
for d in divs:
dt = d[0]
if start < dt <= end:
end_loc = dts.searchsorted(dt)
adj_loc = end_loc
mult = Float64Multiply(0, end_loc - 1, 0, 0, d[1])
try:
adjs[adj_loc].append(mult)
except KeyError:
adjs[adj_loc] = [mult]
splits = self._adjustments_reader.get_adjustments_for_sid("splits", sid)
for s in splits:
dt = s[0]
if start < dt <= end:
if field == "volume":
ratio = 1.0 / s[1]
else:
ratio = s[1]
end_loc = dts.searchsorted(dt)
adj_loc = end_loc
mult = Float64Multiply(0, end_loc - 1, 0, 0, ratio)
try:
adjs[adj_loc].append(mult)
except KeyError:
adjs[adj_loc] = [mult]
return adjs
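# Shape sketch for the mapping documented in _get_adjustments_in_range above:
# each key is the window location of the session on which an adjustment takes
# effect, and the value is a list of Float64Multiply objects covering rows
# [0, end_loc - 1] of the lookback window. The 0.5 ratio is illustrative
# (e.g. a 2:1 split observed at window index 5).
from zipline.lib.adjustment import Float64Multiply

_end_loc = 5
_example_adjs = {_end_loc: [Float64Multiply(0, _end_loc - 1, 0, 0, 0.5)]}
print(_example_adjs)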
class ContinuousFutureAdjustmentReader(object):
"""
Calculates adjustments for continuous futures, based on the
close and open of the contracts on the either side of each roll.
"""
def __init__(
self,
trading_calendar,
asset_finder,
bar_reader,
roll_finders,
frequency,
):
self._trading_calendar = trading_calendar
self._asset_finder = asset_finder
self._bar_reader = bar_reader
self._roll_finders = roll_finders
self._frequency = frequency
def load_pricing_adjustments(self, columns, dts, assets):
"""
Returns
-------
adjustments : list[dict[int -> Adjustment]]
A list, where each element corresponds to the `columns`, of
mappings from index to adjustment objects to apply at that index.
"""
out = [None] * len(columns)
for i, column in enumerate(columns):
adjs = {}
for asset in assets:
adjs.update(self._get_adjustments_in_range(asset, dts, column))
out[i] = adjs
return out
def _make_adjustment(
self, adjustment_type, front_close, back_close, end_loc
):
adj_base = back_close - front_close
if adjustment_type == "mul":
adj_value = 1.0 + adj_base / front_close
adj_class = Float64Multiply
elif adjustment_type == "add":
adj_value = adj_base
adj_class = Float64Add
return adj_class(0, end_loc, 0, 0, adj_value)
def _get_adjustments_in_range(self, cf, dts, field):
if field == "volume" or field == "sid":
return {}
if cf.adjustment is None:
return {}
rf = self._roll_finders[cf.roll_style]
partitions = []
rolls = rf.get_rolls(cf.root_symbol, dts[0], dts[-1], cf.offset)
tc = self._trading_calendar
adjs = {}
for front, back in sliding_window(2, rolls):
front_sid, roll_dt = front
back_sid = back[0]
dt = tc.previous_session_label(roll_dt)
if self._frequency == "minute":
dt = tc.open_and_close_for_session(dt)[1]
roll_dt = tc.open_and_close_for_session(roll_dt)[0]
partitions.append((front_sid, back_sid, dt, roll_dt))
for partition in partitions:
front_sid, back_sid, dt, roll_dt = partition
last_front_dt = self._bar_reader.get_last_traded_dt(
self._asset_finder.retrieve_asset(front_sid), dt
)
last_back_dt = self._bar_reader.get_last_traded_dt(
self._asset_finder.retrieve_asset(back_sid), dt
)
if isnull(last_front_dt) or isnull(last_back_dt):
continue
front_close = self._bar_reader.get_value(
front_sid, last_front_dt, "close"
)
back_close = self._bar_reader.get_value(
back_sid, last_back_dt, "close"
)
adj_loc = dts.searchsorted(roll_dt)
end_loc = adj_loc - 1
adj = self._make_adjustment(
cf.adjustment, front_close, back_close, end_loc
)
try:
adjs[adj_loc].append(adj)
except KeyError:
adjs[adj_loc] = [adj]
return adjs
class SlidingWindow(object):
"""
Wrapper around an AdjustedArrayWindow which supports monotonically
increasing (by datetime) requests for a sized window of data.
Parameters
----------
window : AdjustedArrayWindow
Window of pricing data with prefetched values beyond the current
simulation dt.
cal_start : int
Index in the overall calendar at which the window starts.
"""
def __init__(self, window, size, cal_start, offset):
self.window = window
self.cal_start = cal_start
self.current = next(window)
self.offset = offset
self.most_recent_ix = self.cal_start + size
def get(self, end_ix):
"""
Returns
-------
out : A np.ndarray of the equity pricing up to end_ix after adjustments
and rounding have been applied.
"""
if self.most_recent_ix == end_ix:
return self.current
target = end_ix - self.cal_start - self.offset + 1
self.current = self.window.seek(target)
self.most_recent_ix = end_ix
return self.current
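# Illustrative sketch (not part of the library): how SlidingWindow.get maps a
# calendar index onto the prefetched window, assuming `window` is an
# AdjustedArrayWindow as described in the class docstring.
#
#   sw = SlidingWindow(window, size=3, cal_start=10, offset=0)
#   sw.most_recent_ix       # 13 == cal_start + size
#   sw.get(13)              # returns the current array without seeking
#   sw.get(15)              # seeks to target 15 - 10 - 0 + 1 == 6
#
# Requests must be monotonically increasing by calendar index; a request for
# an earlier index requires a fresh window (see _ensure_sliding_windows).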
class HistoryLoader(metaclass=ABCMeta):
"""
Loader for sliding history windows, with support for adjustments.
Parameters
----------
trading_calendar: TradingCalendar
Contains the grouping logic needed to assign minutes to periods.
reader : DailyBarReader, MinuteBarReader
Reader for pricing bars.
adjustment_reader : SQLiteAdjustmentReader
Reader for adjustment data.
"""
FIELDS = ("open", "high", "low", "close", "volume", "sid")
def __init__(
self,
trading_calendar,
reader,
equity_adjustment_reader,
asset_finder,
roll_finders=None,
sid_cache_size=1000,
prefetch_length=0,
):
self.trading_calendar = trading_calendar
self._asset_finder = asset_finder
self._reader = reader
self._adjustment_readers = {}
if equity_adjustment_reader is not None:
self._adjustment_readers[
Equity
] = HistoryCompatibleUSEquityAdjustmentReader(
equity_adjustment_reader
)
if roll_finders:
self._adjustment_readers[
ContinuousFuture
] = ContinuousFutureAdjustmentReader(
trading_calendar,
asset_finder,
reader,
roll_finders,
self._frequency,
)
self._window_blocks = {
field: ExpiringCache(LRU(sid_cache_size)) for field in self.FIELDS
}
self._prefetch_length = prefetch_length
@abstractproperty
def _frequency(self):
pass
@abstractproperty
def _calendar(self):
pass
@abstractmethod
def _array(self, start, end, assets, field):
pass
def _decimal_places_for_asset(self, asset, reference_date):
if isinstance(asset, Future) and asset.tick_size:
return number_of_decimal_places(asset.tick_size)
elif isinstance(asset, ContinuousFuture):
# Tick size should be the same for all contracts of a continuous
# future, so arbitrarily get the contract with next upcoming auto
# close date.
oc = self._asset_finder.get_ordered_contracts(asset.root_symbol)
contract_sid = oc.contract_before_auto_close(reference_date.value)
if contract_sid is not None:
contract = self._asset_finder.retrieve_asset(contract_sid)
if contract.tick_size:
return number_of_decimal_places(contract.tick_size)
return DEFAULT_ASSET_PRICE_DECIMALS
def _ensure_sliding_windows(self, assets, dts, field, is_perspective_after):
"""
Ensure that there is a Float64Multiply window for each asset that can
provide data for the given parameters.
If the corresponding window for the (assets, len(dts), field) does not
exist, then create a new one.
If a corresponding window does exist for (assets, len(dts), field), but
can not provide data for the current dts range, then create a new
one and replace the expired window.
Parameters
----------
assets : iterable of Assets
The assets in the window
dts : iterable of datetime64-like
The datetimes for which to fetch data.
            Assumes that all dts are present and contiguous in the calendar.
field : str
The OHLCV field for which to retrieve data.
is_perspective_after : bool
see: `PricingHistoryLoader.history`
Returns
-------
out : list of Float64Window with sufficient data so that each asset's
window can provide `get` for the index corresponding with the last
value in `dts`
"""
end = dts[-1]
size = len(dts)
asset_windows = {}
needed_assets = []
cal = self._calendar
assets = self._asset_finder.retrieve_all(assets)
end_ix = find_in_sorted_index(cal, end)
for asset in assets:
try:
window = self._window_blocks[field].get(
(asset, size, is_perspective_after), end
)
except KeyError:
needed_assets.append(asset)
else:
if end_ix < window.most_recent_ix:
# Window needs reset. Requested end index occurs before the
# end index from the previous history call for this window.
# Grab new window instead of rewinding adjustments.
needed_assets.append(asset)
else:
asset_windows[asset] = window
if needed_assets:
offset = 0
start_ix = find_in_sorted_index(cal, dts[0])
prefetch_end_ix = min(end_ix + self._prefetch_length, len(cal) - 1)
prefetch_end = cal[prefetch_end_ix]
prefetch_dts = cal[start_ix : prefetch_end_ix + 1]
if is_perspective_after:
adj_end_ix = min(prefetch_end_ix + 1, len(cal) - 1)
adj_dts = cal[start_ix : adj_end_ix + 1]
else:
adj_dts = prefetch_dts
prefetch_len = len(prefetch_dts)
array = self._array(prefetch_dts, needed_assets, field)
if field == "sid":
window_type = Int64Window
else:
window_type = Float64Window
view_kwargs = {}
if field == "volume":
array = array.astype(float64_dtype)
for i, asset in enumerate(needed_assets):
adj_reader = None
try:
adj_reader = self._adjustment_readers[type(asset)]
except KeyError:
adj_reader = None
if adj_reader is not None:
adjs = adj_reader.load_pricing_adjustments(
[field], adj_dts, [asset]
)[0]
else:
adjs = {}
window = window_type(
array[:, i].reshape(prefetch_len, 1),
view_kwargs,
adjs,
offset,
size,
int(is_perspective_after),
self._decimal_places_for_asset(asset, dts[-1]),
)
sliding_window = SlidingWindow(window, size, start_ix, offset)
asset_windows[asset] = sliding_window
self._window_blocks[field].set(
(asset, size, is_perspective_after),
sliding_window,
prefetch_end,
)
return [asset_windows[asset] for asset in assets]
def history(self, assets, dts, field, is_perspective_after):
"""
A window of pricing data with adjustments applied assuming that the
end of the window is the day before the current simulation time.
Parameters
----------
assets : iterable of Assets
The assets in the window.
dts : iterable of datetime64-like
The datetimes for which to fetch data.
            Assumes that all dts are present and contiguous in the calendar.
field : str
The OHLCV field for which to retrieve data.
is_perspective_after : bool
True, if the window is being viewed immediately after the last dt
in the sliding window.
False, if the window is viewed on the last dt.
This flag is used for handling the case where the last dt in the
requested window immediately precedes a corporate action, e.g.:
- is_perspective_after is True
When the viewpoint is after the last dt in the window, as when a
daily history window is accessed from a simulation that uses a
minute data frequency, the history call to this loader will not
include the current simulation dt. At that point in time, the raw
data for the last day in the window will require adjustment, so the
most recent adjustment with respect to the simulation time is
applied to the last dt in the requested window.
An example equity which has a 0.5 split ratio dated for 05-27,
with the dts for a history call of 5 bars with a '1d' frequency at
05-27 9:31. Simulation frequency is 'minute'.
(In this case this function is called with 4 daily dts, and the
calling function is responsible for stitching back on the
'current' dt)
            |       |       |       |       | last dt | <-- viewer is here |
            |       | 05-23 | 05-24 | 05-25 | 05-26   | 05-27 9:31         |
            | raw   | 10.10 | 10.20 | 10.30 | 10.40   |                    |
            | adj   |  5.05 |  5.10 |  5.15 |  5.20   |                    |
The adjustment is applied to the last dt, 05-26, and all previous
dts.
- is_perspective_after is False, daily
When the viewpoint is the same point in time as the last dt in the
window, as when a daily history window is accessed from a
simulation that uses a daily data frequency, the history call will
include the current dt. At that point in time, the raw data for the
last day in the window will be post-adjustment, so no adjustment
is applied to the last dt.
An example equity which has a 0.5 split ratio dated for 05-27,
with the dts for a history call of 5 bars with a '1d' frequency at
05-27 0:00. Simulation frequency is 'daily'.
            |       |       |       |       |       | <-- viewer is here |
            |       |       |       |       |       | last dt            |
            |       | 05-23 | 05-24 | 05-25 | 05-26 | 05-27              |
            | raw   | 10.10 | 10.20 | 10.30 | 10.40 |  5.25              |
            | adj   |  5.05 |  5.10 |  5.15 |  5.20 |  5.25              |
Adjustments are applied 05-23 through 05-26 but not to the last dt,
05-27
Returns
-------
        out : np.ndarray with shape (len(dts), len(assets))
"""
block = self._ensure_sliding_windows(
assets, dts, field, is_perspective_after
)
end_ix = self._calendar.searchsorted(dts[-1])
return concatenate(
[window.get(end_ix) for window in block],
axis=1,
)
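# Illustrative sketch (not part of the library): a hypothetical call into
# HistoryLoader.history via a concrete loader, assuming `loader` is a
# DailyHistoryLoader whose reader/bundle already contains `asset`.
#
#   sessions = loader.trading_calendar.sessions_in_range(start, end)[-5:]
#   closes = loader.history([asset], sessions, "close",
#                           is_perspective_after=False)
#   closes.shape            # (5, 1): one row per dt, one column per asset
#
# With is_perspective_after=True the viewpoint is treated as the bar after
# sessions[-1], so an adjustment dated on the next session is also applied to
# the final row (see the split example in the docstring above).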
class DailyHistoryLoader(HistoryLoader):
@property
def _frequency(self):
return "daily"
@property
def _calendar(self):
return self._reader.sessions
def _array(self, dts, assets, field):
return self._reader.load_raw_arrays(
[field],
dts[0],
dts[-1],
assets,
)[0]
class MinuteHistoryLoader(HistoryLoader):
@property
def _frequency(self):
return "minute"
@lazyval
def _calendar(self):
mm = self.trading_calendar.all_minutes
start = mm.searchsorted(self._reader.first_trading_day)
end = mm.searchsorted(self._reader.last_available_dt, side="right")
return mm[start:end]
def _array(self, dts, assets, field):
return self._reader.load_raw_arrays(
[field],
dts[0],
dts[-1],
assets,
)[0] | zipline-tej | /zipline_tej-0.0.48-cp38-cp38-win_amd64.whl/zipline/data/history_loader.py | history_loader.py |
from functools import partial
import warnings
with warnings.catch_warnings(): # noqa
warnings.filterwarnings("ignore", category=DeprecationWarning)
from bcolz import carray, ctable
import numpy as np
import logbook
from numpy import (
array,
full,
iinfo,
nan,
)
from pandas import (
DatetimeIndex,
NaT,
read_csv,
to_datetime,
Timestamp,
)
from toolz import compose
from zipline.utils.calendar_utils import get_calendar
from zipline.data.session_bars import CurrencyAwareSessionBarReader
from zipline.data.bar_reader import (
NoDataAfterDate,
NoDataBeforeDate,
NoDataOnDate,
)
from zipline.utils.functional import apply
from zipline.utils.input_validation import expect_element
from zipline.utils.numpy_utils import iNaT, float64_dtype, uint32_dtype, uint64_dtype  # 20230706 (by MRC) volume overflow issue: added uint64_dtype
from zipline.utils.memoize import lazyval
from zipline.utils.cli import maybe_show_progress
from ._equities import _compute_row_slices, _read_bcolz_data
logger = logbook.Logger("UsEquityPricing")
OHLC = frozenset(["open", "high", "low", "close"])
US_EQUITY_PRICING_BCOLZ_COLUMNS = (
"open",
"high",
"low",
"close",
"volume",
"day",
"id",
)
UINT32_MAX = iinfo(np.uint32).max
def check_uint32_safe(value, colname):
if value >= UINT32_MAX:
raise ValueError("Value %s from column '%s' is too large" % (value, colname))
@expect_element(invalid_data_behavior={"warn", "raise", "ignore"})
def winsorise_uint32(df, invalid_data_behavior, column, *columns):
"""Drops any record where a value would not fit into a uint32.
Parameters
----------
df : pd.DataFrame
The dataframe to winsorise.
invalid_data_behavior : {'warn', 'raise', 'ignore'}
What to do when data is outside the bounds of a uint32.
    column, *columns : iterable[str]
The names of the columns to check.
Returns
-------
truncated : pd.DataFrame
``df`` with values that do not fit into a uint32 zeroed out.
"""
columns = list((column,) + columns)
mask = df[columns] > UINT32_MAX
if invalid_data_behavior != "ignore":
mask |= df[columns].isnull()
else:
# we are not going to generate a warning or error for this so just use
# nan_to_num
df[columns] = np.nan_to_num(df[columns])
mv = mask.values
if mv.any():
if invalid_data_behavior == "raise":
raise ValueError(
"%d values out of bounds for uint32: %r"
% (
mv.sum(),
df[mask.any(axis=1)],
),
)
if invalid_data_behavior == "warn":
warnings.warn(
"Ignoring %d values because they are out of bounds for"
" uint32: %r"
% (
mv.sum(),
df[mask.any(axis=1)],
),
stacklevel=3, # one extra frame for `expect_element`
)
df[mask] = 0
return df
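# Illustrative sketch (not part of the library): behaviour of winsorise_uint32
# on a tiny hypothetical frame. Out-of-range values (and NaNs, unless
# invalid_data_behavior == "ignore") are zeroed in place rather than dropped.
#
#   import pandas as pd
#   df = pd.DataFrame({"close": [10.0, 2.0], "volume": [5.0, 2.0 ** 33]})
#   winsorise_uint32(df, "warn", "volume", "close")   # warns about 1 value
#   df["volume"].tolist()   # [5.0, 0.0] -- the oversized value was zeroed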
class BcolzDailyBarWriter(object):
"""
Class capable of writing daily OHLCV data to disk in a format that can
    be read efficiently by BcolzDailyBarReader.
Parameters
----------
filename : str
The location at which we should write our output.
calendar : zipline.utils.calendar.trading_calendar
Calendar to use to compute asset calendar offsets.
start_session: pd.Timestamp
Midnight UTC session label.
end_session: pd.Timestamp
Midnight UTC session label.
See Also
--------
zipline.data.bcolz_daily_bars.BcolzDailyBarReader
"""
_csv_dtypes = {
"open": float64_dtype,
"high": float64_dtype,
"low": float64_dtype,
"close": float64_dtype,
"volume": float64_dtype,
}
def __init__(self, filename, calendar, start_session, end_session):
self._filename = filename
if start_session != end_session:
if not calendar.is_session(start_session):
raise ValueError("Start session %s is invalid!" % start_session)
if not calendar.is_session(end_session):
raise ValueError("End session %s is invalid!" % end_session)
self._start_session = start_session
self._end_session = end_session
self._calendar = calendar
@property
def progress_bar_message(self):
return "Merging daily equity files:"
def progress_bar_item_show_func(self, value):
return value if value is None else str(value[0])
def write(
self, data, assets=None, show_progress=False, invalid_data_behavior="warn"
):
"""
Parameters
----------
data : iterable[tuple[int, pandas.DataFrame or bcolz.ctable]]
The data chunks to write. Each chunk should be a tuple of sid
and the data for that asset.
assets : set[int], optional
The assets that should be in ``data``. If this is provided
we will check ``data`` against the assets and provide better
progress information.
show_progress : bool, optional
Whether or not to show a progress bar while writing.
invalid_data_behavior : {'warn', 'raise', 'ignore'}, optional
What to do when data is encountered that is outside the range of
a uint32.
Returns
-------
table : bcolz.ctable
The newly-written table.
"""
ctx = maybe_show_progress(
((sid, self.to_ctable(df, invalid_data_behavior)) for sid, df in data),
show_progress=show_progress,
item_show_func=self.progress_bar_item_show_func,
label=self.progress_bar_message,
length=len(assets) if assets is not None else None,
)
with ctx as it:
return self._write_internal(it, assets)
def write_csvs(self, asset_map, show_progress=False, invalid_data_behavior="warn"):
"""Read CSVs as DataFrames from our asset map.
Parameters
----------
asset_map : dict[int -> str]
A mapping from asset id to file path with the CSV data for that
asset
show_progress : bool
Whether or not to show a progress bar while writing.
invalid_data_behavior : {'warn', 'raise', 'ignore'}
What to do when data is encountered that is outside the range of
a uint32.
"""
read = partial(
read_csv,
parse_dates=["day"],
index_col="day",
dtype=self._csv_dtypes,
)
return self.write(
((asset, read(path)) for asset, path in asset_map.items()),
assets=asset_map.keys(),
show_progress=show_progress,
invalid_data_behavior=invalid_data_behavior,
)
def _write_internal(self, iterator, assets):
"""
Internal implementation of write.
`iterator` should be an iterator yielding pairs of (asset, ctable).
"""
total_rows = 0
first_row = {}
last_row = {}
calendar_offset = {}
# Maps column name -> output carray.
columns = {
            k: carray(array([], dtype=uint64_dtype))  # 20230706 (by MRC) volume overflow issue: uint32 -> uint64
for k in US_EQUITY_PRICING_BCOLZ_COLUMNS
}
earliest_date = None
sessions = self._calendar.sessions_in_range(
self._start_session, self._end_session
)
if assets is not None:
@apply
def iterator(iterator=iterator, assets=set(assets)):
for asset_id, table in iterator:
if asset_id not in assets:
raise ValueError("unknown asset id %r" % asset_id)
yield asset_id, table
for asset_id, table in iterator:
nrows = len(table)
for column_name in columns:
if column_name == "id":
# We know what the content of this column is, so don't
# bother reading it.
columns["id"].append(
                        full((nrows,), asset_id, dtype="uint64"),  # 20230706 (by MRC) volume overflow issue: uint32 -> uint64
)
continue
columns[column_name].append(table[column_name])
if earliest_date is None:
earliest_date = table["day"][0]
else:
earliest_date = min(earliest_date, table["day"][0])
# Bcolz doesn't support ints as keys in `attrs`, so convert
# assets to strings for use as attr keys.
asset_key = str(asset_id)
# Calculate the index into the array of the first and last row
# for this asset. This allows us to efficiently load single
# assets when querying the data back out of the table.
first_row[asset_key] = total_rows
last_row[asset_key] = total_rows + nrows - 1
total_rows += nrows
table_day_to_session = compose(
self._calendar.minute_to_session_label,
partial(Timestamp, unit="s", tz="UTC"),
)
asset_first_day = table_day_to_session(table["day"][0])
asset_last_day = table_day_to_session(table["day"][-1])
asset_sessions = sessions[
sessions.slice_indexer(asset_first_day, asset_last_day)
]
assert len(table) == len(asset_sessions), (
"Got {} rows for daily bars table with first day={}, last "
"day={}, expected {} rows.\n"
"Missing sessions: {}\n"
"Extra sessions: {}".format(
len(table),
asset_first_day.date(),
asset_last_day.date(),
len(asset_sessions),
asset_sessions.difference(
to_datetime(
np.array(table["day"]),
unit="s",
utc=True,
)
).tolist(),
to_datetime(
np.array(table["day"]),
unit="s",
utc=True,
)
.difference(asset_sessions)
.tolist(),
)
)
# Calculate the number of trading days between the first date
# in the stored data and the first date of **this** asset. This
# offset used for output alignment by the reader.
calendar_offset[asset_key] = sessions.get_loc(asset_first_day)
# This writes the table to disk.
full_table = ctable(
columns=[columns[colname] for colname in US_EQUITY_PRICING_BCOLZ_COLUMNS],
names=US_EQUITY_PRICING_BCOLZ_COLUMNS,
rootdir=self._filename,
mode="w",
)
full_table.attrs["first_trading_day"] = (
earliest_date if earliest_date is not None else iNaT
)
full_table.attrs["first_row"] = first_row
full_table.attrs["last_row"] = last_row
full_table.attrs["calendar_offset"] = calendar_offset
full_table.attrs["calendar_name"] = self._calendar.name
full_table.attrs["start_session_ns"] = self._start_session.value
full_table.attrs["end_session_ns"] = self._end_session.value
full_table.flush()
return full_table
@expect_element(invalid_data_behavior={"warn", "raise", "ignore"})
def to_ctable(self, raw_data, invalid_data_behavior):
if isinstance(raw_data, ctable):
# we already have a ctable so do nothing
return raw_data
        # winsorise_uint32(raw_data, invalid_data_behavior, "volume", *OHLC)  # 20230706 (by MRC) volume overflow issue: uint32 -> uint64
        processed = (raw_data[list(OHLC)] * 1000).round().astype("uint64")  # 20230706 (by MRC) volume overflow issue: uint32 -> uint64
        dates = raw_data.index.values.astype("datetime64[s]")
        # check_uint32_safe(dates.max().view(np.int64), "day")  # 20230706 (by MRC) volume overflow issue: uint32 -> uint64
        processed["day"] = dates.astype("uint64")  # 20230706 (by MRC) volume overflow issue: uint32 -> uint64
        processed["volume"] = raw_data.volume.astype("uint64")  # 20230706 (by MRC) volume overflow issue: uint32 -> uint64
return ctable.fromdataframe(processed)
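# Illustrative sketch (not part of the library): writing a single sid with the
# writer above. `daily_df` is a hypothetical OHLCV frame indexed by session
# labels, with one row for every session between its first and last day.
#
#   from zipline.utils.calendar_utils import get_calendar
#   cal = get_calendar("XNYS")
#   sessions = cal.sessions_in_range(daily_df.index[0], daily_df.index[-1])
#   writer = BcolzDailyBarWriter("daily_equities.bcolz", cal,
#                                sessions[0], sessions[-1])
#   writer.write([(1, daily_df)], assets={1}, show_progress=True)
#
# If a frame is missing sessions (or has extras), the assertion in
# _write_internal above fails with a message listing the mismatched days.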
class BcolzDailyBarReader(CurrencyAwareSessionBarReader):
"""
    Reader for raw pricing data written by BcolzDailyBarWriter.
Parameters
----------
table : bcolz.ctable
        The ctable containing the pricing data, with attrs corresponding to
        the Attributes list below.
    read_all_threshold : int
        The number of equities at which the read strategy switches: below
        this threshold, data is read one carray slice per asset; at or above
        it, all data for all assets is pulled into memory and then indexed
        per day and asset pair. Used to tune read performance for small
        versus large numbers of equities.
Attributes
----------
The table with which this loader interacts contains the following
attributes:
first_row : dict
Map from asset_id -> index of first row in the dataset with that id.
last_row : dict
Map from asset_id -> index of last row in the dataset with that id.
calendar_offset : dict
Map from asset_id -> calendar index of first row.
start_session_ns: int
Epoch ns of the first session used in this dataset.
end_session_ns: int
Epoch ns of the last session used in this dataset.
calendar_name: str
        String identifier of the trading calendar used (e.g., "NYSE").
We use first_row and last_row together to quickly find ranges of rows to
load when reading an asset's data into memory.
We use calendar_offset and calendar to orient loaded blocks within a
range of queried dates.
Notes
------
    A Bcolz CTable is composed of Columns and Attributes.
The table with which this loader interacts contains the following columns:
['open', 'high', 'low', 'close', 'volume', 'day', 'id'].
The data in these columns is interpreted as follows:
- Price columns ('open', 'high', 'low', 'close') are interpreted as 1000 *
as-traded dollar value.
- Volume is interpreted as as-traded volume.
- Day is interpreted as seconds since midnight UTC, Jan 1, 1970.
- Id is the asset id of the row.
The data in each column is grouped by asset and then sorted by day within
each asset block.
    The table is built to represent a long time range of data, e.g. ten years
    of equity data, so the asset blocks are not all the same length. The
    blocks are clipped to the known start and end date of each asset to cut
    down on the number of empty values that would need to be included to make
    a regular/cubic dataset.
    When read across columns, the open, high, low, close, and volume values
    at the same index represent the same asset and day.
See Also
--------
zipline.data.bcolz_daily_bars.BcolzDailyBarWriter
"""
def __init__(self, table, read_all_threshold=3000):
self._maybe_table_rootdir = table
# Cache of fully read np.array for the carrays in the daily bar table.
# raw_array does not use the same cache, but it could.
# Need to test keeping the entire array in memory for the course of a
# process first.
self._spot_cols = {}
self.PRICE_ADJUSTMENT_FACTOR = 0.001
self._read_all_threshold = read_all_threshold
@lazyval
def _table(self):
maybe_table_rootdir = self._maybe_table_rootdir
if isinstance(maybe_table_rootdir, ctable):
return maybe_table_rootdir
return ctable(rootdir=maybe_table_rootdir, mode="r")
@lazyval
def sessions(self):
if "calendar" in self._table.attrs.attrs:
# backwards compatibility with old formats, will remove
return DatetimeIndex(self._table.attrs["calendar"], tz="UTC")
else:
cal = get_calendar(self._table.attrs["calendar_name"])
start_session_ns = self._table.attrs["start_session_ns"]
start_session = Timestamp(start_session_ns, tz="UTC")
end_session_ns = self._table.attrs["end_session_ns"]
end_session = Timestamp(end_session_ns, tz="UTC")
sessions = cal.sessions_in_range(start_session, end_session)
return sessions
@lazyval
def _first_rows(self):
return {
int(asset_id): start_index
for asset_id, start_index in self._table.attrs["first_row"].items()
}
@lazyval
def _last_rows(self):
return {
int(asset_id): end_index
for asset_id, end_index in self._table.attrs["last_row"].items()
}
@lazyval
def _calendar_offsets(self):
return {
int(id_): offset
for id_, offset in self._table.attrs["calendar_offset"].items()
}
@lazyval
def first_trading_day(self):
try:
return Timestamp(self._table.attrs["first_trading_day"], unit="s", tz="UTC")
except KeyError:
return None
@lazyval
def trading_calendar(self):
if "calendar_name" in self._table.attrs.attrs:
return get_calendar(self._table.attrs["calendar_name"])
else:
return None
@property
def last_available_dt(self):
return self.sessions[-1]
def _compute_slices(self, start_idx, end_idx, assets):
"""
Compute the raw row indices to load for each asset on a query for the
given dates after applying a shift.
Parameters
----------
start_idx : int
Index of first date for which we want data.
end_idx : int
Index of last date for which we want data.
assets : pandas.Int64Index
Assets for which we want to compute row indices
Returns
-------
A 3-tuple of (first_rows, last_rows, offsets):
first_rows : np.array[intp]
Array with length == len(assets) containing the index of the first
row to load for each asset in `assets`.
last_rows : np.array[intp]
Array with length == len(assets) containing the index of the last
row to load for each asset in `assets`.
offset : np.array[intp]
            Array with length == len(assets) containing the index in a buffer
of length `dates` corresponding to the first row of each asset.
The value of offset[i] will be 0 if asset[i] existed at the start
of a query. Otherwise, offset[i] will be equal to the number of
entries in `dates` for which the asset did not yet exist.
"""
# The core implementation of the logic here is implemented in Cython
# for efficiency.
return _compute_row_slices(
self._first_rows,
self._last_rows,
self._calendar_offsets,
start_idx,
end_idx,
assets,
)
def load_raw_arrays(self, columns, start_date, end_date, assets):
start_idx = self._load_raw_arrays_date_to_index(start_date)
end_idx = self._load_raw_arrays_date_to_index(end_date)
first_rows, last_rows, offsets = self._compute_slices(
start_idx,
end_idx,
assets,
)
read_all = len(assets) > self._read_all_threshold
return _read_bcolz_data(
self._table,
(end_idx - start_idx + 1, len(assets)),
list(columns),
first_rows,
last_rows,
offsets,
read_all,
)
def _load_raw_arrays_date_to_index(self, date):
try:
return self.sessions.get_loc(date)
except KeyError:
raise NoDataOnDate(date)
def _spot_col(self, colname):
"""
Get the colname from daily_bar_table and read all of it into memory,
caching the result.
Parameters
----------
colname : string
            The name of an OHLCV carray in the daily_bar_table.
Returns
-------
        array
Full read array of the carray in the daily_bar_table with the
given colname.
"""
try:
col = self._spot_cols[colname]
except KeyError:
col = self._spot_cols[colname] = self._table[colname]
return col
def get_last_traded_dt(self, asset, day):
volumes = self._spot_col("volume")
search_day = day
while True:
try:
ix = self.sid_day_index(asset, search_day)
except NoDataBeforeDate:
return NaT
except NoDataAfterDate:
prev_day_ix = self.sessions.get_loc(search_day) - 1
if prev_day_ix > -1:
search_day = self.sessions[prev_day_ix]
continue
except NoDataOnDate:
return NaT
if volumes[ix] != 0:
return search_day
prev_day_ix = self.sessions.get_loc(search_day) - 1
if prev_day_ix > -1:
search_day = self.sessions[prev_day_ix]
else:
return NaT
def sid_day_index(self, sid, day):
"""
Parameters
----------
sid : int
The asset identifier.
day : datetime64-like
Midnight of the day for which data is requested.
Returns
-------
int
Index into the data tape for the given sid and day.
            Raises a NoDataOnDate exception if the given day is before or
            after the date range of the equity for the given sid.
"""
try:
day_loc = self.sessions.get_loc(day)
except Exception:
raise NoDataOnDate(
"day={0} is outside of calendar={1}".format(day, self.sessions)
)
offset = day_loc - self._calendar_offsets[sid]
if offset < 0:
raise NoDataBeforeDate(
"No data on or before day={0} for sid={1}".format(day, sid)
)
ix = self._first_rows[sid] + offset
if ix > self._last_rows[sid]:
raise NoDataAfterDate(
"No data on or after day={0} for sid={1}".format(day, sid)
)
return ix
def get_value(self, sid, dt, field):
"""
Parameters
----------
sid : int
The asset identifier.
        dt : datetime64-like
            Midnight of the day for which data is requested.
        field : string
            The price field. e.g. ('open', 'high', 'low', 'close', 'volume')
        Returns
        -------
        float
            The spot price for ``field`` of the given sid on the given day.
            Raises a NoDataOnDate exception if the given day is before or
            after the date range of the equity for the given sid.
            Returns NaN if the day is within the date range, but the price
            is 0.
"""
ix = self.sid_day_index(sid, dt)
price = self._spot_col(field)[ix]
if field != "volume":
if price == 0:
return nan
else:
return price * 0.001
else:
return price
def currency_codes(self, sids):
# XXX: This is pretty inefficient. This reader doesn't really support
# country codes, so we always either return USD or None if we don't
# know about the sid at all.
first_rows = self._first_rows
out = []
for sid in sids:
if sid in first_rows:
out.append("USD")
else:
out.append(None)
return np.array(out, dtype=object) | zipline-tej | /zipline_tej-0.0.48-cp38-cp38-win_amd64.whl/zipline/data/bcolz_daily_bars.py | bcolz_daily_bars.py |
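# Illustrative sketch (not part of the library): reading back the table
# written by BcolzDailyBarWriter, assuming "daily_equities.bcolz" exists and
# sid 1 traded on the queried sessions.
#
#   reader = BcolzDailyBarReader("daily_equities.bcolz")
#   sessions = reader.sessions
#   close = reader.get_value(1, sessions[-1], "close")
#   closes, volumes = reader.load_raw_arrays(
#       ["close", "volume"], sessions[0], sessions[-1], [1]
#   )
#
# get_value returns prices already scaled back by PRICE_ADJUSTMENT_FACTOR
# (0.001), as shown in the method above; volume is returned unscaled.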
import pandas as pd
from pandas import NaT
from zipline.utils.calendar_utils import TradingCalendar
from zipline.data.bar_reader import OHLCV, NoDataOnDate, NoDataForSid
from zipline.data.session_bars import CurrencyAwareSessionBarReader
from zipline.utils.input_validation import expect_types, validate_keys
from zipline.utils.pandas_utils import check_indexes_all_same
class InMemoryDailyBarReader(CurrencyAwareSessionBarReader):
"""
A SessionBarReader backed by a dictionary of in-memory DataFrames.
Parameters
----------
frames : dict[str -> pd.DataFrame]
Dictionary from field name ("open", "high", "low", "close", or
"volume") to DataFrame containing data for that field.
calendar : str or trading_calendars.TradingCalendar
Calendar (or name of calendar) to which data is aligned.
currency_codes : pd.Series
Map from sid -> listing currency for that sid.
verify_indices : bool, optional
Whether or not to verify that input data is correctly aligned to the
given calendar. Default is True.
"""
@expect_types(
frames=dict,
calendar=TradingCalendar,
verify_indices=bool,
currency_codes=pd.Series,
)
def __init__(self, frames, calendar, currency_codes, verify_indices=True):
self._frames = frames
self._values = {key: frame.values for key, frame in frames.items()}
self._calendar = calendar
self._currency_codes = currency_codes
validate_keys(frames, set(OHLCV), type(self).__name__)
if verify_indices:
verify_frames_aligned(list(frames.values()), calendar)
self._sessions = frames["close"].index
self._sids = frames["close"].columns
@classmethod
def from_dfs(cls, dfs, calendar, currency_codes):
"""Helper for construction from a dict of DataFrames."""
return cls(dfs, calendar, currency_codes)
@property
def last_available_dt(self):
return self._calendar[-1]
@property
def trading_calendar(self):
return self._calendar
@property
def sessions(self):
return self._sessions
def load_raw_arrays(self, columns, start_dt, end_dt, assets):
if start_dt not in self._sessions:
raise NoDataOnDate(start_dt)
if end_dt not in self._sessions:
raise NoDataOnDate(end_dt)
asset_indexer = self._sids.get_indexer(assets)
if -1 in asset_indexer:
bad_assets = assets[asset_indexer == -1]
raise NoDataForSid(bad_assets)
date_indexer = self._sessions.slice_indexer(start_dt, end_dt)
out = []
for c in columns:
out.append(self._values[c][date_indexer, asset_indexer])
return out
def get_value(self, sid, dt, field):
"""
Parameters
----------
sid : int
The asset identifier.
        dt : datetime64-like
Midnight of the day for which data is requested.
field : string
The price field. e.g. ('open', 'high', 'low', 'close', 'volume')
Returns
-------
        float
            The spot price for ``field`` of the given sid on the given day.
        """
        return self._frames[field].loc[dt, sid]
def get_last_traded_dt(self, asset, dt):
"""
Parameters
----------
asset : zipline.asset.Asset
The asset identifier.
dt : datetime64-like
Midnight of the day for which data is requested.
Returns
-------
        pd.Timestamp : The last known dt for the asset and dt;
NaT if no trade is found before the given dt.
"""
try:
return self.frames["close"].loc[:, asset.sid].last_valid_index()
except IndexError:
return NaT
@property
def first_trading_day(self):
return self._sessions[0]
def currency_codes(self, sids):
codes = self._currency_codes
return codes.loc[sids].to_numpy()
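# Illustrative sketch (not part of the library): constructing the in-memory
# reader from hypothetical aligned frames. Each frame is indexed by calendar
# sessions and has one column per sid.
#
#   import pandas as pd
#   from zipline.utils.calendar_utils import get_calendar
#   cal = get_calendar("XNYS")
#   sessions = cal.sessions_in_range(pd.Timestamp("2020-01-02", tz="UTC"),
#                                    pd.Timestamp("2020-01-10", tz="UTC"))
#   frames = {f: pd.DataFrame(1.0, index=sessions, columns=[1, 2])
#             for f in ("open", "high", "low", "close", "volume")}
#   reader = InMemoryDailyBarReader(
#       frames, cal, currency_codes=pd.Series(["USD", "USD"], index=[1, 2])
#   )
#   reader.load_raw_arrays(["close"], sessions[0], sessions[-1], pd.Index([1]))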
def verify_frames_aligned(frames, calendar):
"""
Verify that DataFrames in ``frames`` have the same indexing scheme and are
aligned to ``calendar``.
Parameters
----------
frames : list[pd.DataFrame]
calendar : trading_calendars.TradingCalendar
Raises
------
ValueError
If frames have different indexes/columns, or if frame indexes do not
match a contiguous region of ``calendar``.
"""
indexes = [f.index for f in frames]
check_indexes_all_same(indexes, message="DataFrame indexes don't match:")
columns = [f.columns for f in frames]
check_indexes_all_same(columns, message="DataFrame columns don't match:")
start, end = indexes[0][[0, -1]]
cal_sessions = calendar.sessions_in_range(start, end)
check_indexes_all_same(
[indexes[0], cal_sessions],
"DataFrame index doesn't match {} calendar:".format(calendar.name),
) | zipline-tej | /zipline_tej-0.0.48-cp38-cp38-win_amd64.whl/zipline/data/in_memory_daily_bars.py | in_memory_daily_bars.py |
from abc import ABCMeta, abstractmethod
from numpy import full, nan, int64, zeros
from zipline.utils.memoize import lazyval
class AssetDispatchBarReader(metaclass=ABCMeta):
"""
Parameters
----------
- trading_calendar : zipline.utils.trading_calendar.TradingCalendar
- asset_finder : zipline.assets.AssetFinder
- readers : dict
A dict mapping Asset type to the corresponding
[Minute|Session]BarReader
- last_available_dt : pd.Timestamp or None, optional
If not provided, infers it by using the min of the
last_available_dt values of the underlying readers.
"""
def __init__(
self,
trading_calendar,
asset_finder,
readers,
last_available_dt=None,
):
self._trading_calendar = trading_calendar
self._asset_finder = asset_finder
self._readers = readers
self._last_available_dt = last_available_dt
for t, r in self._readers.items():
assert trading_calendar == r.trading_calendar, (
"All readers must share target trading_calendar. "
"Reader={0} for type={1} uses calendar={2} which does not "
"match the desired shared calendar={3} ".format(
r, t, r.trading_calendar, trading_calendar
)
)
@abstractmethod
def _dt_window_size(self, start_dt, end_dt):
pass
@property
def _asset_types(self):
return self._readers.keys()
def _make_raw_array_shape(self, start_dt, end_dt, num_sids):
return self._dt_window_size(start_dt, end_dt), num_sids
def _make_raw_array_out(self, field, shape):
if field != "volume" and field != "sid":
out = full(shape, nan)
else:
out = zeros(shape, dtype=int64)
return out
@property
def trading_calendar(self):
return self._trading_calendar
@lazyval
def last_available_dt(self):
if self._last_available_dt is not None:
return self._last_available_dt
else:
return max(r.last_available_dt for r in self._readers.values())
@lazyval
def first_trading_day(self):
return min(r.first_trading_day for r in self._readers.values())
def get_value(self, sid, dt, field):
asset = self._asset_finder.retrieve_asset(sid)
r = self._readers[type(asset)]
return r.get_value(asset, dt, field)
def get_last_traded_dt(self, asset, dt):
r = self._readers[type(asset)]
return r.get_last_traded_dt(asset, dt)
def load_raw_arrays(self, fields, start_dt, end_dt, sids):
asset_types = self._asset_types
sid_groups = {t: [] for t in asset_types}
out_pos = {t: [] for t in asset_types}
assets = self._asset_finder.retrieve_all(sids)
for i, asset in enumerate(assets):
t = type(asset)
if t not in sid_groups:
sid_groups[t] = []
if t not in out_pos:
out_pos[t] = []
sid_groups[t].append(asset)
out_pos[t].append(i)
batched_arrays = {
t: self._readers[t].load_raw_arrays(fields, start_dt, end_dt, sid_groups[t])
for t in asset_types
if sid_groups[t]
}
results = []
shape = self._make_raw_array_shape(start_dt, end_dt, len(sids))
for i, field in enumerate(fields):
out = self._make_raw_array_out(field, shape)
for t, arrays in batched_arrays.items():
out[:, out_pos[t]] = arrays[i]
results.append(out)
return results
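# Illustrative sketch (not part of the library): fanning a mixed request out
# to per-asset-type readers, assuming hypothetical `equity_reader` and
# `future_reader` session-bar readers that share the same trading calendar.
#
#   from zipline.assets import Equity, Future
#   dispatch = AssetDispatchSessionBarReader(
#       trading_calendar=cal,
#       asset_finder=asset_finder,
#       readers={Equity: equity_reader, Future: future_reader},
#   )
#   (closes,) = dispatch.load_raw_arrays(["close"], start, end,
#                                        [equity_sid, future_sid])
#
# Each sid is routed to the reader registered for its asset type, and the
# per-type results are written back into one array in the original sid order.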
class AssetDispatchMinuteBarReader(AssetDispatchBarReader):
def _dt_window_size(self, start_dt, end_dt):
return len(self.trading_calendar.minutes_in_range(start_dt, end_dt))
class AssetDispatchSessionBarReader(AssetDispatchBarReader):
def _dt_window_size(self, start_dt, end_dt):
return len(self.trading_calendar.sessions_in_range(start_dt, end_dt))
@lazyval
def sessions(self):
return self.trading_calendar.sessions_in_range(
self.first_trading_day, self.last_available_dt
) | zipline-tej | /zipline_tej-0.0.48-cp38-cp38-win_amd64.whl/zipline/data/dispatch_bar_reader.py | dispatch_bar_reader.py |
from abc import ABCMeta, abstractmethod
import json
import os
from glob import glob
from os.path import join
from textwrap import dedent
from lru import LRU
import bcolz
from bcolz import ctable
from intervaltree import IntervalTree
import logbook
import numpy as np
import pandas as pd
from pandas import HDFStore
from toolz import keymap, valmap
from zipline.utils.calendar_utils import get_calendar
from zipline.data._minute_bar_internal import (
minute_value,
find_position_of_minute,
find_last_traded_position_internal,
)
from zipline.gens.sim_engine import NANOS_IN_MINUTE
from zipline.data.bar_reader import BarReader, NoDataForSid, NoDataOnDate
from zipline.data.bcolz_daily_bars import check_uint32_safe
from zipline.utils.cli import maybe_show_progress
from zipline.utils.compat import mappingproxy
from zipline.utils.memoize import lazyval
logger = logbook.Logger("MinuteBars")
US_EQUITIES_MINUTES_PER_DAY = 390
FUTURES_MINUTES_PER_DAY = 1440
DEFAULT_EXPECTEDLEN = US_EQUITIES_MINUTES_PER_DAY * 252 * 15
OHLC_RATIO = 1000
class BcolzMinuteOverlappingData(Exception):
pass
class BcolzMinuteWriterColumnMismatch(Exception):
pass
class MinuteBarReader(BarReader):
@property
def data_frequency(self):
return "minute"
def _calc_minute_index(market_opens, minutes_per_day):
minutes = np.zeros(len(market_opens) * minutes_per_day, dtype="datetime64[ns]")
deltas = np.arange(0, minutes_per_day, dtype="timedelta64[m]")
for i, market_open in enumerate(market_opens):
start = market_open.asm8
minute_values = start + deltas
start_ix = minutes_per_day * i
end_ix = start_ix + minutes_per_day
minutes[start_ix:end_ix] = minute_values
return pd.to_datetime(minutes, utc=True)
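# Illustrative sketch (not part of the library): the index produced by
# _calc_minute_index for two hypothetical market opens and a 3-minute day.
#
#   import pandas as pd
#   opens = pd.DatetimeIndex(["2016-01-19 14:31", "2016-01-20 14:31"])
#   _calc_minute_index(opens, minutes_per_day=3)
#   # DatetimeIndex(['2016-01-19 14:31', '2016-01-19 14:32',
#   #                '2016-01-19 14:33', '2016-01-20 14:31',
#   #                '2016-01-20 14:32', '2016-01-20 14:33'],
#   #               dtype='datetime64[ns, UTC]', freq=None)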
def _sid_subdir_path(sid):
"""
    Format subdir path to limit the number of directories in any given
    subdirectory to 100.
    The numbering scheme is designed to support at least 100000 equities.
Parameters
----------
sid : int
Asset identifier.
Returns
-------
out : string
A path for the bcolz rootdir, including subdirectory prefixes based on
the padded string representation of the given sid.
e.g. 1 is formatted as 00/00/000001.bcolz
"""
padded_sid = format(sid, "06")
return os.path.join(
# subdir 1 00/XX
padded_sid[0:2],
# subdir 2 XX/00
padded_sid[2:4],
"{0}.bcolz".format(str(padded_sid)),
)
def convert_cols(cols, scale_factor, sid, invalid_data_behavior):
"""Adapt OHLCV columns into uint32 columns.
Parameters
----------
cols : dict
A dict mapping each column name (open, high, low, close, volume)
to a float column to convert to uint32.
scale_factor : int
Factor to use to scale float values before converting to uint32.
sid : int
Sid of the relevant asset, for logging.
invalid_data_behavior : str
Specifies behavior when data cannot be converted to uint32.
If 'raise', raises an exception.
If 'warn', logs a warning and filters out incompatible values.
If 'ignore', silently filters out incompatible values.
"""
scaled_opens = (np.nan_to_num(cols["open"]) * scale_factor).round()
scaled_highs = (np.nan_to_num(cols["high"]) * scale_factor).round()
scaled_lows = (np.nan_to_num(cols["low"]) * scale_factor).round()
scaled_closes = (np.nan_to_num(cols["close"]) * scale_factor).round()
exclude_mask = np.zeros_like(scaled_opens, dtype=bool)
for col_name, scaled_col in [
("open", scaled_opens),
("high", scaled_highs),
("low", scaled_lows),
("close", scaled_closes),
]:
max_val = scaled_col.max()
try:
check_uint32_safe(max_val, col_name)
except ValueError:
if invalid_data_behavior == "raise":
raise
if invalid_data_behavior == "warn":
logger.warn(
"Values for sid={}, col={} contain some too large for "
"uint32 (max={}), filtering them out",
sid,
col_name,
max_val,
)
# We want to exclude all rows that have an unsafe value in
# this column.
            exclude_mask |= scaled_col >= np.iinfo(np.uint32).max
# Convert all cols to uint32.
opens = scaled_opens.astype(np.uint32)
highs = scaled_highs.astype(np.uint32)
lows = scaled_lows.astype(np.uint32)
closes = scaled_closes.astype(np.uint32)
volumes = cols["volume"].astype(np.uint32)
# Exclude rows with unsafe values by setting to zero.
opens[exclude_mask] = 0
highs[exclude_mask] = 0
lows[exclude_mask] = 0
closes[exclude_mask] = 0
volumes[exclude_mask] = 0
return opens, highs, lows, closes, volumes
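# Illustrative sketch (not part of the library): the scaling applied by
# convert_cols for a hypothetical single bar and the default OHLC_RATIO.
#
#   import numpy as np
#   cols = {k: np.array([10.456]) for k in ("open", "high", "low", "close")}
#   cols["volume"] = np.array([1500.0])
#   convert_cols(cols, OHLC_RATIO, sid=1, invalid_data_behavior="warn")
#   # -> (array([10456], dtype=uint32), ..., array([1500], dtype=uint32))
#
# Prices are rounded to the nearest 1/1000 and stored as uint32; bars whose
# scaled price would overflow uint32 are zeroed out (or raise, depending on
# invalid_data_behavior).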
class BcolzMinuteBarMetadata(object):
"""
Parameters
----------
ohlc_ratio : int
The factor by which the pricing data is multiplied so that the
float data can be stored as an integer.
calendar : trading_calendars.trading_calendar.TradingCalendar
The TradingCalendar on which the minute bars are based.
start_session : datetime
The first trading session in the data set.
end_session : datetime
The last trading session in the data set.
minutes_per_day : int
The number of minutes per each period.
"""
FORMAT_VERSION = 3
METADATA_FILENAME = "metadata.json"
@classmethod
def metadata_path(cls, rootdir):
return os.path.join(rootdir, cls.METADATA_FILENAME)
@classmethod
def read(cls, rootdir):
path = cls.metadata_path(rootdir)
with open(path) as fp:
raw_data = json.load(fp)
try:
version = raw_data["version"]
except KeyError:
# Version was first written with version 1, assume 0,
# if version does not match.
version = 0
default_ohlc_ratio = raw_data["ohlc_ratio"]
if version >= 1:
minutes_per_day = raw_data["minutes_per_day"]
else:
# version 0 always assumed US equities.
minutes_per_day = US_EQUITIES_MINUTES_PER_DAY
if version >= 2:
calendar = get_calendar(raw_data["calendar_name"])
start_session = pd.Timestamp(raw_data["start_session"], tz="UTC")
end_session = pd.Timestamp(raw_data["end_session"], tz="UTC")
else:
# No calendar info included in older versions, so
# default to NYSE.
calendar = get_calendar("XNYS")
start_session = pd.Timestamp(raw_data["first_trading_day"], tz="UTC")
end_session = calendar.minute_to_session_label(
pd.Timestamp(raw_data["market_closes"][-1], unit="m", tz="UTC")
)
if version >= 3:
ohlc_ratios_per_sid = raw_data["ohlc_ratios_per_sid"]
if ohlc_ratios_per_sid is not None:
ohlc_ratios_per_sid = keymap(int, ohlc_ratios_per_sid)
else:
ohlc_ratios_per_sid = None
return cls(
default_ohlc_ratio,
ohlc_ratios_per_sid,
calendar,
start_session,
end_session,
minutes_per_day,
version=version,
)
def __init__(
self,
default_ohlc_ratio,
ohlc_ratios_per_sid,
calendar,
start_session,
end_session,
minutes_per_day,
version=FORMAT_VERSION,
):
self.calendar = calendar
self.start_session = start_session
self.end_session = end_session
self.default_ohlc_ratio = default_ohlc_ratio
self.ohlc_ratios_per_sid = ohlc_ratios_per_sid
self.minutes_per_day = minutes_per_day
self.version = version
def write(self, rootdir):
"""
Write the metadata to a JSON file in the rootdir.
Values contained in the metadata are:
version : int
The value of FORMAT_VERSION of this class.
ohlc_ratio : int
The default ratio by which to multiply the pricing data to
convert the floats from floats to an integer to fit within
the np.uint32. If ohlc_ratios_per_sid is None or does not
contain a mapping for a given sid, this ratio is used.
ohlc_ratios_per_sid : dict
A dict mapping each sid in the output to the factor by
which the pricing data is multiplied so that the float data
can be stored as an integer.
minutes_per_day : int
The number of minutes per each period.
calendar_name : str
The name of the TradingCalendar on which the minute bars are
based.
start_session : datetime
'YYYY-MM-DD' formatted representation of the first trading
session in the data set.
end_session : datetime
'YYYY-MM-DD' formatted representation of the last trading
session in the data set.
"""
# calendar = self.calendar
# slicer = calendar.schedule.index.slice_indexer(
# self.start_session,
# self.end_session,
# )
# schedule = calendar.schedule[slicer]
# market_opens = schedule.market_open
# market_closes = schedule.market_close
metadata = {
"version": self.version,
"ohlc_ratio": self.default_ohlc_ratio,
"ohlc_ratios_per_sid": self.ohlc_ratios_per_sid,
"minutes_per_day": self.minutes_per_day,
"calendar_name": self.calendar.name,
"start_session": str(self.start_session.date()),
"end_session": str(self.end_session.date()),
}
with open(self.metadata_path(rootdir), "w+") as fp:
json.dump(metadata, fp)
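# Illustrative sketch (not part of the library): the metadata.json payload
# produced by BcolzMinuteBarMetadata.write above for a hypothetical dataset
# on the XNYS calendar.
#
#   {
#     "version": 3,
#     "ohlc_ratio": 1000,
#     "ohlc_ratios_per_sid": null,
#     "minutes_per_day": 390,
#     "calendar_name": "XNYS",
#     "start_session": "2016-01-04",
#     "end_session": "2016-12-30"
#   }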
class BcolzMinuteBarWriter(object):
"""
Class capable of writing minute OHLCV data to disk into bcolz format.
Parameters
----------
rootdir : string
Path to the root directory into which to write the metadata and
bcolz subdirectories.
calendar : trading_calendars.trading_calendar.TradingCalendar
The trading calendar on which to base the minute bars. Used to
get the market opens used as a starting point for each periodic
span of minutes in the index, and the market closes that
correspond with the market opens.
minutes_per_day : int
The number of minutes per each period. Defaults to 390, the mode
of minutes in NYSE trading days.
start_session : datetime
The first trading session in the data set.
end_session : datetime
The last trading session in the data set.
default_ohlc_ratio : int, optional
The default ratio by which to multiply the pricing data to
convert from floats to integers that fit within np.uint32. If
ohlc_ratios_per_sid is None or does not contain a mapping for a
given sid, this ratio is used. Default is OHLC_RATIO (1000).
ohlc_ratios_per_sid : dict, optional
A dict mapping each sid in the output to the ratio by which to
multiply the pricing data to convert the floats from floats to
an integer to fit within the np.uint32.
expectedlen : int, optional
The expected length of the dataset, used when creating the initial
bcolz ctable.
If the expectedlen is not used, the chunksize and corresponding
compression ratios are not ideal.
Defaults to supporting 15 years of NYSE equity market data.
see: https://bcolz.blosc.org/opt-tips.html#informing-about-the-length-of-your-carrays # noqa
write_metadata : bool, optional
If True, writes the minute bar metadata (on init of the writer).
If False, no metadata is written (existing metadata is
retained). Default is True.
Notes
-----
Writes a bcolz directory for each individual sid, all contained within
a root directory which also contains metadata about the entire dataset.
Each individual asset's data is stored as a bcolz table with a column for
each pricing field: (open, high, low, close, volume)
The open, high, low, and close columns are integers which are 1000 times
the quoted price, so that the data can represented and stored as an
np.uint32, supporting market prices quoted up to the thousands place.
volume is a np.uint32 with no mutation of the tens place.
The 'index' for each individual asset are a repeating period of minutes of
length `minutes_per_day` starting from each market open.
The file format does not account for half-days.
e.g.:
2016-01-19 14:31
2016-01-19 14:32
...
2016-01-19 20:59
2016-01-19 21:00
2016-01-20 14:31
2016-01-20 14:32
...
2016-01-20 20:59
2016-01-20 21:00
All assets are written with a common 'index', sharing a common first
trading day. Assets that do not begin trading until after the first trading
    day will have zeros for all pricing data up until data is traded.
'index' is in quotations, because bcolz does not provide an index. The
format allows index-like behavior by writing each minute's data into the
corresponding position of the enumeration of the aforementioned datetime
index.
The datetimes which correspond to each position are written in the metadata
as integer nanoseconds since the epoch into the `minute_index` key.
See Also
--------
zipline.data.minute_bars.BcolzMinuteBarReader
"""
COL_NAMES = ("open", "high", "low", "close", "volume")
def __init__(
self,
rootdir,
calendar,
start_session,
end_session,
minutes_per_day,
default_ohlc_ratio=OHLC_RATIO,
ohlc_ratios_per_sid=None,
expectedlen=DEFAULT_EXPECTEDLEN,
write_metadata=True,
):
self._rootdir = rootdir
self._start_session = start_session
self._end_session = end_session
self._calendar = calendar
slicer = calendar.schedule.index.slice_indexer(start_session, end_session)
self._schedule = calendar.schedule[slicer]
self._session_labels = self._schedule.index
self._minutes_per_day = minutes_per_day
self._expectedlen = expectedlen
self._default_ohlc_ratio = default_ohlc_ratio
self._ohlc_ratios_per_sid = ohlc_ratios_per_sid
self._minute_index = _calc_minute_index(
self._schedule.market_open, self._minutes_per_day
)
if write_metadata:
metadata = BcolzMinuteBarMetadata(
self._default_ohlc_ratio,
self._ohlc_ratios_per_sid,
self._calendar,
self._start_session,
self._end_session,
self._minutes_per_day,
)
metadata.write(self._rootdir)
@classmethod
def open(cls, rootdir, end_session=None):
"""
Open an existing ``rootdir`` for writing.
Parameters
----------
end_session : Timestamp (optional)
When appending, the intended new ``end_session``.
"""
metadata = BcolzMinuteBarMetadata.read(rootdir)
return BcolzMinuteBarWriter(
rootdir,
metadata.calendar,
metadata.start_session,
end_session if end_session is not None else metadata.end_session,
metadata.minutes_per_day,
metadata.default_ohlc_ratio,
metadata.ohlc_ratios_per_sid,
write_metadata=end_session is not None,
)
@property
def first_trading_day(self):
return self._start_session
def ohlc_ratio_for_sid(self, sid):
if self._ohlc_ratios_per_sid is not None:
try:
return self._ohlc_ratios_per_sid[sid]
except KeyError:
pass
# If no ohlc_ratios_per_sid dict is passed, or if the specified
# sid is not in the dict, fallback to the general ohlc_ratio.
return self._default_ohlc_ratio
def sidpath(self, sid):
"""
Parameters
----------
sid : int
Asset identifier.
Returns
-------
out : string
Full path to the bcolz rootdir for the given sid.
"""
sid_subdir = _sid_subdir_path(sid)
return join(self._rootdir, sid_subdir)
def last_date_in_output_for_sid(self, sid):
"""
Parameters
----------
sid : int
Asset identifier.
Returns
-------
out : pd.Timestamp
The midnight of the last date written in to the output for the
given sid.
"""
sizes_path = "{0}/close/meta/sizes".format(self.sidpath(sid))
if not os.path.exists(sizes_path):
return pd.NaT
with open(sizes_path, mode="r") as f:
sizes = f.read()
data = json.loads(sizes)
# use integer division so that the result is an int
# for pandas index later https://github.com/pandas-dev/pandas/blob/master/pandas/tseries/base.py#L247 # noqa
num_days = data["shape"][0] // self._minutes_per_day
if num_days == 0:
# empty container
return pd.NaT
return self._session_labels[num_days - 1]
def _init_ctable(self, path):
"""
Create empty ctable for given path.
Parameters
----------
path : string
The path to rootdir of the new ctable.
"""
# Only create the containing subdir on creation.
# This is not to be confused with the `.bcolz` directory, but is the
# directory up one level from the `.bcolz` directories.
sid_containing_dirname = os.path.dirname(path)
if not os.path.exists(sid_containing_dirname):
# Other sids may have already created the containing directory.
os.makedirs(sid_containing_dirname)
initial_array = np.empty(0, np.uint32)
table = ctable(
rootdir=path,
columns=[
initial_array,
initial_array,
initial_array,
initial_array,
initial_array,
],
names=["open", "high", "low", "close", "volume"],
expectedlen=self._expectedlen,
mode="w",
)
table.flush()
return table
def _ensure_ctable(self, sid):
"""Ensure that a ctable exists for ``sid``, then return it."""
sidpath = self.sidpath(sid)
if not os.path.exists(sidpath):
return self._init_ctable(sidpath)
return bcolz.ctable(rootdir=sidpath, mode="a")
def _zerofill(self, table, numdays):
# Compute the number of minutes to be filled, accounting for the
# possibility of a partial day's worth of minutes existing for
# the previous day.
minute_offset = len(table) % self._minutes_per_day
num_to_prepend = numdays * self._minutes_per_day - minute_offset
prepend_array = np.zeros(num_to_prepend, np.uint32)
# Fill all OHLCV with zeros.
table.append([prepend_array] * 5)
table.flush()
def pad(self, sid, date):
"""
Fill sid container with empty data through the specified date.
If the last recorded trade is not at the close, then that day will be
padded with zeros until its close. Any day after that (up to and
including the specified date) will be padded with `minute_per_day`
worth of zeros
Parameters
----------
sid : int
The asset identifier for the data being written.
date : datetime-like
            The date used to calculate how many slots to pad.
            The padding is done through the date, i.e. after the padding is
            done, `last_date_in_output_for_sid` will be equal to `date`.
"""
table = self._ensure_ctable(sid)
last_date = self.last_date_in_output_for_sid(sid)
tds = self._session_labels
if date <= last_date or date < tds[0]:
# No need to pad.
return
if pd.isnull(last_date):
# If there is no data, determine how many days to add so that
# desired days are written to the correct slots.
days_to_zerofill = tds[tds.slice_indexer(end=date)]
else:
days_to_zerofill = tds[
tds.slice_indexer(start=last_date + tds.freq, end=date)
]
self._zerofill(table, len(days_to_zerofill))
new_last_date = self.last_date_in_output_for_sid(sid)
assert new_last_date == date, "new_last_date={0} != date={1}".format(
new_last_date, date
)
def set_sid_attrs(self, sid, **kwargs):
"""Write all the supplied kwargs as attributes of the sid's file."""
table = self._ensure_ctable(sid)
for k, v in kwargs.items():
table.attrs[k] = v
def write(self, data, show_progress=False, invalid_data_behavior="warn"):
"""Write a stream of minute data.
Parameters
----------
data : iterable[(int, pd.DataFrame)]
The data to write. Each element should be a tuple of sid, data
where data has the following format:
columns : ('open', 'high', 'low', 'close', 'volume')
open : float64
high : float64
low : float64
close : float64
volume : float64|int64
index : DatetimeIndex of market minutes.
A given sid may appear more than once in ``data``; however,
the dates must be strictly increasing.
show_progress : bool, optional
Whether or not to show a progress bar while writing.
"""
ctx = maybe_show_progress(
data,
show_progress=show_progress,
item_show_func=lambda e: e if e is None else str(e[0]),
label="Merging minute equity files:",
)
write_sid = self.write_sid
with ctx as it:
for e in it:
write_sid(*e, invalid_data_behavior=invalid_data_behavior)
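    # Illustrative sketch (not part of the library): feeding write() above with
    # a hypothetical stream of (sid, DataFrame) pairs, where each frame is
    # indexed by market minutes and carries open/high/low/close/volume columns.
    #
    #   writer = BcolzMinuteBarWriter(
    #       "minute_equities", cal, sessions[0], sessions[-1],
    #       minutes_per_day=US_EQUITIES_MINUTES_PER_DAY,
    #   )
    #   writer.write(((sid, frame) for sid, frame in minute_frames.items()),
    #                show_progress=True)
    #
    # A sid may appear more than once in the stream, but its dates must be
    # strictly increasing, otherwise BcolzMinuteOverlappingData is raised.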
def write_sid(self, sid, df, invalid_data_behavior="warn"):
"""
Write the OHLCV data for the given sid.
If there is no bcolz ctable yet created for the sid, create it.
        If the bcolz ctable does not already extend to the date before the
        first day provided, fill the ctable with 0s up to that date.
Parameters
----------
sid : int
            The asset identifier for the data being written.
df : pd.DataFrame
DataFrame of market data with the following characteristics.
columns : ('open', 'high', 'low', 'close', 'volume')
open : float64
high : float64
low : float64
close : float64
volume : float64|int64
index : DatetimeIndex of market minutes.
"""
cols = {
"open": df.open.values,
"high": df.high.values,
"low": df.low.values,
"close": df.close.values,
"volume": df.volume.values,
}
dts = df.index.values
# Call internal method, since DataFrame has already ensured matching
# index and value lengths.
self._write_cols(sid, dts, cols, invalid_data_behavior)
def write_cols(self, sid, dts, cols, invalid_data_behavior="warn"):
"""
Write the OHLCV data for the given sid.
If there is no bcolz ctable yet created for the sid, create it.
        If the bcolz ctable does not already extend to the date before the
        first day provided, fill the ctable with 0s up to that date.
Parameters
----------
sid : int
The asset identifier for the data being written.
dts : datetime64 array
The dts corresponding to values in cols.
cols : dict of str -> np.array
dict of market data with the following characteristics.
keys are ('open', 'high', 'low', 'close', 'volume')
open : float64
high : float64
low : float64
close : float64
volume : float64|int64
"""
if not all(len(dts) == len(cols[name]) for name in self.COL_NAMES):
raise BcolzMinuteWriterColumnMismatch(
"Length of dts={0} should match cols: {1}".format(
len(dts),
" ".join(
"{0}={1}".format(name, len(cols[name]))
for name in self.COL_NAMES
),
)
)
self._write_cols(sid, dts, cols, invalid_data_behavior)
def _write_cols(self, sid, dts, cols, invalid_data_behavior):
"""
Internal method for `write_cols` and `write`.
Parameters
----------
sid : int
The asset identifier for the data being written.
dts : datetime64 array
The dts corresponding to values in cols.
cols : dict of str -> np.array
dict of market data with the following characteristics.
keys are ('open', 'high', 'low', 'close', 'volume')
open : float64
high : float64
low : float64
close : float64
volume : float64|int64
"""
table = self._ensure_ctable(sid)
tds = self._session_labels
input_first_day = self._calendar.minute_to_session_label(
pd.Timestamp(dts[0]), direction="previous"
)
last_date = self.last_date_in_output_for_sid(sid)
day_before_input = input_first_day - tds.freq
self.pad(sid, day_before_input)
table = self._ensure_ctable(sid)
# Get the number of minutes already recorded in this sid's ctable
num_rec_mins = table.size
all_minutes = self._minute_index
# Get the latest minute we wish to write to the ctable
try:
# ensure tz-aware Timestamp has tz UTC
last_minute_to_write = pd.Timestamp(dts[-1]).tz_convert(tz="UTC")
except TypeError:
# if naive, instead convert timestamp to UTC
last_minute_to_write = pd.Timestamp(dts[-1]).tz_localize(tz="UTC")
# In the event that we've already written some minutely data to the
# ctable, guard against overwriting that data.
if num_rec_mins > 0:
last_recorded_minute = all_minutes[num_rec_mins - 1]
if last_minute_to_write <= last_recorded_minute:
raise BcolzMinuteOverlappingData(
dedent(
"""
Data with last_date={0} already includes input start={1} for
sid={2}""".strip()
).format(last_date, input_first_day, sid)
)
latest_min_count = all_minutes.get_loc(last_minute_to_write)
# Get all the minutes we wish to write (all market minutes after the
# latest currently written, up to and including last_minute_to_write)
all_minutes_in_window = all_minutes[num_rec_mins : latest_min_count + 1]
minutes_count = all_minutes_in_window.size
open_col = np.zeros(minutes_count, dtype=np.uint32)
high_col = np.zeros(minutes_count, dtype=np.uint32)
low_col = np.zeros(minutes_count, dtype=np.uint32)
close_col = np.zeros(minutes_count, dtype=np.uint32)
vol_col = np.zeros(minutes_count, dtype=np.uint32)
dt_ixs = np.searchsorted(
all_minutes_in_window.values, pd.Index(dts).tz_localize(None).values
)
ohlc_ratio = self.ohlc_ratio_for_sid(sid)
(
open_col[dt_ixs],
high_col[dt_ixs],
low_col[dt_ixs],
close_col[dt_ixs],
vol_col[dt_ixs],
) = convert_cols(cols, ohlc_ratio, sid, invalid_data_behavior)
table.append([open_col, high_col, low_col, close_col, vol_col])
table.flush()
def data_len_for_day(self, day):
"""
Return the number of data points up to and including the
provided day.
"""
day_ix = self._session_labels.get_loc(day)
# Add one to the 0-indexed day_ix to get the number of days.
num_days = day_ix + 1
return num_days * self._minutes_per_day
def truncate(self, date):
"""Truncate data beyond this date in all ctables."""
truncate_slice_end = self.data_len_for_day(date)
glob_path = os.path.join(self._rootdir, "*", "*", "*.bcolz")
sid_paths = sorted(glob(glob_path))
for sid_path in sid_paths:
file_name = os.path.basename(sid_path)
try:
table = bcolz.open(rootdir=sid_path)
except IOError:
continue
if table.len <= truncate_slice_end:
logger.info("{0} not past truncate date={1}.", file_name, date)
continue
logger.info("Truncating {0} at end_date={1}", file_name, date.date())
table.resize(truncate_slice_end)
# Update end session in metadata.
metadata = BcolzMinuteBarMetadata.read(self._rootdir)
metadata.end_session = date
metadata.write(self._rootdir)
class BcolzMinuteBarReader(MinuteBarReader):
"""
Reader for data written by BcolzMinuteBarWriter
Parameters
----------
rootdir : string
The root directory containing the metadata and asset bcolz
directories.
See Also
--------
zipline.data.minute_bars.BcolzMinuteBarWriter
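Examples
--------
Illustrative only; assumes ``/data/minute_bars`` is a rootdir that was
previously populated by a ``BcolzMinuteBarWriter``::
    reader = BcolzMinuteBarReader("/data/minute_bars")
    print(reader.first_trading_day, reader.last_available_dt)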
"""
FIELDS = ("open", "high", "low", "close", "volume")
DEFAULT_MINUTELY_SID_CACHE_SIZES = {
"close": 3000,
"open": 1550,
"high": 1550,
"low": 1550,
"volume": 1550,
}
assert set(FIELDS) == set(
DEFAULT_MINUTELY_SID_CACHE_SIZES
), "FIELDS should match DEFAULT_MINUTELY_SID_CACHE_SIZES keys"
# Wrap the defaults in proxy so that we don't accidentally mutate them in
# place in the constructor. If a user wants to change the defaults, they
# can do so by mutating DEFAULT_MINUTELY_SID_CACHE_SIZES.
_default_proxy = mappingproxy(DEFAULT_MINUTELY_SID_CACHE_SIZES)
def __init__(self, rootdir, sid_cache_sizes=_default_proxy):
self._rootdir = rootdir
metadata = self._get_metadata()
self._start_session = metadata.start_session
self._end_session = metadata.end_session
self.calendar = metadata.calendar
slicer = self.calendar.schedule.index.slice_indexer(
self._start_session,
self._end_session,
)
self._schedule = self.calendar.schedule[slicer]
self._market_opens = self._schedule.market_open
self._market_open_values = self._market_opens.values.astype(
"datetime64[m]"
).astype(np.int64)
self._market_closes = self._schedule.market_close
self._market_close_values = self._market_closes.values.astype(
"datetime64[m]"
).astype(np.int64)
self._default_ohlc_inverse = 1.0 / metadata.default_ohlc_ratio
ohlc_ratios = metadata.ohlc_ratios_per_sid
if ohlc_ratios:
self._ohlc_inverses_per_sid = valmap(lambda x: 1.0 / x, ohlc_ratios)
else:
self._ohlc_inverses_per_sid = None
self._minutes_per_day = metadata.minutes_per_day
self._carrays = {field: LRU(sid_cache_sizes[field]) for field in self.FIELDS}
self._last_get_value_dt_position = None
self._last_get_value_dt_value = None
# This is to avoid bad data or other performance-killing situations
# where there is a consecutive streak of 0 (no volume) starting at an
# asset's start date.
# E.g. if asset 1 started on 2015-01-03 but its first trade is
# 2015-01-06 10:31 AM US/Eastern, this dict would store {1: 23675971},
# which is the minute epoch of 2015-01-06 10:31 AM US/Eastern.
self._known_zero_volume_dict = {}
def _get_metadata(self):
return BcolzMinuteBarMetadata.read(self._rootdir)
@property
def trading_calendar(self):
return self.calendar
@lazyval
def last_available_dt(self):
_, close = self.calendar.open_and_close_for_session(self._end_session)
return close
@property
def first_trading_day(self):
return self._start_session
def _ohlc_ratio_inverse_for_sid(self, sid):
if self._ohlc_inverses_per_sid is not None:
try:
return self._ohlc_inverses_per_sid[sid]
except KeyError:
pass
# If we can not get a sid-specific OHLC inverse for this sid,
# fallback to the default.
return self._default_ohlc_inverse
def _minutes_to_exclude(self):
"""
Calculate the minutes which should be excluded when a window
occurs on days which had an early close, i.e. days where the close
implied by the regular number of minutes per day does not match the
actual market close.
Returns
-------
List of (market_open, early_close) Timestamp pairs representing the
ranges of minutes to exclude because of early closes.
"""
market_opens = self._market_opens.values.astype("datetime64[m]")
market_closes = self._market_closes.values.astype("datetime64[m]")
minutes_per_day = (market_closes - market_opens).astype(np.int64)
early_indices = np.where(minutes_per_day != self._minutes_per_day - 1)[0]
early_opens = self._market_opens[early_indices]
early_closes = self._market_closes[early_indices]
minutes = [
(market_open, early_close)
for market_open, early_close in zip(early_opens, early_closes)
]
return minutes
@lazyval
def _minute_exclusion_tree(self):
"""
Build an interval tree keyed by the start and end of each range
of positions that should be dropped from windows. (These are the minutes
between an early close and the minute which would be the close based
on the regular period if there were no early close.)
The value of each node is the same start and end position stored as
a tuple.
The data is stored this way to quickly answer the question: does a
given start and end position overlap any of the exclusion spans?
Returns
-------
IntervalTree containing nodes which represent the minutes to exclude
because of early closes.
"""
itree = IntervalTree()
for market_open, early_close in self._minutes_to_exclude():
start_pos = self._find_position_of_minute(early_close) + 1
end_pos = (
self._find_position_of_minute(market_open) + self._minutes_per_day - 1
)
data = (start_pos, end_pos)
itree[start_pos : end_pos + 1] = data
return itree
def _exclusion_indices_for_range(self, start_idx, end_idx):
"""
Returns
-------
List of tuples of (start, stop) which represent the ranges of minutes
which should be excluded when a market minute window is requested.
"""
itree = self._minute_exclusion_tree
if itree.overlaps(start_idx, end_idx):
ranges = []
intervals = itree[start_idx:end_idx]
for interval in intervals:
ranges.append(interval.data)
return sorted(ranges)
else:
return None
def _get_carray_path(self, sid, field):
sid_subdir = _sid_subdir_path(sid)
# carrays are subdirectories of the sid's rootdir
return os.path.join(self._rootdir, sid_subdir, field)
def _open_minute_file(self, field, sid):
sid = int(sid)
try:
carray = self._carrays[field][sid]
except KeyError:
try:
carray = self._carrays[field][sid] = bcolz.carray(
rootdir=self._get_carray_path(sid, field),
mode="r",
)
except IOError:
raise NoDataForSid("No minute data for sid {}.".format(sid))
return carray
def table_len(self, sid):
"""Returns the length of the underlying table for this sid."""
return len(self._open_minute_file("close", sid))
def get_sid_attr(self, sid, name):
sid_subdir = _sid_subdir_path(sid)
sid_path = os.path.join(self._rootdir, sid_subdir)
attrs = bcolz.attrs.attrs(sid_path, "r")
try:
return attrs[name]
except KeyError:
return None
def get_value(self, sid, dt, field):
"""
Retrieve the pricing info for the given sid, dt, and field.
Parameters
----------
sid : int
Asset identifier.
dt : datetime-like
The datetime at which the trade occurred.
field : string
The type of pricing data to retrieve.
('open', 'high', 'low', 'close', 'volume')
Returns
-------
out : float|int
The market data for the given sid, dt, and field coordinates.
For OHLC:
Returns a float if a trade occurred at the given dt.
If no trade occurred, a np.nan is returned.
For volume:
Returns the integer value of the volume.
(A volume of 0 signifies no trades for the given dt.)
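Examples
--------
Illustrative sketch (assumes ``reader`` is a ``BcolzMinuteBarReader``,
``pd`` is pandas, and sid 1 exists in the data)::
    dt = pd.Timestamp("2016-01-05 14:31", tz="UTC")
    close = reader.get_value(1, dt, "close")    # float, NaN if no trade
    volume = reader.get_value(1, dt, "volume")  # int, 0 if no trade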
"""
if self._last_get_value_dt_value == dt.value:
minute_pos = self._last_get_value_dt_position
else:
try:
minute_pos = self._find_position_of_minute(dt)
except ValueError:
raise NoDataOnDate()
self._last_get_value_dt_value = dt.value
self._last_get_value_dt_position = minute_pos
try:
value = self._open_minute_file(field, sid)[minute_pos]
except IndexError:
value = 0
if value == 0:
if field == "volume":
return 0
else:
return np.nan
if field != "volume":
value *= self._ohlc_ratio_inverse_for_sid(sid)
return value
def get_last_traded_dt(self, asset, dt):
minute_pos = self._find_last_traded_position(asset, dt)
if minute_pos == -1:
return pd.NaT
return self._pos_to_minute(minute_pos)
def _find_last_traded_position(self, asset, dt):
volumes = self._open_minute_file("volume", asset)
start_date_minute = asset.start_date.value / NANOS_IN_MINUTE
dt_minute = dt.value / NANOS_IN_MINUTE
try:
# if we know of a dt before which this asset has no volume,
# don't look before that dt
earliest_dt_to_search = self._known_zero_volume_dict[asset.sid]
except KeyError:
earliest_dt_to_search = start_date_minute
if dt_minute < earliest_dt_to_search:
return -1
pos = find_last_traded_position_internal(
self._market_open_values,
self._market_close_values,
dt_minute,
earliest_dt_to_search,
volumes,
self._minutes_per_day,
)
if pos == -1:
# if we didn't find any volume before this dt, save it to avoid
# work in the future.
try:
self._known_zero_volume_dict[asset.sid] = max(
dt_minute, self._known_zero_volume_dict[asset.sid]
)
except KeyError:
self._known_zero_volume_dict[asset.sid] = dt_minute
return pos
def _pos_to_minute(self, pos):
minute_epoch = minute_value(
self._market_open_values, pos, self._minutes_per_day
)
return pd.Timestamp(minute_epoch, tz="UTC", unit="m")
def _find_position_of_minute(self, minute_dt):
"""
Internal method that returns the position of the given minute in the
list of every trading minute since market open of the first trading
day. Adjusts non-market minutes to the last close.
e.g. this method would return 1 for 2002-01-02 9:32 AM Eastern, if
2002-01-02 is the first trading day of the dataset.
Parameters
----------
minute_dt: pd.Timestamp
The minute whose position should be calculated.
Returns
-------
int: The position of the given minute in the list of all trading
minutes since market open on the first trading day.
"""
return find_position_of_minute(
self._market_open_values,
self._market_close_values,
minute_dt.value / NANOS_IN_MINUTE,
self._minutes_per_day,
False,
)
def load_raw_arrays(self, fields, start_dt, end_dt, sids):
"""
Parameters
----------
fields : list of str
'open', 'high', 'low', 'close', or 'volume'
start_dt: Timestamp
Beginning of the window range.
end_dt: Timestamp
End of the window range.
sids : list of int
The asset identifiers in the window.
Returns
-------
list of np.ndarray
A list with an entry per field of ndarrays with shape
(minutes in range, sids) with a dtype of float64, containing the
values for the respective field over the start and end dt range.
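Examples
--------
Illustrative sketch (assumes ``reader`` covers the requested range and
sids 1 and 2 exist)::
    opens, closes = reader.load_raw_arrays(
        fields=["open", "close"],
        start_dt=pd.Timestamp("2016-01-05 14:31", tz="UTC"),
        end_dt=pd.Timestamp("2016-01-05 15:00", tz="UTC"),
        sids=[1, 2],
    )
    # Each returned array has shape (market minutes in range, 2).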
"""
start_idx = self._find_position_of_minute(start_dt)
end_idx = self._find_position_of_minute(end_dt)
num_minutes = end_idx - start_idx + 1
results = []
indices_to_exclude = self._exclusion_indices_for_range(start_idx, end_idx)
if indices_to_exclude is not None:
for excl_start, excl_stop in indices_to_exclude:
length = excl_stop - excl_start + 1
num_minutes -= length
shape = num_minutes, len(sids)
for field in fields:
if field != "volume":
out = np.full(shape, np.nan)
else:
out = np.zeros(shape, dtype=np.uint32)
for i, sid in enumerate(sids):
carray = self._open_minute_file(field, sid)
values = carray[start_idx : end_idx + 1]
if indices_to_exclude is not None:
for excl_start, excl_stop in indices_to_exclude[::-1]:
excl_slice = np.s_[
excl_start - start_idx : excl_stop - start_idx + 1
]
values = np.delete(values, excl_slice)
where = values != 0
# first slice down to len(where) because we might not have
# written data for all the minutes requested
if field != "volume":
out[: len(where), i][where] = values[
where
] * self._ohlc_ratio_inverse_for_sid(sid)
else:
out[: len(where), i][where] = values[where]
results.append(out)
return results
class MinuteBarUpdateReader(object, metaclass=ABCMeta):
"""
Abstract base class for minute update readers.
"""
@abstractmethod
def read(self, dts, sids):
"""
Read and return pricing update data.
Parameters
----------
dts : DatetimeIndex
The minutes for which to read the pricing updates.
sids : iter[int]
The sids for which to read the pricing updates.
Returns
-------
data : iter[(int, DataFrame)]
Returns an iterable of ``sid`` to the corresponding OHLCV data.
"""
raise NotImplementedError()
class H5MinuteBarUpdateWriter(object):
"""
Writer for files containing minute bar updates for consumption by a writer
for a ``MinuteBarReader`` format.
Parameters
----------
path : str
The destination path.
complevel : int, optional
The HDF5 complevel, defaults to ``5``.
complib : str, optional
The HDF5 complib, defaults to ``zlib``.
"""
FORMAT_VERSION = 0
_COMPLEVEL = 5
_COMPLIB = "zlib"
def __init__(self, path, complevel=None, complib=None):
self._complevel = complevel if complevel is not None else self._COMPLEVEL
self._complib = complib if complib is not None else self._COMPLIB
self._path = path
def write(self, frames):
"""
Write the frames to the target HDF5 file with ``pd.MultiIndex``
Parameters
----------
frames : dict[int -> DataFrame]
A mapping from sid to the corresponding OHLCV pricing data.
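Examples
--------
Illustrative sketch; ``frame_for_sid_1`` and ``frame_for_sid_2`` are
placeholder OHLCV DataFrames indexed by market minutes::
    writer = H5MinuteBarUpdateWriter("updates.h5")
    writer.write({1: frame_for_sid_1, 2: frame_for_sid_2})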
"""
with HDFStore(
self._path, "w", complevel=self._complevel, complib=self._complib
) as store:
data = pd.concat(frames, keys=frames.keys()).sort_index()
data.index.set_names(["sid", "date_time"], inplace=True)
store.append("updates", data)
class H5MinuteBarUpdateReader(MinuteBarUpdateReader):
"""
Reader for minute bar updates stored in HDF5 files.
Parameters
----------
path : str
The path of the HDF5 file from which to source data.
"""
def __init__(self, path):
# todo: error handling
self._df = pd.read_hdf(path).sort_index()
def read(self, dts, sids):
df = self._df.loc[pd.IndexSlice[sids, dts], :]
for sid, data in df.groupby(level="sid"):
data.index = data.index.droplevel("sid")
yield sid, data | zipline-tej | /zipline_tej-0.0.48-cp38-cp38-win_amd64.whl/zipline/data/minute_bars.py | minute_bars.py |
from operator import mul
from logbook import Logger
import numpy as np
from numpy import float64, int64, nan
import pandas as pd
from pandas import isnull
from zipline.utils.calendar_utils import get_calendar #20230514 (by MRC) get_bundle_price
from zipline.data import bundles #20230514 (by MRC) get_bundle_price
from zipline.protocol import BarData #20230710 (by MRC) get_bundle_price
from zipline.finance.asset_restrictions import NoRestrictions #20230710 (by MRC) get_bundle_price
from functools import reduce
from zipline.assets import (
Asset,
AssetConvertible,
Equity,
Future,
PricingDataAssociable,
)
from zipline.assets.continuous_futures import ContinuousFuture
from zipline.data.continuous_future_reader import (
ContinuousFutureSessionBarReader,
ContinuousFutureMinuteBarReader,
)
from zipline.assets.roll_finder import (
CalendarRollFinder,
VolumeRollFinder,
)
from zipline.data.dispatch_bar_reader import (
AssetDispatchMinuteBarReader,
AssetDispatchSessionBarReader,
)
from zipline.data.resample import (
DailyHistoryAggregator,
ReindexMinuteBarReader,
ReindexSessionBarReader,
)
from zipline.data.history_loader import (
DailyHistoryLoader,
MinuteHistoryLoader,
)
from zipline.data.bar_reader import NoDataOnDate
from zipline.utils.memoize import remember_last
from zipline.utils.pandas_utils import normalize_date
from zipline.errors import HistoryWindowStartsBeforeData
log = Logger("DataPortal")
BASE_FIELDS = frozenset(
[
"open",
"high",
"low",
"close",
"volume",
"price",
"contract",
"sid",
"last_traded",
]
)
OHLCV_FIELDS = frozenset(["open", "high", "low", "close", "volume"])
OHLCVP_FIELDS = frozenset(["open", "high", "low", "close", "volume", "price"])
HISTORY_FREQUENCIES = set(["1m", "1d"])
DEFAULT_MINUTE_HISTORY_PREFETCH = 1560
DEFAULT_DAILY_HISTORY_PREFETCH = 40
_DEF_M_HIST_PREFETCH = DEFAULT_MINUTE_HISTORY_PREFETCH
_DEF_D_HIST_PREFETCH = DEFAULT_DAILY_HISTORY_PREFETCH
class DataPortal(object):
"""Interface to all of the data that a zipline simulation needs.
This is used by the simulation runner to answer questions about the data,
like getting the prices of assets on a given day or to service history
calls.
Parameters
----------
asset_finder : zipline.assets.assets.AssetFinder
The AssetFinder instance used to resolve assets.
trading_calendar: zipline.utils.calendar.exchange_calendar.TradingCalendar
The calendar instance used to provide minute->session information.
first_trading_day : pd.Timestamp
The first trading day for the simulation.
equity_daily_reader : BcolzDailyBarReader, optional
The daily bar reader for equities. This will be used to service
daily data backtests or daily history calls in a minute backtest.
If a daily bar reader is not provided but a minute bar reader is,
the minutes will be rolled up to serve the daily requests.
equity_minute_reader : BcolzMinuteBarReader, optional
The minute bar reader for equities. This will be used to service
minute data backtests or minute history calls. This can be used
to serve daily calls if no daily bar reader is provided.
future_daily_reader : BcolzDailyBarReader, optional
The daily bar reader for futures. This will be used to service
daily data backtests or daily history calls in a minute backtest.
If a daily bar reader is not provided but a minute bar reader is,
the minutes will be rolled up to serve the daily requests.
future_minute_reader : BcolzFutureMinuteBarReader, optional
The minute bar reader for futures. This will be used to service
minute data backtests or minute history calls. This can be used
to serve daily calls if no daily bar reader is provided.
adjustment_reader : SQLiteAdjustmentReader, optional
The adjustment reader. This is used to apply splits, dividends, and
other adjustment data to the raw data from the readers.
last_available_session : pd.Timestamp, optional
The last session to make available in session-level data.
last_available_minute : pd.Timestamp, optional
The last minute to make available in minute-level data.
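Examples
--------
A minimal construction sketch from an ingested bundle (illustrative
only; the bundle name "test" and calendar "XTAI" are placeholders)::
    from zipline.data import bundles
    from zipline.utils.calendar_utils import get_calendar
    bundle = bundles.load("test")
    portal = DataPortal(
        asset_finder=bundle.asset_finder,
        trading_calendar=get_calendar("XTAI"),
        first_trading_day=bundle.equity_daily_bar_reader.first_trading_day,
        equity_daily_reader=bundle.equity_daily_bar_reader,
        adjustment_reader=bundle.adjustment_reader,
    )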
"""
def __init__(
self,
asset_finder,
trading_calendar,
first_trading_day,
equity_daily_reader=None,
equity_minute_reader=None,
future_daily_reader=None,
future_minute_reader=None,
adjustment_reader=None,
last_available_session=None,
last_available_minute=None,
minute_history_prefetch_length=_DEF_M_HIST_PREFETCH,
daily_history_prefetch_length=_DEF_D_HIST_PREFETCH,
):
self.trading_calendar = trading_calendar
self.asset_finder = asset_finder
self._adjustment_reader = adjustment_reader
# caches of sid -> adjustment list
self._splits_dict = {}
self._mergers_dict = {}
self._dividends_dict = {}
# Handle extra sources, like Fetcher.
self._augmented_sources_map = {}
self._extra_source_df = None
self._first_available_session = first_trading_day
if last_available_session:
self._last_available_session = last_available_session
else:
# Infer the last session from the provided readers.
last_sessions = [
reader.last_available_dt
for reader in [equity_daily_reader, future_daily_reader]
if reader is not None
]
if last_sessions:
self._last_available_session = min(last_sessions)
else:
self._last_available_session = None
if last_available_minute:
self._last_available_minute = last_available_minute
else:
# Infer the last minute from the provided readers.
last_minutes = [
reader.last_available_dt
for reader in [equity_minute_reader, future_minute_reader]
if reader is not None
]
if last_minutes:
self._last_available_minute = max(last_minutes)
else:
self._last_available_minute = None
aligned_equity_minute_reader = self._ensure_reader_aligned(equity_minute_reader)
aligned_equity_session_reader = self._ensure_reader_aligned(equity_daily_reader)
aligned_future_minute_reader = self._ensure_reader_aligned(future_minute_reader)
aligned_future_session_reader = self._ensure_reader_aligned(future_daily_reader)
self._roll_finders = {
"calendar": CalendarRollFinder(self.trading_calendar, self.asset_finder),
}
aligned_minute_readers = {}
aligned_session_readers = {}
if aligned_equity_minute_reader is not None:
aligned_minute_readers[Equity] = aligned_equity_minute_reader
if aligned_equity_session_reader is not None:
aligned_session_readers[Equity] = aligned_equity_session_reader
if aligned_future_minute_reader is not None:
aligned_minute_readers[Future] = aligned_future_minute_reader
aligned_minute_readers[ContinuousFuture] = ContinuousFutureMinuteBarReader(
aligned_future_minute_reader,
self._roll_finders,
)
if aligned_future_session_reader is not None:
aligned_session_readers[Future] = aligned_future_session_reader
self._roll_finders["volume"] = VolumeRollFinder(
self.trading_calendar,
self.asset_finder,
aligned_future_session_reader,
)
aligned_session_readers[
ContinuousFuture
] = ContinuousFutureSessionBarReader(
aligned_future_session_reader,
self._roll_finders,
)
_dispatch_minute_reader = AssetDispatchMinuteBarReader(
self.trading_calendar,
self.asset_finder,
aligned_minute_readers,
self._last_available_minute,
)
_dispatch_session_reader = AssetDispatchSessionBarReader(
self.trading_calendar,
self.asset_finder,
aligned_session_readers,
self._last_available_session,
)
self._pricing_readers = {
"minute": _dispatch_minute_reader,
"daily": _dispatch_session_reader,
}
self._daily_aggregator = DailyHistoryAggregator(
self.trading_calendar.schedule.market_open,
_dispatch_minute_reader,
self.trading_calendar,
)
self._history_loader = DailyHistoryLoader(
self.trading_calendar,
_dispatch_session_reader,
self._adjustment_reader,
self.asset_finder,
self._roll_finders,
prefetch_length=daily_history_prefetch_length,
)
self._minute_history_loader = MinuteHistoryLoader(
self.trading_calendar,
_dispatch_minute_reader,
self._adjustment_reader,
self.asset_finder,
self._roll_finders,
prefetch_length=minute_history_prefetch_length,
)
self._first_trading_day = first_trading_day
# Get the first trading minute
self._first_trading_minute, _ = (
self.trading_calendar.open_and_close_for_session(self._first_trading_day)
if self._first_trading_day is not None
else (None, None)
)
# Store the locs of the first day and first minute
self._first_trading_day_loc = (
self.trading_calendar.all_sessions.get_loc(self._first_trading_day)
if self._first_trading_day is not None
else None
)
def _ensure_reader_aligned(self, reader):
if reader is None:
return
if reader.trading_calendar.name == self.trading_calendar.name:
return reader
elif reader.data_frequency == "minute":
return ReindexMinuteBarReader(
self.trading_calendar,
reader,
self._first_available_session,
self._last_available_session,
)
elif reader.data_frequency == "session":
return ReindexSessionBarReader(
self.trading_calendar,
reader,
self._first_available_session,
self._last_available_session,
)
def _reindex_extra_source(self, df, source_date_index):
return df.reindex(index=source_date_index, method="ffill")
def handle_extra_source(self, source_df, sim_params):
"""
Extra sources always have a sid column.
We expand the given data (by forward filling) to the full range of
the simulation dates, so that lookup is fast during simulation.
"""
if source_df is None:
return
# Normalize all the dates in the df
source_df.index = source_df.index.normalize()
# source_df's sid column can either consist of assets we know about
# (such as sid(24)) or of assets we don't know about (such as
# palladium).
#
# In both cases, we break up the dataframe into individual dfs
# that only contain a single asset's information. ie, if source_df
# has data for PALLADIUM and GOLD, we split source_df into two
# dataframes, one for each. (same applies if source_df has data for
# AAPL and IBM).
#
# We then take each child df and reindex it to the simulation's date
# range by forward-filling missing values. this makes reads simpler.
#
# Finally, we store the data. For each column, we store a mapping in
# self.augmented_sources_map from the column to a dictionary of
# asset -> df. In other words,
# self.augmented_sources_map['days_to_cover']['AAPL'] gives us the df
# holding that data.
source_date_index = self.trading_calendar.sessions_in_range(
sim_params.start_session, sim_params.end_session
)
# Break the source_df up into one dataframe per sid. This lets
# us (more easily) calculate accurate start/end dates for each sid,
# de-dup data, and expand the data to fit the backtest start/end date.
grouped_by_sid = source_df.groupby(["sid"])
group_names = grouped_by_sid.groups.keys()
group_dict = {}
for group_name in group_names:
group_dict[group_name] = grouped_by_sid.get_group(group_name)
# This will be the dataframe which we query to get fetcher assets at
# any given time. Gets overwritten every time there's a new fetcher
# call.
extra_source_df = pd.DataFrame()
for identifier, df in group_dict.items():
# Since we know this df only contains a single sid, we can safely
# de-dupe by the index (dt). If minute granularity, will take the
# last data point on any given day
df = df.groupby(level=0).last()
# Reindex the dataframe based on the backtest start/end date.
# This makes reads easier during the backtest.
df = self._reindex_extra_source(df, source_date_index)
for col_name in df.columns.difference(["sid"]):
if col_name not in self._augmented_sources_map:
self._augmented_sources_map[col_name] = {}
self._augmented_sources_map[col_name][identifier] = df
# Append to extra_source_df the reindexed dataframe for the single
# sid
extra_source_df = extra_source_df.append(df)
self._extra_source_df = extra_source_df
def _get_pricing_reader(self, data_frequency):
return self._pricing_readers[data_frequency]
def get_last_traded_dt(self, asset, dt, data_frequency):
"""
Given an asset and dt, returns the last traded dt from the viewpoint
of the given dt.
If there is a trade on the dt, the answer is the provided dt.
"""
return self._get_pricing_reader(data_frequency).get_last_traded_dt(asset, dt)
@staticmethod
def _is_extra_source(asset, field, map):
"""
Internal method that determines if this asset/field combination
represents a fetcher value or a regular OHLCVP lookup.
"""
# If we have an extra source with a column called "price", only look
# at it if it's on something like palladium and not AAPL (since our
# own price data always wins when dealing with assets).
return not (
field in BASE_FIELDS and (isinstance(asset, (Asset, ContinuousFuture)))
)
def _get_fetcher_value(self, asset, field, dt):
day = normalize_date(dt)
try:
return self._augmented_sources_map[field][asset].loc[day, field]
except KeyError:
return np.NaN
def _get_single_asset_value(self, session_label, asset, field, dt, data_frequency):
if self._is_extra_source(asset, field, self._augmented_sources_map):
return self._get_fetcher_value(asset, field, dt)
if field not in BASE_FIELDS:
raise KeyError("Invalid column: " + str(field))
if (
dt < asset.start_date
or (data_frequency == "daily" and session_label > asset.end_date)
or (data_frequency == "minute" and session_label > asset.end_date)
):
if field == "volume":
return 0
elif field == "contract":
return None
elif field != "last_traded":
return np.NaN
if data_frequency == "daily":
if field == "contract":
return self._get_current_contract(asset, session_label)
else:
return self._get_daily_spot_value(
asset,
field,
session_label,
)
else:
if field == "last_traded":
return self.get_last_traded_dt(asset, dt, "minute")
elif field == "price":
return self._get_minute_spot_value(
asset,
"close",
dt,
ffill=True,
)
elif field == "contract":
return self._get_current_contract(asset, dt)
else:
return self._get_minute_spot_value(asset, field, dt)
def get_spot_value(self, assets, field, dt, data_frequency):
"""
Public API method that returns a scalar value representing the value
of the desired asset's field at the given dt.
Parameters
----------
assets : Asset, ContinuousFuture, or iterable of same.
The asset or assets whose data is desired.
field : {'open', 'high', 'low', 'close', 'volume',
'price', 'last_traded'}
The desired field of the asset.
dt : pd.Timestamp
The timestamp for the desired value.
data_frequency : str
The frequency of the data to query; i.e. whether the data is
'daily' or 'minute' bars
Returns
-------
value : float, int, or pd.Timestamp
The spot value of ``field`` for ``asset``. The return type is based
on the ``field`` requested. If the field is one of 'open', 'high',
'low', 'close', or 'price', the value will be a float. If the
``field`` is 'volume' the value will be an int. If the ``field`` is
'last_traded' the value will be a Timestamp.
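Examples
--------
Illustrative only; assumes ``portal`` is a ``DataPortal``, ``pd`` is
pandas, and ``asset`` was resolved through its asset finder::
    price = portal.get_spot_value(
        asset, "close", pd.Timestamp("2018-01-03", tz="UTC"), "daily"
    )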
"""
assets_is_scalar = False
if isinstance(assets, (AssetConvertible, PricingDataAssociable)):
assets_is_scalar = True
else:
# If 'assets' was not one of the expected types then it should be
# an iterable.
try:
iter(assets)
except TypeError:
raise TypeError(
"Unexpected 'assets' value of type {}.".format(type(assets))
)
session_label = self.trading_calendar.minute_to_session_label(dt)
if assets_is_scalar:
return self._get_single_asset_value(
session_label,
assets,
field,
dt,
data_frequency,
)
else:
get_single_asset_value = self._get_single_asset_value
return [
get_single_asset_value(
session_label,
asset,
field,
dt,
data_frequency,
)
for asset in assets
]
def get_scalar_asset_spot_value(self, asset, field, dt, data_frequency):
"""
Public API method that returns a scalar value representing the value
of the desired asset's field at the given dt.
Parameters
----------
asset : Asset
The asset whose data is desired. This cannot be
an arbitrary AssetConvertible.
field : {'open', 'high', 'low', 'close', 'volume',
'price', 'last_traded'}
The desired field of the asset.
dt : pd.Timestamp
The timestamp for the desired value.
data_frequency : str
The frequency of the data to query; i.e. whether the data is
'daily' or 'minute' bars
Returns
-------
value : float, int, or pd.Timestamp
The spot value of ``field`` for ``asset``. The return type is based
on the ``field`` requested. If the field is one of 'open', 'high',
'low', 'close', or 'price', the value will be a float. If the
``field`` is 'volume' the value will be an int. If the ``field`` is
'last_traded' the value will be a Timestamp.
"""
return self._get_single_asset_value(
self.trading_calendar.minute_to_session_label(dt),
asset,
field,
dt,
data_frequency,
)
def get_adjustments(self, assets, field, dt, perspective_dt):
"""
Returns a list of adjustments between the dt and perspective_dt for the
given field and list of assets
Parameters
----------
assets : list of type Asset, or Asset
The asset, or assets whose adjustments are desired.
field : {'open', 'high', 'low', 'close', 'volume', \
'price', 'last_traded'}
The desired field of the asset.
dt : pd.Timestamp
The timestamp for the desired value.
perspective_dt : pd.Timestamp
The timestamp from which the data is being viewed back from.
Returns
-------
adjustments : list[Adjustment]
The adjustments to that field.
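Examples
--------
Illustrative sketch; the returned ratio folds together any splits,
mergers and dividends recorded after ``dt`` up to and including
``perspective_dt`` (``raw_close`` is a placeholder value)::
    [ratio] = portal.get_adjustments(
        asset, "close",
        dt=pd.Timestamp("2018-01-03", tz="UTC"),
        perspective_dt=pd.Timestamp("2018-06-29", tz="UTC"),
    )
    adjusted_close = raw_close * ratio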
"""
if isinstance(assets, Asset):
assets = [assets]
adjustment_ratios_per_asset = []
def split_adj_factor(x):
return x if field != "volume" else 1.0 / x
for asset in assets:
adjustments_for_asset = []
split_adjustments = self._get_adjustment_list(
asset, self._splits_dict, "SPLITS"
)
for adj_dt, adj in split_adjustments:
if dt < adj_dt <= perspective_dt:
adjustments_for_asset.append(split_adj_factor(adj))
elif adj_dt > perspective_dt:
break
if field != "volume":
merger_adjustments = self._get_adjustment_list(
asset, self._mergers_dict, "MERGERS"
)
for adj_dt, adj in merger_adjustments:
if dt < adj_dt <= perspective_dt:
adjustments_for_asset.append(adj)
elif adj_dt > perspective_dt:
break
dividend_adjustments = self._get_adjustment_list(
asset,
self._dividends_dict,
"DIVIDENDS",
)
for adj_dt, adj in dividend_adjustments:
if dt < adj_dt <= perspective_dt:
adjustments_for_asset.append(adj)
elif adj_dt > perspective_dt:
break
ratio = reduce(mul, adjustments_for_asset, 1.0)
adjustment_ratios_per_asset.append(ratio)
return adjustment_ratios_per_asset
def get_adjusted_value(
self, asset, field, dt, perspective_dt, data_frequency, spot_value=None
):
"""
Returns a scalar value representing the value
of the desired asset's field at the given dt with adjustments applied.
Parameters
----------
asset : Asset
The asset whose data is desired.
field : {'open', 'high', 'low', 'close', 'volume', \
'price', 'last_traded'}
The desired field of the asset.
dt : pd.Timestamp
The timestamp for the desired value.
perspective_dt : pd.Timestamp
The timestamp from which the data is being viewed back from.
data_frequency : str
The frequency of the data to query; i.e. whether the data is
'daily' or 'minute' bars
Returns
-------
value : float, int, or pd.Timestamp
The value of the given ``field`` for ``asset`` at ``dt`` with any
adjustments known by ``perspective_dt`` applied. The return type is
based on the ``field`` requested. If the field is one of 'open',
'high', 'low', 'close', or 'price', the value will be a float. If
the ``field`` is 'volume' the value will be a int. If the ``field``
is 'last_traded' the value will be a Timestamp.
"""
if spot_value is None:
# if this is a fetcher field, we want to use perspective_dt (not dt)
# because we want the new value as of midnight (fetcher only works
# on a daily basis, all timestamps are on midnight)
if self._is_extra_source(asset, field, self._augmented_sources_map):
spot_value = self.get_spot_value(
asset, field, perspective_dt, data_frequency
)
else:
spot_value = self.get_spot_value(asset, field, dt, data_frequency)
if isinstance(asset, Equity):
ratio = self.get_adjustments(asset, field, dt, perspective_dt)[0]
spot_value *= ratio
return spot_value
def _get_minute_spot_value(self, asset, column, dt, ffill=False):
reader = self._get_pricing_reader("minute")
if not ffill:
try:
return reader.get_value(asset.sid, dt, column)
except NoDataOnDate:
if column != "volume":
return np.nan
else:
return 0
# At this point the pairing of column='close' and ffill=True is
# assumed.
try:
# Optimize the best case scenario of a liquid asset
# returning a valid price.
result = reader.get_value(asset.sid, dt, column)
if not pd.isnull(result):
return result
except NoDataOnDate:
# Handling of no data for the desired date is done by the
# forward filling logic.
# The last trade may occur on a previous day.
pass
# If forward filling, we want the last minute with values (up to
# and including dt).
query_dt = reader.get_last_traded_dt(asset, dt)
if pd.isnull(query_dt):
# no last traded dt, bail
return np.nan
result = reader.get_value(asset.sid, query_dt, column)
if (dt == query_dt) or (dt.date() == query_dt.date()):
return result
# the value we found came from a different day, so we have to
# adjust the data if there are any adjustments on that day barrier
return self.get_adjusted_value(
asset, column, query_dt, dt, "minute", spot_value=result
)
def _get_daily_spot_value(self, asset, column, dt):
reader = self._get_pricing_reader("daily")
if column == "last_traded":
last_traded_dt = reader.get_last_traded_dt(asset, dt)
if isnull(last_traded_dt):
return pd.NaT
else:
return last_traded_dt
elif column in OHLCV_FIELDS:
# don't forward fill
try:
return reader.get_value(asset, dt, column)
except NoDataOnDate:
return np.nan
elif column == "price":
found_dt = dt
while True:
try:
value = reader.get_value(asset, found_dt, "close")
if not isnull(value):
if dt == found_dt:
return value
else:
# adjust if needed
return self.get_adjusted_value(
asset, column, found_dt, dt, "minute", spot_value=value
)
else:
found_dt -= self.trading_calendar.day
except NoDataOnDate:
return np.nan
@remember_last
def _get_days_for_window(self, end_date, bar_count):
tds = self.trading_calendar.all_sessions
end_loc = tds.get_loc(end_date)
start_loc = end_loc - bar_count + 1
if start_loc < self._first_trading_day_loc:
raise HistoryWindowStartsBeforeData(
first_trading_day=self._first_trading_day.date(),
bar_count=bar_count,
suggested_start_day=tds[self._first_trading_day_loc + bar_count].date(),
)
return tds[start_loc : end_loc + 1]
def _get_history_daily_window(
self, assets, end_dt, bar_count, field_to_use, data_frequency
):
"""
Internal method that returns a dataframe containing history bars
of daily frequency for the given sids.
"""
session = self.trading_calendar.minute_to_session_label(end_dt)
days_for_window = self._get_days_for_window(session, bar_count)
if len(assets) == 0:
return pd.DataFrame(None, index=days_for_window, columns=None)
data = self._get_history_daily_window_data(
assets, days_for_window, end_dt, field_to_use, data_frequency
)
return pd.DataFrame(data, index=days_for_window, columns=assets)
def _get_history_daily_window_data(
self, assets, days_for_window, end_dt, field_to_use, data_frequency
):
if data_frequency == "daily":
# two cases where we use daily data for the whole range:
# 1) the history window ends at midnight utc.
# 2) the last desired day of the window is after the
# last trading day.
return self._get_daily_window_data(
assets, field_to_use, days_for_window, extra_slot=False
)
else:
# minute mode, requesting '1d'
daily_data = self._get_daily_window_data(
assets, field_to_use, days_for_window[0:-1]
)
if field_to_use == "open":
minute_value = self._daily_aggregator.opens(assets, end_dt)
elif field_to_use == "high":
minute_value = self._daily_aggregator.highs(assets, end_dt)
elif field_to_use == "low":
minute_value = self._daily_aggregator.lows(assets, end_dt)
elif field_to_use == "close":
minute_value = self._daily_aggregator.closes(assets, end_dt)
elif field_to_use == "volume":
minute_value = self._daily_aggregator.volumes(assets, end_dt)
elif field_to_use == "sid":
minute_value = [
int(self._get_current_contract(asset, end_dt)) for asset in assets
]
# append the partial day.
daily_data[-1] = minute_value
return daily_data
def _handle_minute_history_out_of_bounds(self, bar_count):
cal = self.trading_calendar
first_trading_minute_loc = (
cal.all_minutes.get_loc(self._first_trading_minute)
if self._first_trading_minute is not None
else None
)
suggested_start_day = cal.minute_to_session_label(
cal.all_minutes[first_trading_minute_loc + bar_count] + cal.day
)
raise HistoryWindowStartsBeforeData(
first_trading_day=self._first_trading_day.date(),
bar_count=bar_count,
suggested_start_day=suggested_start_day.date(),
)
def _get_history_minute_window(self, assets, end_dt, bar_count, field_to_use):
"""
Internal method that returns a dataframe containing history bars
of minute frequency for the given sids.
"""
# get all the minutes for this window
try:
minutes_for_window = self.trading_calendar.minutes_window(
end_dt, -bar_count
)
except KeyError:
self._handle_minute_history_out_of_bounds(bar_count)
if minutes_for_window[0] < self._first_trading_minute:
self._handle_minute_history_out_of_bounds(bar_count)
asset_minute_data = self._get_minute_window_data(
assets,
field_to_use,
minutes_for_window,
)
return pd.DataFrame(asset_minute_data, index=minutes_for_window, columns=assets)
def get_history_window(
self, assets, end_dt, bar_count, frequency, field, data_frequency, ffill=True
):
"""
Public API method that returns a dataframe containing the requested
history window. Data is fully adjusted.
Parameters
----------
assets : list of zipline.data.Asset objects
The assets whose data is desired.
bar_count: int
The number of bars desired.
frequency: string
"1d" or "1m"
field: string
The desired field of the asset.
data_frequency: string
The frequency of the data to query; i.e. whether the data is
'daily' or 'minute' bars.
ffill: boolean
Forward-fill missing values. Only has effect if field
is 'price'.
Returns
-------
A dataframe containing the requested data.
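Examples
--------
Illustrative only; assumes ``portal`` and ``assets`` come from an
ingested bundle whose calendar covers ``end_dt``::
    window = portal.get_history_window(
        assets=assets,
        end_dt=pd.Timestamp("2018-06-29", tz="UTC"),
        bar_count=20,
        frequency="1d",
        field="price",
        data_frequency="daily",
    )
    # DataFrame indexed by session with one column per asset.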
"""
if field not in OHLCVP_FIELDS and field != "sid":
raise ValueError("Invalid field: {0}".format(field))
if bar_count < 1:
raise ValueError("bar_count must be >= 1, but got {}".format(bar_count))
if frequency == "1d":
if field == "price":
df = self._get_history_daily_window(
assets, end_dt, bar_count, "close", data_frequency
)
else:
df = self._get_history_daily_window(
assets, end_dt, bar_count, field, data_frequency
)
elif frequency == "1m":
if field == "price":
df = self._get_history_minute_window(assets, end_dt, bar_count, "close")
else:
df = self._get_history_minute_window(assets, end_dt, bar_count, field)
else:
raise ValueError("Invalid frequency: {0}".format(frequency))
# forward-fill price
if field == "price":
if frequency == "1m":
ffill_data_frequency = "minute"
elif frequency == "1d":
ffill_data_frequency = "daily"
else:
raise Exception("Only 1d and 1m are supported for forward-filling.")
assets_with_leading_nan = np.where(isnull(df.iloc[0]))[0]
history_start, history_end = df.index[[0, -1]]
if ffill_data_frequency == "daily" and data_frequency == "minute":
# When we're looking for a daily value, but we haven't seen any
# volume in today's minute bars yet, we need to use the
# previous day's ffilled daily price. Using today's daily price
# could yield a value from later today.
history_start -= self.trading_calendar.day
initial_values = []
for asset in df.columns[assets_with_leading_nan]:
last_traded = self.get_last_traded_dt(
asset,
history_start,
ffill_data_frequency,
)
if isnull(last_traded):
initial_values.append(nan)
else:
initial_values.append(
self.get_adjusted_value(
asset,
field,
dt=last_traded,
perspective_dt=history_end,
data_frequency=ffill_data_frequency,
)
)
# Set leading values for assets that were missing data, then ffill.
df.iloc[0, assets_with_leading_nan] = np.array(
initial_values, dtype=np.float64
)
df.fillna(method="ffill", inplace=True)
# forward-filling will incorrectly produce values after the end of
# an asset's lifetime, so write NaNs back over the asset's
# end_date.
normed_index = df.index.normalize()
for asset in df.columns:
if history_end >= asset.end_date:
# if the window extends past the asset's end date, set
# all post-end-date values to NaN in that asset's series
df.loc[normed_index > asset.end_date, asset] = nan
return df
def _get_minute_window_data(self, assets, field, minutes_for_window):
"""
Internal method that gets a window of adjusted minute data for an asset
and specified date range. Used to support the history API method for
minute bars.
Missing bars are filled with NaN.
Parameters
----------
assets : iterable[Asset]
The assets whose data is desired.
field: string
The specific field to return. "open", "high", "close", etc.
minutes_for_window: pd.DateTimeIndex
The list of minutes representing the desired window. Each minute
is a pd.Timestamp.
Returns
-------
A numpy array with requested values.
"""
return self._minute_history_loader.history(
assets, minutes_for_window, field, False
)
def _get_daily_window_data(self, assets, field, days_in_window, extra_slot=True):
"""
Internal method that gets a window of adjusted daily data for a sid
and specified date range. Used to support the history API method for
daily bars.
Parameters
----------
asset : Asset
The asset whose data is desired.
start_dt: pandas.Timestamp
The start of the desired window of data.
bar_count: int
The number of days of data to return.
field: string
The specific field to return. "open", "high", "close_price", etc.
extra_slot: boolean
Whether to allocate an extra slot in the returned numpy array.
This extra slot will hold the data for the last partial day. It's
much better to create it here than to create a copy of the array
later just to add a slot.
Returns
-------
A numpy array with requested values. Any missing slots filled with
nan.
"""
bar_count = len(days_in_window)
# create an np.array of size bar_count
dtype = float64 if field != "sid" else int64
if extra_slot:
return_array = np.zeros((bar_count + 1, len(assets)), dtype=dtype)
else:
return_array = np.zeros((bar_count, len(assets)), dtype=dtype)
if field != "volume":
# volumes default to 0, so we don't need to put NaNs in the array
return_array = return_array.astype(float64)
return_array[:] = np.NAN
if bar_count != 0:
data = self._history_loader.history(
assets, days_in_window, field, extra_slot
)
if extra_slot:
return_array[: len(return_array) - 1, :] = data
else:
return_array[: len(data)] = data
return return_array
def _get_adjustment_list(self, asset, adjustments_dict, table_name):
"""
Internal method that returns a list of adjustments for the given sid.
Parameters
----------
asset : Asset
The asset for which to return adjustments.
adjustments_dict: dict
A dictionary of sid -> list that is used as a cache.
table_name: string
The table that contains this data in the adjustments db.
Returns
-------
adjustments: list
A list of [multiplier, pd.Timestamp], earliest first
"""
if self._adjustment_reader is None:
return []
sid = int(asset)
try:
adjustments = adjustments_dict[sid]
except KeyError:
adjustments = adjustments_dict[
sid
] = self._adjustment_reader.get_adjustments_for_sid(table_name, sid)
return adjustments
def get_splits(self, assets, dt):
"""
Returns any splits for the given sids and the given dt.
Parameters
----------
assets : container
Assets for which we want splits.
dt : pd.Timestamp
The date for which we are checking for splits. Note: this is
expected to be midnight UTC.
Returns
-------
splits : list[(asset, float)]
List of splits, where each split is an (asset, ratio) tuple.
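Examples
--------
A minimal sketch; ``asset`` is a placeholder Equity and ``dt`` must be
midnight UTC of the session being checked::
    splits = portal.get_splits({asset}, pd.Timestamp("2018-06-29", tz="UTC"))
    # e.g. [(Equity(1 [XYZ]), 0.5)] for a 2-for-1 split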
"""
if self._adjustment_reader is None or not assets:
return []
# convert dt to # of seconds since epoch, because that's what we use
# in the adjustments db
seconds = int(dt.value / 1e9)
splits = self._adjustment_reader.conn.execute(
"SELECT sid, ratio FROM SPLITS WHERE effective_date = ?", (seconds,)
).fetchall()
splits = [split for split in splits if split[0] in assets]
splits = [
(self.asset_finder.retrieve_asset(split[0]), split[1]) for split in splits
]
return splits
def get_stock_dividends(self, sid, trading_days):
"""
Returns all the stock dividends for a specific sid that occur
in the given trading range.
Parameters
----------
sid: int
The asset whose stock dividends should be returned.
trading_days: pd.DatetimeIndex
The trading range.
Returns
-------
list: A list of dicts with all relevant attributes populated.
All timestamp fields are converted to pd.Timestamps.
"""
if self._adjustment_reader is None:
return []
if len(trading_days) == 0:
return []
start_dt = trading_days[0].value / 1e9
end_dt = trading_days[-1].value / 1e9
dividends = self._adjustment_reader.conn.execute(
"SELECT declared_date, ex_date, pay_date, payment_sid, ratio, "
"record_date, sid FROM stock_dividend_payouts "
"WHERE sid = ? AND ex_date > ? AND pay_date < ?",
(
int(sid),
start_dt,
end_dt,
),
).fetchall()
dividend_info = []
for dividend_tuple in dividends:
dividend_info.append(
{
"declared_date": pd.Timestamp(dividend_tuple[0], unit="s"),
"ex_date": pd.Timestamp(dividend_tuple[1], unit="s"),
"pay_date": pd.Timestamp(dividend_tuple[2], unit="s"),
"payment_sid": dividend_tuple[3],
"ratio": dividend_tuple[4],
"record_date": pd.Timestamp(dividend_tuple[5], unit="s"),
"sid": dividend_tuple[6],
}
)
return dividend_info
def contains(self, asset, field):
return field in BASE_FIELDS or (
field in self._augmented_sources_map
and asset in self._augmented_sources_map[field]
)
def get_fetcher_assets(self, dt):
"""
Returns a list of assets for the current date, as defined by the
fetcher data.
Returns
-------
list: a list of Asset objects.
"""
# return a list of assets for the current date, as defined by the
# fetcher source
if self._extra_source_df is None:
return []
day = normalize_date(dt)
if day in self._extra_source_df.index:
assets = self._extra_source_df.loc[day]["sid"]
else:
return []
if isinstance(assets, pd.Series):
return [x for x in assets if isinstance(x, Asset)]
else:
return [assets] if isinstance(assets, Asset) else []
def get_current_future_chain(self, continuous_future, dt):
"""
Retrieves the future chain for the contract at the given `dt` according
the `continuous_future` specification.
Returns
-------
future_chain : list[Future]
A list of active futures, where the first index is the current
contract specified by the continuous future definition, the second
is the next upcoming contract and so on.
"""
rf = self._roll_finders[continuous_future.roll_style]
session = self.trading_calendar.minute_to_session_label(dt)
contract_center = rf.get_contract_center(
continuous_future.root_symbol, session, continuous_future.offset
)
oc = self.asset_finder.get_ordered_contracts(continuous_future.root_symbol)
chain = oc.active_chain(contract_center, session.value)
return self.asset_finder.retrieve_all(chain)
def _get_current_contract(self, continuous_future, dt):
rf = self._roll_finders[continuous_future.roll_style]
contract_sid = rf.get_contract_center(
continuous_future.root_symbol, dt, continuous_future.offset
)
if contract_sid is None:
return None
return self.asset_finder.retrieve_asset(contract_sid)
@property
def adjustment_reader(self):
return self._adjustment_reader
#--------------------------------------------------------------------
#20230516 Export bundle data (by MRC) #start#
def get_bundle_price(bundle_name,
calendar_name,
start_dt,
end_dt,
frequency='1d',
data_frequency='daily',
assets = None):
"""轉出bundle的價格資料(調整前+調整後)
Parameters
----------
bundle_name : str
The name of the bundle.
calendar_name : str, optional
The name of a calendar used to align bundle data.
start_dt: pandas.Timestamp
The start of the desired window of data.
end_dt: pandas.Timestamp
The end session of the desired window of data.
frequency: string, optional
"1d"
data_frequency: string, optional
The frequency of the data to query; i.e. 'daily'
assets : list of zipline.data.Asset objects, optional
The assets whose data is desired.
Returns
-------
df_bundle : pd.DataFrame
The bundle's price data (unadjusted and adjusted).
Modification history
-------
20230516: Added (by MRC)
See Also
--------
https://github.com/quantopian/zipline/issues/2651
Existing Problems
--------
If the earliest date in get_calendar(calendar_name).schedule is later than
bundle.equity_daily_bar_reader.first_trading_day, get_bundle_data() raises:
NotSessionError: Parameter `session_label` takes a session label although received input that parsed
to '2003-04-30 00:00:00+00:00' which is earlier than the first session of calendar 'XTAI'
('2003-05-02 00:00:00+00:00').
Workarounds:
1. Restart the kernel and re-ingest.
2. Change first_trading_day to get_calendar(calendar_name).schedule.index.min().
3. Fetch the data with BarData.history() instead. (Previously
DataPortal.get_history_window() was used, which can only fetch one field at a time.)
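Examples
--------
Illustrative usage; the bundle name, calendar and dates are placeholders
(depending on the installed calendar package the timestamps may need to
be tz-naive instead of UTC)::
    df = get_bundle_price(bundle_name="tquant",
                          calendar_name="XTAI",
                          start_dt=pd.Timestamp("2020-01-01", tz="utc"),
                          end_dt=pd.Timestamp("2020-12-31", tz="utc"))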
"""
bundle = bundles.load(bundle_name)
if assets is None:
sids = bundle.asset_finder.equities_sids
assets = bundle.asset_finder.retrieve_all(sids)
def get_history(adj):
if adj==True:
adjustment_reader=bundle.adjustment_reader
else:
adjustment_reader=None
Portal = DataPortal(asset_finder=bundle.asset_finder,
trading_calendar=get_calendar(calendar_name),
first_trading_day=bundle.equity_daily_bar_reader.first_trading_day,
# equity_minute_reader=bundle.equity_minute_bar_reader,
equity_daily_reader=bundle.equity_daily_bar_reader,
adjustment_reader=adjustment_reader
)
Bar = BarData(data_portal=Portal,
simulation_dt_func=lambda: end_dt,
data_frequency=data_frequency,
trading_calendar=get_calendar(calendar_name),
restrictions=NoRestrictions()
)
df = Bar.history(assets=assets,
fields=['open','high','low','close','volume'],
bar_count=N_tradate,
frequency=frequency)
# Add symbol and sid columns
df.index.set_names(['date','asset'],inplace=True)
df = df.reset_index()
df.insert(1,'symbol',df['asset'].apply(lambda x: x.symbol))
df.insert(1,'sid',df['asset'].apply(lambda x: x.sid))
return df
# Count the number of trading sessions in the window
dt = get_calendar(calendar_name).sessions_in_range(start_dt,end_dt)
N_tradate = len(dt)
# Adjusted prices
df_bundle_price_adj = get_history(True)
# Unadjusted prices
df_bundle_price = get_history(False)
# Merge the adjusted and unadjusted prices
df_bundle = pd.merge(df_bundle_price,
df_bundle_price_adj.drop(columns=['asset','symbol']),
on=['date','sid'],
suffixes=[None,'_adj'])
return df_bundle
def get_bundle_adj(bundle_name):
"""轉出bundle的調整資料
Parameters
----------
bundle_name : str
The name of the bundle.
Returns
-------
df_adj : pd.DataFrame
The bundle's adjustment data.
Modification history
-------
20230516: Added (by MRC)
20230525: Removed the cash_back column and added div_percent (to match the bundle)
See Also
--------
Existing Problems
--------
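Examples
--------
Illustrative usage; the bundle name is a placeholder::
    df_adj = get_bundle_adj("tquant")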
"""
bundle = bundles.load(bundle_name)
# Retrieve all adjustments
df_adjustment = bundle.adjustment_reader.unpack_db_to_component_dfs(convert_dates=True)
# dividend_payouts
df_dividend_payouts = df_adjustment['dividend_payouts'].rename(columns={'ex_date':'effective_date',
'pay_date':'dividend_payouts.pay_date',
'record_date':'dividend_payouts.record_date',
'declared_date':'dividend_payouts.declared_date',
'amount':'dividend_payouts.amount',
#'cash_back':'dividend_payouts.cash_back',
'div_percent':'dividend_payouts.div_percent',
}).set_index(['effective_date','sid'])
# stock_dividend_payouts
df_stock_dividend_payouts = df_adjustment['stock_dividend_payouts'].rename(columns={'ex_date':'effective_date',
'pay_date':'stock_dividend_payouts.pay_date',
'record_date':'stock_dividend_payouts.record_date',
'declared_date':'stock_dividend_payouts.declared_date',
'amount':'stock_dividend_payouts.amount'
}).set_index(['effective_date','sid'])
# splits
df_splits = df_adjustment['splits'].rename(columns={'ratio':'splits.ratio'}).\
set_index(['effective_date','sid'])
# mergers
df_mergers = df_adjustment['mergers'].rename(columns={'ratio':'mergers.ratio'}).\
set_index(['effective_date','sid'])
# dividends
df_dividends = df_adjustment['dividends'].rename(columns={'ratio':'dividends.ratio'}).\
set_index(['effective_date','sid'])
df_adj = df_dividend_payouts.merge(df_dividends,how='outer',left_index=True, right_index=True)
df_adj = df_adj.merge(df_splits,how='outer',left_index=True, right_index=True)
df_adj = df_adj.merge(df_mergers,how='outer',left_index=True, right_index=True)
df_adj.index.set_names(['date','sid'],inplace=True)
df_adj.reset_index(inplace=True)
df_adj.insert(2,'asset',df_adj['sid'].apply(lambda x: bundle.asset_finder.retrieve_asset(x)))
df_adj.insert(2,'symbol',df_adj['asset'].apply(lambda x: x.symbol))
return df_adj
def get_bundle(bundle_name,
calendar_name,
start_dt,
end_dt,
frequency='1d',
data_frequency='daily',
assets=None):
"""轉出bundle的價格資料(調整前+調整後)及調整資料
Parameters
----------
bundle_name : str
The name of the bundle.
calendar_name : str, optional
The name of a calendar used to align bundle data.
start_dt: pandas.Timestamp
The start of the desired window of data.
end_dt: pandas.Timestamp
The end session of the desired window of data.
frequency: string, optional
"1d"
data_frequency: string, optional
The frequency of the data to query; i.e. 'daily'
assets : list of zipline.data.Asset objects, optional
The assets whose data is desired.
Returns
-------
df : pd.DataFrame
The bundle's price data (unadjusted and adjusted) merged with its adjustment data.
Modification history
-------
20230516: Added (by MRC)
See Also
--------
https://github.com/quantopian/zipline/issues/2651
Existing Problems
--------
If the earliest date in get_calendar(calendar_name).schedule is later than
bundle.equity_daily_bar_reader.first_trading_day, get_bundle_data() raises:
NotSessionError: Parameter `session_label` takes a session label although received input that parsed
to '2003-04-30 00:00:00+00:00' which is earlier than the first session of calendar 'XTAI'
('2003-05-02 00:00:00+00:00').
Workarounds:
1. Restart the kernel and re-ingest the bundle.
2. Change first_trading_day to get_calendar(calendar_name).schedule.index.min().
"""
df_bundle_price = get_bundle_price(bundle_name=bundle_name,
calendar_name=calendar_name,
start_dt=start_dt,
end_dt=end_dt,
assets=assets,
frequency=frequency,
data_frequency=data_frequency)
df_bundle_adj = get_bundle_adj(bundle_name)
df = df_bundle_price.merge(df_bundle_adj.drop(columns=['asset','symbol']),
on=['date','sid'],
how='left')
return df
#20230516 Export bundle data (by MRC) #end#
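# A minimal usage sketch for get_bundle(); the bundle name and date range are
# hypothetical, so substitute a bundle you have actually ingested.
#
#     import pandas as pd
#     from zipline.data.data_portal import get_bundle
#
#     df = get_bundle(bundle_name='tquant',
#                     calendar_name='XTAI',
#                     start_dt=pd.Timestamp('2020-01-02', tz='utc'),
#                     end_dt=pd.Timestamp('2020-12-31', tz='utc'))
#     # One row per (date, sid): raw OHLCV, '_adj'-suffixed adjusted OHLCV,
#     # plus any dividend/split/merger columns effective on that date.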
#-------------------------------------------------------------------- | zipline-tej | /zipline_tej-0.0.48-cp38-cp38-win_amd64.whl/zipline/data/data_portal.py | data_portal.py |
from collections import namedtuple
from errno import ENOENT
from os import remove
from logbook import Logger
import numpy as np
from numpy import integer as any_integer
import pandas as pd
from pandas import Timestamp
import sqlite3
from zipline.utils.functional import keysorted
from zipline.utils.input_validation import preprocess
from zipline.utils.numpy_utils import (
datetime64ns_dtype,
float64_dtype,
int64_dtype,
uint32_dtype,
uint64_dtype,
)
from zipline.utils.pandas_utils import empty_dataframe
from zipline.utils.sqlite_utils import group_into_chunks, coerce_string_to_conn
from ._adjustments import load_adjustments_from_sqlite
log = Logger(__name__)
SQLITE_ADJUSTMENT_TABLENAMES = frozenset(["splits", "dividends", "mergers"])
UNPAID_QUERY_TEMPLATE = """
SELECT sid, amount, pay_date from dividend_payouts
WHERE ex_date=? AND sid IN ({0})
"""
Dividend = namedtuple("Dividend", ["asset", "amount", "pay_date"])
UNPAID_STOCK_DIVIDEND_QUERY_TEMPLATE = """
SELECT sid, payment_sid, ratio, pay_date from stock_dividend_payouts
WHERE ex_date=? AND sid IN ({0})
"""
StockDividend = namedtuple(
"StockDividend",
["asset", "payment_asset", "ratio", "pay_date"],
)
SQLITE_ADJUSTMENT_COLUMN_DTYPES = {
"effective_date": any_integer,
"ratio": float64_dtype,
"sid": any_integer,
}
SQLITE_DIVIDEND_PAYOUT_COLUMN_DTYPES = {
"sid": any_integer,
"ex_date": any_integer,
"declared_date": any_integer,
"record_date": any_integer,
"pay_date": any_integer,
"amount": float,
"div_percent":float,
}
SQLITE_STOCK_DIVIDEND_PAYOUT_COLUMN_DTYPES = {
"sid": any_integer,
"ex_date": any_integer,
"declared_date": any_integer,
"record_date": any_integer,
"pay_date": any_integer,
"payment_sid": any_integer,
"ratio": float,
}
def specialize_any_integer(d):
out = {}
for k, v in d.items():
if v is any_integer:
out[k] = int64_dtype
else:
out[k] = v
return out
class SQLiteAdjustmentReader(object):
"""
Loads adjustments based on corporate actions from a SQLite database.
Expects data written in the format output by `SQLiteAdjustmentWriter`.
Parameters
----------
conn : str or sqlite3.Connection
Connection from which to load data.
See Also
--------
:class:`zipline.data.adjustments.SQLiteAdjustmentWriter`
"""
_datetime_int_cols = {
"splits": ("effective_date",),
"mergers": ("effective_date",),
"dividends": ("effective_date",),
"dividend_payouts": (
"declared_date",
"ex_date",
"pay_date",
"record_date",
),
"stock_dividend_payouts": (
"declared_date",
"ex_date",
"pay_date",
"record_date",
),
}
_raw_table_dtypes = {
# We use any_integer above to be lenient in accepting different dtypes
# from users. For our outputs, however, we always want to return the
# same types, and any_integer turns into int32 on some numpy windows
# builds, so specify int64 explicitly here.
"splits": specialize_any_integer(SQLITE_ADJUSTMENT_COLUMN_DTYPES),
"mergers": specialize_any_integer(SQLITE_ADJUSTMENT_COLUMN_DTYPES),
"dividends": specialize_any_integer(SQLITE_ADJUSTMENT_COLUMN_DTYPES),
"dividend_payouts": specialize_any_integer(
SQLITE_DIVIDEND_PAYOUT_COLUMN_DTYPES,
),
"stock_dividend_payouts": specialize_any_integer(
SQLITE_STOCK_DIVIDEND_PAYOUT_COLUMN_DTYPES,
),
}
@preprocess(conn=coerce_string_to_conn(require_exists=True))
def __init__(self, conn):
self.conn = conn
def __enter__(self):
return self
def __exit__(self, *exc_info):
self.close()
def close(self):
return self.conn.close()
def load_adjustments(
self,
dates,
assets,
should_include_splits,
should_include_mergers,
should_include_dividends,
adjustment_type,
):
"""
Load collection of Adjustment objects from underlying adjustments db.
Parameters
----------
dates : pd.DatetimeIndex
Dates for which adjustments are needed.
assets : pd.Int64Index
Assets for which adjustments are needed.
should_include_splits : bool
Whether split adjustments should be included.
should_include_mergers : bool
Whether merger adjustments should be included.
should_include_dividends : bool
Whether dividend adjustments should be included.
adjustment_type : str
Whether price adjustments, volume adjustments, or both, should be
included in the output.
Returns
-------
adjustments : dict[str -> dict[int -> Adjustment]]
A dictionary containing price and/or volume adjustment mappings
from index to adjustment objects to apply at that index.
"""
return load_adjustments_from_sqlite(
self.conn,
dates,
assets,
should_include_splits,
should_include_mergers,
should_include_dividends,
adjustment_type,
)
def load_pricing_adjustments(self, columns, dates, assets):
if "volume" not in set(columns):
adjustment_type = "price"
elif len(set(columns)) == 1:
adjustment_type = "volume"
else:
adjustment_type = "all"
adjustments = self.load_adjustments(
dates,
assets,
should_include_splits=True,
should_include_mergers=True,
should_include_dividends=True,
adjustment_type=adjustment_type,
)
price_adjustments = adjustments.get("price")
volume_adjustments = adjustments.get("volume")
return [
volume_adjustments if column == "volume" else price_adjustments
for column in columns
]
def get_adjustments_for_sid(self, table_name, sid):
t = (sid,)
c = self.conn.cursor()
adjustments_for_sid = c.execute(
"SELECT effective_date, ratio FROM %s WHERE sid = ?" % table_name, t
).fetchall()
c.close()
return [
[Timestamp(adjustment[0], unit="s", tz="UTC"), adjustment[1]]
for adjustment in adjustments_for_sid
]
def get_dividends_with_ex_date(self, assets, date, asset_finder):
seconds = date.value / int(1e9)
c = self.conn.cursor()
divs = []
for chunk in group_into_chunks(assets):
query = UNPAID_QUERY_TEMPLATE.format(",".join(["?" for _ in chunk]))
t = (seconds,) + tuple(map(lambda x: int(x), chunk))
c.execute(query, t)
rows = c.fetchall()
for row in rows:
div = Dividend(
asset_finder.retrieve_asset(row[0]),
row[1],
Timestamp(row[2], unit="s", tz="UTC"),
)
divs.append(div)
c.close()
return divs
def get_stock_dividends_with_ex_date(self, assets, date, asset_finder):
seconds = date.value / int(1e9)
c = self.conn.cursor()
stock_divs = []
for chunk in group_into_chunks(assets):
query = UNPAID_STOCK_DIVIDEND_QUERY_TEMPLATE.format(
",".join(["?" for _ in chunk])
)
t = (seconds,) + tuple(map(lambda x: int(x), chunk))
c.execute(query, t)
rows = c.fetchall()
for row in rows:
stock_div = StockDividend(
asset_finder.retrieve_asset(row[0]), # asset
asset_finder.retrieve_asset(row[1]), # payment_asset
row[2],
Timestamp(row[3], unit="s", tz="UTC"),
)
stock_divs.append(stock_div)
c.close()
return stock_divs
def unpack_db_to_component_dfs(self, convert_dates=False):
"""Returns the set of known tables in the adjustments file in DataFrame
form.
Parameters
----------
convert_dates : bool, optional
By default, dates are returned in seconds since EPOCH. If
convert_dates is True, all ints in date columns will be converted
to datetimes.
Returns
-------
dfs : dict{str->DataFrame}
Dictionary which maps table name to the corresponding DataFrame
version of the table, where all date columns have been coerced back
from int to datetime.
"""
return {
t_name: self.get_df_from_table(t_name, convert_dates)
for t_name in self._datetime_int_cols
}
def get_df_from_table(self, table_name, convert_dates=False):
try:
date_cols = self._datetime_int_cols[table_name]
except KeyError:
raise ValueError(
"Requested table {} not found.\n"
"Available tables: {}\n".format(
table_name, self._datetime_int_cols.keys()
)
)
# Dates are stored in second resolution as ints in adj.db tables.
kwargs = (
{"parse_dates": {col: {"unit": "s", "utc": True} for col in date_cols}}
if convert_dates
else {}
)
result = pd.read_sql(
'select * from "{}"'.format(table_name),
self.conn,
index_col="index",
**kwargs,
)
dtypes = self._df_dtypes(table_name, convert_dates)
if not len(result):
return empty_dataframe(*keysorted(dtypes))
result.rename_axis(None, inplace=True)
result = result[sorted(dtypes)] # ensure expected order of columns
return result
def _df_dtypes(self, table_name, convert_dates):
"""Get dtypes to use when unpacking sqlite tables as dataframes."""
out = self._raw_table_dtypes[table_name]
if convert_dates:
out = out.copy()
for date_column in self._datetime_int_cols[table_name]:
out[date_column] = datetime64ns_dtype
return out
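# A short usage sketch for SQLiteAdjustmentReader; the database path is
# hypothetical, but any file written by SQLiteAdjustmentWriter works.
#
#     from zipline.data.adjustments import SQLiteAdjustmentReader
#
#     with SQLiteAdjustmentReader('adjustments.sqlite') as reader:
#         tables = reader.unpack_db_to_component_dfs(convert_dates=True)
#         splits = tables['splits']              # sid, effective_date, ratio
#         payouts = tables['dividend_payouts']   # ex/record/declared/pay dates, amount, div_percent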
class SQLiteAdjustmentWriter(object):
"""
Writer for data to be read by SQLiteAdjustmentReader
Parameters
----------
conn_or_path : str or sqlite3.Connection
A handle to the target sqlite database.
equity_daily_bar_reader : SessionBarReader
Daily bar reader to use for dividend writes.
overwrite : bool, optional, default=False
If True and conn_or_path is a string, remove any existing files at the
given path before connecting.
See Also
--------
zipline.data.adjustments.SQLiteAdjustmentReader
"""
def __init__(self, conn_or_path, equity_daily_bar_reader, overwrite=False):
if isinstance(conn_or_path, sqlite3.Connection):
self.conn = conn_or_path
elif isinstance(conn_or_path, str):
if overwrite:
try:
remove(conn_or_path)
except OSError as e:
if e.errno != ENOENT:
raise
self.conn = sqlite3.connect(conn_or_path)
self.uri = conn_or_path
else:
raise TypeError("Unknown connection type %s" % type(conn_or_path))
self._equity_daily_bar_reader = equity_daily_bar_reader
def __enter__(self):
return self
def __exit__(self, *exc_info):
self.close()
def close(self):
self.conn.close()
def _write(self, tablename, expected_dtypes, frame):
if frame is None or frame.empty:
# keeping the dtypes correct for empty frames is not easy
# frame = pd.DataFrame(
# np.array([], dtype=list(expected_dtypes.items())),
# )
frame = pd.DataFrame(expected_dtypes, index=[])
else:
if frozenset(frame.columns) != frozenset(expected_dtypes):
raise ValueError(
"Unexpected frame columns:\n"
"Expected Columns: %s\n"
"Received Columns: %s"
% (
set(expected_dtypes),
frame.columns.tolist(),
)
)
actual_dtypes = frame.dtypes
for colname, expected in expected_dtypes.items():
actual = actual_dtypes[colname]
if not np.issubdtype(actual, expected):
raise TypeError(
"Expected data of type {expected} for column"
" '{colname}', but got '{actual}'.".format(
expected=expected,
colname=colname,
actual=actual,
),
)
frame.to_sql(
tablename,
self.conn,
if_exists="append",
chunksize=50000,
)
def write_frame(self, tablename, frame):
if tablename not in SQLITE_ADJUSTMENT_TABLENAMES:
raise ValueError(
"Adjustment table %s not in %s"
% (
tablename,
SQLITE_ADJUSTMENT_TABLENAMES,
)
)
if not (frame is None or frame.empty):
frame = frame.copy()
frame["effective_date"] = (
frame["effective_date"]
.values.astype(
"datetime64[s]",
)
.astype("int64")
)
return self._write(
tablename,
SQLITE_ADJUSTMENT_COLUMN_DTYPES,
frame,
)
def write_dividend_payouts(self, frame):
"""
Write dividend payout data to SQLite table `dividend_payouts`.
"""
return self._write(
"dividend_payouts",
SQLITE_DIVIDEND_PAYOUT_COLUMN_DTYPES,
frame,
)
def write_stock_dividend_payouts(self, frame):
return self._write(
"stock_dividend_payouts",
SQLITE_STOCK_DIVIDEND_PAYOUT_COLUMN_DTYPES,
frame,
)
def calc_dividend_ratios(self, dividends):
"""
Calculate the ratios to apply to equities when looking back at pricing
history so that the price is smoothed over the ex_date, when the market
adjusts to the change in equity value due to upcoming dividend.
Returns
-------
DataFrame
A frame in the same format as splits and mergers, with keys
- sid, the id of the equity
- effective_date, the date in seconds on which to apply the ratio.
- ratio, the ratio to apply to backwards looking pricing data.
"""
if dividends is None or dividends.empty:
return pd.DataFrame(
np.array(
[],
dtype=[
("sid", uint64_dtype),
("effective_date", uint32_dtype),
("ratio", float64_dtype),
],
)
)
pricing_reader = self._equity_daily_bar_reader
input_sids = dividends.sid.values
unique_sids, sids_ix = np.unique(input_sids, return_inverse=True)
dates = pricing_reader.sessions.values
(close,) = pricing_reader.load_raw_arrays(
["close"],
pd.Timestamp(dates[0], tz="UTC"),
pd.Timestamp(dates[-1], tz="UTC"),
unique_sids,
)
date_ix = np.searchsorted(dates, dividends.ex_date.values)
mask = date_ix > 0
date_ix = date_ix[mask]
sids_ix = sids_ix[mask]
input_dates = dividends.ex_date.values[mask]
# subtract one day to get the close on the day prior to the ex_date
previous_close = close[date_ix - 1, sids_ix]
input_sids = input_sids[mask]
amount = dividends.amount.values[mask]
div_percent = dividends.div_percent.values[mask]
ratio = 1.0 - ( amount * div_percent ) / previous_close
non_nan_ratio_mask = ~np.isnan(ratio)
for ix in np.flatnonzero(~non_nan_ratio_mask):
log.warn(
"Couldn't compute ratio for dividend"
" sid={sid}, ex_date={ex_date:%Y-%m-%d}, amount={amount:.3f}",
sid=input_sids[ix],
ex_date=pd.Timestamp(input_dates[ix]),
amount=amount[ix],
)
positive_ratio_mask = ratio > 0
for ix in np.flatnonzero(~positive_ratio_mask & non_nan_ratio_mask):
log.warn(
"Dividend ratio <= 0 for dividend"
" sid={sid}, ex_date={ex_date:%Y-%m-%d}, amount={amount:.3f}",
sid=input_sids[ix],
ex_date=pd.Timestamp(input_dates[ix]),
amount=amount[ix],
)
valid_ratio_mask = non_nan_ratio_mask & positive_ratio_mask
return pd.DataFrame(
{
"sid": input_sids[valid_ratio_mask],
"effective_date": input_dates[valid_ratio_mask],
"ratio": ratio[valid_ratio_mask],
}
)
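# Worked example of the ratio formula above (made-up numbers): with a prior
# close of 100.0, a cash amount of 2.0 and div_percent of 1.0, the ratio is
# 1.0 - (2.0 * 1.0) / 100.0 = 0.98, i.e. prices before the ex_date are scaled
# down by 2%. With div_percent of 0.5 the ratio becomes
# 1.0 - (2.0 * 0.5) / 100.0 = 0.99.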
def _write_dividends(self, dividends):
if dividends is None:
dividend_payouts = None
else:
dividend_payouts = dividends.copy()
# TODO: Check if that's the right place for this fix for pandas > 1.2.5
dividend_payouts.fillna(np.datetime64("NaT"), inplace=True)
dividend_payouts["ex_date"] = (
dividend_payouts["ex_date"]
.values.astype("datetime64[s]")
.astype(int64_dtype)
)
dividend_payouts["record_date"] = (
dividend_payouts["record_date"]
.values.astype("datetime64[s]")
.astype(int64_dtype)
)
dividend_payouts["declared_date"] = (
dividend_payouts["declared_date"]
.values.astype("datetime64[s]")
.astype(int64_dtype)
)
dividend_payouts["pay_date"] = (
dividend_payouts["pay_date"]
.values.astype("datetime64[s]")
.astype(int64_dtype)
)
self.write_dividend_payouts(dividend_payouts)
def _write_stock_dividends(self, stock_dividends):
if stock_dividends is None:
stock_dividend_payouts = None
else:
stock_dividend_payouts = stock_dividends.copy()
stock_dividend_payouts["ex_date"] = (
stock_dividend_payouts["ex_date"]
.values.astype("datetime64[s]")
.astype(int64_dtype)
)
stock_dividend_payouts["record_date"] = (
stock_dividend_payouts["record_date"]
.values.astype("datetime64[s]")
.astype(int64_dtype)
)
stock_dividend_payouts["declared_date"] = (
stock_dividend_payouts["declared_date"]
.values.astype("datetime64[s]")
.astype(int64_dtype)
)
stock_dividend_payouts["pay_date"] = (
stock_dividend_payouts["pay_date"]
.values.astype("datetime64[s]")
.astype(int64_dtype)
)
self.write_stock_dividend_payouts(stock_dividend_payouts)
def write_dividend_data(self, dividends, stock_dividends=None):
"""
Write both dividend payouts and the derived price adjustment ratios.
"""
# First write the dividend payouts.
self._write_dividends(dividends)
self._write_stock_dividends(stock_dividends)
# Second from the dividend payouts, calculate ratios.
dividend_ratios = self.calc_dividend_ratios(dividends)
self.write_frame("dividends", dividend_ratios)
def write(self, splits=None, mergers=None, dividends=None, stock_dividends=None):
"""
Writes data to a SQLite file to be read by SQLiteAdjustmentReader.
Parameters
----------
splits : pandas.DataFrame, optional
Dataframe containing split data. The format of this dataframe is:
effective_date : int
The date, represented as seconds since Unix epoch, on which
the adjustment should be applied.
ratio : float
A value to apply to all data earlier than the effective date.
For open, high, low, and close those values are multiplied by
the ratio. Volume is divided by this value.
sid : int
The asset id associated with this adjustment.
mergers : pandas.DataFrame, optional
DataFrame containing merger data. The format of this dataframe is:
effective_date : int
The date, represented as seconds since Unix epoch, on which
the adjustment should be applied.
ratio : float
A value to apply to all data earlier than the effective date.
For open, high, low, and close those values are multiplied by
the ratio. Volume is unaffected.
sid : int
The asset id associated with this adjustment.
dividends : pandas.DataFrame, optional
DataFrame containing dividend data. The format of the dataframe is:
sid : int
The asset id associated with this adjustment.
ex_date : datetime64
The date on which an equity must be held to be eligible to
receive payment.
declared_date : datetime64
The date on which the dividend is announced to the public.
pay_date : datetime64
The date on which the dividend is distributed.
record_date : datetime64
The date on which the stock ownership is checked to determine
distribution of dividends.
amount : float
The cash amount paid for each share.
div_percent : float
The percentage of cash received from cash dividends.
Dividend ratios are calculated as:
``1.0 - (amount * div_percent / "close on day prior to ex_date")``
stock_dividends : pandas.DataFrame, optional
DataFrame containing stock dividend data. The format of the
dataframe is:
sid : int
The asset id associated with this adjustment.
ex_date : datetime64
The date on which an equity must be held to be eligible to
receive payment.
declared_date : datetime64
The date on which the dividend is announced to the public.
pay_date : datetime64
The date on which the dividend is distributed.
record_date : datetime64
The date on which the stock ownership is checked to determine
distribution of dividends.
payment_sid : int
The asset id of the shares that should be paid instead of
cash.
ratio : float
The ratio of currently held shares in the held sid that
should be paid with new shares of the payment_sid.
See Also
--------
zipline.data.adjustments.SQLiteAdjustmentReader
"""
self.write_frame("splits", splits)
self.write_frame("mergers", mergers)
self.write_dividend_data(dividends, stock_dividends)
# Use IF NOT EXISTS here to allow multiple writes if desired.
self.conn.execute("CREATE INDEX IF NOT EXISTS splits_sids " "ON splits(sid)")
self.conn.execute(
"CREATE INDEX IF NOT EXISTS splits_effective_date "
"ON splits(effective_date)"
)
self.conn.execute("CREATE INDEX IF NOT EXISTS mergers_sids " "ON mergers(sid)")
self.conn.execute(
"CREATE INDEX IF NOT EXISTS mergers_effective_date "
"ON mergers(effective_date)"
)
self.conn.execute(
"CREATE INDEX IF NOT EXISTS dividends_sid " "ON dividends(sid)"
)
self.conn.execute(
"CREATE INDEX IF NOT EXISTS dividends_effective_date "
"ON dividends(effective_date)"
)
self.conn.execute(
"CREATE INDEX IF NOT EXISTS dividend_payouts_sid "
"ON dividend_payouts(sid)"
)
self.conn.execute(
"CREATE INDEX IF NOT EXISTS dividends_payouts_ex_date "
"ON dividend_payouts(ex_date)"
)
self.conn.execute(
"CREATE INDEX IF NOT EXISTS stock_dividend_payouts_sid "
"ON stock_dividend_payouts(sid)"
)
self.conn.execute(
"CREATE INDEX IF NOT EXISTS stock_dividends_payouts_ex_date "
"ON stock_dividend_payouts(ex_date)"
) | zipline-tej | /zipline_tej-0.0.48-cp38-cp38-win_amd64.whl/zipline/data/adjustments.py | adjustments.py |
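# A minimal writing sketch; the file name, sid and date are hypothetical and
# ``daily_bar_reader`` stands for any daily bar reader covering the same sids
# (it is only consulted when non-empty dividends are written).
#
#     import pandas as pd
#     from zipline.data.adjustments import SQLiteAdjustmentWriter
#
#     splits = pd.DataFrame({'effective_date': [pd.Timestamp('2021-06-01')],
#                            'ratio': [0.5],
#                            'sid': [1]})
#     with SQLiteAdjustmentWriter('adjustments.sqlite',
#                                 daily_bar_reader,
#                                 overwrite=True) as writer:
#         writer.write(splits=splits)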
from collections import OrderedDict
from abc import ABCMeta, abstractmethod
import numpy as np
import pandas as pd
from zipline.data._resample import (
_minute_to_session_open,
_minute_to_session_high,
_minute_to_session_low,
_minute_to_session_close,
_minute_to_session_volume,
)
from zipline.data.bar_reader import NoDataOnDate
from zipline.data.minute_bars import MinuteBarReader
from zipline.data.session_bars import SessionBarReader
from zipline.utils.memoize import lazyval
from zipline.utils.math_utils import nanmax, nanmin
_MINUTE_TO_SESSION_OHCLV_HOW = OrderedDict(
(
("open", "first"),
("high", "max"),
("low", "min"),
("close", "last"),
("volume", "sum"),
)
)
def minute_frame_to_session_frame(minute_frame, calendar):
"""
Resample a DataFrame with minute data into the frame expected by a
BcolzDailyBarWriter.
Parameters
----------
minute_frame : pd.DataFrame
A DataFrame with the columns `open`, `high`, `low`, `close`, `volume`,
and `dt` (minute dts)
calendar : trading_calendars.trading_calendar.TradingCalendar
The calendar whose session labels are used to resample the minute
data into session bars.
Return
------
session_frame : pd.DataFrame
A DataFrame with the columns `open`, `high`, `low`, `close`, `volume`,
and `day` (datetime-like).
"""
how = OrderedDict(
(c, _MINUTE_TO_SESSION_OHCLV_HOW[c]) for c in minute_frame.columns
)
labels = calendar.minute_index_to_session_labels(minute_frame.index)
return minute_frame.groupby(labels).agg(how)
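# A small illustration of the resampling above; the calendar name and minute
# labels are hypothetical.
#
#     import pandas as pd
#     from zipline.utils.calendar_utils import get_calendar
#
#     minutes = pd.date_range('2021-06-01 01:01', '2021-06-01 05:30',
#                             freq='T', tz='UTC')
#     minute_frame = pd.DataFrame({'open': 1.0, 'high': 2.0, 'low': 0.5,
#                                  'close': 1.5, 'volume': 100}, index=minutes)
#     daily = minute_frame_to_session_frame(minute_frame, get_calendar('XTAI'))
#     # One row per session: first open, max high, min low, last close,
#     # summed volume.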
def minute_to_session(column, close_locs, data, out):
"""
Resample an array with minute data into an array with session data.
This function assumes that the minute data is the exact length of all
minutes in the sessions in the output.
Parameters
----------
column : str
The `open`, `high`, `low`, `close`, or `volume` column.
close_locs : array[intp]
The locations in `data` which are the market close minutes.
data : array[float64|uint32]
The minute data to be sampled into session data.
The first value should align with the market open of the first session
and the last value with the market close of the last session; the array
should contain values for every minute of every session in between.
out : array[float64|uint32]
The output array into which to write the sampled sessions.
"""
if column == "open":
_minute_to_session_open(close_locs, data, out)
elif column == "high":
_minute_to_session_high(close_locs, data, out)
elif column == "low":
_minute_to_session_low(close_locs, data, out)
elif column == "close":
_minute_to_session_close(close_locs, data, out)
elif column == "volume":
_minute_to_session_volume(close_locs, data, out)
return out
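# Shape contract for minute_to_session() (hypothetical sizes): if ``data``
# covers two 390-minute sessions, then data.shape == (780,), ``close_locs``
# is np.array([389, 779]) (the positions of each session's closing minute)
# and ``out`` must be preallocated with shape (2,).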
class DailyHistoryAggregator(object):
"""
Converts minute pricing data into a daily summary, to be used for the
last slot in a call to history with a frequency of `1d`.
This summary is the same as a daily bar rollup of minute data, with the
distinction that the summary is truncated to the `dt` requested.
i.e. the aggregation slides forward over the course of the simulation day.
Provides aggregation for `open`, `high`, `low`, `close`, and `volume`.
The aggregation rules for each price type are documented in their respective aggregation methods.
"""
def __init__(self, market_opens, minute_reader, trading_calendar):
self._market_opens = market_opens
self._minute_reader = minute_reader
self._trading_calendar = trading_calendar
# The caches are structured as (date, market_open, entries), where
# entries is a dict of asset -> (last_visited_dt, value)
#
# Whenever an aggregation method determines the current value,
# the entry for the respective asset should be overwritten with a new
# entry for the current dt.value (int) and aggregation value.
#
# When the requested dt's date differs from the cached date, the cache is
# flushed so that the cache entries do not grow unbounded.
#
# Example cache:
# cache = (date(2016, 3, 17),
# pd.Timestamp('2016-03-17 13:31', tz='UTC'),
# {
# 1: (1458221460000000000, np.nan),
# 2: (1458221460000000000, 42.0),
# })
self._caches = {
"open": None,
"high": None,
"low": None,
"close": None,
"volume": None,
}
# The int value is used for deltas to avoid extra computation from
# creating new Timestamps.
self._one_min = pd.Timedelta("1 min").value
def _prelude(self, dt, field):
session = self._trading_calendar.minute_to_session_label(dt)
dt_value = dt.value
cache = self._caches[field]
if cache is None or cache[0] != session:
market_open = self._market_opens.loc[session]
cache = self._caches[field] = (session, market_open, {})
_, market_open, entries = cache
market_open = market_open.tz_localize("UTC")
if dt != market_open:
prev_dt = dt_value - self._one_min
else:
prev_dt = None
return market_open, prev_dt, dt_value, entries
def opens(self, assets, dt):
"""
The open field's aggregation returns the first value that occurs
for the day, if there has been no data on or before the `dt` the open
is `nan`.
Once the first non-nan open is seen, that value remains constant per
asset for the remainder of the day.
Returns
-------
np.array with dtype=float64, in order of assets parameter.
"""
market_open, prev_dt, dt_value, entries = self._prelude(dt, "open")
opens = []
session_label = self._trading_calendar.minute_to_session_label(dt)
for asset in assets:
if not asset.is_alive_for_session(session_label):
opens.append(np.NaN)
continue
if prev_dt is None:
val = self._minute_reader.get_value(asset, dt, "open")
entries[asset] = (dt_value, val)
opens.append(val)
continue
else:
try:
last_visited_dt, first_open = entries[asset]
if last_visited_dt == dt_value:
opens.append(first_open)
continue
elif not pd.isnull(first_open):
opens.append(first_open)
entries[asset] = (dt_value, first_open)
continue
else:
after_last = pd.Timestamp(
last_visited_dt + self._one_min, tz="UTC"
)
window = self._minute_reader.load_raw_arrays(
["open"],
after_last,
dt,
[asset],
)[0]
nonnan = window[~pd.isnull(window)]
if len(nonnan):
val = nonnan[0]
else:
val = np.nan
entries[asset] = (dt_value, val)
opens.append(val)
continue
except KeyError:
window = self._minute_reader.load_raw_arrays(
["open"],
market_open,
dt,
[asset],
)[0]
nonnan = window[~pd.isnull(window)]
if len(nonnan):
val = nonnan[0]
else:
val = np.nan
entries[asset] = (dt_value, val)
opens.append(val)
continue
return np.array(opens)
def highs(self, assets, dt):
"""
The high field's aggregation returns the largest high seen between
the market open and the current dt.
If there has been no data on or before the `dt` the high is `nan`.
Returns
-------
np.array with dtype=float64, in order of assets parameter.
"""
market_open, prev_dt, dt_value, entries = self._prelude(dt, "high")
highs = []
session_label = self._trading_calendar.minute_to_session_label(dt)
for asset in assets:
if not asset.is_alive_for_session(session_label):
highs.append(np.NaN)
continue
if prev_dt is None:
val = self._minute_reader.get_value(asset, dt, "high")
entries[asset] = (dt_value, val)
highs.append(val)
continue
else:
try:
last_visited_dt, last_max = entries[asset]
if last_visited_dt == dt_value:
highs.append(last_max)
continue
elif last_visited_dt == prev_dt:
curr_val = self._minute_reader.get_value(asset, dt, "high")
if pd.isnull(curr_val):
val = last_max
elif pd.isnull(last_max):
val = curr_val
else:
val = max(last_max, curr_val)
entries[asset] = (dt_value, val)
highs.append(val)
continue
else:
after_last = pd.Timestamp(
last_visited_dt + self._one_min, tz="UTC"
)
window = self._minute_reader.load_raw_arrays(
["high"],
after_last,
dt,
[asset],
)[0].T
val = nanmax(np.append(window, last_max))
entries[asset] = (dt_value, val)
highs.append(val)
continue
except KeyError:
window = self._minute_reader.load_raw_arrays(
["high"],
market_open,
dt,
[asset],
)[0].T
val = nanmax(window)
entries[asset] = (dt_value, val)
highs.append(val)
continue
return np.array(highs)
def lows(self, assets, dt):
"""
The low field's aggregation returns the smallest low seen between
the market open and the current dt.
If there has been no data on or before the `dt` the low is `nan`.
Returns
-------
np.array with dtype=float64, in order of assets parameter.
"""
market_open, prev_dt, dt_value, entries = self._prelude(dt, "low")
lows = []
session_label = self._trading_calendar.minute_to_session_label(dt)
for asset in assets:
if not asset.is_alive_for_session(session_label):
lows.append(np.NaN)
continue
if prev_dt is None:
val = self._minute_reader.get_value(asset, dt, "low")
entries[asset] = (dt_value, val)
lows.append(val)
continue
else:
try:
last_visited_dt, last_min = entries[asset]
if last_visited_dt == dt_value:
lows.append(last_min)
continue
elif last_visited_dt == prev_dt:
curr_val = self._minute_reader.get_value(asset, dt, "low")
val = nanmin([last_min, curr_val])
entries[asset] = (dt_value, val)
lows.append(val)
continue
else:
after_last = pd.Timestamp(
last_visited_dt + self._one_min, tz="UTC"
)
window = self._minute_reader.load_raw_arrays(
["low"],
after_last,
dt,
[asset],
)[0].T
val = nanmin(np.append(window, last_min))
entries[asset] = (dt_value, val)
lows.append(val)
continue
except KeyError:
window = self._minute_reader.load_raw_arrays(
["low"],
market_open,
dt,
[asset],
)[0].T
val = nanmin(window)
entries[asset] = (dt_value, val)
lows.append(val)
continue
return np.array(lows)
def closes(self, assets, dt):
"""
The close field's aggregation returns the latest close at the given
dt.
If the close for the given dt is `nan`, the most recent non-nan
`close` is used.
If there has been no data on or before the `dt` the close is `nan`.
Returns
-------
np.array with dtype=float64, in order of assets parameter.
"""
market_open, prev_dt, dt_value, entries = self._prelude(dt, "close")
closes = []
session_label = self._trading_calendar.minute_to_session_label(dt)
def _get_filled_close(asset):
"""
Returns the most recent non-nan close for the asset in this
session. If there has been no data in this session on or before the
`dt`, returns `nan`
"""
window = self._minute_reader.load_raw_arrays(
["close"],
market_open,
dt,
[asset],
)[0]
try:
return window[~np.isnan(window)][-1]
except IndexError:
return np.NaN
for asset in assets:
if not asset.is_alive_for_session(session_label):
closes.append(np.NaN)
continue
if prev_dt is None:
val = self._minute_reader.get_value(asset, dt, "close")
entries[asset] = (dt_value, val)
closes.append(val)
continue
else:
try:
last_visited_dt, last_close = entries[asset]
if last_visited_dt == dt_value:
closes.append(last_close)
continue
elif last_visited_dt == prev_dt:
val = self._minute_reader.get_value(asset, dt, "close")
if pd.isnull(val):
val = last_close
entries[asset] = (dt_value, val)
closes.append(val)
continue
else:
val = self._minute_reader.get_value(asset, dt, "close")
if pd.isnull(val):
val = _get_filled_close(asset)
entries[asset] = (dt_value, val)
closes.append(val)
continue
except KeyError:
val = self._minute_reader.get_value(asset, dt, "close")
if pd.isnull(val):
val = _get_filled_close(asset)
entries[asset] = (dt_value, val)
closes.append(val)
continue
return np.array(closes)
def volumes(self, assets, dt):
"""
The volume field's aggregation returns the sum of all volumes
between the market open and the `dt`
If there has been no data on or before the `dt` the volume is 0.
Returns
-------
np.array with dtype=int64, in order of assets parameter.
"""
market_open, prev_dt, dt_value, entries = self._prelude(dt, "volume")
volumes = []
session_label = self._trading_calendar.minute_to_session_label(dt)
for asset in assets:
if not asset.is_alive_for_session(session_label):
volumes.append(0)
continue
if prev_dt is None:
val = self._minute_reader.get_value(asset, dt, "volume")
entries[asset] = (dt_value, val)
volumes.append(val)
continue
else:
try:
last_visited_dt, last_total = entries[asset]
if last_visited_dt == dt_value:
volumes.append(last_total)
continue
elif last_visited_dt == prev_dt:
val = self._minute_reader.get_value(asset, dt, "volume")
val += last_total
entries[asset] = (dt_value, val)
volumes.append(val)
continue
else:
after_last = pd.Timestamp(
last_visited_dt + self._one_min, tz="UTC"
)
window = self._minute_reader.load_raw_arrays(
["volume"],
after_last,
dt,
[asset],
)[0]
val = np.nansum(window) + last_total
entries[asset] = (dt_value, val)
volumes.append(val)
continue
except KeyError:
window = self._minute_reader.load_raw_arrays(
["volume"],
market_open,
dt,
[asset],
)[0]
val = np.nansum(window)
entries[asset] = (dt_value, val)
volumes.append(val)
continue
return np.array(volumes)
class MinuteResampleSessionBarReader(SessionBarReader):
def __init__(self, calendar, minute_bar_reader):
self._calendar = calendar
self._minute_bar_reader = minute_bar_reader
def _get_resampled(self, columns, start_session, end_session, assets):
range_open = self._calendar.session_open(start_session)
range_close = self._calendar.session_close(end_session)
minute_data = self._minute_bar_reader.load_raw_arrays(
columns,
range_open,
range_close,
assets,
)
# Get the index of the close minute for each session in the range.
# If the range contains only one session, the only close in the range
# is the last minute in the data. Otherwise, we need to get all the
# session closes and find their indices in the range of minutes.
if start_session == end_session:
close_ilocs = np.array([len(minute_data[0]) - 1], dtype=np.int64)
else:
minutes = self._calendar.minutes_in_range(
range_open,
range_close,
)
session_closes = self._calendar.session_closes_in_range(
start_session,
end_session,
)
close_ilocs = minutes.searchsorted(pd.DatetimeIndex(session_closes))
results = []
shape = (len(close_ilocs), len(assets))
for col in columns:
if col != "volume":
out = np.full(shape, np.nan)
else:
out = np.zeros(shape, dtype=np.uint32)
results.append(out)
for i in range(len(assets)):
for j, column in enumerate(columns):
data = minute_data[j][:, i]
minute_to_session(column, close_ilocs, data, results[j][:, i])
return results
@property
def trading_calendar(self):
return self._calendar
def load_raw_arrays(self, columns, start_dt, end_dt, sids):
return self._get_resampled(columns, start_dt, end_dt, sids)
def get_value(self, sid, session, colname):
# WARNING: This will need caching or other optimization if used in a
# tight loop.
# This was developed to complete interface, but has not been tuned
# for real world use.
return self._get_resampled([colname], session, session, [sid])[0][0][0]
@lazyval
def sessions(self):
cal = self._calendar
first = self._minute_bar_reader.first_trading_day
last = cal.minute_to_session_label(self._minute_bar_reader.last_available_dt)
return cal.sessions_in_range(first, last)
@lazyval
def last_available_dt(self):
return self.trading_calendar.minute_to_session_label(
self._minute_bar_reader.last_available_dt
)
@property
def first_trading_day(self):
return self._minute_bar_reader.first_trading_day
def get_last_traded_dt(self, asset, dt):
last_dt = self._minute_bar_reader.get_last_traded_dt(asset, dt)
if pd.isnull(last_dt):
# todo: this doesn't seem right
return self.trading_calendar.first_trading_session
return self.trading_calendar.minute_to_session_label(last_dt)
class ReindexBarReader(metaclass=ABCMeta):
"""
A base class for readers which reindex results, filling in the additional
indices with empty data.
Used to align the reading assets which trade on different calendars.
Currently only supports a ``trading_calendar`` which is a superset of the
``reader``'s calendar.
Parameters
----------
- trading_calendar : zipline.utils.trading_calendar.TradingCalendar
The calendar to use when indexing results from the reader.
- reader : MinuteBarReader|SessionBarReader
The reader which has a calendar that is a subset of the desired
``trading_calendar``.
- first_trading_session : pd.Timestamp
The first trading session the reader should provide. Must be specified,
since the ``reader``'s first session may not exactly align with the
desired calendar. Specifically, in the case where the first session
on the target calendar is a holiday on the ``reader``'s calendar.
- last_trading_session : pd.Timestamp
The last trading session the reader should provide. Must be specified,
since the ``reader``'s last session may not exactly align with the
desired calendar. Specifically, in the case where the last session
on the target calendar is a holiday on the ``reader``'s calendar.
"""
def __init__(
self,
trading_calendar,
reader,
first_trading_session,
last_trading_session,
):
self._trading_calendar = trading_calendar
self._reader = reader
self._first_trading_session = first_trading_session
self._last_trading_session = last_trading_session
@property
def last_available_dt(self):
return self._reader.last_available_dt
def get_last_traded_dt(self, sid, dt):
return self._reader.get_last_traded_dt(sid, dt)
@property
def first_trading_day(self):
return self._reader.first_trading_day
def get_value(self, sid, dt, field):
# Give an empty result if no data is present.
try:
return self._reader.get_value(sid, dt, field)
except NoDataOnDate:
if field == "volume":
return 0
else:
return np.nan
@abstractmethod
def _outer_dts(self, start_dt, end_dt):
raise NotImplementedError
@abstractmethod
def _inner_dts(self, start_dt, end_dt):
raise NotImplementedError
@property
def trading_calendar(self):
return self._trading_calendar
@lazyval
def sessions(self):
return self.trading_calendar.sessions_in_range(
self._first_trading_session, self._last_trading_session
)
def load_raw_arrays(self, fields, start_dt, end_dt, sids):
outer_dts = self._outer_dts(start_dt, end_dt)
inner_dts = self._inner_dts(start_dt, end_dt)
indices = outer_dts.searchsorted(inner_dts)
shape = len(outer_dts), len(sids)
outer_results = []
if len(inner_dts) > 0:
inner_results = self._reader.load_raw_arrays(
fields, inner_dts[0], inner_dts[-1], sids
)
else:
inner_results = None
for i, field in enumerate(fields):
if field != "volume":
out = np.full(shape, np.nan)
else:
out = np.zeros(shape, dtype=np.uint32)
if inner_results is not None:
out[indices] = inner_results[i]
outer_results.append(out)
return outer_results
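# Sketch of the reindexing above with made-up sessions: if the target calendar
# has [Mon, Tue, Wed] but the wrapped reader's calendar only has [Mon, Wed],
# then ``indices`` is [0, 2], the inner results land in rows 0 and 2 of the
# output, and row 1 stays NaN (or 0 for volume).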
class ReindexMinuteBarReader(ReindexBarReader, MinuteBarReader):
"""
See: ``ReindexBarReader``
"""
def _outer_dts(self, start_dt, end_dt):
return self._trading_calendar.minutes_in_range(start_dt, end_dt)
def _inner_dts(self, start_dt, end_dt):
return self._reader.calendar.minutes_in_range(start_dt, end_dt)
class ReindexSessionBarReader(ReindexBarReader, SessionBarReader):
"""
See: ``ReindexBarReader``
"""
def _outer_dts(self, start_dt, end_dt):
return self.trading_calendar.sessions_in_range(start_dt, end_dt)
def _inner_dts(self, start_dt, end_dt):
return self._reader.trading_calendar.sessions_in_range(start_dt, end_dt) | zipline-tej | /zipline_tej-0.0.48-cp38-cp38-win_amd64.whl/zipline/data/resample.py | resample.py |
import numpy as np
import pandas as pd
from zipline.data.session_bars import SessionBarReader
class ContinuousFutureSessionBarReader(SessionBarReader):
def __init__(self, bar_reader, roll_finders):
self._bar_reader = bar_reader
self._roll_finders = roll_finders
def load_raw_arrays(self, columns, start_date, end_date, assets):
"""
Parameters
----------
columns : list of str
'open', 'high', 'low', 'close', 'volume', or 'sid'
start_date : pd.Timestamp
Beginning of the window range.
end_date : pd.Timestamp
End of the window range.
assets : list of zipline.assets.ContinuousFuture
The continuous future contracts in the window.
Returns
-------
list of np.ndarray
A list with an entry per field of ndarrays with shape
(sessions in range, assets) with a dtype of float64, containing the
values for the respective field over start and end dt range.
"""
rolls_by_asset = {}
for asset in assets:
rf = self._roll_finders[asset.roll_style]
rolls_by_asset[asset] = rf.get_rolls(
asset.root_symbol, start_date, end_date, asset.offset
)
num_sessions = len(
self.trading_calendar.sessions_in_range(start_date, end_date)
)
shape = num_sessions, len(assets)
results = []
tc = self._bar_reader.trading_calendar
sessions = tc.sessions_in_range(start_date, end_date)
# Get partitions
partitions_by_asset = {}
for asset in assets:
partitions = []
partitions_by_asset[asset] = partitions
rolls = rolls_by_asset[asset]
start = start_date
for roll in rolls:
sid, roll_date = roll
start_loc = sessions.get_loc(start)
if roll_date is not None:
end = roll_date - sessions.freq
end_loc = sessions.get_loc(end)
else:
end = end_date
end_loc = len(sessions) - 1
partitions.append((sid, start, end, start_loc, end_loc))
if roll_date is not None:
start = sessions[end_loc + 1]
for column in columns:
if column != "volume" and column != "sid":
out = np.full(shape, np.nan)
else:
out = np.zeros(shape, dtype=np.int64)
for i, asset in enumerate(assets):
partitions = partitions_by_asset[asset]
for sid, start, end, start_loc, end_loc in partitions:
if column != "sid":
result = self._bar_reader.load_raw_arrays(
[column], start, end, [sid]
)[0][:, 0]
else:
result = int(sid)
out[start_loc : end_loc + 1, i] = result
results.append(out)
return results
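# Sketch of the partitioning above with made-up rolls: for rolls
# [(101, 2021-03-15), (102, None)], the first partition reads sid 101 from
# start_date through the session before 2021-03-15, and the second reads
# sid 102 from 2021-03-15 through end_date; each partition is then copied
# into its date slice of the output column.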
@property
def last_available_dt(self):
"""
Returns
-------
dt : pd.Timestamp
The last session for which the reader can provide data.
"""
return self._bar_reader.last_available_dt
@property
def trading_calendar(self):
"""
Returns the zipline.utils.calendar.trading_calendar used to read
the data. Can be None (if the writer didn't specify it).
"""
return self._bar_reader.trading_calendar
@property
def first_trading_day(self):
"""
Returns
-------
dt : pd.Timestamp
The first trading day (session) for which the reader can provide
data.
"""
return self._bar_reader.first_trading_day
def get_value(self, continuous_future, dt, field):
"""
Retrieve the value at the given coordinates.
Parameters
----------
continuous_future : zipline.assets.ContinuousFuture
The continuous future whose active contract should be read.
dt : pd.Timestamp
The timestamp for the desired data point.
field : string
The OHLCV field name for the desired data point.
Returns
-------
value : float|int
The value at the given coordinates, ``float`` for OHLC, ``int``
for 'volume'.
Raises
------
NoDataOnDate
If the given dt is not a valid market minute (in minute mode) or
session (in daily mode) according to this reader's trading calendar.
"""
rf = self._roll_finders[continuous_future.roll_style]
sid = rf.get_contract_center(
continuous_future.root_symbol, dt, continuous_future.offset
)
return self._bar_reader.get_value(sid, dt, field)
def get_last_traded_dt(self, asset, dt):
"""
Get the latest minute on or before ``dt`` in which ``asset`` traded.
If there are no trades on or before ``dt``, returns ``pd.NaT``.
Parameters
----------
asset : zipline.asset.Asset
The asset for which to get the last traded minute.
dt : pd.Timestamp
The minute at which to start searching for the last traded minute.
Returns
-------
last_traded : pd.Timestamp
The dt of the last trade for the given asset, using the input
dt as a vantage point.
"""
rf = self._roll_finders[asset.roll_style]
sid = rf.get_contract_center(asset.root_symbol, dt, asset.offset)
if sid is None:
return pd.NaT
contract = rf.asset_finder.retrieve_asset(sid)
return self._bar_reader.get_last_traded_dt(contract, dt)
@property
def sessions(self):
"""
Returns
-------
sessions : DatetimeIndex
All session labels (unioning the range for all assets) which the
reader can provide.
"""
return self._bar_reader.sessions
class ContinuousFutureMinuteBarReader(SessionBarReader):
def __init__(self, bar_reader, roll_finders):
self._bar_reader = bar_reader
self._roll_finders = roll_finders
def load_raw_arrays(self, columns, start_date, end_date, assets):
"""
Parameters
----------
columns : list of str
'open', 'high', 'low', 'close', or 'volume'
start_date : pd.Timestamp
Beginning of the window range.
end_date : pd.Timestamp
End of the window range.
assets : list of zipline.assets.ContinuousFuture
The continuous future contracts in the window.
Returns
-------
list of np.ndarray
A list with an entry per field of ndarrays with shape
(minutes in range, sids) with a dtype of float64, containing the
values for the respective field over start and end dt range.
"""
rolls_by_asset = {}
tc = self.trading_calendar
start_session = tc.minute_to_session_label(start_date)
end_session = tc.minute_to_session_label(end_date)
for asset in assets:
rf = self._roll_finders[asset.roll_style]
rolls_by_asset[asset] = rf.get_rolls(
asset.root_symbol, start_session, end_session, asset.offset
)
sessions = tc.sessions_in_range(start_date, end_date)
minutes = tc.minutes_in_range(start_date, end_date)
num_minutes = len(minutes)
shape = num_minutes, len(assets)
results = []
# Get partitions
partitions_by_asset = {}
for asset in assets:
partitions = []
partitions_by_asset[asset] = partitions
rolls = rolls_by_asset[asset]
start = start_date
for roll in rolls:
sid, roll_date = roll
start_loc = minutes.searchsorted(start)
if roll_date is not None:
_, end = tc.open_and_close_for_session(roll_date - sessions.freq)
end_loc = minutes.searchsorted(end)
else:
end = end_date
end_loc = len(minutes) - 1
partitions.append((sid, start, end, start_loc, end_loc))
if roll[-1] is not None:
start, _ = tc.open_and_close_for_session(
tc.minute_to_session_label(minutes[end_loc + 1])
)
for column in columns:
if column != "volume":
out = np.full(shape, np.nan)
else:
out = np.zeros(shape, dtype=np.uint32)
for i, asset in enumerate(assets):
partitions = partitions_by_asset[asset]
for sid, start, end, start_loc, end_loc in partitions:
if column != "sid":
result = self._bar_reader.load_raw_arrays(
[column], start, end, [sid]
)[0][:, 0]
else:
result = int(sid)
out[start_loc : end_loc + 1, i] = result
results.append(out)
return results
@property
def last_available_dt(self):
"""
Returns
-------
dt : pd.Timestamp
The last session for which the reader can provide data.
"""
return self._bar_reader.last_available_dt
@property
def trading_calendar(self):
"""
Returns the zipline.utils.calendar.trading_calendar used to read
the data. Can be None (if the writer didn't specify it).
"""
return self._bar_reader.trading_calendar
@property
def first_trading_day(self):
"""
Returns
-------
dt : pd.Timestamp
The first trading day (session) for which the reader can provide
data.
"""
return self._bar_reader.first_trading_day
def get_value(self, continuous_future, dt, field):
"""
Retrieve the value at the given coordinates.
Parameters
----------
continuous_future : zipline.assets.ContinuousFuture
The continuous future whose active contract should be read.
dt : pd.Timestamp
The timestamp for the desired data point.
field : string
The OHLCV field name for the desired data point.
Returns
-------
value : float|int
The value at the given coordinates, ``float`` for OHLC, ``int``
for 'volume'.
Raises
------
NoDataOnDate
If the given dt is not a valid market minute (in minute mode) or
session (in daily mode) according to this reader's trading calendar.
"""
rf = self._roll_finders[continuous_future.roll_style]
sid = rf.get_contract_center(
continuous_future.root_symbol, dt, continuous_future.offset
)
return self._bar_reader.get_value(sid, dt, field)
def get_last_traded_dt(self, asset, dt):
"""
Get the latest minute on or before ``dt`` in which ``asset`` traded.
If there are no trades on or before ``dt``, returns ``pd.NaT``.
Parameters
----------
asset : zipline.asset.Asset
The asset for which to get the last traded minute.
dt : pd.Timestamp
The minute at which to start searching for the last traded minute.
Returns
-------
last_traded : pd.Timestamp
The dt of the last trade for the given asset, using the input
dt as a vantage point.
"""
rf = self._roll_finders[asset.roll_style]
sid = rf.get_contract_center(asset.root_symbol, dt, asset.offset)
if sid is None:
return pd.NaT
contract = rf.asset_finder.retrieve_asset(sid)
return self._bar_reader.get_last_traded_dt(contract, dt)
@property
def sessions(self):
return self._bar_reader.sessions | zipline-tej | /zipline_tej-0.0.48-cp38-cp38-win_amd64.whl/zipline/data/continuous_future_reader.py | continuous_future_reader.py |
from functools import partial
import h5py
import logbook
import numpy as np
import pandas as pd
from functools import reduce
from zipline.data.bar_reader import (
NoDataAfterDate,
NoDataBeforeDate,
NoDataForSid,
NoDataOnDate,
)
from zipline.data.session_bars import CurrencyAwareSessionBarReader
from zipline.utils.memoize import lazyval
from zipline.utils.numpy_utils import bytes_array_to_native_str_object_array
from zipline.utils.pandas_utils import check_indexes_all_same
log = logbook.Logger("HDF5DailyBars")
VERSION = 0
DATA = "data"
INDEX = "index"
LIFETIMES = "lifetimes"
CURRENCY = "currency"
CODE = "code"
SCALING_FACTOR = "scaling_factor"
OPEN = "open"
HIGH = "high"
LOW = "low"
CLOSE = "close"
VOLUME = "volume"
FIELDS = (OPEN, HIGH, LOW, CLOSE, VOLUME)
DAY = "day"
SID = "sid"
START_DATE = "start_date"
END_DATE = "end_date"
# XXX is reserved for "transactions involving no currency".
MISSING_CURRENCY = "XXX"
DEFAULT_SCALING_FACTORS = {
# Retain 3 decimal places for prices.
OPEN: 1000,
HIGH: 1000,
LOW: 1000,
CLOSE: 1000,
# Volume is expected to be a whole integer.
VOLUME: 1,
}
def coerce_to_uint32(a, scaling_factor):
"""
Returns a copy of the array as uint32, applying a scaling factor to
maintain precision if supplied.
"""
return (a * scaling_factor).round().astype("uint32")
def days_and_sids_for_frames(frames):
"""
Returns the date index and sid columns shared by a list of dataframes,
ensuring they all match.
Parameters
----------
frames : list[pd.DataFrame]
A list of dataframes indexed by day, with a column per sid.
Returns
-------
days : np.array[datetime64[ns]]
The days in these dataframes.
sids : np.array[int64]
The sids in these dataframes.
Raises
------
ValueError
If the dataframes passed are not all indexed by the same days
and sids.
"""
if not frames:
days = np.array([], dtype="datetime64[ns]")
sids = np.array([], dtype="int64")
return days, sids
# Ensure the indices and columns all match.
check_indexes_all_same(
[frame.index for frame in frames],
message="Frames have mismatched days.",
)
check_indexes_all_same(
[frame.columns for frame in frames],
message="Frames have mismatched sids.",
)
return frames[0].index.values, frames[0].columns.values
class HDF5DailyBarWriter(object):
"""
Class capable of writing daily OHLCV data to disk in a format that
can be read efficiently by HDF5DailyBarReader.
Parameters
----------
filename : str
The location at which we should write our output.
date_chunk_size : int
The number of days per chunk in the HDF5 file. If this is
greater than the number of days in the data, the chunksize will
match the actual number of days.
See Also
--------
zipline.data.hdf5_daily_bars.HDF5DailyBarReader
"""
def __init__(self, filename, date_chunk_size):
self._filename = filename
self._date_chunk_size = date_chunk_size
def h5_file(self, mode):
return h5py.File(self._filename, mode)
def write(self, country_code, frames, currency_codes=None, scaling_factors=None):
"""
Write the OHLCV data for one country to the HDF5 file.
Parameters
----------
country_code : str
The ISO 3166 alpha-2 country code for this country.
frames : dict[str, pd.DataFrame]
A dict mapping each OHLCV field to a dataframe with a row
for each date and a column for each sid. The dataframes need
to have the same index and columns.
currency_codes : pd.Series, optional
Series mapping sids to 3-letter currency code values for those sids'
listing currencies. If not passed, missing currencies will be
written.
scaling_factors : dict[str, float], optional
A dict mapping each OHLCV field to a scaling factor, which
is applied (as a multiplier) to the values of field to
efficiently store them as uint32, while maintaining desired
precision. These factors are written to the file as metadata,
which is consumed by the reader to adjust back to the original
float values. Default is None, in which case
DEFAULT_SCALING_FACTORS is used.
"""
if scaling_factors is None:
scaling_factors = DEFAULT_SCALING_FACTORS
# Note that this functions validates that all of the frames
# share the same days and sids.
days, sids = days_and_sids_for_frames(list(frames.values()))
# XXX: We should make this required once we're using it everywhere.
if currency_codes is None:
currency_codes = pd.Series(index=sids, data=MISSING_CURRENCY)
# Currency codes should match dataframe columns.
check_sids_arrays_match(
sids,
currency_codes.index.values,
message="currency_codes sids do not match data sids:",
)
# Write start and end dates for each sid.
start_date_ixs, end_date_ixs = compute_asset_lifetimes(frames)
if len(sids):
chunks = (len(sids), min(self._date_chunk_size, len(days)))
else:
# h5py crashes if we provide chunks for empty data.
chunks = None
with self.h5_file(mode="a") as h5_file:
# ensure that the file version has been written
h5_file.attrs["version"] = VERSION
country_group = h5_file.create_group(country_code)
self._write_index_group(country_group, days, sids)
self._write_lifetimes_group(
country_group,
start_date_ixs,
end_date_ixs,
)
self._write_currency_group(country_group, currency_codes)
self._write_data_group(
country_group,
frames,
scaling_factors,
chunks,
)
def write_from_sid_df_pairs(
self, country_code, data, currency_codes=None, scaling_factors=None
):
"""
Parameters
----------
country_code : str
The ISO 3166 alpha-2 country code for this country.
data : iterable[tuple[int, pandas.DataFrame]]
The data chunks to write. Each chunk should be a tuple of
sid and the data for that asset.
currency_codes : pd.Series, optional
Series mapping sids to 3-letter currency code values for those sids'
listing currencies. If not passed, missing currencies will be
written.
scaling_factors : dict[str, float], optional
A dict mapping each OHLCV field to a scaling factor, which
is applied (as a multiplier) to the values of field to
efficiently store them as uint32, while maintaining desired
precision. These factors are written to the file as metadata,
which is consumed by the reader to adjust back to the original
float values. Default is None, in which case
DEFAULT_SCALING_FACTORS is used.
"""
data = list(data)
if not data:
empty_frame = pd.DataFrame(
data=None,
index=np.array([], dtype="datetime64[ns]"),
columns=np.array([], dtype="int64"),
)
return self.write(
country_code,
{f: empty_frame.copy() for f in FIELDS},
scaling_factors=scaling_factors,
)
sids, frames = zip(*data)
ohlcv_frame = pd.concat(frames)
# Repeat each sid for each row in its corresponding frame.
sid_ix = np.repeat(sids, [len(f) for f in frames])
# Add id to the index, so the frame is indexed by (date, id).
ohlcv_frame.set_index(sid_ix, append=True, inplace=True)
frames = {field: ohlcv_frame[field].unstack() for field in FIELDS}
return self.write(
country_code=country_code,
frames=frames,
scaling_factors=scaling_factors,
currency_codes=currency_codes,
)
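# A minimal writing sketch for HDF5DailyBarWriter; the file name, country
# code, sid and dates are hypothetical.
#
#     import pandas as pd
#     from zipline.data.hdf5_daily_bars import HDF5DailyBarWriter
#
#     sessions = pd.date_range('2021-01-04', periods=3, freq='B')
#     frame = pd.DataFrame({'open': 1.0, 'high': 2.0, 'low': 0.5,
#                           'close': 1.5, 'volume': 100}, index=sessions)
#     writer = HDF5DailyBarWriter('daily_bars.h5', date_chunk_size=30)
#     writer.write_from_sid_df_pairs('TW', [(1, frame)])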
def _write_index_group(self, country_group, days, sids):
"""Write /country/index."""
index_group = country_group.create_group(INDEX)
self._log_writing_dataset(index_group)
index_group.create_dataset(SID, data=sids)
# h5py does not support datetimes, so they need to be stored
# as integers.
index_group.create_dataset(DAY, data=days.astype(np.int64))
def _write_lifetimes_group(self, country_group, start_date_ixs, end_date_ixs):
"""Write /country/lifetimes"""
lifetimes_group = country_group.create_group(LIFETIMES)
self._log_writing_dataset(lifetimes_group)
lifetimes_group.create_dataset(START_DATE, data=start_date_ixs)
lifetimes_group.create_dataset(END_DATE, data=end_date_ixs)
def _write_currency_group(self, country_group, currencies):
"""Write /country/currency"""
currency_group = country_group.create_group(CURRENCY)
self._log_writing_dataset(currency_group)
currency_group.create_dataset(
CODE,
data=currencies.values.astype(dtype="S3"),
)
def _write_data_group(self, country_group, frames, scaling_factors, chunks):
"""Write /country/data"""
data_group = country_group.create_group(DATA)
self._log_writing_dataset(data_group)
for field in FIELDS:
frame = frames[field]
# Sort rows by increasing sid, and columns by increasing date.
frame.sort_index(inplace=True)
frame.sort_index(axis="columns", inplace=True)
data = coerce_to_uint32(
frame.T.fillna(0).values,
scaling_factors[field],
)
dataset = data_group.create_dataset(
field,
compression="lzf",
shuffle=True,
data=data,
chunks=chunks,
)
self._log_writing_dataset(dataset)
dataset.attrs[SCALING_FACTOR] = scaling_factors[field]
log.debug("Writing dataset {} to file {}", dataset.name, self._filename)
def _log_writing_dataset(self, dataset):
log.debug("Writing {} to file {}", dataset.name, self._filename)
def compute_asset_lifetimes(frames):
"""
Parameters
----------
frames : dict[str, pd.DataFrame]
A dict mapping each OHLCV field to a dataframe with a row for
each date and a column for each sid, as passed to write().
Returns
-------
start_date_ixs : np.array[int64]
The index of the first date with non-nan values, for each sid.
end_date_ixs : np.array[int64]
The index of the last date with non-nan values, for each sid.
"""
# Build a 2D array (dates x sids), where an entry is True if all
# fields are nan for the given day and sid.
is_null_matrix = np.logical_and.reduce(
[frames[field].isnull().values for field in FIELDS],
)
if not is_null_matrix.size:
empty = np.array([], dtype="int64")
return empty, empty.copy()
    # Offset of the first non-null value from the start of the input.
start_date_ixs = is_null_matrix.argmin(axis=0)
    # Offset of the last non-null value from the **end** of the input.
end_offsets = is_null_matrix[::-1].argmin(axis=0)
    # Offset of the last non-null value from the start of the input.
end_date_ixs = is_null_matrix.shape[0] - end_offsets - 1
return start_date_ixs, end_date_ixs
def convert_price_with_scaling_factor(a, scaling_factor):
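    # The writer stores missing values as 0 (frames are written with
    # ``fillna(0)``), so zeros are mapped back to NaN before rescaling the
    # uint32 data to floats.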
conversion_factor = 1.0 / scaling_factor
zeroes = a == 0
return np.where(zeroes, np.nan, a.astype("float64")) * conversion_factor
class HDF5DailyBarReader(CurrencyAwareSessionBarReader):
"""
Parameters
    ----------
country_group : h5py.Group
The group for a single country in an HDF5 daily pricing file.
"""
def __init__(self, country_group):
self._country_group = country_group
self._postprocessors = {
OPEN: partial(
convert_price_with_scaling_factor,
scaling_factor=self._read_scaling_factor(OPEN),
),
HIGH: partial(
convert_price_with_scaling_factor,
scaling_factor=self._read_scaling_factor(HIGH),
),
LOW: partial(
convert_price_with_scaling_factor,
scaling_factor=self._read_scaling_factor(LOW),
),
CLOSE: partial(
convert_price_with_scaling_factor,
scaling_factor=self._read_scaling_factor(CLOSE),
),
VOLUME: lambda a: a,
}
@classmethod
def from_file(cls, h5_file, country_code):
"""
Construct from an h5py.File and a country code.
Parameters
----------
h5_file : h5py.File
An HDF5 daily pricing file.
country_code : str
The ISO 3166 alpha-2 country code for the country to read.
"""
if h5_file.attrs["version"] != VERSION:
raise ValueError(
"mismatched version: file is of version %s, expected %s"
% (
h5_file.attrs["version"],
VERSION,
),
)
return cls(h5_file[country_code])
@classmethod
def from_path(cls, path, country_code):
"""
Construct from a file path and a country code.
Parameters
----------
path : str
The path to an HDF5 daily pricing file.
country_code : str
The ISO 3166 alpha-2 country code for the country to read.
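        Examples
        --------
        A minimal sketch; the file path and country code are placeholders:
        .. code-block:: python
            reader = HDF5DailyBarReader.from_path(
                "/path/to/daily_bars.h5", "US"
            )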
"""
return cls.from_file(h5py.File(path), country_code)
def _read_scaling_factor(self, field):
return self._country_group[DATA][field].attrs[SCALING_FACTOR]
def load_raw_arrays(self, columns, start_date, end_date, assets):
"""
Parameters
----------
columns : list of str
'open', 'high', 'low', 'close', or 'volume'
start_date: Timestamp
Beginning of the window range.
end_date: Timestamp
End of the window range.
assets : list of int
The asset identifiers in the window.
Returns
-------
list of np.ndarray
A list with an entry per field of ndarrays with shape
            (sessions in range, sids) with a dtype of float64, containing the
values for the respective field over start and end dt range.
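        Examples
        --------
        A hedged sketch, assuming ``reader`` was built via ``from_path``
        and that the placeholder sessions and sids exist in the file:
        .. code-block:: python
            import pandas as pd
            closes, volumes = reader.load_raw_arrays(
                ["close", "volume"],
                pd.Timestamp("2020-01-02"),
                pd.Timestamp("2020-12-31"),
                assets=[1, 2, 3],
            )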
"""
self._validate_timestamp(start_date)
self._validate_timestamp(end_date)
start = start_date.asm8
end = end_date.asm8
date_slice = self._compute_date_range_slice(start, end)
n_dates = date_slice.stop - date_slice.start
# Create a buffer into which we'll read data from the h5 file.
# Allocate an extra row of space that will always contain null values.
# We'll use that space to provide "data" for entries in ``assets`` that
# are unknown to us.
full_buf = np.zeros((len(self.sids) + 1, n_dates), dtype=np.uint32)
# We'll only read values into this portion of the read buf.
mutable_buf = full_buf[:-1]
# Indexer that converts an array aligned to self.sids (which is what we
# pull from the h5 file) into an array aligned to ``assets``.
#
# Unknown assets will have an index of -1, which means they'll always
# pull from the last row of the read buffer. We allocated an extra
# empty row above so that these lookups will cause us to fill our
# output buffer with "null" values.
sid_selector = self._make_sid_selector(assets)
out = []
for column in columns:
# Zero the buffer to prepare to receive new data.
mutable_buf.fill(0)
dataset = self._country_group[DATA][column]
# Fill the mutable portion of our buffer with data from the file.
dataset.read_direct(
mutable_buf,
np.s_[:, date_slice],
)
# Select data from the **full buffer**. Unknown assets will pull
# from the last row, which is always empty.
out.append(self._postprocessors[column](full_buf[sid_selector].T))
return out
def _make_sid_selector(self, assets):
"""
Build an indexer mapping ``self.sids`` to ``assets``.
Parameters
----------
assets : list[int]
List of assets requested by a caller of ``load_raw_arrays``.
Returns
-------
index : np.array[int64]
Index array containing the index in ``self.sids`` for each location
in ``assets``. Entries in ``assets`` for which we don't have a sid
            will contain -1. It is the caller's responsibility to handle these
values correctly.
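        Examples
        --------
        A small illustration, assuming ``self.sids`` is ``array([1, 3, 5])``:
        .. code-block:: python
            reader._make_sid_selector([3, 4, 5])
            # -> array([ 1, -1,  2])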
"""
assets = np.array(assets)
sid_selector = self.sids.searchsorted(assets)
unknown = np.in1d(assets, self.sids, invert=True)
sid_selector[unknown] = -1
return sid_selector
def _compute_date_range_slice(self, start_date, end_date):
# Get the index of the start of dates for ``start_date``.
start_ix = self.dates.searchsorted(start_date)
# Get the index of the start of the first date **after** end_date.
end_ix = self.dates.searchsorted(end_date, side="right")
return slice(start_ix, end_ix)
def _validate_assets(self, assets):
"""Validate that asset identifiers are contained in the daily bars.
Parameters
----------
assets : array-like[int]
The asset identifiers to validate.
Raises
------
NoDataForSid
If one or more of the provided asset identifiers are not
contained in the daily bars.
"""
missing_sids = np.setdiff1d(assets, self.sids)
if len(missing_sids):
raise NoDataForSid(
"Assets not contained in daily pricing file: {}".format(missing_sids)
)
def _validate_timestamp(self, ts):
if ts.asm8 not in self.dates:
raise NoDataOnDate(ts)
@lazyval
def dates(self):
return self._country_group[INDEX][DAY][:].astype("datetime64[ns]")
@lazyval
def sids(self):
return self._country_group[INDEX][SID][:].astype("int64", copy=False)
@lazyval
def asset_start_dates(self):
return self.dates[self._country_group[LIFETIMES][START_DATE][:]]
@lazyval
def asset_end_dates(self):
return self.dates[self._country_group[LIFETIMES][END_DATE][:]]
@lazyval
def _currency_codes(self):
bytes_array = self._country_group[CURRENCY][CODE][:]
return bytes_array_to_native_str_object_array(bytes_array)
def currency_codes(self, sids):
"""Get currencies in which prices are quoted for the requested sids.
Parameters
----------
sids : np.array[int64]
Array of sids for which currencies are needed.
Returns
-------
currency_codes : np.array[object]
Array of currency codes for listing currencies of ``sids``.
"""
# Find the index of requested sids in our stored sids.
ixs = self.sids.searchsorted(sids, side="left")
result = self._currency_codes[ixs]
        # If a lookup fails, searchsorted returns the insertion index (the
        # position of the next-highest sid). Detect those misses and fill
        # them with the special "missing" sentinel.
not_found = self.sids[ixs] != sids
result[not_found] = None
return result
@property
def last_available_dt(self):
"""
Returns
-------
dt : pd.Timestamp
The last session for which the reader can provide data.
"""
return pd.Timestamp(self.dates[-1], tz="UTC")
@property
def trading_calendar(self):
"""
Returns the zipline.utils.calendar.trading_calendar used to read
the data. Can be None (if the writer didn't specify it).
"""
raise NotImplementedError(
"HDF5 pricing does not yet support trading calendars."
)
@property
def first_trading_day(self):
"""
Returns
-------
dt : pd.Timestamp
The first trading day (session) for which the reader can provide
data.
"""
return pd.Timestamp(self.dates[0], tz="UTC")
@lazyval
def sessions(self):
"""
Returns
-------
sessions : DatetimeIndex
All session labels (unioning the range for all assets) which the
reader can provide.
"""
return pd.to_datetime(self.dates, utc=True)
def get_value(self, sid, dt, field):
"""
Retrieve the value at the given coordinates.
Parameters
----------
sid : int
The asset identifier.
dt : pd.Timestamp
The timestamp for the desired data point.
field : string
            The OHLCV name for the desired data point.
Returns
-------
value : float|int
The value at the given coordinates, ``float`` for OHLC, ``int``
for 'volume'.
Raises
------
NoDataOnDate
If the given dt is not a valid market minute (in minute mode) or
            session (in daily mode) according to this reader's trading calendar.
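        Examples
        --------
        A minimal sketch; the sid and session are placeholders that must
        exist in the file:
        .. code-block:: python
            import pandas as pd
            close = reader.get_value(1, pd.Timestamp("2020-01-02"), "close")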
"""
self._validate_assets([sid])
self._validate_timestamp(dt)
sid_ix = self.sids.searchsorted(sid)
dt_ix = self.dates.searchsorted(dt.asm8)
value = self._postprocessors[field](
self._country_group[DATA][field][sid_ix, dt_ix]
)
# When the value is nan, this dt may be outside the asset's lifetime.
# If that's the case, the proper NoDataOnDate exception is raised.
# Otherwise (when there's just a hole in the middle of the data), the
# nan is returned.
if np.isnan(value):
if dt.asm8 < self.asset_start_dates[sid_ix]:
raise NoDataBeforeDate()
if dt.asm8 > self.asset_end_dates[sid_ix]:
raise NoDataAfterDate()
return value
def get_last_traded_dt(self, asset, dt):
"""
Get the latest day on or before ``dt`` in which ``asset`` traded.
If there are no trades on or before ``dt``, returns ``pd.NaT``.
Parameters
----------
asset : zipline.asset.Asset
The asset for which to get the last traded day.
dt : pd.Timestamp
The dt at which to start searching for the last traded day.
Returns
-------
last_traded : pd.Timestamp
The day of the last trade for the given asset, using the
input dt as a vantage point.
"""
sid_ix = self.sids.searchsorted(asset.sid)
# Used to get a slice of all dates up to and including ``dt``.
dt_limit_ix = self.dates.searchsorted(dt.asm8, side="right")
# Get the indices of all dates with nonzero volume.
nonzero_volume_ixs = np.ravel(
np.nonzero(self._country_group[DATA][VOLUME][sid_ix, :dt_limit_ix])
)
if len(nonzero_volume_ixs) == 0:
return pd.NaT
return pd.Timestamp(self.dates[nonzero_volume_ixs][-1], tz="UTC")
class MultiCountryDailyBarReader(CurrencyAwareSessionBarReader):
"""
Parameters
    ----------
readers : dict[str -> SessionBarReader]
A dict mapping country codes to SessionBarReader instances to
service each country.
"""
def __init__(self, readers):
self._readers = readers
self._country_map = pd.concat(
[
pd.Series(index=reader.sids, data=country_code)
for country_code, reader in readers.items()
]
)
@classmethod
def from_file(cls, h5_file):
"""
Construct from an h5py.File.
Parameters
----------
h5_file : h5py.File
An HDF5 daily pricing file.
"""
return cls(
{
country: HDF5DailyBarReader.from_file(h5_file, country)
for country in h5_file.keys()
}
)
@classmethod
def from_path(cls, path):
"""
Construct from a file path.
Parameters
----------
path : str
Path to an HDF5 daily pricing file.
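        Examples
        --------
        A minimal sketch; the path is a placeholder:
        .. code-block:: python
            reader = MultiCountryDailyBarReader.from_path(
                "/path/to/daily_bars.h5"
            )
            reader.countries  # set-like view of the available country codes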
"""
return cls.from_file(h5py.File(path))
@property
def countries(self):
"""A set-like object of the country codes supplied by this reader."""
return self._readers.keys()
def _country_code_for_assets(self, assets):
country_codes = self._country_map.reindex(assets)
        # reindex() fills labels that are not in the index with NaN; the
        # dropna() below discards them.
if country_codes is not None:
unique_country_codes = country_codes.dropna().unique()
num_countries = len(unique_country_codes)
else:
num_countries = 0
if num_countries == 0:
raise ValueError("At least one valid asset id is required.")
elif num_countries > 1:
raise NotImplementedError(
(
"Assets were requested from multiple countries ({}),"
" but multi-country reads are not yet supported."
).format(list(unique_country_codes))
)
return unique_country_codes.item()
def load_raw_arrays(self, columns, start_date, end_date, assets):
"""
Parameters
----------
columns : list of str
'open', 'high', 'low', 'close', or 'volume'
start_date: Timestamp
Beginning of the window range.
end_date: Timestamp
End of the window range.
assets : list of int
The asset identifiers in the window.
Returns
-------
list of np.ndarray
A list with an entry per field of ndarrays with shape
            (sessions in range, sids) with a dtype of float64, containing the
values for the respective field over start and end dt range.
"""
country_code = self._country_code_for_assets(assets)
return self._readers[country_code].load_raw_arrays(
columns,
start_date,
end_date,
assets,
)
@property
def last_available_dt(self):
"""
Returns
-------
dt : pd.Timestamp
The last session for which the reader can provide data.
"""
return max(reader.last_available_dt for reader in self._readers.values())
@property
def trading_calendar(self):
"""
Returns the zipline.utils.calendar.trading_calendar used to read
the data. Can be None (if the writer didn't specify it).
"""
raise NotImplementedError(
"HDF5 pricing does not yet support trading calendars."
)
@property
def first_trading_day(self):
"""
Returns
-------
dt : pd.Timestamp
The first trading day (session) for which the reader can provide
data.
"""
return min(reader.first_trading_day for reader in self._readers.values())
@property
def sessions(self):
"""
Returns
-------
sessions : DatetimeIndex
All session labels (unioning the range for all assets) which the
reader can provide.
"""
return pd.to_datetime(
reduce(
np.union1d,
(reader.dates for reader in self._readers.values()),
),
utc=True,
)
def get_value(self, sid, dt, field):
"""
Retrieve the value at the given coordinates.
Parameters
----------
sid : int
The asset identifier.
dt : pd.Timestamp
The timestamp for the desired data point.
field : string
            The OHLCV name for the desired data point.
Returns
-------
value : float|int
The value at the given coordinates, ``float`` for OHLC, ``int``
for 'volume'.
Raises
------
NoDataOnDate
If the given dt is not a valid market minute (in minute mode) or
            session (in daily mode) according to this reader's trading calendar.
NoDataForSid
If the given sid is not valid.
"""
try:
country_code = self._country_code_for_assets([sid])
except ValueError as exc:
raise NoDataForSid(
"Asset not contained in daily pricing file: {}".format(sid)
) from exc
return self._readers[country_code].get_value(sid, dt, field)
def get_last_traded_dt(self, asset, dt):
"""
Get the latest day on or before ``dt`` in which ``asset`` traded.
If there are no trades on or before ``dt``, returns ``pd.NaT``.
Parameters
----------
asset : zipline.asset.Asset
The asset for which to get the last traded day.
dt : pd.Timestamp
The dt at which to start searching for the last traded day.
Returns
-------
last_traded : pd.Timestamp
The day of the last trade for the given asset, using the
input dt as a vantage point.
"""
country_code = self._country_code_for_assets([asset.sid])
return self._readers[country_code].get_last_traded_dt(asset, dt)
def currency_codes(self, sids):
"""Get currencies in which prices are quoted for the requested sids.
Assumes that a sid's prices are always quoted in a single currency.
Parameters
----------
sids : np.array[int64]
Array of sids for which currencies are needed.
Returns
-------
        currency_codes : np.array[object]
Array of currency codes for listing currencies of ``sids``.
"""
country_code = self._country_code_for_assets(sids)
return self._readers[country_code].currency_codes(sids)
def check_sids_arrays_match(left, right, message):
"""Check that two 1d arrays of sids are equal"""
if len(left) != len(right):
raise ValueError(
"{}:\nlen(left) ({}) != len(right) ({})".format(
message, len(left), len(right)
)
)
diff = left != right
if diff.any():
(bad_locs,) = np.where(diff)
raise ValueError("{}:\n Indices with differences: {}".format(message, bad_locs)) | zipline-tej | /zipline_tej-0.0.48-cp38-cp38-win_amd64.whl/zipline/data/hdf5_daily_bars.py | hdf5_daily_bars.py |
from abc import ABCMeta, abstractmethod, abstractproperty
class NoDataOnDate(Exception):
"""
Raised when a spot price cannot be found for the sid and date.
"""
pass
class NoDataBeforeDate(NoDataOnDate):
pass
class NoDataAfterDate(NoDataOnDate):
pass
class NoDataForSid(Exception):
"""
Raised when the requested sid is missing from the pricing data.
"""
pass
OHLCV = ("open", "high", "low", "close", "volume")
class BarReader(object, metaclass=ABCMeta):
@abstractproperty
def data_frequency(self):
pass
@abstractmethod
def load_raw_arrays(self, columns, start_date, end_date, assets):
"""
Parameters
----------
columns : list of str
'open', 'high', 'low', 'close', or 'volume'
start_date: Timestamp
Beginning of the window range.
end_date: Timestamp
End of the window range.
assets : list of int
The asset identifiers in the window.
Returns
-------
list of np.ndarray
A list with an entry per field of ndarrays with shape
(minutes in range, sids) with a dtype of float64, containing the
values for the respective field over start and end dt range.
"""
pass
@abstractproperty
def last_available_dt(self):
"""
Returns
-------
dt : pd.Timestamp
The last session for which the reader can provide data.
"""
pass
@abstractproperty
def trading_calendar(self):
"""
Returns the zipline.utils.calendar.trading_calendar used to read
the data. Can be None (if the writer didn't specify it).
"""
pass
@abstractproperty
def first_trading_day(self):
"""
Returns
-------
dt : pd.Timestamp
The first trading day (session) for which the reader can provide
data.
"""
pass
@abstractmethod
def get_value(self, sid, dt, field):
"""
Retrieve the value at the given coordinates.
Parameters
----------
sid : int
The asset identifier.
dt : pd.Timestamp
The timestamp for the desired data point.
field : string
The OHLVC name for the desired data point.
Returns
-------
value : float|int
The value at the given coordinates, ``float`` for OHLC, ``int``
for 'volume'.
Raises
------
NoDataOnDate
If the given dt is not a valid market minute (in minute mode) or
            session (in daily mode) according to this reader's trading calendar.
"""
pass
@abstractmethod
def get_last_traded_dt(self, asset, dt):
"""
Get the latest minute on or before ``dt`` in which ``asset`` traded.
If there are no trades on or before ``dt``, returns ``pd.NaT``.
Parameters
----------
asset : zipline.asset.Asset
The asset for which to get the last traded minute.
dt : pd.Timestamp
The minute at which to start searching for the last traded minute.
Returns
-------
last_traded : pd.Timestamp
The dt of the last trade for the given asset, using the input
dt as a vantage point.
"""
pass | zipline-tej | /zipline_tej-0.0.48-cp38-cp38-win_amd64.whl/zipline/data/bar_reader.py | bar_reader.py |
from io import BytesIO
import os
from click import progressbar
import logging
import pandas as pd
import requests
from zipline.utils.calendar_utils import register_calendar_alias
from datetime import datetime
import numpy as np
import re
import tejapi
from .core import register
from time import sleep , mktime
log = logging.getLogger(__name__)
tejapi.ApiConfig.page_limit = 10000
ONE_MEGABYTE = 1024 * 1024
def load_data_table(file, index_col, show_progress=False):
    """Select and rename pricing columns from the raw TEJ price table."""
if show_progress:
log.info("Parsing raw data.")
data_table = file[
['ticker',
'date',
'open',
'high',
'low',
'close',
'volume',
'ex-dividend',
'split_ratio',
'out_pay',
'news_d',
'lastreg',
'div_percent',
]
]
data_table.rename(
columns={
"ticker": "symbol",
"ex-dividend": "ex_dividend",
},
inplace=True,
copy=False,
)
return data_table
def fetch_data_table(api_key, show_progress, coid, mdate):
"""Fetch Prices data table from TEJ"""
tejapi.ApiConfig.api_key = api_key
if show_progress:
log.info("Downloading TEJ metadata.")
try :
metadata = tejapi.fastget('TWN/APIPRCD', coid =coid ,mdate = mdate,opts = {'columns': ['mdate','coid', 'open_d', 'low_d', 'high_d', 'close_d', 'vol', 'adjfac_a']}, paginate=True)
if metadata.size == 0 :
raise ValueError("Did not fetch any metadata. Please check the correctness of your ticker and mdate.")
metadata['vol'] = metadata['vol'] * 1000
cash_dividend = tejapi.fastget('TWN/ADIV', coid = coid ,mdate= mdate,opts = {'columns':['coid','div','mdate','out_pay','news_d','lastreg']},paginate=True)
cash_back = tejapi.fastget('TWN/ASTK1',coid = coid,mdate = mdate , opts = {'columns':['coid','mdate','x_cap_date','x_lastreg', 'cc_pay_date', 'ashback']},paginate=True)
cash_back = cash_back.loc[cash_back.ashback != 0,['coid','ashback','mdate','x_cap_date','x_lastreg','cc_pay_date']].rename({'x_cap_date':'news_d','x_lastreg':'lastreg','cc_pay_date':'out_pay','ashback':'div'},axis=1)
cash_back['cash_back'] = True
cash_dividend['div_percent'] = 0
cash_dividend = pd.concat([cash_dividend,cash_back])
        # After the concat, rows that came from the dividend table have NaN
        # in 'cash_back'; mark them False explicitly.
        cash_dividend['cash_back'] = cash_dividend['cash_back'].fillna(False)
if cash_dividend.size > 0 :
adjusted_cash_dividends = cash_dividend.groupby(['coid','mdate',])[['div']].sum().join(cash_dividend.set_index(['coid','mdate']),lsuffix = '_correct')
dividend_percentage = adjusted_cash_dividends.loc[adjusted_cash_dividends['cash_back'] != True,['div','div_correct',]]
dividend_percentage['div_percent'] = dividend_percentage.groupby(['coid','mdate'])['div'].sum() / dividend_percentage['div_correct']
adjusted_cash_dividends = adjusted_cash_dividends.join(dividend_percentage[['div_percent']], how = 'left', lsuffix = '_ignore')
adjusted_cash_dividends['div_percent'] = adjusted_cash_dividends['div_percent'].fillna(0)
adjusted_cash_dividends['div'] = adjusted_cash_dividends['div_correct']
del adjusted_cash_dividends['div_correct'] , adjusted_cash_dividends['div_percent_ignore']
adjusted_cash_dividends = adjusted_cash_dividends.loc[(adjusted_cash_dividends['cash_back'] != True) | (adjusted_cash_dividends['div_percent'] == 0 ) ]
adjusted_cash_dividends = adjusted_cash_dividends[~adjusted_cash_dividends.index.duplicated()]
cash_dividend = adjusted_cash_dividends
del adjusted_cash_dividends
metadata = metadata.merge(cash_dividend, on = ['coid','mdate'],how = 'left')
first_list = metadata.drop_duplicates(subset = ['coid'],keep = 'first').index.tolist()
last_list = metadata.drop_duplicates(subset = ['coid'],keep = 'last').index.tolist()
metadata['adjfac_a2'] = (metadata['adjfac_a'].copy().shift(1)).bfill()
metadata['split_ratio'] = metadata['adjfac_a2'] / metadata['adjfac_a']
metadata.loc[first_list,'split_ratio'] = 1
metadata.loc[last_list,'split_ratio'] = 1
metadata['split_ratio'] = 1 / metadata['split_ratio']
del metadata['adjfac_a'],metadata['adjfac_a2']
metadata = metadata.rename({'coid':'ticker','open_d':'open','low_d':'low','high_d':'high','close_d':'close','vol':'volume','div':'ex-dividend'},axis =1)
metadata["ex-dividend"].fillna(0,inplace= True)
metadata.loc[first_list,'ex-dividend'] = 0
metadata['date'] = metadata['mdate'].apply(lambda x : pd.Timestamp(x.strftime('%Y%m%d')) if not pd.isna(x) else pd.NaT)
metadata['out_pay'] = pd.to_datetime(metadata['out_pay'])
# if out_pay is NaT then set default out_pay day = T+21
metadata.loc[(metadata['ex-dividend'] != 0)&(metadata['out_pay'].isna()),'out_pay'] = metadata.loc[(metadata['ex-dividend'] != 0)&(metadata['out_pay'].isna())].apply(lambda x : (x['mdate'] + pd.Timedelta(days = 21)) ,axis= 1)
metadata['news_d'] = pd.to_datetime(metadata['news_d'])
metadata['lastreg'] = pd.to_datetime(metadata['lastreg'])
del metadata['mdate'], metadata['cash_back']
except Exception as e :
raise ValueError(f'Error occurs while downloading metadata due to {e} .')
return load_data_table(
file=metadata,
index_col=None,
show_progress=show_progress,
)
def gen_asset_metadata(data, show_progress):
if show_progress:
log.info("Generating asset metadata.")
data = data.groupby(by="symbol").agg({"date": [np.min, np.max]})
data.reset_index(inplace=True)
data["start_date"] = data.date.amin
data["end_date"] = data.date.amax
del data["date"]
data.columns = data.columns.get_level_values(0)
data["exchange"] = "TEJ_XTAI"
data["auto_close_date"] = data["end_date"].values + pd.Timedelta(days=1)
return data
def parse_splits(data, show_progress):
if show_progress:
log.info("Parsing split data.")
data["split_ratio"] = 1.0 / data.split_ratio
data.rename(
columns={
"split_ratio": "ratio",
"date": "effective_date",
},
inplace=True,
copy=False,
)
return data
def parse_stock_dividend_payouts(data, show_progress) :
if show_progress :
log.info("Parsing stock dividend payouts.")
data['payment_sid'] = data['sid']
    data['ex_date'] = data['declare_date'] = data['record_date'] = data['payout_date'] = pd.NaT
data['ratio'] = pd.NA
return data
def parse_dividends(data, show_progress):
if show_progress:
log.info("Parsing dividend data.")
data['pay_date'] = data['out_pay']
data["record_date"] = data['lastreg']
data["declared_date"] = data['news_d']
# data["record_date"] = data["declared_date"] = data["pay_date"] = pd.NaT
del data['out_pay'], data['news_d'] , data['lastreg']
data.rename(
columns={
"ex_dividend": "amount",
"date": "ex_date",
},
inplace=True,
copy=False,
)
return data
def parse_pricing_and_vol(data, sessions, symbol_map):
for asset_id, symbol in symbol_map.items():
asset_data = (
data.xs(symbol, level=1).reindex(sessions.tz_localize(None)).fillna(0.0)
)
yield asset_id, asset_data
def tej_bundle(
environ,
asset_db_writer,
minute_bar_writer,
daily_bar_writer,
adjustment_writer,
calendar,
start_session,
end_session,
cache,
show_progress,
output_dir,
):
"""
    tej_bundle builds a daily pricing bundle from the TEJ API, combining
    the TWN/APIPRCD price table with the TWN/ADIV and TWN/ASTK1 dividend
    and capital-reduction tables. The TEJAPI_KEY, ticker and mdate
    environment variables supply the API key, company list and date range.
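    Examples
    --------
    A hedged sketch of driving this bundle from Python; the API key,
    tickers and dates are placeholders, and ``ingest`` is the bundle-core
    entry point re-exported by ``zipline.data.bundles``:
    .. code-block:: python
        import os
        from zipline.data.bundles import ingest
        os.environ["TEJAPI_KEY"] = "<your key>"
        os.environ["ticker"] = "2330,2317"
        os.environ["mdate"] = "20200101,20221231"
        ingest("tquant")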
"""
api_key = environ.get('TEJAPI_KEY')
coid = environ.get('ticker')
if coid.lower() == "all" :
        confirm = input("Warning: you are about to download data for every company, which may take a long time and use a lot of API quota.\nEnter y to continue [y/N]: ")
if confirm.lower() == "y" :
coid = None
else :
raise ValueError(
"Please reset company id in your environment variable and retry."
)
elif coid :
coid = re.split('[,; ]',coid)
else :
raise ValueError(
"Please set company id in your environment variable and retry."
)
mdate = environ.get('mdate')
if mdate :
mdate = re.split('[,; ]',mdate)
if len(mdate) == 1 :
mdate.append(pd.to_datetime(datetime.today()).strftime('%Y%m%d'))
elif len(mdate) > 2 :
            raise IndexError(
                "mdate must contain at most two dates; please reset mdate."
            )
mdate = {'gte':mdate[0],'lte':mdate[1]}
if api_key is None:
raise ValueError(
"Please set your TEJAPI_KEY environment variable and retry."
)
source_csv = os.environ.get('raw_source')
csv_output_path = os.path.join(output_dir,'raw.csv')
raw_data = fetch_data_table(
api_key, show_progress , coid , mdate
)
if source_csv :
source_csv = os.path.join(source_csv,'raw.csv')
origin_raw = pd.read_csv(source_csv,dtype = {'symbol':str,})
origin_raw['out_pay'] = origin_raw['out_pay'].fillna(pd.NaT)
origin_raw['news_d'] = origin_raw['news_d'].fillna(pd.NaT)
origin_raw['lastreg'] = origin_raw['lastreg'].fillna(pd.NaT)
raw_data = pd.concat([raw_data,origin_raw])
raw_data['date'] = raw_data['date'].apply(pd.to_datetime)
raw_data = raw_data.drop_duplicates(subset = ['symbol','date'])
raw_data = raw_data.reset_index(drop = True)
raw_data.to_csv(csv_output_path,index = False)
asset_metadata = gen_asset_metadata(raw_data[["symbol", "date"]], show_progress)
exchanges = pd.DataFrame(
data=[["TEJ_XTAI", "TEJ_XTAI", "TW"]],
columns=["exchange", "canonical_name", "country_code"],
)
asset_db_writer.write(equities=asset_metadata, exchanges=exchanges)
symbol_map = asset_metadata.symbol
sessions = calendar.sessions_in_range(start_session, end_session)
raw_data.set_index(["date", "symbol"], inplace=True)
clean_raw_data = raw_data[['open','high','low','close','volume','split_ratio','ex_dividend']].copy()
daily_bar_writer.write(
parse_pricing_and_vol(clean_raw_data, sessions, symbol_map),
show_progress=show_progress,
)
raw_data.reset_index(inplace=True)
raw_data["symbol"] = raw_data["symbol"].astype("category")
raw_data["sid"] = raw_data.symbol.cat.codes
adjustment_writer.write(
splits=parse_splits(
raw_data[
[
"sid",
"date",
"split_ratio",
]
].loc[raw_data.split_ratio != 1],
show_progress=show_progress,
),
dividends=parse_dividends(
raw_data[
[
"sid",
"date",
"ex_dividend",
'out_pay',
'news_d',
'lastreg',
'div_percent',
]
].loc[raw_data.ex_dividend != 0],
show_progress=show_progress,
),
# stock_dividend_payouts = parse_stock_dividend_payouts(
# raw_data[
# [
# "sid",
# "date",
# ]
        # ].loc[raw_data.split_ratio != 1]  # TODO: revisit this filter
# ,show_progress= show_progress)
)
def download_with_progress(url, chunk_size, **progress_kwargs):
"""
Download streaming data from a URL, printing progress information to the
terminal.
Parameters
----------
url : str
A URL that can be understood by ``requests.get``.
chunk_size : int
Number of bytes to read at a time from requests.
**progress_kwargs
Forwarded to click.progressbar.
Returns
-------
data : BytesIO
A BytesIO containing the downloaded data.
"""
resp = requests.get(url, stream=True)
resp.raise_for_status()
total_size = int(resp.headers["content-length"])
data = BytesIO()
with progressbar(length=total_size, **progress_kwargs) as pbar:
for chunk in resp.iter_content(chunk_size=chunk_size):
data.write(chunk)
pbar.update(len(chunk))
data.seek(0)
return data
def download_without_progress(url):
"""
Download data from a URL, returning a BytesIO containing the loaded data.
Parameters
----------
url : str
A URL that can be understood by ``requests.get``.
Returns
-------
data : BytesIO
A BytesIO containing the downloaded data.
"""
resp = requests.get(url)
resp.raise_for_status()
return BytesIO(resp.content)
register(
name = 'tquant',
f = tej_bundle,
calendar_name='TEJ',
start_session = None,
end_session = None,
minutes_per_day = 390,
create_writers = True,
)
register_calendar_alias("tquant", "TEJ") | zipline-tej | /zipline_tej-0.0.48-cp38-cp38-win_amd64.whl/zipline/data/bundles/tquant.py | tquant.py |
import os
import sys
import pandas as pd #20230109 (by MRC)
from logbook import Logger, StreamHandler
from numpy import empty
from pandas import DataFrame, read_csv, Index, Timedelta, NaT
from zipline.utils.calendar_utils import register_calendar_alias
from zipline.utils.cli import maybe_show_progress
from . import core as bundles
handler = StreamHandler(sys.stdout, format_string=" | {record.message}")
logger = Logger(__name__)
logger.handlers.append(handler)
def csvdir_equities(tframes=None, csvdir=None):
"""
Generate an ingest function for custom data bundle
This function can be used in ~/.zipline/extension.py
to register bundle with custom parameters, e.g. with
a custom trading calendar.
Parameters
----------
tframes: tuple, optional
The data time frames, supported timeframes: 'daily' and 'minute'
csvdir : string, optional, default: CSVDIR environment variable
The path to the directory of this structure:
<directory>/<timeframe1>/<symbol1>.csv
<directory>/<timeframe1>/<symbol2>.csv
<directory>/<timeframe1>/<symbol3>.csv
<directory>/<timeframe2>/<symbol1>.csv
<directory>/<timeframe2>/<symbol2>.csv
<directory>/<timeframe2>/<symbol3>.csv
Returns
-------
ingest : callable
The bundle ingest function
Examples
--------
This code should be added to ~/.zipline/extension.py
.. code-block:: python
from zipline.data.bundles import csvdir_equities, register
register('custom-csvdir-bundle',
csvdir_equities(["daily", "minute"],
'/full/path/to/the/csvdir/directory'))
"""
return CSVDIRBundle(tframes, csvdir).ingest
class CSVDIRBundle:
"""
Wrapper class to call csvdir_bundle with provided
list of time frames and a path to the csvdir directory
"""
def __init__(self, tframes=None, csvdir=None):
self.tframes = tframes
self.csvdir = csvdir
def ingest(
self,
environ,
asset_db_writer,
minute_bar_writer,
daily_bar_writer,
adjustment_writer,
calendar,
start_session,
end_session,
cache,
show_progress,
output_dir,
):
csvdir_bundle(
environ,
asset_db_writer,
minute_bar_writer,
daily_bar_writer,
adjustment_writer,
calendar,
start_session,
end_session,
cache,
show_progress,
output_dir,
self.tframes,
self.csvdir,
)
@bundles.register("csvdir")
def csvdir_bundle(
environ,
asset_db_writer,
minute_bar_writer,
daily_bar_writer,
adjustment_writer,
calendar,
start_session,
end_session,
cache,
show_progress,
output_dir,
tframes=None,
csvdir=None,
):
"""
Build a zipline data bundle from the directory with csv files.
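    Each ``<symbol>.csv`` file needs a leading date column (used as the
    index) plus ``open``, ``high``, ``low``, ``close`` and ``volume``
    columns; optional ``dividend`` and ``split`` columns feed the
    adjustments database. A hedged sketch of the expected layout (values
    are illustrative only)::
        date,open,high,low,close,volume,dividend,split
        2020-01-02,100.0,101.0,99.5,100.5,1000000,0.0,1.0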
"""
if not csvdir:
csvdir = environ.get("CSVDIR")
if not csvdir:
raise ValueError("CSVDIR environment variable is not set")
if not os.path.isdir(csvdir):
raise ValueError("%s is not a directory" % csvdir)
if not tframes:
tframes = set(["daily", "minute"]).intersection(os.listdir(csvdir))
if not tframes:
raise ValueError(
"'daily' and 'minute' directories " "not found in '%s'" % csvdir
)
divs_splits = {
"divs": DataFrame(
columns=[
"sid",
"amount",
"ex_date",
"record_date",
"declared_date",
"pay_date",
]
),
"splits": DataFrame(columns=["sid", "ratio", "effective_date"]),
}
for tframe in tframes:
ddir = os.path.join(csvdir, tframe)
symbols = sorted(
item.split(".csv")[0] for item in os.listdir(ddir) if ".csv" in item
)
if not symbols:
raise ValueError("no <symbol>.csv* files found in %s" % ddir)
dtype = [
("start_date", "datetime64[ns]"),
("end_date", "datetime64[ns]"),
("auto_close_date", "datetime64[ns]"),
("symbol", "object"),
]
metadata = DataFrame(empty(len(symbols), dtype=dtype))
if tframe == "minute":
writer = minute_bar_writer
else:
writer = daily_bar_writer
writer.write(
_pricing_iter(ddir, symbols, metadata, divs_splits, show_progress),
show_progress=show_progress,
)
# Hardcode the exchange to "CSVDIR" for all assets and (elsewhere)
# register "CSVDIR" to resolve to the NYSE calendar, because these
# are all equities and thus can use the NYSE calendar.
#---------------------------------------------------------------------------
    #20230103 Add the country_code column to the SQL DB exchanges table (by MRC) #start
    '''
    # original code
metadata["exchange"] = "CSVDIR"
asset_db_writer.write(equities=metadata)
'''
exchanges = pd.DataFrame(
data=[["CSVDIR", "CSVDIR", "TW"]],
columns=["exchange", "canonical_name", "country_code"],
)
metadata["exchange"] = "CSVDIR"
asset_db_writer.write(equities=metadata, exchanges=exchanges)
    #20230103 Add the country_code column to the SQL DB exchanges table (by MRC) #end
#---------------------------------------------------------------------------
divs_splits["divs"]["sid"] = divs_splits["divs"]["sid"].astype(int)
divs_splits["splits"]["sid"] = divs_splits["splits"]["sid"].astype(int)
adjustment_writer.write(
splits=divs_splits["splits"], dividends=divs_splits["divs"]
)
def _pricing_iter(csvdir, symbols, metadata, divs_splits, show_progress):
with maybe_show_progress(
symbols, show_progress, label="Loading custom pricing data: "
) as it:
files = os.listdir(csvdir)
for sid, symbol in enumerate(it):
logger.debug("%s: sid %s" % (symbol, sid))
try:
fname = [fname for fname in files if "%s.csv" % symbol in fname][0]
except IndexError:
raise ValueError("%s.csv file is not in %s" % (symbol, csvdir))
dfr = read_csv(
os.path.join(csvdir, fname),
parse_dates=[0],
infer_datetime_format=True,
index_col=0,
).sort_index()
start_date = dfr.index[0]
end_date = dfr.index[-1]
# The auto_close date is the day after the last trade.
ac_date = end_date + Timedelta(days=1)
metadata.iloc[sid] = start_date, end_date, ac_date, symbol
if "split" in dfr.columns:
tmp = 1.0 / dfr[dfr["split"] != 1.0]["split"]
split = DataFrame(data=tmp.index.tolist(), columns=["effective_date"])
split["ratio"] = tmp.tolist()
split["sid"] = sid
splits = divs_splits["splits"]
index = Index(range(splits.shape[0], splits.shape[0] + split.shape[0]))
split.set_index(index, inplace=True)
divs_splits["splits"] = splits.append(split)
if "dividend" in dfr.columns:
# ex_date amount sid record_date declared_date pay_date
tmp = dfr[dfr["dividend"] != 0.0]["dividend"]
div = DataFrame(data=tmp.index.tolist(), columns=["ex_date"])
div["record_date"] = NaT
div["declared_date"] = NaT
div["pay_date"] = NaT
div["amount"] = tmp.tolist()
div["sid"] = sid
divs = divs_splits["divs"]
ind = Index(range(divs.shape[0], divs.shape[0] + div.shape[0]))
div.set_index(ind, inplace=True)
divs_splits["divs"] = divs.append(div)
yield sid, dfr
#register_calendar_alias("CSVDIR", "NYSE") #20230217 (by MRC) for pipeline
register_calendar_alias("CSVDIR", "XTAI") #20230217 (by MRC) for pipeline | zipline-tej | /zipline_tej-0.0.48-cp38-cp38-win_amd64.whl/zipline/data/bundles/csvdir.py | csvdir.py |
from collections import namedtuple
import errno
import os
import shutil
import warnings
import click
from logbook import Logger
import pandas as pd
from zipline.utils.calendar_utils import get_calendar
from toolz import curry, complement, take
from ..adjustments import SQLiteAdjustmentReader, SQLiteAdjustmentWriter
from ..bcolz_daily_bars import BcolzDailyBarReader, BcolzDailyBarWriter
from ..minute_bars import (
BcolzMinuteBarReader,
BcolzMinuteBarWriter,
)
from zipline.assets import AssetDBWriter, AssetFinder, ASSET_DB_VERSION
from zipline.assets.asset_db_migrations import downgrade
from zipline.utils.cache import (
dataframe_cache,
working_dir,
working_file,
)
from zipline.utils.compat import ExitStack, mappingproxy
from zipline.utils.input_validation import ensure_timestamp, optionally
import zipline.utils.paths as pth
from zipline.utils.preprocess import preprocess
log = Logger(__name__)
def asset_db_path(bundle_name, timestr, environ=None, db_version=None):
return pth.data_path(
asset_db_relative(bundle_name, timestr, db_version),
environ=environ,
)
def minute_equity_path(bundle_name, timestr, environ=None):
return pth.data_path(
minute_equity_relative(bundle_name, timestr),
environ=environ,
)
def daily_equity_path(bundle_name, timestr, environ=None):
return pth.data_path(
daily_equity_relative(bundle_name, timestr),
environ=environ,
)
def adjustment_db_path(bundle_name, timestr, environ=None):
return pth.data_path(
adjustment_db_relative(bundle_name, timestr),
environ=environ,
)
def cache_path(bundle_name, environ=None):
return pth.data_path(
cache_relative(bundle_name),
environ=environ,
)
def adjustment_db_relative(bundle_name, timestr):
return bundle_name, timestr, "adjustments.sqlite"
def cache_relative(bundle_name):
return bundle_name, ".cache"
def daily_equity_relative(bundle_name, timestr):
return bundle_name, timestr, "daily_equities.bcolz"
def minute_equity_relative(bundle_name, timestr):
return bundle_name, timestr, "minute_equities.bcolz"
def asset_db_relative(bundle_name, timestr, db_version=None):
db_version = ASSET_DB_VERSION if db_version is None else db_version
return bundle_name, timestr, "assets-%d.sqlite" % db_version
def to_bundle_ingest_dirname(ts):
"""Convert a pandas Timestamp into the name of the directory for the
ingestion.
Parameters
----------
ts : pandas.Timestamp
The time of the ingestions
Returns
-------
name : str
The name of the directory for this ingestion.
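    Examples
    --------
    The colon-to-semicolon substitution keeps the name valid on
    filesystems that disallow colons:
    .. code-block:: python
        import pandas as pd
        to_bundle_ingest_dirname(pd.Timestamp("2023-01-02 03:04:05"))
        # -> '2023-01-02T03;04;05'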
"""
return ts.isoformat().replace(":", ";")
def from_bundle_ingest_dirname(cs):
"""Read a bundle ingestion directory name into a pandas Timestamp.
Parameters
----------
cs : str
The name of the directory.
Returns
-------
ts : pandas.Timestamp
The time when this ingestion happened.
"""
return pd.Timestamp(cs.replace(";", ":"))
def ingestions_for_bundle(bundle, environ=None):
return sorted(
(
from_bundle_ingest_dirname(ing)
for ing in os.listdir(pth.data_path([bundle], environ))
if not pth.hidden(ing)
),
reverse=True,
)
RegisteredBundle = namedtuple(
"RegisteredBundle",
[
"calendar_name",
"start_session",
"end_session",
"minutes_per_day",
"ingest",
"create_writers",
],
)
BundleData = namedtuple(
"BundleData",
"asset_finder equity_minute_bar_reader equity_daily_bar_reader "
"adjustment_reader",
)
BundleCore = namedtuple(
"BundleCore",
"bundles register unregister ingest load clean update switch bundle_info add download_bundle_info",
)
class UnknownBundle(click.ClickException, LookupError):
"""Raised if no bundle with the given name was registered."""
exit_code = 1
def __init__(self, name):
super(UnknownBundle, self).__init__(
"No bundle registered with the name %r" % name,
)
self.name = name
def __str__(self):
return self.message
class BadClean(click.ClickException, ValueError):
"""Exception indicating that an invalid argument set was passed to
``clean``.
Parameters
----------
before, after, keep_last : any
The bad arguments to ``clean``.
See Also
--------
clean
"""
def __init__(self, before, after, keep_last):
super(BadClean, self).__init__(
"Cannot pass a combination of `before` and `after` with "
"`keep_last`. Must pass one. "
"Got: before=%r, after=%r, keep_last=%r\n"
% (
before,
after,
keep_last,
),
)
def __str__(self):
return self.message
# TODO: simplify
# flake8: noqa: C901
def _make_bundle_core():
"""Create a family of data bundle functions that read from the same
bundle mapping.
Returns
-------
bundles : mappingproxy
The mapping of bundles to bundle payloads.
register : callable
The function which registers new bundles in the ``bundles`` mapping.
unregister : callable
The function which deregisters bundles from the ``bundles`` mapping.
ingest : callable
The function which downloads and write data for a given data bundle.
load : callable
The function which loads the ingested bundles back into memory.
clean : callable
The function which cleans up data written with ``ingest``.
update: callable
The function which update data written with ``ingest``.
"""
_bundles = {} # the registered bundles
# Expose _bundles through a proxy so that users cannot mutate this
# accidentally. Users may go through `register` to update this which will
# warn when trampling another bundle.
bundles = mappingproxy(_bundles)
@curry
def register(
name,
f,
calendar_name="NYSE",
start_session=None,
end_session=None,
minutes_per_day=390,
create_writers=True,
):
"""Register a data bundle ingest function.
Parameters
----------
name : str
The name of the bundle.
f : callable
The ingest function. This function will be passed:
environ : mapping
The environment this is being run with.
asset_db_writer : AssetDBWriter
The asset db writer to write into.
minute_bar_writer : BcolzMinuteBarWriter
The minute bar writer to write into.
daily_bar_writer : BcolzDailyBarWriter
The daily bar writer to write into.
adjustment_writer : SQLiteAdjustmentWriter
The adjustment db writer to write into.
calendar : trading_calendars.TradingCalendar
The trading calendar to ingest for.
start_session : pd.Timestamp
The first session of data to ingest.
end_session : pd.Timestamp
The last session of data to ingest.
cache : DataFrameCache
A mapping object to temporarily store dataframes.
This should be used to cache intermediates in case the load
fails. This will be automatically cleaned up after a
successful load.
show_progress : bool
Show the progress for the current load where possible.
calendar_name : str, optional
The name of a calendar used to align bundle data.
Default is 'NYSE'.
start_session : pd.Timestamp, optional
The first session for which we want data. If not provided,
or if the date lies outside the range supported by the
calendar, the first_session of the calendar is used.
end_session : pd.Timestamp, optional
The last session for which we want data. If not provided,
or if the date lies outside the range supported by the
calendar, the last_session of the calendar is used.
minutes_per_day : int, optional
The number of minutes in each normal trading day.
create_writers : bool, optional
Should the ingest machinery create the writers for the ingest
function. This can be disabled as an optimization for cases where
they are not needed, like the ``quantopian-quandl`` bundle.
Notes
-----
        This function may be used as a decorator, for example:
.. code-block:: python
@register('quandl')
def quandl_ingest_function(...):
...
See Also
--------
zipline.data.bundles.bundles
"""
if name in bundles:
warnings.warn(
"Overwriting bundle with name %r" % name,
stacklevel=3,
)
# NOTE: We don't eagerly compute calendar values here because
# `register` is called at module scope in zipline, and creating a
# calendar currently takes between 0.5 and 1 seconds, which causes a
# noticeable delay on the zipline CLI.
_bundles[name] = RegisteredBundle(
calendar_name=calendar_name,
start_session=start_session,
end_session=end_session,
minutes_per_day=minutes_per_day,
ingest=f,
create_writers=create_writers,
)
return f
def unregister(name):
"""Unregister a bundle.
Parameters
----------
name : str
The name of the bundle to unregister.
Raises
------
UnknownBundle
Raised when no bundle has been registered with the given name.
See Also
--------
zipline.data.bundles.bundles
"""
try:
del _bundles[name]
except KeyError:
raise UnknownBundle(name)
def ingest(
name,
environ=os.environ,
timestamp=None,
assets_versions=(),
show_progress=True,
):
"""Ingest data for a given bundle.
Parameters
----------
name : str
The name of the bundle.
environ : mapping, optional
The environment variables. By default this is os.environ.
timestamp : datetime, optional
The timestamp to use for the load.
By default this is the current time.
assets_versions : Iterable[int], optional
Versions of the assets db to which to downgrade.
show_progress : bool, optional
Tell the ingest function to display the progress where possible.
"""
try:
bundle = bundles[name]
except KeyError:
raise UnknownBundle(name)
calendar = get_calendar(bundle.calendar_name)
start_session = bundle.start_session
end_session = bundle.end_session
if start_session is None or start_session < calendar.first_session:
start_session = calendar.first_session
if end_session is None or end_session > calendar.last_session:
end_session = calendar.last_session
if timestamp is None:
timestamp = pd.Timestamp.utcnow()
timestamp = timestamp.tz_convert("utc").tz_localize(None)
timestr = to_bundle_ingest_dirname(timestamp)
cachepath = cache_path(name, environ=environ)
pth.ensure_directory(pth.data_path([name, timestr], environ=environ))
pth.ensure_directory(cachepath)
with dataframe_cache(
cachepath, clean_on_failure=False
) as cache, ExitStack() as stack:
# we use `cleanup_on_failure=False` so that we don't purge the
# cache directory if the load fails in the middle
if bundle.create_writers:
wd = stack.enter_context(
working_dir(pth.data_path([], environ=environ))
)
daily_bars_path = wd.ensure_dir(*daily_equity_relative(name, timestr))
daily_bar_writer = BcolzDailyBarWriter(
daily_bars_path,
calendar,
start_session,
end_session,
)
# Do an empty write to ensure that the daily ctables exist
# when we create the SQLiteAdjustmentWriter below. The
# SQLiteAdjustmentWriter needs to open the daily ctables so
# that it can compute the adjustment ratios for the dividends.
daily_bar_writer.write(())
minute_bar_writer = BcolzMinuteBarWriter(
wd.ensure_dir(*minute_equity_relative(name, timestr)),
calendar,
start_session,
end_session,
minutes_per_day=bundle.minutes_per_day,
)
assets_db_path = wd.getpath(*asset_db_relative(name, timestr))
asset_db_writer = AssetDBWriter(assets_db_path)
adjustment_db_writer = stack.enter_context(
SQLiteAdjustmentWriter(
wd.getpath(*adjustment_db_relative(name, timestr)),
BcolzDailyBarReader(daily_bars_path),
overwrite=True,
)
)
else:
daily_bar_writer = None
minute_bar_writer = None
asset_db_writer = None
adjustment_db_writer = None
if assets_versions:
raise ValueError(
"Need to ingest a bundle that creates "
"writers in order to downgrade the assets"
" db."
)
log.info("Ingesting {}.", name)
bundle.ingest(
environ,
asset_db_writer,
minute_bar_writer,
daily_bar_writer,
adjustment_db_writer,
calendar,
start_session,
end_session,
cache,
show_progress,
pth.data_path([name, timestr], environ=environ),
)
for version in sorted(set(assets_versions), reverse=True):
version_path = wd.getpath(
*asset_db_relative(
name,
timestr,
db_version=version,
)
)
with working_file(version_path) as wf:
shutil.copy2(assets_db_path, wf.path)
downgrade(wf.path, version)
    def most_recent_data(bundle_name, timestamp, environ=None):
        """Get the path to the most recent data on or before ``timestamp`` for the
given bundle.
Parameters
----------
bundle_name : str
The name of the bundle to lookup.
timestamp : datetime
The timestamp to begin searching on or before.
environ : dict, optional
An environment dict to forward to zipline_root.
"""
if bundle_name not in bundles:
raise UnknownBundle(bundle_name)
try:
candidates = os.listdir(
pth.data_path([bundle_name], environ=environ),
)
return pth.data_path(
[
bundle_name,
max(
filter(complement(pth.hidden), candidates),
key=from_bundle_ingest_dirname,
),
],
environ=environ,
)
except (ValueError, OSError) as e:
if getattr(e, "errno", errno.ENOENT) != errno.ENOENT:
raise
raise ValueError(
"no data for bundle {bundle!r} on or before {timestamp}\n"
"maybe you need to run: $ zipline ingest -b {bundle}".format(
bundle=bundle_name,
timestamp=timestamp,
),
)
def load(name, environ=os.environ, timestamp=None):
"""Loads a previously ingested bundle.
Parameters
----------
name : str
The name of the bundle.
environ : mapping, optional
            The environment variables. Defaults to os.environ.
timestamp : datetime, optional
The timestamp of the data to lookup.
Defaults to the current time.
Returns
-------
bundle_data : BundleData
The raw data readers for this bundle.
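        Examples
        --------
        A minimal sketch; the bundle name is a placeholder and must
        already have been ingested:
        .. code-block:: python
            from zipline.data.bundles import load
            bundle_data = load("tquant")
            finder = bundle_data.asset_finder
            assets = finder.retrieve_all(finder.sids)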
"""
if timestamp is None:
timestamp = pd.Timestamp.utcnow()
timestr = most_recent_data(name, timestamp, environ=environ)
return BundleData(
asset_finder=AssetFinder(
asset_db_path(name, timestr, environ=environ),
),
equity_minute_bar_reader=BcolzMinuteBarReader(
minute_equity_path(name, timestr, environ=environ),
),
equity_daily_bar_reader=BcolzDailyBarReader(
daily_equity_path(name, timestr, environ=environ),
),
adjustment_reader=SQLiteAdjustmentReader(
adjustment_db_path(name, timestr, environ=environ),
),
)
@preprocess(
before=optionally(ensure_timestamp),
after=optionally(ensure_timestamp),
)
def clean(name, before=None, after=None, keep_last=None, environ=os.environ):
"""Clean up data that was created with ``ingest`` or
``$ python -m zipline ingest``
Parameters
----------
name : str
The name of the bundle to remove data for.
before : datetime, optional
Remove data ingested before this date.
This argument is mutually exclusive with: keep_last
after : datetime, optional
Remove data ingested after this date.
This argument is mutually exclusive with: keep_last
keep_last : int, optional
Remove all but the last ``keep_last`` ingestions.
This argument is mutually exclusive with:
before
after
environ : mapping, optional
            The environment variables. Defaults to os.environ.
Returns
-------
cleaned : set[str]
The names of the runs that were removed.
Raises
------
BadClean
Raised when ``before`` and or ``after`` are passed with
``keep_last``. This is a subclass of ``ValueError``.
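        Examples
        --------
        A hedged sketch; the bundle name is a placeholder:
        .. code-block:: python
            from zipline.data.bundles import clean
            # Keep only the most recent ingestion of the bundle.
            clean("tquant", keep_last=1)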
"""
try:
all_runs = sorted(
filter(
complement(pth.hidden),
os.listdir(pth.data_path([name], environ=environ)),
),
key=from_bundle_ingest_dirname,
)
except OSError as e:
if e.errno != errno.ENOENT:
raise
raise UnknownBundle(name)
if before is after is keep_last is None:
raise BadClean(before, after, keep_last)
if (before is not None or after is not None) and keep_last is not None:
raise BadClean(before, after, keep_last)
if keep_last is None:
def should_clean(name):
dt = from_bundle_ingest_dirname(name)
return (before is not None and dt < before) or (
after is not None and dt > after
)
elif keep_last >= 0:
last_n_dts = set(take(keep_last, reversed(all_runs)))
def should_clean(name):
return name not in last_n_dts
else:
raise BadClean(before, after, keep_last)
cleaned = set()
for run in all_runs:
if should_clean(run):
log.info("Cleaning {}.", run)
path = pth.data_path([name, run], environ=environ)
shutil.rmtree(path)
cleaned.add(path)
return cleaned
def update(name, environ=os.environ):
"""Update data that was created with ``ingest`` or
``$ python -m zipline ingest``
Parameters
----------
name : str
The name of the bundle to update data for.
environ : mapping, optional
            The environment variables. Defaults to os.environ.
"""
timestamp = pd.Timestamp.utcnow()
timestr = most_recent_data(name, timestamp, environ=environ)
os.environ['raw_source'] = timestr
bundle_data = load(name)
equities = (bundle_data.asset_finder.retrieve_all(bundle_data.asset_finder.sids))
tickers = [equity.symbol for equity in equities]
os.environ['ticker'] = ','.join(tickers)
min_start = min([equity.end_date for equity in equities]) + pd.Timedelta(days = 1)
min_start_str = min_start.strftime('%Y%m%d')
os.environ['mdate'] = min_start_str
try :
ingest(name,os.environ,timestamp=timestamp,assets_versions=(),show_progress= True)
except Exception as e :
            log.info(f"Cannot update due to {e}.")
no_tz_timestamp = timestamp.tz_convert("utc").tz_localize(None)
rm_path = pth.data_path([name,to_bundle_ingest_dirname(no_tz_timestamp)])
shutil.rmtree(rm_path)
def switch( name , time = None , environ = os.environ ) :
timestamp = pd.Timestamp.utcnow()
timestamp = timestamp.tz_convert("utc").tz_localize(None)
time = pd.Timestamp(time)
time = to_bundle_ingest_dirname(time)
old_path = pth.data_path([name,time])
timestr = to_bundle_ingest_dirname(timestamp)
new_path = pth.data_path([name,timestr])
os.rename(old_path , new_path)
def bundle_info(name , time = None , environ = os.environ ):
if time is None :
time = pd.Timestamp.utcnow()
timestr = most_recent_data(name, time, environ=environ)
else :
time = pd.Timestamp(time)
timestr = to_bundle_ingest_dirname(time)
bundle_data = BundleData(
asset_finder=AssetFinder(
asset_db_path(name, timestr, environ=environ),
),
equity_minute_bar_reader=BcolzMinuteBarReader(
minute_equity_path(name, timestr, environ=environ),
),
equity_daily_bar_reader=BcolzDailyBarReader(
daily_equity_path(name, timestr, environ=environ),
),
adjustment_reader=SQLiteAdjustmentReader(
adjustment_db_path(name, timestr, environ=environ),
),
)
equities = (bundle_data.asset_finder.retrieve_all(bundle_data.asset_finder.sids))
tickers = [equity.symbol for equity in equities]
min_start = min([equity.start_date for equity in equities])
min_start_str = min_start.strftime('%Y%m%d')
max_end = max([equity.end_date for equity in equities])
max_end_str = max_end.strftime('%Y%m%d')
click.echo("tickers :")
cnt = 1
for ticker in tickers :
if cnt == 10 :
click.echo("%s"%ticker)
cnt = 1
continue
click.echo("%s "%ticker,nl = False)
cnt +=1
click.echo("")
click.echo("start_date : %s."%min_start_str)
click.echo("end_date : %s."%max_end_str)
def add(name , company , environ = os.environ ):
timestamp = pd.Timestamp.utcnow()
timestr = most_recent_data(name, timestamp, environ=environ)
os.environ['raw_source'] = timestr
bundle_data = load(name)
equities = (bundle_data.asset_finder.retrieve_all(bundle_data.asset_finder.sids))
os.environ['ticker'] = company
min_start = min([equity.start_date for equity in equities])
min_start_str = min_start.strftime('%Y%m%d')
max_end = max([equity.end_date for equity in equities])
max_end_str = max_end.strftime('%Y%m%d')
os.environ['mdate'] = min_start_str + ',' + max_end_str
try :
ingest(name,os.environ,timestamp=timestamp,assets_versions=(),show_progress= True)
except Exception as e :
            log.info(f"Cannot add due to {e}.")
no_tz_timestamp = timestamp.tz_convert("utc").tz_localize(None)
rm_path = pth.data_path([name,to_bundle_ingest_dirname(no_tz_timestamp)])
shutil.rmtree(rm_path)
def download_bundle_info(name , time = None , environ = os.environ ):
time = pd.Timestamp(time)
timestr = to_bundle_ingest_dirname(time)
bundle_data = BundleData(
asset_finder=AssetFinder(
asset_db_path(name, timestr, environ=environ),
),
equity_minute_bar_reader=BcolzMinuteBarReader(
minute_equity_path(name, timestr, environ=environ),
),
equity_daily_bar_reader=BcolzDailyBarReader(
daily_equity_path(name, timestr, environ=environ),
),
adjustment_reader=SQLiteAdjustmentReader(
adjustment_db_path(name, timestr, environ=environ),
),
)
equities = (bundle_data.asset_finder.retrieve_all(bundle_data.asset_finder.sids))
tickers = [equity.symbol for equity in equities]
min_start = min([equity.start_date for equity in equities])
min_start_str = min_start.strftime('%Y%m%d')
max_end = max([equity.end_date for equity in equities])
max_end_str = max_end.strftime('%Y%m%d')
dic = dict({'name':[name],'timestr':[timestr],'timestamp':[time],'tickers': [tickers] , 'start_date': [min_start_str] , 'end_date' : [max_end_str]})
return pd.DataFrame.from_dict(dic,orient = 'columns')
return BundleCore(bundles, register, unregister, ingest, load, clean, update , switch , bundle_info , add ,download_bundle_info)
bundles, register, unregister, ingest, load, clean , update , switch , bundle_info , add ,download_bundle_info = _make_bundle_core() | zipline-tej | /zipline_tej-0.0.48-cp38-cp38-win_amd64.whl/zipline/data/bundles/core.py | core.py |
from io import BytesIO
import tarfile
from zipfile import ZipFile
from click import progressbar
from logbook import Logger
import pandas as pd
import requests
from urllib.parse import urlencode
from zipline.utils.calendar_utils import register_calendar_alias
from . import core as bundles
import numpy as np
log = Logger(__name__)
ONE_MEGABYTE = 1024 * 1024
QUANDL_DATA_URL = "https://www.quandl.com/api/v3/datatables/WIKI/PRICES.csv?"
def format_metadata_url(api_key):
"""Build the query URL for Quandl WIKI Prices metadata."""
query_params = [("api_key", api_key), ("qopts.export", "true")]
return QUANDL_DATA_URL + urlencode(query_params)
def load_data_table(file, index_col, show_progress=False):
"""Load data table from zip file provided by Quandl."""
with ZipFile(file) as zip_file:
file_names = zip_file.namelist()
assert len(file_names) == 1, "Expected a single file from Quandl."
wiki_prices = file_names.pop()
with zip_file.open(wiki_prices) as table_file:
if show_progress:
log.info("Parsing raw data.")
data_table = pd.read_csv(
table_file,
parse_dates=["date"],
index_col=index_col,
usecols=[
"ticker",
"date",
"open",
"high",
"low",
"close",
"volume",
"ex-dividend",
"split_ratio",
],
)
data_table.rename(
columns={
"ticker": "symbol",
"ex-dividend": "ex_dividend",
},
inplace=True,
copy=False,
)
return data_table
def fetch_data_table(api_key, show_progress, retries):
"""Fetch WIKI Prices data table from Quandl"""
for _ in range(retries):
try:
if show_progress:
log.info("Downloading WIKI metadata.")
metadata = pd.read_csv(format_metadata_url(api_key))
# Extract link from metadata and download zip file.
table_url = metadata.loc[0, "file.link"]
if show_progress:
raw_file = download_with_progress(
table_url,
chunk_size=ONE_MEGABYTE,
label="Downloading WIKI Prices table from Quandl",
)
else:
raw_file = download_without_progress(table_url)
return load_data_table(
file=raw_file,
index_col=None,
show_progress=show_progress,
)
except Exception:
log.exception("Exception raised reading Quandl data. Retrying.")
else:
raise ValueError(
"Failed to download Quandl data after %d attempts." % (retries)
)
def gen_asset_metadata(data, show_progress):
    """Build the asset metadata frame (symbol, start/end dates, exchange,
    and auto-close date) from the raw (symbol, date) pairs."""
    if show_progress:
        log.info("Generating asset metadata.")
    data = data.groupby(by="symbol").agg({"date": [np.min, np.max]})
    data.reset_index(inplace=True)
    # Older pandas surfaces the numpy aggregators as the 'amin'/'amax'
    # second-level column labels, which is what these lookups rely on.
    data["start_date"] = data.date.amin
    data["end_date"] = data.date.amax
    del data["date"]
    data.columns = data.columns.get_level_values(0)
    data["exchange"] = "QUANDL"
    data["auto_close_date"] = data["end_date"].values + pd.Timedelta(days=1)
    return data
def parse_splits(data, show_progress):
if show_progress:
log.info("Parsing split data.")
data["split_ratio"] = 1.0 / data.split_ratio
data.rename(
columns={
"split_ratio": "ratio",
"date": "effective_date",
},
inplace=True,
copy=False,
)
return data
def parse_dividends(data, show_progress):
if show_progress:
log.info("Parsing dividend data.")
data["record_date"] = data["declared_date"] = data["pay_date"] = pd.NaT
data.rename(
columns={
"ex_dividend": "amount",
"date": "ex_date",
},
inplace=True,
copy=False,
)
return data
def parse_pricing_and_vol(data, sessions, symbol_map):
for asset_id, symbol in symbol_map.items():
asset_data = (
data.xs(symbol, level=1).reindex(sessions.tz_localize(None)).fillna(0.0)
)
yield asset_id, asset_data
@bundles.register("quandl")
def quandl_bundle(
environ,
asset_db_writer,
minute_bar_writer,
daily_bar_writer,
adjustment_writer,
calendar,
start_session,
end_session,
cache,
show_progress,
output_dir,
):
"""
quandl_bundle builds a daily dataset using Quandl's WIKI Prices dataset.
For more information on Quandl's API and how to obtain an API key,
please visit https://docs.quandl.com/docs#section-authentication
"""
api_key = environ.get("QUANDL_API_KEY")
if api_key is None:
raise ValueError(
"Please set your QUANDL_API_KEY environment variable and retry."
)
raw_data = fetch_data_table(
api_key, show_progress, environ.get("QUANDL_DOWNLOAD_ATTEMPTS", 5)
)
asset_metadata = gen_asset_metadata(raw_data[["symbol", "date"]], show_progress)
exchanges = pd.DataFrame(
data=[["QUANDL", "QUANDL", "US"]],
columns=["exchange", "canonical_name", "country_code"],
)
asset_db_writer.write(equities=asset_metadata, exchanges=exchanges)
symbol_map = asset_metadata.symbol
sessions = calendar.sessions_in_range(start_session, end_session)
raw_data.set_index(["date", "symbol"], inplace=True)
daily_bar_writer.write(
parse_pricing_and_vol(raw_data, sessions, symbol_map),
show_progress=show_progress,
)
raw_data.reset_index(inplace=True)
raw_data["symbol"] = raw_data["symbol"].astype("category")
raw_data["sid"] = raw_data.symbol.cat.codes
adjustment_writer.write(
splits=parse_splits(
raw_data[
[
"sid",
"date",
"split_ratio",
]
].loc[raw_data.split_ratio != 1],
show_progress=show_progress,
),
dividends=parse_dividends(
raw_data[
[
"sid",
"date",
"ex_dividend",
]
].loc[raw_data.ex_dividend != 0],
show_progress=show_progress,
),
)
def download_with_progress(url, chunk_size, **progress_kwargs):
"""
Download streaming data from a URL, printing progress information to the
terminal.
Parameters
----------
url : str
A URL that can be understood by ``requests.get``.
chunk_size : int
Number of bytes to read at a time from requests.
**progress_kwargs
Forwarded to click.progressbar.
Returns
-------
data : BytesIO
A BytesIO containing the downloaded data.
"""
resp = requests.get(url, stream=True)
resp.raise_for_status()
total_size = int(resp.headers["content-length"])
data = BytesIO()
with progressbar(length=total_size, **progress_kwargs) as pbar:
for chunk in resp.iter_content(chunk_size=chunk_size):
data.write(chunk)
pbar.update(len(chunk))
data.seek(0)
return data
def download_without_progress(url):
"""
Download data from a URL, returning a BytesIO containing the loaded data.
Parameters
----------
url : str
A URL that can be understood by ``requests.get``.
Returns
-------
data : BytesIO
A BytesIO containing the downloaded data.
"""
resp = requests.get(url)
resp.raise_for_status()
return BytesIO(resp.content)
QUANTOPIAN_QUANDL_URL = "https://s3.amazonaws.com/quantopian-public-zipline-data/quandl"
@bundles.register("quantopian-quandl", create_writers=False)
def quantopian_quandl_bundle(
environ,
asset_db_writer,
minute_bar_writer,
daily_bar_writer,
adjustment_writer,
calendar,
start_session,
end_session,
cache,
show_progress,
output_dir,
):
if show_progress:
data = download_with_progress(
QUANTOPIAN_QUANDL_URL,
chunk_size=ONE_MEGABYTE,
label="Downloading Bundle: quantopian-quandl",
)
else:
data = download_without_progress(QUANTOPIAN_QUANDL_URL)
with tarfile.open("r", fileobj=data) as tar:
if show_progress:
log.info("Writing data to %s." % output_dir)
tar.extractall(output_dir)
register_calendar_alias("QUANDL", "NYSE") | zipline-tej | /zipline_tej-0.0.48-cp38-cp38-win_amd64.whl/zipline/data/bundles/quandl.py | quandl.py |
from interface import default, Interface
import numpy as np
import pandas as pd
from zipline.utils.date_utils import make_utc_aware
from zipline.utils.sentinel import sentinel
from zipline.lib._factorize import factorize_strings
DEFAULT_FX_RATE = sentinel("DEFAULT_FX_RATE")
class FXRateReader(Interface):
"""
Interface for reading foreign exchange (fx) rates.
An FX rate reader contains one or more distinct "rates", each of which
corresponds to a collection of mappings from (quote, base, dt) ->
float. The value produced for a given (quote, base, dt) triple is the
exchange rate to use when converting from ``base`` to ``quote`` on ``dt``.
The specific set of rates contained in a particular reader is
user-defined. We infer no particular semantics from their names, other than
that they are distinct rates. Examples of possible rate names might be
things like "bid", "mid", and "ask", or "london_close", "tokyo_close",
"nyse_close".
Implementations of :class:`FXRateReader` must provide at least one method::
def get_rates(self, rate, quote, bases, dts):
which takes a rate, a quote currency, an array of base currencies, and an
array of dts, and produces a (len(dts), len(base))-shape array containing a
conversion rates for all pairs in the cartesian product of bases and dts.
Given a definition of :meth:`get_rates`, this interface automatically
generates two additional methods::
def get_rates_scalar(self, rate, quote, base, dt):
and::
def get_rates_columnar(self, rate, quote, bases, dts):
:meth:`get_rates_scalar` takes scalar-valued ``base`` and ``dt`` values,
and returns a scalar float value for the requested fx rate.
:meth:`get_rates_columnar` takes parallel arrays of ``bases`` and ``dts``
and returns a same-length array of fx rates by performing a lookup on the
(base, dt) pairs drawn from zipping together ``bases``, and ``dts``. In
other words, its behavior is equivalent to::
        def get_rates_columnar(self, rate, quote, bases, dts):
out = []
for base, dt in zip(bases, dts):
out.append(self.get_rate_scalar(rate, quote, base, dt))
return np.array(out)
"""
def get_rates(self, rate, quote, bases, dts):
"""
Load a 2D array of fx rates.
Parameters
----------
rate : str
Name of the rate to load.
quote : str
Currency code of the currency to convert into.
bases : np.array[object]
Array of codes of the currencies to convert from. The same currency
may appear multiple times.
dts : pd.DatetimeIndex
Datetimes for which to load rates. Must be sorted in ascending
order and localized to UTC.
Returns
-------
rates : np.array
Array of shape ``(len(dts), len(bases))`` containing foreign
exchange rates mapping currencies from ``bases`` to ``quote``.
The row at index i corresponds to the dt in dts[i].
The column at index j corresponds to the base currency in bases[j].
"""
@default
def get_rate_scalar(self, rate, quote, base, dt):
"""
Load a scalar FX rate value.
Parameters
----------
rate : str
Name of the rate to load.
quote : str
Currency code of the currency to convert into.
base : str
Currency code of the currency to convert from.
dt : np.datetime64 or pd.Timestamp
Datetime on which to load rate.
Returns
-------
rate : np.float64
Exchange rate from base -> quote on dt.
"""
rates_2d = self.get_rates(
rate,
quote,
bases=np.array([base], dtype=object),
dts=make_utc_aware(pd.DatetimeIndex([dt])),
)
return rates_2d[0, 0]
@default
def get_rates_columnar(self, rate, quote, bases, dts):
"""
Load a 1D array of FX rates.
Parameters
----------
rate : str
Name of the rate to load.
quote : str
Currency code of the currency to convert into.
bases : np.array[object]
Array of codes of the currencies to convert from. The same currency
may appear multiple times.
        dts : pd.DatetimeIndex
            Datetimes for which to load rates. The same value may appear
            multiple times. Datetimes do not need to be sorted.

        Returns
        -------
        rates : np.array
            1D array of the same length as ``bases`` and ``dts``, where
            ``rates[i]`` is the rate to convert from ``bases[i]`` to ``quote``
            on ``dts[i]``.
        """
if len(bases) != len(dts):
raise ValueError(
"len(bases) ({}) != len(dts) ({})".format(len(bases), len(dts))
)
bases_ix, unique_bases, _ = factorize_strings(
bases,
missing_value=None,
# Only dts need to be sorted, not bases.
sort=False,
)
# NOTE: np.unique returns unique_dts in sorted order, which is required
# for calling get_rates.
unique_dts, dts_ix = np.unique(dts.values, return_inverse=True)
rates_2d = self.get_rates(
rate, quote, unique_bases, pd.DatetimeIndex(unique_dts, tz="utc")
)
return rates_2d[dts_ix, bases_ix] | zipline-tej | /zipline_tej-0.0.48-cp38-cp38-win_amd64.whl/zipline/data/fx/base.py | base.py |
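# Illustrative usage (a sketch, not part of the original module): given any
# concrete ``reader`` implementing FXRateReader, the derived helpers above can
# be called as below. The rate name "mid" is an assumption; available rate
# names are reader-specific.
#
# >>> import numpy as np
# >>> import pandas as pd
# >>> reader.get_rate_scalar("mid", "USD", "EUR", pd.Timestamp("2018-01-02"))
# >>> reader.get_rates_columnar(
# ...     "mid",
# ...     "USD",
# ...     bases=np.array(["EUR", "JPY", "EUR"], dtype=object),
# ...     dts=pd.DatetimeIndex(["2018-01-02", "2018-01-03", "2018-01-03"]),
# ... )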
from interface import implements
import h5py
from logbook import Logger
import numpy as np
import pandas as pd
from zipline.utils.memoize import lazyval
from zipline.utils.numpy_utils import bytes_array_to_native_str_object_array
from .base import FXRateReader, DEFAULT_FX_RATE
from .utils import check_dts, is_sorted_ascending
HDF5_FX_VERSION = 0
HDF5_FX_DEFAULT_CHUNK_SIZE = 75
INDEX = "index"
DATA = "data"
CURRENCIES = "currencies"
DTS = "dts"
RATES = "rates"
log = Logger(__name__)
class HDF5FXRateReader(implements(FXRateReader)):
"""An FXRateReader backed by HDF5.
Parameters
----------
group : h5py.Group
Top-level group written by an :class:`HDF5FXRateWriter`.
default_rate : str
Rate to use when ``get_rates`` is called requesting the default rate.
"""
def __init__(self, group, default_rate):
self._group = group
self._default_rate = default_rate
if self.version != HDF5_FX_VERSION:
raise ValueError(
"FX Reader version ({}) != File Version ({})".format(
HDF5_FX_VERSION,
self.version,
)
)
@classmethod
def from_path(cls, path, default_rate):
"""
Construct from a file path.
Parameters
----------
path : str
Path to an HDF5 fx rates file.
default_rate : str
Rate to use when ``get_rates`` is called requesting the default
rate.
"""
return cls(h5py.File(path), default_rate=default_rate)
@lazyval
def version(self):
try:
return self._group.attrs["version"]
except KeyError:
# TODO: Remove this.
return 0
@lazyval
def dts(self):
"""Column labels for rate groups."""
raw_dts = self._group[INDEX][DTS][:].astype("M8[ns]")
if not is_sorted_ascending(raw_dts):
raise ValueError("dts are not sorted for {}!".format(self._group))
return pd.DatetimeIndex(raw_dts, tz="UTC")
@lazyval
def currencies(self):
"""Row labels for rate groups."""
# Currencies are stored as fixed-length bytes in the file, but we want
# `str` objects in memory.
bytes_array = self._group[INDEX][CURRENCIES][:]
objects = bytes_array_to_native_str_object_array(bytes_array)
return pd.Index(objects)
def get_rates(self, rate, quote, bases, dts):
"""Get rates to convert ``bases`` into ``quote``.
See :class:`zipline.data.fx.base.FXRateReader` for details.
"""
if rate == DEFAULT_FX_RATE:
rate = self._default_rate
check_dts(dts)
col_ixs = self.dts.searchsorted(dts, side="right") - 1
row_ixs = self.currencies.get_indexer(bases)
try:
dataset = self._group[DATA][rate][quote][RATES]
except KeyError:
raise ValueError(
"FX rates not available for rate={}, quote_currency={}.".format(
rate, quote
)
)
# OPTIMIZATION: Column indices correspond to dates, which must be in
# sorted order. Rather than reading the entire dataset from h5, we can
# read just the interval from min_col to max_col inclusive
#
# However, we also need to handle two important edge cases:
#
# 1. row_ixs contains -1 for any currencies we don't know about.
# 2. col_ixs contains -1 for dts before the start of self.dts.
#
# If either of the above cases obtains, we want to return NaN for the
# corresponding output locations.
# We handle each of these cases by reading raw data into a buffer with
# one extra column and one extra row. When we then permute the raw data
# into the correct order, any row or column indices with values of -1
# will pull from the extra row/column, which will always contain NaN.
slice_begin = max(col_ixs[0], 0)
slice_end = max(col_ixs[-1], 0) + 1 # +1 to be inclusive of end date.
# Allocate a buffer full of NaNs with one extra column and row. See
# OPTIMIZATION notes above.
buf = np.full(
(len(self.currencies) + 1, slice_end - slice_begin + 1),
np.nan,
)
buf[:-1, :-1] = dataset[:, slice_begin:slice_end]
# Permute the rows into place, pulling from the empty NaN locations for
# row and column indices of -1.
out = buf[:, col_ixs - slice_begin][row_ixs]
# Transpose everything to maintain dts as row labels, currencies as col
# labels which is expected everywhere else.
return out.transpose()
class HDF5FXRateWriter(object):
"""Writer class for HDF5 files consumed by HDF5FXRateReader."""
def __init__(self, group, date_chunk_size=HDF5_FX_DEFAULT_CHUNK_SIZE):
self._group = group
self._date_chunk_size = date_chunk_size
def write(self, dts, currencies, data):
"""Write data to the file.
Parameters
----------
dts : pd.DatetimeIndex
Index of row labels for rates to be written.
currencies : np.array[object]
Array of column labels for rates to be written.
data : iterator[(str, str, np.array[float64])]
Iterator of (rate, quote_currency, array) tuples. Each array
should be of shape ``(len(dts), len(currencies))``, and should
contain a table of rates where each column is a timeseries of rates
mapping its column label's currency to ``quote_currency``.
"""
if len(currencies):
chunks = (len(currencies), min(self._date_chunk_size, len(dts)))
else:
# h5py crashes if we provide chunks for empty data.
chunks = None
self._write_metadata()
self._write_index_group(dts, currencies)
self._write_data_group(dts, currencies, data, chunks)
def _write_metadata(self):
self._group.attrs["version"] = HDF5_FX_VERSION
self._group.attrs["last_updated_utc"] = str(pd.Timestamp.utcnow())
def _write_index_group(self, dts, currencies):
"""Write content of /index."""
if not is_sorted_ascending(dts):
raise ValueError("dts is not sorted")
for c in currencies:
if not isinstance(c, str) or len(c) != 3:
raise ValueError("Invalid currency: {!r}".format(c))
index_group = self._group.create_group(INDEX)
self._log_writing(INDEX, DTS)
index_group.create_dataset(DTS, data=dts.astype("int64"))
self._log_writing(INDEX, CURRENCIES)
index_group.create_dataset(CURRENCIES, data=currencies.astype("S3"))
def _write_data_group(self, dts, currencies, data, chunks):
"""Write content of /data."""
data_group = self._group.create_group(DATA)
expected_shape = (len(dts), len(currencies))
for rate, quote, array in data:
if array.shape != expected_shape:
raise ValueError(
"Unexpected shape for rate={}, quote={}."
"\nExpected shape: {}. Got {}.".format(
rate, quote, expected_shape, array.shape
)
)
self._log_writing(DATA, rate, quote)
target = data_group.require_group("/".join((rate, quote)))
# Transpose the rates array so that the hdf5 file holds arrays
# with currencies as row labels and dates as column labels. This
# helps with compression, as the *rows* (rather than the columns)
# all have similar values, which lends itself to the HDF5 file's
# C-contiguous storage.
target.create_dataset(
RATES,
data=array.transpose(),
chunks=chunks,
compression="lzf",
shuffle=True,
)
def _log_writing(self, *path):
log.debug("Writing {}", "/".join(path)) | zipline-tej | /zipline_tej-0.0.48-cp38-cp38-win_amd64.whl/zipline/data/fx/hdf5.py | hdf5.py |
import sys
from textwrap import dedent
class _Sentinel(object):
"""Base class for Sentinel objects."""
__slots__ = ("__weakref__",)
def is_sentinel(obj):
return isinstance(obj, _Sentinel)
def sentinel(name, doc=None):
    """Create a unique, memoized sentinel object named ``name``.

    Repeated calls with the same ``name`` and ``doc`` return the cached
    instance; reusing a name with a different docstring raises ValueError.
    """
    try:
value = sentinel._cache[name] # memoized
except KeyError:
pass
else:
if doc == value.__doc__:
return value
raise ValueError(
dedent(
"""\
New sentinel value %r conflicts with an existing sentinel of the
same name.
Old sentinel docstring: %r
New sentinel docstring: %r
The old sentinel was created at: %s
Resolve this conflict by changing the name of one of the sentinels.
""",
)
% (name, value.__doc__, doc, value._created_at)
)
try:
frame = sys._getframe(1)
except ValueError:
frame = None
if frame is None:
created_at = "<unknown>"
else:
created_at = "%s:%s" % (frame.f_code.co_filename, frame.f_lineno)
@object.__new__ # bind a single instance to the name 'Sentinel'
class Sentinel(_Sentinel):
__doc__ = doc
__name__ = name
# store created_at so that we can report this in case of a duplicate
# name violation
_created_at = created_at
def __new__(cls):
raise TypeError("cannot create %r instances" % name)
def __repr__(self):
return "sentinel(%r)" % name
def __reduce__(self):
return sentinel, (name, doc)
def __deepcopy__(self, _memo):
return self
def __copy__(self):
return self
cls = type(Sentinel)
try:
cls.__module__ = frame.f_globals["__name__"]
except (AttributeError, KeyError):
# Couldn't get the name from the calling scope, just use None.
# AttributeError is when frame is None, KeyError is when f_globals
# doesn't hold '__name__'
cls.__module__ = None
sentinel._cache[name] = Sentinel # cache result
return Sentinel
sentinel._cache = {} | zipline-tej | /zipline_tej-0.0.48-cp38-cp38-win_amd64.whl/zipline/utils/sentinel.py | sentinel.py |
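# Illustrative usage (not part of the original module):
#
# >>> MISSING = sentinel("MISSING", "Placeholder for an argument that was not passed.")
# >>> MISSING
# sentinel('MISSING')
# >>> is_sentinel(MISSING)
# True
# >>> sentinel("MISSING", "Placeholder for an argument that was not passed.") is MISSING
# True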
import datetime
from copy import deepcopy
import numpy as np
import pandas as pd
def _ensure_index(x):
if not isinstance(x, pd.Index):
x = pd.Index(sorted(x))
return x
class RollingPanel(object):
"""
    Preallocation strategy for a rolling window over an expanding data set,
    backed by a ``pd.Panel`` buffer.

    Restrictions: major_axis can only be a DatetimeIndex for now.
"""
def __init__(
self,
window,
items,
sids,
cap_multiple=2,
dtype=np.float64,
initial_dates=None,
):
self._pos = window
self._window = window
self.items = _ensure_index(items)
self.minor_axis = _ensure_index(sids)
self.cap_multiple = cap_multiple
self.dtype = dtype
if initial_dates is None:
self.date_buf = np.empty(self.cap, dtype="M8[ns]") * pd.NaT
elif len(initial_dates) != window:
raise ValueError("initial_dates must be of length window")
else:
self.date_buf = np.hstack(
(
initial_dates,
np.empty(
window * (cap_multiple - 1),
dtype="datetime64[ns]",
),
),
)
self.buffer = self._create_buffer()
@property
def cap(self):
return self.cap_multiple * self._window
@property
def _start_index(self):
return self._pos - self._window
@property
def start_date(self):
return self.date_buf[self._start_index]
def oldest_frame(self, raw=False):
"""
Get the oldest frame in the panel.
"""
if raw:
return self.buffer.values[:, self._start_index, :]
return self.buffer.iloc[:, self._start_index, :]
def set_minor_axis(self, minor_axis):
self.minor_axis = _ensure_index(minor_axis)
self.buffer = self.buffer.reindex(minor_axis=self.minor_axis)
def set_items(self, items):
self.items = _ensure_index(items)
self.buffer = self.buffer.reindex(items=self.items)
def _create_buffer(self):
panel = pd.Panel(
items=self.items,
minor_axis=self.minor_axis,
major_axis=range(self.cap),
dtype=self.dtype,
)
return panel
def extend_back(self, missing_dts):
"""
        Prepend ``missing_dts`` to the window, growing the window and the
        underlying buffer by ``len(missing_dts)`` and filling the new slots
        with NaNs.
"""
delta = len(missing_dts)
if not delta:
raise ValueError(
"missing_dts must be a non-empty index",
)
self._window += delta
self._pos += delta
self.date_buf = self.date_buf.copy()
self.date_buf.resize(self.cap)
self.date_buf = np.roll(self.date_buf, delta)
old_vals = self.buffer.values
shape = old_vals.shape
nan_arr = np.empty((shape[0], delta, shape[2]))
nan_arr.fill(np.nan)
new_vals = np.column_stack(
(
nan_arr,
old_vals,
np.empty((shape[0], delta * (self.cap_multiple - 1), shape[2])),
),
)
self.buffer = pd.Panel(
data=new_vals,
items=self.items,
minor_axis=self.minor_axis,
major_axis=np.arange(self.cap),
dtype=self.dtype,
)
# Fill the delta with the dates we calculated.
where = slice(self._start_index, self._start_index + delta)
self.date_buf[where] = missing_dts
def add_frame(self, tick, frame, minor_axis=None, items=None):
""" """
if self._pos == self.cap:
self._roll_data()
values = frame
if isinstance(frame, pd.DataFrame):
values = frame.values
self.buffer.values[:, self._pos, :] = values.astype(self.dtype)
self.date_buf[self._pos] = tick
self._pos += 1
def get_current(self, item=None, raw=False, start=None, end=None):
"""
Get a Panel that is the current data in view. It is not safe to persist
these objects because internal data might change
"""
item_indexer = slice(None)
if item:
item_indexer = self.items.get_loc(item)
start_index = self._start_index
end_index = self._pos
        # get initial date window
where = slice(start_index, end_index)
current_dates = self.date_buf[where]
def convert_datelike_to_long(dt):
if isinstance(dt, pd.Timestamp):
return dt.asm8
if isinstance(dt, datetime.datetime):
return np.datetime64(dt)
return dt
# constrict further by date
if start:
start = convert_datelike_to_long(start)
start_index += current_dates.searchsorted(start)
if end:
end = convert_datelike_to_long(end)
_end = current_dates.searchsorted(end, "right")
end_index -= len(current_dates) - _end
where = slice(start_index, end_index)
values = self.buffer.values[item_indexer, where, :]
current_dates = self.date_buf[where]
if raw:
# return copy so we can change it without side effects here
return values.copy()
major_axis = pd.DatetimeIndex(deepcopy(current_dates), tz="utc")
if values.ndim == 3:
return pd.Panel(
values,
self.items,
major_axis,
self.minor_axis,
dtype=self.dtype,
)
elif values.ndim == 2:
return pd.DataFrame(
values, major_axis, self.minor_axis, dtype=self.dtype
)
def set_current(self, panel):
"""
Set the values stored in our current in-view data to be values of the
passed panel. The passed panel must have the same indices as the panel
that would be returned by self.get_current.
"""
where = slice(self._start_index, self._pos)
self.buffer.values[:, where, :] = panel.values
def current_dates(self):
where = slice(self._start_index, self._pos)
return pd.DatetimeIndex(deepcopy(self.date_buf[where]), tz="utc")
def _roll_data(self):
"""
Roll window worth of data up to position zero.
Save the effort of having to expensively roll at each iteration
"""
self.buffer.values[:, : self._window, :] = self.buffer.values[
:, -self._window :, :
]
self.date_buf[: self._window] = self.date_buf[-self._window :]
self._pos = self._window
@property
def window_length(self):
return self._window | zipline-tej | /zipline_tej-0.0.48-cp38-cp38-win_amd64.whl/zipline/utils/data.py | data.py |
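# Illustrative usage (a sketch, not part of the original module). RollingPanel
# relies on ``pd.Panel``, so this only runs on pandas versions that still ship
# Panel; the items, sids, and values below are assumptions:
#
# >>> import pandas as pd
# >>> rp = RollingPanel(window=2, items=["price"], sids=[1, 2])
# >>> frame = pd.DataFrame([[10.0, 20.0]], index=["price"], columns=[1, 2])
# >>> rp.add_frame(pd.Timestamp("2014-01-02", tz="utc"), frame)
# >>> rp.add_frame(pd.Timestamp("2014-01-03", tz="utc"), frame + 1.0)
# >>> rp.get_current()["price"]      # dates x sids DataFrame of the window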
from functools import reduce
from operator import itemgetter
from pprint import pformat
from toolz import curry, flip
from .sentinel import sentinel
@curry
def apply(f, *args, **kwargs):
"""Apply a function to arguments.
Parameters
----------
f : callable
The function to call.
    *args, **kwargs
        Arguments to feed to the callable.
Returns
-------
a : any
The result of ``f(*args, **kwargs)``
Examples
--------
>>> from toolz.curried.operator import add, sub
>>> fs = add(1), sub(1)
>>> tuple(map(apply, fs, (1, 2)))
(2, -1)
Class decorator
>>> instance = apply
>>> @instance
... class obj:
... def f(self):
... return 'f'
...
>>> obj.f()
'f'
>>> issubclass(obj, object)
Traceback (most recent call last):
...
TypeError: issubclass() arg 1 must be a class
>>> isinstance(obj, type)
False
See Also
--------
unpack_apply
mapply
"""
return f(*args, **kwargs)
# Alias for use as a class decorator.
instance = apply
def mapall(funcs, seq):
"""
Parameters
----------
funcs : iterable[function]
Sequence of functions to map over `seq`.
seq : iterable
Sequence over which to map funcs.
Yields
------
elem : object
Concatenated result of mapping each ``func`` over ``seq``.
Examples
--------
>>> list(mapall([lambda x: x + 1, lambda x: x - 1], [1, 2, 3]))
[2, 3, 4, 0, 1, 2]
"""
for func in funcs:
for elem in seq:
yield func(elem)
def same(*values):
"""
Check if all values in a sequence are equal.
Returns True on empty sequences.
Examples
--------
>>> same(1, 1, 1, 1)
True
>>> same(1, 2, 1)
False
>>> same()
True
"""
if not values:
return True
first, rest = values[0], values[1:]
return all(value == first for value in rest)
def _format_unequal_keys(dicts):
return pformat([sorted(d.keys()) for d in dicts])
def dzip_exact(*dicts):
"""
Parameters
----------
*dicts : iterable[dict]
A sequence of dicts all sharing the same keys.
Returns
-------
zipped : dict
A dict whose keys are the union of all keys in *dicts, and whose values
are tuples of length len(dicts) containing the result of looking up
each key in each dict.
Raises
------
ValueError
If dicts don't all have the same keys.
Examples
--------
>>> result = dzip_exact({'a': 1, 'b': 2}, {'a': 3, 'b': 4})
>>> result == {'a': (1, 3), 'b': (2, 4)}
True
"""
if not same(*map(dict.keys, dicts)):
raise ValueError("dict keys not all equal:\n\n%s" % _format_unequal_keys(dicts))
return {k: tuple(d[k] for d in dicts) for k in dicts[0]}
def _gen_unzip(it, elem_len):
"""Helper for unzip which checks the lengths of each element in it.
Parameters
----------
    it : iterable[tuple]
        An iterable of tuples. ``unzip`` maps ``tuple`` over its input before
        calling this helper, so the elements are already tuples.
    elem_len : int or None
        The expected element length. If this is None it is inferred from the
        length of the first element.
Yields
------
elem : tuple
Each element of ``it``.
Raises
------
ValueError
Raised when the lengths do not match the ``elem_len``.
"""
try:
elem = next(it)
except StopIteration:
return (), ()
first_elem_len = len(elem)
if elem_len is not None and elem_len != first_elem_len:
raise ValueError(
"element at index 0 was length %d, expected %d"
% (
first_elem_len,
elem_len,
)
)
else:
elem_len = first_elem_len
yield elem
for n, elem in enumerate(it, 1):
if len(elem) != elem_len:
raise ValueError(
"element at index %d was length %d, expected %d"
% (
n,
len(elem),
elem_len,
),
)
yield elem
def unzip(seq, elem_len=None):
"""Unzip a length n sequence of length m sequences into m seperate length
n sequences.
Parameters
----------
seq : iterable[iterable]
The sequence to unzip.
elem_len : int, optional
The expected length of each element of ``seq``. If not provided this
        will be inferred from the length of the first element of ``seq``. This
can be used to ensure that code like: ``a, b = unzip(seq)`` does not
fail even when ``seq`` is empty.
Returns
-------
seqs : iterable[iterable]
The new sequences pulled out of the first iterable.
Raises
------
ValueError
Raised when ``seq`` is empty and ``elem_len`` is not provided.
Raised when elements of ``seq`` do not match the given ``elem_len`` or
the length of the first element of ``seq``.
Examples
--------
>>> seq = [('a', 1), ('b', 2), ('c', 3)]
>>> cs, ns = unzip(seq)
>>> cs
('a', 'b', 'c')
>>> ns
(1, 2, 3)
# checks that the elements are the same length
>>> seq = [('a', 1), ('b', 2), ('c', 3, 'extra')]
>>> cs, ns = unzip(seq)
Traceback (most recent call last):
...
ValueError: element at index 2 was length 3, expected 2
    # allows an explicit element length instead of inferring
>>> seq = [('a', 1, 'extra'), ('b', 2), ('c', 3)]
>>> cs, ns = unzip(seq, 2)
Traceback (most recent call last):
...
ValueError: element at index 0 was length 3, expected 2
# handles empty sequences when a length is given
>>> cs, ns = unzip([], elem_len=2)
>>> cs == ns == ()
True
Notes
-----
This function will force ``seq`` to completion.
"""
ret = tuple(zip(*_gen_unzip(map(tuple, seq), elem_len)))
if ret:
return ret
if elem_len is None:
raise ValueError("cannot unzip empty sequence without 'elem_len'")
return ((),) * elem_len
_no_default = sentinel("_no_default")
def getattrs(value, attrs, default=_no_default):
"""
Perform a chained application of ``getattr`` on ``value`` with the values
in ``attrs``.
If ``default`` is supplied, return it if any of the attribute lookups fail.
Parameters
----------
value : object
Root of the lookup chain.
attrs : iterable[str]
Sequence of attributes to look up.
default : object, optional
Value to return if any of the lookups fail.
Returns
-------
result : object
Result of the lookup sequence.
Examples
--------
>>> class EmptyObject(object):
... pass
...
>>> obj = EmptyObject()
>>> obj.foo = EmptyObject()
>>> obj.foo.bar = "value"
>>> getattrs(obj, ('foo', 'bar'))
'value'
>>> getattrs(obj, ('foo', 'buzz'))
Traceback (most recent call last):
...
AttributeError: 'EmptyObject' object has no attribute 'buzz'
>>> getattrs(obj, ('foo', 'buzz'), 'default')
'default'
"""
try:
for attr in attrs:
value = getattr(value, attr)
except AttributeError:
if default is _no_default:
raise
value = default
return value
@curry
def set_attribute(name, value):
"""
Decorator factory for setting attributes on a function.
Doesn't change the behavior of the wrapped function.
Examples
--------
>>> @set_attribute('__name__', 'foo')
... def bar():
... return 3
...
>>> bar()
3
>>> bar.__name__
'foo'
"""
def decorator(f):
setattr(f, name, value)
return f
return decorator
# Decorators for setting the __name__ and __doc__ properties of a decorated
# function.
# Example:
with_name = set_attribute("__name__")
with_doc = set_attribute("__doc__")
def foldr(f, seq, default=_no_default):
"""Fold a function over a sequence with right associativity.
Parameters
----------
f : callable[any, any]
The function to reduce the sequence with.
The first argument will be the element of the sequence; the second
argument will be the accumulator.
seq : iterable[any]
The sequence to reduce.
default : any, optional
The starting value to reduce with. If not provided, the sequence
cannot be empty, and the last value of the sequence will be used.
Returns
-------
folded : any
The folded value.
Notes
-----
    This function works by reducing the list in a right associative way.
For example, imagine we are folding with ``operator.add`` or ``+``:
.. code-block:: python
foldr(add, seq) -> seq[0] + (seq[1] + (seq[2] + (...seq[-1], default)))
In the more general case with an arbitrary function, ``foldr`` will expand
like so:
.. code-block:: python
foldr(f, seq) -> f(seq[0], f(seq[1], f(seq[2], ...f(seq[-1], default))))
For a more in depth discussion of left and right folds, see:
`https://en.wikipedia.org/wiki/Fold_(higher-order_function)`_
The images in that page are very good for showing the differences between
``foldr`` and ``foldl`` (``reduce``).
.. note::
        For performance reasons it is best to pass a strict (non-lazy) sequence,
for example, a list.
See Also
--------
:func:`functools.reduce`
:func:`sum`
"""
return reduce(
flip(f), reversed(seq), *(default,) if default is not _no_default else ()
)
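# Illustrative example (not part of the original module) of the right
# associativity described above, using ``operator.sub``:
#
# >>> from operator import sub
# >>> foldr(sub, [1, 2, 3], 0)      # 1 - (2 - (3 - 0))
# 2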
def invert(d):
"""
Invert a dictionary into a dictionary of sets.
>>> invert({'a': 1, 'b': 2, 'c': 1}) # doctest: +SKIP
{1: {'a', 'c'}, 2: {'b'}}
"""
out = {}
for k, v in d.items():
try:
out[v].add(k)
except KeyError:
out[v] = {k}
return out
def keysorted(d):
"""Get the items from a dict, sorted by key.
Example
-------
>>> keysorted({'c': 1, 'b': 2, 'a': 3})
[('a', 3), ('b', 2), ('c', 1)]
"""
return sorted(d.items(), key=itemgetter(0)) | zipline-tej | /zipline_tej-0.0.48-cp38-cp38-win_amd64.whl/zipline/utils/functional.py | functional.py |
import re
from textwrap import dedent
from toolz import curry
PIPELINE_DOWNSAMPLING_FREQUENCY_DOC = dedent(
"""\
frequency : {'year_start', 'quarter_start', 'month_start', 'week_start'}
A string indicating desired sampling dates:
* 'year_start' -> first trading day of each year
* 'quarter_start' -> first trading day of January, April, July, October
* 'month_start' -> first trading day of each month
* 'week_start' -> first trading_day of each week
"""
)
PIPELINE_ALIAS_NAME_DOC = dedent(
"""\
name : str
The name to alias this term as.
""",
)
def pad_lines_after_first(prefix, s):
"""Apply a prefix to each line in s after the first."""
return ("\n" + prefix).join(s.splitlines())
def format_docstring(owner_name, docstring, formatters):
"""
Template ``formatters`` into ``docstring``.
Parameters
----------
owner_name : str
The name of the function or class whose docstring is being templated.
Only used for error messages.
docstring : str
The docstring to template.
formatters : dict[str -> str]
        Parameters for a str.format() call on ``docstring``.
Multi-line values in ``formatters`` will have leading whitespace padded
to match the leading whitespace of the substitution string.
"""
# Build a dict of parameters to a vanilla format() call by searching for
# each entry in **formatters and applying any leading whitespace to each
# line in the desired substitution.
format_params = {}
for target, doc_for_target in formatters.items():
# Search for '{name}', with optional leading whitespace.
regex = re.compile(r"^(\s*)" + "({" + target + "})$", re.MULTILINE)
matches = regex.findall(docstring)
if not matches:
raise ValueError(
"Couldn't find template for parameter {!r} in docstring "
"for {}."
"\nParameter name must be alone on a line surrounded by "
"braces.".format(target, owner_name),
)
elif len(matches) > 1:
raise ValueError(
"Couldn't found multiple templates for parameter {!r}"
"in docstring for {}."
"\nParameter should only appear once.".format(target, owner_name)
)
(leading_whitespace, _) = matches[0]
format_params[target] = pad_lines_after_first(
leading_whitespace,
doc_for_target,
)
return docstring.format(**format_params)
def templated_docstring(**docs):
"""
Decorator allowing the use of templated docstrings.
Examples
--------
>>> @templated_docstring(foo='bar')
... def my_func(self, foo):
... '''{foo}'''
...
>>> my_func.__doc__
'bar'
"""
def decorator(f):
f.__doc__ = format_docstring(f.__name__, f.__doc__, docs)
return f
return decorator
@curry
def copydoc(from_, to):
"""Copies the docstring from one function to another.
Parameters
----------
from_ : any
The object to copy the docstring from.
to : any
The object to copy the docstring to.
Returns
-------
to : any
``to`` with the docstring from ``from_``
"""
to.__doc__ = from_.__doc__
return to | zipline-tej | /zipline_tej-0.0.48-cp38-cp38-win_amd64.whl/zipline/utils/sharedoc.py | sharedoc.py |
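# Illustrative usage (not part of the original module): because ``copydoc`` is
# curried, it can be used directly as a decorator factory.
#
# >>> def original():
# ...     "Original docstring."
# >>> @copydoc(original)
# ... def clone():
# ...     pass
# >>> clone.__doc__
# 'Original docstring.'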
from abc import ABCMeta, abstractmethod
# Consistent error to be thrown in various cases regarding overriding
# `final` attributes.
_type_error = TypeError("Cannot override final attribute")
def bases_mro(bases):
"""
Yield classes in the order that methods should be looked up from the
base classes of an object.
"""
for base in bases:
for class_ in base.__mro__:
yield class_
def is_final(name, mro):
"""
Checks if `name` is a `final` object in the given `mro`.
We need to check the mro because we need to directly go into the __dict__
of the classes. Because `final` objects are descriptor, we need to grab
them _BEFORE_ the `__call__` is invoked.
"""
return any(
isinstance(getattr(c, "__dict__", {}).get(name), final) for c in bases_mro(mro)
)
class FinalMeta(type):
"""A metaclass template for classes the want to prevent subclassess from
overriding some methods or attributes.
"""
def __new__(mcls, name, bases, dict_):
for k, v in dict_.items():
if is_final(k, bases):
raise _type_error
setattr_ = dict_.get("__setattr__")
if setattr_ is None:
# No `__setattr__` was explicitly defined, look up the super
# class's. `bases[0]` will have a `__setattr__` because
# `object` does so we don't need to worry about the mro.
setattr_ = bases[0].__setattr__
if not is_final("__setattr__", bases) and not isinstance(setattr_, final):
# implicitly make the `__setattr__` a `final` object so that
# users cannot just avoid the descriptor protocol.
dict_["__setattr__"] = final(setattr_)
return super(FinalMeta, mcls).__new__(mcls, name, bases, dict_)
def __setattr__(self, name, value):
"""This stops the `final` attributes from being reassigned on the
class object.
"""
if is_final(name, self.__mro__):
raise _type_error
super(FinalMeta, self).__setattr__(name, value)
class final(object, metaclass=ABCMeta):
"""
An attribute that cannot be overridden.
This is like the final modifier in Java.
Example usage:
>>> class C(object, metaclass=FinalMeta):
... @final
... def f(self):
... return 'value'
...
This constructs a class with final method `f`. This cannot be overridden
on the class object or on any instance. You cannot override this by
subclassing `C`; attempting to do so will raise a `TypeError` at class
construction time.
"""
def __new__(cls, attr):
# Decide if this is a method wrapper or an attribute wrapper.
# We are going to cache the `callable` check by creating a
# method or attribute wrapper.
if hasattr(attr, "__get__"):
return object.__new__(finaldescriptor)
else:
return object.__new__(finalvalue)
def __init__(self, attr):
self._attr = attr
def __set__(self, instance, value):
"""
        `final` objects cannot be reassigned. This is the most important concept
about `final`s.
Unlike a `property` object, this will raise a `TypeError` when you
attempt to reassign it.
"""
raise _type_error
@abstractmethod
def __get__(self, instance, owner):
raise NotImplementedError("__get__")
class finalvalue(final):
"""
A wrapper for a non-descriptor attribute.
"""
def __get__(self, instance, owner):
return self._attr
class finaldescriptor(final):
"""
A final wrapper around a descriptor.
"""
def __get__(self, instance, owner):
return self._attr.__get__(instance, owner) | zipline-tej | /zipline_tej-0.0.48-cp38-cp38-win_amd64.whl/zipline/utils/final.py | final.py |
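# Illustrative example (not part of the original module): attempting to
# override a ``final`` attribute in a subclass fails at class construction
# time, as described in the ``final`` docstring above.
#
# >>> class C(object, metaclass=FinalMeta):
# ...     @final
# ...     def f(self):
# ...         return 'value'
# >>> class D(C):
# ...     def f(self):
# ...         return 'other'
# Traceback (most recent call last):
# ...
# TypeError: Cannot override final attribute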
import warnings
from datetime import datetime
from os import listdir
import os.path
import pandas as pd
import pytz
import zipline
from zipline.errors import SymbolNotFound
from zipline.finance.asset_restrictions import SecurityListRestrictions
from zipline.zipline_warnings import ZiplineDeprecationWarning
DATE_FORMAT = "%Y%m%d"
zipline_dir = os.path.dirname(zipline.__file__)
SECURITY_LISTS_DIR = os.path.join(zipline_dir, "resources", "security_lists")
class SecurityList(object):
def __init__(self, data, current_date_func, asset_finder):
"""
        data: a nested dictionary:
            knowledge_date -> lookup_date ->
                {'add': [symbol list], 'delete': [symbol list]}
        current_date_func: function taking no parameters, returning
            the current datetime
"""
self.data = data
self._cache = {}
self._knowledge_dates = self.make_knowledge_dates(self.data)
self.current_date = current_date_func
self.count = 0
self._current_set = set()
self.asset_finder = asset_finder
def make_knowledge_dates(self, data):
knowledge_dates = sorted([pd.Timestamp(k) for k in data.keys()])
return knowledge_dates
def __iter__(self):
warnings.warn(
"Iterating over security_lists is deprecated. Use "
"`for sid in <security_list>.current_securities(dt)` instead.",
category=ZiplineDeprecationWarning,
stacklevel=2,
)
return iter(self.current_securities(self.current_date()))
def __contains__(self, item):
warnings.warn(
"Evaluating inclusion in security_lists is deprecated. Use "
"`sid in <security_list>.current_securities(dt)` instead.",
category=ZiplineDeprecationWarning,
stacklevel=2,
)
return item in self.current_securities(self.current_date())
def current_securities(self, dt):
for kd in self._knowledge_dates:
if dt < kd:
break
if kd in self._cache:
self._current_set = self._cache[kd]
continue
for effective_date, changes in iter(self.data[kd].items()):
self.update_current(
effective_date, changes["add"], self._current_set.add
)
self.update_current(
effective_date, changes["delete"], self._current_set.remove
)
self._cache[kd] = self._current_set
return self._current_set
def update_current(self, effective_date, symbols, change_func):
for symbol in symbols:
try:
asset = self.asset_finder.lookup_symbol(
symbol, as_of_date=effective_date
)
# Pass if no Asset exists for the symbol
except SymbolNotFound:
continue
change_func(asset.sid)
class SecurityListSet(object):
# provide a cut point to substitute other security
# list implementations.
security_list_type = SecurityList
def __init__(self, current_date_func, asset_finder):
self.current_date_func = current_date_func
self.asset_finder = asset_finder
self._leveraged_etf = None
@property
def leveraged_etf_list(self):
if self._leveraged_etf is None:
self._leveraged_etf = self.security_list_type(
load_from_directory("leveraged_etf_list"),
self.current_date_func,
asset_finder=self.asset_finder,
)
return self._leveraged_etf
@property
def restrict_leveraged_etfs(self):
return SecurityListRestrictions(self.leveraged_etf_list)
def load_from_directory(list_name):
"""
To resolve the symbol in the LEVERAGED_ETF list,
the date on which the symbol was in effect is needed.
Furthermore, to maintain a point in time record of our own maintenance
of the restricted list, we need a knowledge date. Thus, restricted lists
are dictionaries of datetime->symbol lists.
    New symbols should be entered as a new knowledge date entry.
This method assumes a directory structure of:
SECURITY_LISTS_DIR/listname/knowledge_date/lookup_date/add.txt
SECURITY_LISTS_DIR/listname/knowledge_date/lookup_date/delete.txt
The return value is a dictionary with:
knowledge_date -> lookup_date ->
        {'add': [symbol list], 'delete': [symbol list]}
"""
data = {}
dir_path = os.path.join(SECURITY_LISTS_DIR, list_name)
for kd_name in listdir(dir_path):
kd = datetime.strptime(kd_name, DATE_FORMAT).replace(tzinfo=pytz.utc)
data[kd] = {}
kd_path = os.path.join(dir_path, kd_name)
for ld_name in listdir(kd_path):
ld = datetime.strptime(ld_name, DATE_FORMAT).replace(tzinfo=pytz.utc)
data[kd][ld] = {}
ld_path = os.path.join(kd_path, ld_name)
for fname in listdir(ld_path):
fpath = os.path.join(ld_path, fname)
with open(fpath) as f:
symbols = f.read().splitlines()
data[kd][ld][fname] = symbols
return data | zipline-tej | /zipline_tej-0.0.48-cp38-cp38-win_amd64.whl/zipline/utils/security_list.py | security_list.py |
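# For reference (a sketch, not part of the original module), the nested
# mapping returned above has the shape below; the dates and symbols are
# assumptions, and the innermost keys come from the file names on disk:
#
# {
#     datetime(2012, 1, 5, tzinfo=pytz.utc): {            # knowledge date
#         datetime(2012, 1, 3, tzinfo=pytz.utc): {        # lookup date
#             "add": ["AAA", "BBB"],
#             "delete": ["CCC"],
#         },
#     },
# }
#
# ``SecurityList.current_securities(dt)`` walks the knowledge dates up to
# ``dt`` and applies each "add"/"delete" entry to build the active sid set.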
from collections import OrderedDict
from datetime import datetime
from distutils.version import StrictVersion
from warnings import (
catch_warnings,
filterwarnings,
)
import numpy as np
from numpy import (
array_equal,
broadcast,
busday_count,
datetime64,
diff,
dtype,
empty,
flatnonzero,
hstack,
isnan,
nan,
vectorize,
where,
)
from numpy.lib.stride_tricks import as_strided
from toolz import flip
numpy_version = StrictVersion(np.__version__)
uint8_dtype = dtype("uint8")
bool_dtype = dtype("bool")
uint32_dtype = dtype("uint32")
uint64_dtype = dtype("uint64")
int64_dtype = dtype("int64")
float32_dtype = dtype("float32")
float64_dtype = dtype("float64")
complex128_dtype = dtype("complex128")
datetime64D_dtype = dtype("datetime64[D]")
datetime64ns_dtype = dtype("datetime64[ns]")
object_dtype = dtype("O")
# We use object arrays for strings.
categorical_dtype = object_dtype
make_datetime64ns = flip(datetime64, "ns")
make_datetime64D = flip(datetime64, "D")
# Array compare that works across versions of numpy
try:
assert_array_compare = np.testing.utils.assert_array_compare
except AttributeError:
assert_array_compare = np.testing.assert_array_compare
NaTmap = {
dtype("datetime64[%s]" % unit): datetime64("NaT", unit)
for unit in ("ns", "us", "ms", "s", "m", "D")
}
def NaT_for_dtype(dtype):
"""Retrieve NaT with the same units as ``dtype``.
Parameters
----------
dtype : dtype-coercable
The dtype to lookup the NaT value for.
Returns
-------
NaT : dtype
The NaT value for the given dtype.
"""
return NaTmap[np.dtype(dtype)]
NaTns = NaT_for_dtype(datetime64ns_dtype)
NaTD = NaT_for_dtype(datetime64D_dtype)
_FILLVALUE_DEFAULTS = {
bool_dtype: False,
float32_dtype: nan,
float64_dtype: nan,
datetime64ns_dtype: NaTns,
object_dtype: None,
}
INT_DTYPES_BY_SIZE_BYTES = OrderedDict(
[
(1, dtype("int8")),
(2, dtype("int16")),
(4, dtype("int32")),
(8, dtype("int64")),
]
)
UNSIGNED_INT_DTYPES_BY_SIZE_BYTES = OrderedDict(
[
(1, dtype("uint8")),
(2, dtype("uint16")),
(4, dtype("uint32")),
(8, dtype("uint64")),
]
)
def int_dtype_with_size_in_bytes(size):
try:
return INT_DTYPES_BY_SIZE_BYTES[size]
except KeyError:
raise ValueError("No integral dtype whose size is %d bytes." % size)
def unsigned_int_dtype_with_size_in_bytes(size):
try:
return UNSIGNED_INT_DTYPES_BY_SIZE_BYTES[size]
except KeyError:
raise ValueError("No unsigned integral dtype whose size is %d bytes." % size)
class NoDefaultMissingValue(Exception):
pass
def make_kind_check(python_types, numpy_kind):
"""
Make a function that checks whether a scalar or array is of a given kind
(e.g. float, int, datetime, timedelta).
"""
def check(value):
if hasattr(value, "dtype"):
return value.dtype.kind == numpy_kind
return isinstance(value, python_types)
return check
is_float = make_kind_check(float, "f")
is_int = make_kind_check(int, "i")
is_datetime = make_kind_check(datetime, "M")
is_object = make_kind_check(object, "O")
def coerce_to_dtype(dtype, value):
"""
Make a value with the specified numpy dtype.
Only datetime64[ns] and datetime64[D] are supported for datetime dtypes.
"""
name = dtype.name
if name.startswith("datetime64"):
if name == "datetime64[D]":
return make_datetime64D(value)
elif name == "datetime64[ns]":
return make_datetime64ns(value)
else:
raise TypeError("Don't know how to coerce values of dtype %s" % dtype)
return dtype.type(value)
def default_missing_value_for_dtype(dtype):
"""
Get the default fill value for `dtype`.
"""
try:
return _FILLVALUE_DEFAULTS[dtype]
except KeyError:
raise NoDefaultMissingValue("No default value registered for dtype %s." % dtype)
def repeat_first_axis(array, count):
"""
Restride `array` to repeat `count` times along the first axis.
Parameters
----------
array : np.array
The array to restride.
count : int
Number of times to repeat `array`.
Returns
-------
result : array
Array of shape (count,) + array.shape, composed of `array` repeated
`count` times along the first axis.
Example
-------
>>> from numpy import arange
>>> a = arange(3); a
array([0, 1, 2])
>>> repeat_first_axis(a, 2)
array([[0, 1, 2],
[0, 1, 2]])
>>> repeat_first_axis(a, 4)
array([[0, 1, 2],
[0, 1, 2],
[0, 1, 2],
[0, 1, 2]])
Notes
----
The resulting array will share memory with `array`. If you need to assign
to the input or output, you should probably make a copy first.
See Also
--------
repeat_last_axis
"""
return as_strided(array, (count,) + array.shape, (0,) + array.strides)
def repeat_last_axis(array, count):
"""
Restride `array` to repeat `count` times along the last axis.
Parameters
----------
array : np.array
The array to restride.
count : int
Number of times to repeat `array`.
Returns
-------
result : array
Array of shape array.shape + (count,) composed of `array` repeated
`count` times along the last axis.
Example
-------
>>> from numpy import arange
>>> a = arange(3); a
array([0, 1, 2])
>>> repeat_last_axis(a, 2)
array([[0, 0],
[1, 1],
[2, 2]])
>>> repeat_last_axis(a, 4)
array([[0, 0, 0, 0],
[1, 1, 1, 1],
[2, 2, 2, 2]])
Notes
----
The resulting array will share memory with `array`. If you need to assign
to the input or output, you should probably make a copy first.
See Also
--------
repeat_last_axis
"""
return as_strided(array, array.shape + (count,), array.strides + (0,))
def rolling_window(array, length):
"""
    Restride an array of shape
        (X_0, ... X_N)
    into an array of shape
        (X_0 - length + 1, length, ... X_N)
    where each slice at index i along the first axis is equivalent to
        result[i] = array[i:i + length]
Parameters
----------
array : np.ndarray
The base array.
length : int
Length of the synthetic first axis to generate.
Returns
-------
out : np.ndarray
Example
-------
>>> from numpy import arange
>>> a = arange(25).reshape(5, 5)
>>> a
array([[ 0, 1, 2, 3, 4],
[ 5, 6, 7, 8, 9],
[10, 11, 12, 13, 14],
[15, 16, 17, 18, 19],
[20, 21, 22, 23, 24]])
>>> rolling_window(a, 2)
array([[[ 0, 1, 2, 3, 4],
[ 5, 6, 7, 8, 9]],
<BLANKLINE>
[[ 5, 6, 7, 8, 9],
[10, 11, 12, 13, 14]],
<BLANKLINE>
[[10, 11, 12, 13, 14],
[15, 16, 17, 18, 19]],
<BLANKLINE>
[[15, 16, 17, 18, 19],
[20, 21, 22, 23, 24]]])
"""
orig_shape = array.shape
if not orig_shape:
raise IndexError("Can't restride a scalar.")
elif orig_shape[0] <= length:
raise IndexError(
"Can't restride array of shape {shape} with"
" a window length of {len}".format(
shape=orig_shape,
len=length,
)
)
num_windows = orig_shape[0] - length + 1
new_shape = (num_windows, length) + orig_shape[1:]
new_strides = (array.strides[0],) + array.strides
return as_strided(array, new_shape, new_strides)
# Sentinel value that isn't NaT.
_notNaT = make_datetime64D(0)
iNaT = int(NaTns.view(int64_dtype))
assert iNaT == NaTD.view(int64_dtype), "iNaTns != iNaTD"
def isnat(obj):
"""
Check if a value is np.NaT.
"""
if obj.dtype.kind not in ("m", "M"):
raise ValueError("%s is not a numpy datetime or timedelta")
return obj.view(int64_dtype) == iNaT
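# Illustrative usage (not part of the original module):
#
# >>> import numpy as np
# >>> isnat(np.array(["NaT", "2014-01-01"], dtype="datetime64[ns]"))
# array([ True, False])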
def is_missing(data, missing_value):
"""
Generic is_missing function that handles NaN and NaT.
"""
if is_float(data) and isnan(missing_value):
return isnan(data)
elif is_datetime(data) and isnat(missing_value):
return isnat(data)
elif is_object(data) and missing_value is None:
        # XXX: Older versions of numpy return True/False for array ==
# None. Work around this by boxing None in a 1x1 array, which causes
# numpy to do the broadcasted comparison we want.
return data == np.array([missing_value])
return data == missing_value
def same(x, y):
"""
Check if two scalar values are "the same".
Returns True if `x == y`, or if x and y are both NaN or both NaT.
"""
if is_float(x) and isnan(x) and is_float(y) and isnan(y):
return True
elif is_datetime(x) and isnat(x) and is_datetime(y) and isnat(y):
return True
else:
return x == y
def busday_count_mask_NaT(begindates, enddates, out=None):
"""
    Simple wrapper around numpy.busday_count that returns `float` arrays rather
    than int arrays, and handles `NaT`s by returning `NaN`s where the inputs
    were `NaT`.
Doesn't support custom weekdays or calendars, but probably should in the
future.
See Also
--------
np.busday_count
"""
if out is None:
out = empty(broadcast(begindates, enddates).shape, dtype=float)
beginmask = isnat(begindates)
endmask = isnat(enddates)
out = busday_count(
# Temporarily fill in non-NaT values.
where(beginmask, _notNaT, begindates),
where(endmask, _notNaT, enddates),
out=out,
)
# Fill in entries where either comparison was NaT with nan in the output.
out[beginmask | endmask] = nan
return out
class WarningContext(object):
"""
Re-usable contextmanager for contextually managing warnings.
"""
def __init__(self, *warning_specs):
self._warning_specs = warning_specs
self._catchers = []
def __enter__(self):
catcher = catch_warnings()
catcher.__enter__()
self._catchers.append(catcher)
for args, kwargs in self._warning_specs:
filterwarnings(*args, **kwargs)
return self
def __exit__(self, *exc_info):
catcher = self._catchers.pop()
return catcher.__exit__(*exc_info)
def ignore_nanwarnings():
"""
Helper for building a WarningContext that ignores warnings from numpy's
nanfunctions.
"""
return WarningContext(
(
("ignore",),
{"category": RuntimeWarning, "module": "numpy.lib.nanfunctions"},
)
)
def vectorized_is_element(array, choices):
"""
Check if each element of ``array`` is in choices.
Parameters
----------
array : np.ndarray
choices : object
Object implementing __contains__.
Returns
-------
was_element : np.ndarray[bool]
Array indicating whether each element of ``array`` was in ``choices``.
"""
return vectorize(choices.__contains__, otypes=[bool])(array)
def as_column(a):
"""
Convert an array of shape (N,) into an array of shape (N, 1).
This is equivalent to `a[:, np.newaxis]`.
Parameters
----------
a : np.ndarray
Example
-------
>>> import numpy as np
>>> a = np.arange(5)
>>> a
array([0, 1, 2, 3, 4])
>>> as_column(a)
array([[0],
[1],
[2],
[3],
[4]])
>>> as_column(a).shape
(5, 1)
"""
if a.ndim != 1:
raise ValueError(
"as_column expected an 1-dimensional array, "
"but got an array of shape %s" % (a.shape,)
)
return a[:, None]
def changed_locations(a, include_first):
"""
Compute indices of values in ``a`` that differ from the previous value.
Parameters
----------
a : np.ndarray
        The array in which to find the indices of changed values.
include_first : bool
Whether or not to consider the first index of the array as "changed".
Example
-------
>>> import numpy as np
>>> changed_locations(np.array([0, 0, 5, 5, 1, 1]), include_first=False)
array([2, 4])
>>> changed_locations(np.array([0, 0, 5, 5, 1, 1]), include_first=True)
array([0, 2, 4])
"""
if a.ndim > 1:
raise ValueError("indices_of_changed_values only supports 1D arrays.")
indices = flatnonzero(diff(a)) + 1
if not include_first:
return indices
return hstack([[0], indices])
def compare_datetime_arrays(x, y):
"""
Compare datetime64 ndarrays, treating NaT values as equal.
"""
return array_equal(x.view("int64"), y.view("int64"))
def bytes_array_to_native_str_object_array(a):
"""Convert an array of dtype S to an object array containing `str`."""
return a.astype(str).astype(object) | zipline-tej | /zipline_tej-0.0.48-cp38-cp38-win_amd64.whl/zipline/utils/numpy_utils.py | numpy_utils.py |
from collections.abc import MutableMapping
import errno
from functools import partial
import os
import pickle
from distutils import dir_util
from shutil import rmtree, move
from tempfile import mkdtemp, NamedTemporaryFile
import pandas as pd
from .context_tricks import nop_context
from .paths import ensure_directory
from .sentinel import sentinel
class Expired(Exception):
"""Marks that a :class:`CachedObject` has expired."""
ExpiredCachedObject = sentinel("ExpiredCachedObject")
AlwaysExpired = sentinel("AlwaysExpired")
class CachedObject(object):
"""
A simple struct for maintaining a cached object with an expiration date.
Parameters
----------
value : object
The object to cache.
expires : datetime-like
Expiration date of `value`. The cache is considered invalid for dates
**strictly greater** than `expires`.
Examples
--------
>>> from pandas import Timestamp, Timedelta
>>> expires = Timestamp('2014', tz='UTC')
>>> obj = CachedObject(1, expires)
>>> obj.unwrap(expires - Timedelta('1 minute'))
1
>>> obj.unwrap(expires)
1
>>> obj.unwrap(expires + Timedelta('1 minute'))
... # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
Expired: 2014-01-01 00:00:00+00:00
"""
def __init__(self, value, expires):
self._value = value
self._expires = expires
@classmethod
def expired(cls):
"""Construct a CachedObject that's expired at any time."""
return cls(ExpiredCachedObject, expires=AlwaysExpired)
def unwrap(self, dt):
"""
Get the cached value.
Returns
-------
value : object
The cached value.
Raises
------
Expired
Raised when `dt` is greater than self.expires.
"""
expires = self._expires
if expires is AlwaysExpired or expires < dt:
raise Expired(self._expires)
return self._value
def _unsafe_get_value(self):
"""You almost certainly shouldn't use this."""
return self._value
class ExpiringCache(object):
"""
    A cache of multiple CachedObjects, which returns the wrapped value
    or raises and deletes the CachedObject if the value has expired.
Parameters
----------
cache : dict-like, optional
An instance of a dict-like object which needs to support at least:
`__del__`, `__getitem__`, `__setitem__`
        If `None`, then a dict is used as a default.
cleanup : callable, optional
A method that takes a single argument, a cached object, and is called
upon expiry of the cached object, prior to deleting the object. If not
provided, defaults to a no-op.
Examples
--------
>>> from pandas import Timestamp, Timedelta
>>> expires = Timestamp('2014', tz='UTC')
>>> value = 1
>>> cache = ExpiringCache()
>>> cache.set('foo', value, expires)
>>> cache.get('foo', expires - Timedelta('1 minute'))
1
>>> cache.get('foo', expires + Timedelta('1 minute'))
Traceback (most recent call last):
...
KeyError: 'foo'
"""
def __init__(self, cache=None, cleanup=lambda value_to_clean: None):
if cache is not None:
self._cache = cache
else:
self._cache = {}
self.cleanup = cleanup
def get(self, key, dt):
"""Get the value of a cached object.
Parameters
----------
key : any
The key to lookup.
dt : datetime
The time of the lookup.
Returns
-------
result : any
The value for ``key``.
Raises
------
KeyError
Raised if the key is not in the cache or the value for the key
has expired.
"""
try:
return self._cache[key].unwrap(dt)
except Expired:
self.cleanup(self._cache[key]._unsafe_get_value())
del self._cache[key]
raise KeyError(key)
def set(self, key, value, expiration_dt):
"""Adds a new key value pair to the cache.
Parameters
----------
key : any
The key to use for the pair.
value : any
The value to store under the name ``key``.
expiration_dt : datetime
When should this mapping expire? The cache is considered invalid
for dates **strictly greater** than ``expiration_dt``.
"""
self._cache[key] = CachedObject(value, expiration_dt)
class dataframe_cache(MutableMapping):
"""A disk-backed cache for dataframes.
``dataframe_cache`` is a mutable mapping from string names to pandas
DataFrame objects.
This object may be used as a context manager to delete the cache directory
on exit.
Parameters
----------
path : str, optional
The directory path to the cache. Files will be written as
``path/<keyname>``.
lock : Lock, optional
Thread lock for multithreaded/multiprocessed access to the cache.
If not provided no locking will be used.
clean_on_failure : bool, optional
Should the directory be cleaned up if an exception is raised in the
context manager.
    serialization : {'msgpack', 'pickle:<n>'}, optional
        How the data should be serialized. If ``'pickle'`` is passed, an
        optional pickle protocol can be appended like ``'pickle:3'``, which says
        to use pickle protocol 3.
Notes
-----
The syntax ``cache[:]`` will load all key:value pairs into memory as a
dictionary.
The cache uses a temporary file format that is subject to change between
versions of zipline.
"""
def __init__(
self, path=None, lock=None, clean_on_failure=True, serialization="pickle"
):
self.path = path if path is not None else mkdtemp()
self.lock = lock if lock is not None else nop_context
self.clean_on_failure = clean_on_failure
if serialization == "msgpack":
self.serialize = pd.DataFrame.to_msgpack
self.deserialize = pd.read_msgpack
self._protocol = None
else:
s = serialization.split(":", 1)
if s[0] != "pickle":
raise ValueError(
"'serialization' must be either 'msgpack' or 'pickle[:n]'",
)
self._protocol = int(s[1]) if len(s) == 2 else None
self.serialize = self._serialize_pickle
self.deserialize = partial(pickle.load, encoding="latin-1")
ensure_directory(self.path)
def _serialize_pickle(self, df, path):
with open(path, "wb") as f:
pickle.dump(df, f, protocol=self._protocol)
def _keypath(self, key):
return os.path.join(self.path, key)
def __enter__(self):
return self
def __exit__(self, type_, value, tb):
if not (self.clean_on_failure or value is None):
# we are not cleaning up after a failure and there was an exception
return
with self.lock:
rmtree(self.path)
def __getitem__(self, key):
if key == slice(None):
return dict(self.items())
with self.lock:
try:
with open(self._keypath(key), "rb") as f:
return self.deserialize(f)
except IOError as e:
if e.errno != errno.ENOENT:
raise
raise KeyError(key)
def __setitem__(self, key, value):
with self.lock:
self.serialize(value, self._keypath(key))
def __delitem__(self, key):
with self.lock:
try:
os.remove(self._keypath(key))
except OSError as e:
if e.errno == errno.ENOENT:
                    # raise a KeyError if this file did not exist
raise KeyError(key)
# reraise the actual oserror otherwise
raise
def __iter__(self):
return iter(os.listdir(self.path))
def __len__(self):
return len(os.listdir(self.path))
def __repr__(self):
return "<%s: keys={%s}>" % (
type(self).__name__,
", ".join(map(repr, sorted(self))),
)
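# Illustrative usage sketch (the key name 'prices' is hypothetical): because
# ``dataframe_cache`` is a MutableMapping backed by files on disk, it can be
# used like a dict of DataFrames, optionally as a context manager that removes
# its directory on exit.
#   >>> with dataframe_cache() as cache:        # pickle serialization by default
#   ...     cache['prices'] = pd.DataFrame({'close': [1.0, 2.0]})
#   ...     roundtripped = cache['prices']      # deserialized from cache.path/prices
#   ...     keys = list(cache)                  # keys are file names under cache.path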
class working_file(object):
"""A context manager for managing a temporary file that will be moved
to a non-temporary location if no exceptions are raised in the context.
Parameters
----------
final_path : str
The location to move the file when committing.
*args, **kwargs
Forwarded to NamedTemporaryFile.
Notes
-----
The file is moved on __exit__ if there are no exceptions.
    ``working_file`` uses :func:`shutil.move` to move the actual file,
    meaning it provides exactly the same guarantees as :func:`shutil.move`.
"""
def __init__(self, final_path, *args, **kwargs):
self._tmpfile = NamedTemporaryFile(delete=False, *args, **kwargs)
self._final_path = final_path
@property
def path(self):
"""Alias for ``name`` to be consistent with
:class:`~zipline.utils.cache.working_dir`.
"""
return self._tmpfile.name
def _commit(self):
"""Sync the temporary file to the final path."""
move(self.path, self._final_path)
def __enter__(self):
self._tmpfile.__enter__()
return self
def __exit__(self, *exc_info):
self._tmpfile.__exit__(*exc_info)
if exc_info[0] is None:
self._commit()
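# Illustrative usage sketch (the target path 'out.csv' is hypothetical): write
# to ``wf.path`` inside the block; the temp file is only moved to the final
# location when the block exits without raising.
#   >>> with working_file('out.csv', mode='w') as wf:
#   ...     with open(wf.path, 'w') as f:
#   ...         f.write('a,b\n1,2\n')
#   # on a clean exit the contents now live at 'out.csv'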
class working_dir(object):
"""A context manager for managing a temporary directory that will be moved
to a non-temporary location if no exceptions are raised in the context.
Parameters
----------
    final_path : str
        The location to move the directory when committing.
*args, **kwargs
Forwarded to tmp_dir.
Notes
-----
    The directory is copied to ``final_path`` on __exit__ if there are no
    exceptions.
    ``working_dir`` uses :func:`dir_util.copy_tree` to copy the actual files,
    meaning it provides exactly the same guarantees as :func:`dir_util.copy_tree`.
"""
def __init__(self, final_path, *args, **kwargs):
self.path = mkdtemp()
self._final_path = final_path
def ensure_dir(self, *path_parts):
"""Ensures a subdirectory of the working directory.
Parameters
----------
path_parts : iterable[str]
The parts of the path after the working directory.
"""
path = self.getpath(*path_parts)
ensure_directory(path)
return path
def getpath(self, *path_parts):
"""Get a path relative to the working directory.
Parameters
----------
path_parts : iterable[str]
The parts of the path after the working directory.
"""
return os.path.join(self.path, *path_parts)
def _commit(self):
"""Sync the temporary directory to the final path."""
dir_util.copy_tree(self.path, self._final_path)
def __enter__(self):
return self
def __exit__(self, *exc_info):
if exc_info[0] is None:
self._commit()
rmtree(self.path) | zipline-tej | /zipline_tej-0.0.48-cp38-cp38-win_amd64.whl/zipline/utils/cache.py | cache.py |
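# Illustrative usage sketch (the directory name 'bundle_out' is hypothetical):
# build files under the temporary directory; they are copied to the final path
# only if the block exits without raising.
#   >>> with working_dir('bundle_out') as wd:
#   ...     wd.ensure_dir('daily')                          # <tmp>/daily
#   ...     with open(wd.getpath('daily', 'data.csv'), 'w') as f:
#   ...         f.write('close\n1.0\n')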
from contextlib import contextmanager
from copy import deepcopy
from itertools import product
import operator as op
import warnings
import numpy as np
import pandas as pd
from distutils.version import StrictVersion
from zipline.utils.calendar_utils import days_at_time
from pandas.errors import PerformanceWarning
pandas_version = StrictVersion(pd.__version__)
new_pandas = pandas_version >= StrictVersion("0.19")
skip_pipeline_new_pandas = (
"Pipeline categoricals are not yet compatible with pandas >=0.19"
)
skip_pipeline_blaze = "Blaze doesn't play nicely with Pandas >=1.0"
def normalize_date(dt):
"""
    Normalize a datetime-like value to midnight, preserving any timezone info.
Returns
-------
normalized : datetime.datetime or Timestamp
"""
return dt.normalize()
def july_5th_holiday_observance(datetime_index):
return datetime_index[datetime_index.year != 2013]
def explode(df):
"""
Take a DataFrame and return a triple of
(df.index, df.columns, df.values)
"""
return df.index, df.columns, df.values
def _time_to_micros(time):
"""Convert a time into microseconds since midnight.
Parameters
----------
time : datetime.time
The time to convert.
Returns
-------
us : int
The number of microseconds since midnight.
Notes
-----
This does not account for leap seconds or daylight savings.
"""
seconds = time.hour * 60 * 60 + time.minute * 60 + time.second
return 1000000 * seconds + time.microsecond
_opmap = dict(
zip(
product((True, False), repeat=3),
product((op.le, op.lt), (op.le, op.lt), (op.and_, op.or_)),
)
)
def mask_between_time(dts, start, end, include_start=True, include_end=True):
"""Return a mask of all of the datetimes in ``dts`` that are between
``start`` and ``end``.
Parameters
----------
dts : pd.DatetimeIndex
The index to mask.
start : time
Mask away times less than the start.
end : time
Mask away times greater than the end.
include_start : bool, optional
Inclusive on ``start``.
include_end : bool, optional
Inclusive on ``end``.
Returns
-------
mask : np.ndarray[bool]
A bool array masking ``dts``.
See Also
--------
:meth:`pandas.DatetimeIndex.indexer_between_time`
"""
# This function is adapted from
# `pandas.Datetime.Index.indexer_between_time` which was originally
# written by Wes McKinney, Chang She, and Grant Roch.
time_micros = dts._get_time_micros()
start_micros = _time_to_micros(start)
end_micros = _time_to_micros(end)
left_op, right_op, join_op = _opmap[
bool(include_start),
bool(include_end),
start_micros <= end_micros,
]
return join_op(
left_op(start_micros, time_micros),
right_op(time_micros, end_micros),
)
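# Illustrative usage sketch:
#   >>> from datetime import time
#   >>> dts = pd.date_range('2014-01-01 09:00', periods=5, freq='30T')
#   >>> mask_between_time(dts, time(9, 30), time(10, 0))
#   array([False,  True,  True, False, False])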
def find_in_sorted_index(dts, dt):
"""
Find the index of ``dt`` in ``dts``.
This function should be used instead of `dts.get_loc(dt)` if the index is
large enough that we don't want to initialize a hash table in ``dts``. In
particular, this should always be used on minutely trading calendars.
Parameters
----------
dts : pd.DatetimeIndex
Index in which to look up ``dt``. **Must be sorted**.
dt : pd.Timestamp
``dt`` to be looked up.
Returns
-------
ix : int
Integer index such that dts[ix] == dt.
Raises
------
    LookupError
If dt is not in ``dts``.
"""
ix = dts.searchsorted(dt)
if ix == len(dts) or dts[ix] != dt:
raise LookupError("{dt} is not in {dts}".format(dt=dt, dts=dts))
return ix
def nearest_unequal_elements(dts, dt):
"""
Find values in ``dts`` closest but not equal to ``dt``.
Returns a pair of (last_before, first_after).
    When ``dt`` is less than every element in ``dts``, ``last_before`` is None.
    When ``dt`` is greater than every element in ``dts``, ``first_after`` is None.
``dts`` must be unique and sorted in increasing order.
Parameters
----------
dts : pd.DatetimeIndex
Dates in which to search.
dt : pd.Timestamp
Date for which to find bounds.
"""
if not dts.is_unique:
raise ValueError("dts must be unique")
if not dts.is_monotonic_increasing:
raise ValueError("dts must be sorted in increasing order")
if not len(dts):
return None, None
sortpos = dts.searchsorted(dt, side="left")
try:
sortval = dts[sortpos]
except IndexError:
# dt is greater than any value in the array.
return dts[-1], None
if dt < sortval:
lower_ix = sortpos - 1
upper_ix = sortpos
elif dt == sortval:
lower_ix = sortpos - 1
upper_ix = sortpos + 1
else:
lower_ix = sortpos
upper_ix = sortpos + 1
lower_value = dts[lower_ix] if lower_ix >= 0 else None
upper_value = dts[upper_ix] if upper_ix < len(dts) else None
return lower_value, upper_value
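# Illustrative usage sketch:
#   >>> sessions = pd.DatetimeIndex(['2014-01-02', '2014-01-03', '2014-01-06'])
#   >>> nearest_unequal_elements(sessions, pd.Timestamp('2014-01-03'))
#   (Timestamp('2014-01-02 00:00:00'), Timestamp('2014-01-06 00:00:00'))
#   >>> nearest_unequal_elements(sessions, pd.Timestamp('2014-01-04'))
#   (Timestamp('2014-01-03 00:00:00'), Timestamp('2014-01-06 00:00:00'))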
def timedelta_to_integral_seconds(delta):
"""
Convert a pd.Timedelta to a number of seconds as an int.
"""
return int(delta.total_seconds())
def timedelta_to_integral_minutes(delta):
"""
Convert a pd.Timedelta to a number of minutes as an int.
"""
return timedelta_to_integral_seconds(delta) // 60
@contextmanager
def ignore_pandas_nan_categorical_warning():
with warnings.catch_warnings():
# Pandas >= 0.18 doesn't like null-ish values in categories, but
# avoiding that requires a broader change to how missing values are
# handled in pipeline, so for now just silence the warning.
warnings.filterwarnings(
"ignore",
category=FutureWarning,
)
yield
def categorical_df_concat(df_list, inplace=False):
"""
Prepare list of pandas DataFrames to be used as input to pd.concat.
Ensure any columns of type 'category' have the same categories across each
dataframe.
Parameters
----------
df_list : list
List of dataframes with same columns.
inplace : bool
True if input list can be modified. Default is False.
Returns
-------
concatenated : df
Dataframe of concatenated list.
"""
if not inplace:
df_list = deepcopy(df_list)
# Assert each dataframe has the same columns/dtypes
df = df_list[0]
if not all([(df.dtypes.equals(df_i.dtypes)) for df_i in df_list[1:]]):
raise ValueError("Input DataFrames must have the same columns/dtypes.")
categorical_columns = df.columns[df.dtypes == "category"]
for col in categorical_columns:
new_categories = _sort_set_none_first(
_union_all(frame[col].cat.categories for frame in df_list)
)
with ignore_pandas_nan_categorical_warning():
for df in df_list:
df[col].cat.set_categories(new_categories, inplace=True)
return pd.concat(df_list)
def _union_all(iterables):
"""Union entries in ``iterables`` into a set."""
return set().union(*iterables)
def _sort_set_none_first(set_):
"""Sort a set, sorting ``None`` before other elements, if present."""
if None in set_:
set_.remove(None)
out = [None]
out.extend(sorted(set_))
set_.add(None)
return out
else:
return sorted(set_)
def empty_dataframe(*columns):
"""Create an empty dataframe with columns of particular types.
Parameters
----------
*columns
The (column_name, column_dtype) pairs.
Returns
-------
typed_dataframe : pd.DataFrame
The empty typed dataframe.
Examples
--------
>>> df = empty_dataframe(
... ('a', 'int64'),
... ('b', 'float64'),
... ('c', 'datetime64[ns]'),
... )
>>> df
Empty DataFrame
Columns: [a, b, c]
Index: []
    >>> df.dtypes
a int64
b float64
c datetime64[ns]
dtype: object
"""
return pd.DataFrame(np.array([], dtype=list(columns)))
def check_indexes_all_same(indexes, message="Indexes are not equal."):
"""Check that a list of Index objects are all equal.
Parameters
----------
indexes : iterable[pd.Index]
Iterable of indexes to check.
Raises
------
ValueError
If the indexes are not all the same.
"""
iterator = iter(indexes)
first = next(iterator)
for other in iterator:
same = first == other
if not same.all():
bad_loc = np.flatnonzero(~same)[0]
raise ValueError(
"{}\nFirst difference is at index {}: "
"{} != {}".format(message, bad_loc, first[bad_loc], other[bad_loc]),
) | zipline-tej | /zipline_tej-0.0.48-cp38-cp38-win_amd64.whl/zipline/utils/pandas_utils.py | pandas_utils.py |
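# Illustrative usage sketch:
#   >>> check_indexes_all_same([pd.Index([1, 2, 3]), pd.Index([1, 2, 3])])  # passes silently
#   >>> check_indexes_all_same([pd.Index([1, 2, 3]), pd.Index([1, 2, 4])])
#   Traceback (most recent call last):
#       ...
#   ValueError: Indexes are not equal.
#   First difference is at index 2: 3 != 4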
from errno import EEXIST
import os
from os.path import exists, expanduser, join
import pandas as pd
def hidden(path):
"""Check if a path is hidden.
Parameters
----------
path : str
A filepath.
"""
return os.path.split(path)[1].startswith(".")
def ensure_directory(path):
"""
Ensure that a directory named "path" exists.
"""
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == EEXIST and os.path.isdir(path):
return
raise
def ensure_directory_containing(path):
"""
Ensure that the directory containing `path` exists.
This is just a convenience wrapper for doing::
ensure_directory(os.path.dirname(path))
"""
ensure_directory(os.path.dirname(path))
def ensure_file(path):
"""
Ensure that a file exists. This will create any parent directories needed
and create an empty file if it does not exist.
Parameters
----------
path : str
The file path to ensure exists.
"""
ensure_directory_containing(path)
open(path, "a+").close() # touch the file
def update_modified_time(path, times=None):
"""
    Updates the modified time of an existing file. This will create any
    parent directories needed, but the file itself must already exist.
Parameters
----------
path : str
The file path to update.
times : tuple
A tuple of size two; access time and modified time
"""
ensure_directory_containing(path)
os.utime(path, times)
def last_modified_time(path):
"""
Get the last modified time of path as a Timestamp.
"""
return pd.Timestamp(os.path.getmtime(path), unit="s", tz="UTC")
def modified_since(path, dt):
"""
Check whether `path` was modified since `dt`.
Returns False if path doesn't exist.
Parameters
----------
path : str
Path to the file to be checked.
dt : pd.Timestamp
The date against which to compare last_modified_time(path).
Returns
-------
was_modified : bool
        Will be ``False`` if path doesn't exist, or if its last modified date
        is earlier than or equal to `dt`.
"""
return exists(path) and last_modified_time(path) > dt
def zipline_root(environ=None):
"""
Get the root directory for all zipline-managed files.
For testing purposes, this accepts a dictionary to interpret as the os
environment.
Parameters
----------
environ : dict, optional
A dict to interpret as the os environment.
Returns
-------
root : string
Path to the zipline root dir.
"""
if environ is None:
environ = os.environ
root = environ.get("ZIPLINE_ROOT", None)
if root is None:
root = expanduser("~/.zipline")
return root
def zipline_path(paths, environ=None):
"""
Get a path relative to the zipline root.
Parameters
----------
paths : list[str]
List of requested path pieces.
environ : dict, optional
An environment dict to forward to zipline_root.
Returns
-------
newpath : str
The requested path joined with the zipline root.
"""
return join(zipline_root(environ=environ), *paths)
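# Illustrative usage sketch (the exact prefix depends on $HOME and on whether
# ZIPLINE_ROOT is set in the environment):
#   >>> zipline_root(environ={})
#   '~/.zipline'                      # expanded to an absolute path
#   >>> zipline_path(['data', 'bundles'], environ={})
#   '~/.zipline/data/bundles'         # expanded to an absolute path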
def default_extension(environ=None):
"""
Get the path to the default zipline extension file.
Parameters
----------
environ : dict, optional
        An environment dict to forward to zipline_root.
Returns
-------
default_extension_path : str
The file path to the default zipline extension file.
"""
return zipline_path(["extension.py"], environ=environ)
def data_root(environ=None):
"""
The root directory for zipline data files.
Parameters
----------
environ : dict, optional
An environment dict to forward to zipline_root.
Returns
-------
data_root : str
The zipline data root.
"""
return zipline_path(["data"], environ=environ)
def ensure_data_root(environ=None):
"""
Ensure that the data root exists.
"""
ensure_directory(data_root(environ=environ))
def data_path(paths, environ=None):
"""
Get a path relative to the zipline data directory.
Parameters
----------
paths : iterable[str]
List of requested path pieces.
environ : dict, optional
An environment dict to forward to zipline_root.
Returns
-------
newpath : str
The requested path joined with the zipline data root.
"""
return zipline_path(["data"] + list(paths), environ=environ)
def cache_root(environ=None):
"""
The root directory for zipline cache files.
Parameters
----------
environ : dict, optional
An environment dict to forward to zipline_root.
Returns
-------
cache_root : str
The zipline cache root.
"""
return zipline_path(["cache"], environ=environ)
def ensure_cache_root(environ=None):
"""
    Ensure that the cache root exists.
"""
ensure_directory(cache_root(environ=environ))
def cache_path(paths, environ=None):
"""
Get a path relative to the zipline cache directory.
Parameters
----------
paths : iterable[str]
List of requested path pieces.
environ : dict, optional
An environment dict to forward to zipline_root.
Returns
-------
newpath : str
The requested path joined with the zipline cache root.
"""
return zipline_path(["cache"] + list(paths), environ=environ) | zipline-tej | /zipline_tej-0.0.48-cp38-cp38-win_amd64.whl/zipline/utils/paths.py | paths.py |
from collections import namedtuple
from itertools import chain, zip_longest
from zipline.errors import ZiplineError
from zipline.utils.compat import getargspec
Argspec = namedtuple("Argspec", ["args", "starargs", "kwargs"])
def singleton(cls):
instances = {}
def getinstance():
if cls not in instances:
instances[cls] = cls()
return instances[cls]
return getinstance
@singleton
class Ignore(object):
def __str__(self):
return "Argument.ignore"
__repr__ = __str__
@singleton
class NoDefault(object):
def __str__(self):
return "Argument.no_default"
__repr__ = __str__
@singleton
class AnyDefault(object):
def __str__(self):
return "Argument.any_default"
__repr__ = __str__
class Argument(namedtuple("Argument", ["name", "default"])):
"""
An argument to a function.
Argument.no_default is a value representing no default to the argument.
Argument.ignore is a value that says you should ignore the default value.
"""
no_default = NoDefault()
any_default = AnyDefault()
ignore = Ignore()
def __new__(cls, name=ignore, default=ignore):
return super(Argument, cls).__new__(cls, name, default)
def __str__(self):
if self.has_no_default(self) or self.ignore_default(self):
return str(self.name)
else:
return "=".join([str(self.name), str(self.default)])
def __repr__(self):
return "Argument(%s, %s)" % (repr(self.name), repr(self.default))
def _defaults_match(self, arg):
return (
any(map(Argument.ignore_default, [self, arg]))
or (
self.default is Argument.any_default
and arg.default is not Argument.no_default
)
or (
arg.default is Argument.any_default
and self.default is not Argument.no_default
)
or self.default == arg.default
)
def _names_match(self, arg):
return (
self.name == arg.name
or self.name is Argument.ignore
or arg.name is Argument.ignore
)
def matches(self, arg):
return self._names_match(arg) and self._defaults_match(arg)
__eq__ = matches
@staticmethod
def parse_argspec(callable_):
"""
Takes a callable and returns a tuple with the list of Argument objects,
the name of *args, and the name of **kwargs.
If *args or **kwargs is not present, it will be None.
This returns a namedtuple called Argspec that has three fields named:
args, starargs, and kwargs.
"""
args, varargs, keywords, defaults = getargspec(callable_)
defaults = list(defaults or [])
if getattr(callable_, "__self__", None) is not None:
# This is a bound method, drop the self param.
args = args[1:]
first_default = len(args) - len(defaults)
return Argspec(
[
Argument(
arg,
Argument.no_default
if n < first_default
else defaults[n - first_default],
)
for n, arg in enumerate(args)
],
varargs,
keywords,
)
@staticmethod
def has_no_default(arg):
return arg.default is Argument.no_default
@staticmethod
def ignore_default(arg):
return arg.default is Argument.ignore
def _expect_extra(expected, present, exc_unexpected, exc_missing, exc_args):
"""
    Checks for the presence of an extra (``*args`` or ``**kwargs``) in the
    argument list. Raises an exception if the extra is present but unexpected,
    or if it is expected but missing.
"""
if present:
if not expected:
raise exc_unexpected(*exc_args)
elif expected and expected is not Argument.ignore:
raise exc_missing(*exc_args)
def verify_callable_argspec(
callable_,
expected_args=Argument.ignore,
expect_starargs=Argument.ignore,
expect_kwargs=Argument.ignore,
):
"""
Checks the callable_ to make sure that it satisfies the given
expectations.
expected_args should be an iterable of Arguments in the order you expect to
receive them.
expect_starargs means that the function should or should not take a *args
param. expect_kwargs says the callable should or should not take **kwargs
param.
If expected_args, expect_starargs, or expect_kwargs is Argument.ignore,
then the checks related to that argument will not occur.
Example usage:
callable_check(
f,
[Argument('a'), Argument('b', 1)],
expect_starargs=True,
expect_kwargs=Argument.ignore
)
"""
if not callable(callable_):
raise NotCallable(callable_)
expected_arg_list = list(
expected_args if expected_args is not Argument.ignore else []
)
args, starargs, kwargs = Argument.parse_argspec(callable_)
exc_args = callable_, args, starargs, kwargs
# Check the *args.
_expect_extra(
expect_starargs,
starargs,
UnexpectedStarargs,
NoStarargs,
exc_args,
)
# Check the **kwargs.
_expect_extra(
expect_kwargs,
kwargs,
UnexpectedKwargs,
NoKwargs,
exc_args,
)
if expected_args is Argument.ignore:
# Ignore the argument list checks.
return
if len(args) < len(expected_arg_list):
# One or more argument that we expected was not present.
raise NotEnoughArguments(
callable_,
args,
starargs,
kwargs,
[arg for arg in expected_arg_list if arg not in args],
)
elif len(args) > len(expected_arg_list):
raise TooManyArguments(callable_, args, starargs, kwargs)
# Empty argument that will not match with any actual arguments.
missing_arg = Argument(object(), object())
for expected, provided in zip_longest(
expected_arg_list, args, fillvalue=missing_arg
):
if not expected.matches(provided):
raise MismatchedArguments(callable_, args, starargs, kwargs)
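# Illustrative usage sketch: check that a user-supplied callback has the
# expected shape, e.g. a zipline-style ``initialize(context)`` function.
#   >>> def initialize(context):
#   ...     pass
#   >>> verify_callable_argspec(initialize, expected_args=[Argument('context')])
#   >>> verify_callable_argspec(lambda: None, expected_args=[Argument('context')])
#   ... # raises NotEnoughArguments, because the lambda takes no parameters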
class BadCallable(TypeError, AssertionError, ZiplineError):
"""
The given callable is not structured in the expected way.
"""
_lambda_name = (lambda: None).__name__
def __init__(self, callable_, args, starargs, kwargs):
self.callable_ = callable_
self.args = args
self.starargs = starargs
self.kwargsname = kwargs
self.kwargs = {}
def format_callable(self):
if self.callable_.__name__ == self._lambda_name:
fmt = "%s %s"
name = "lambda"
else:
fmt = "%s(%s)"
name = self.callable_.__name__
return fmt % (
name,
", ".join(
chain(
(str(arg) for arg in self.args),
("*" + sa for sa in (self.starargs,) if sa is not None),
("**" + ka for ka in (self.kwargsname,) if ka is not None),
)
),
)
@property
def msg(self):
return str(self)
class NoStarargs(BadCallable):
def __str__(self):
return "%s does not allow for *args" % self.format_callable()
class UnexpectedStarargs(BadCallable):
def __str__(self):
return "%s should not allow for *args" % self.format_callable()
class NoKwargs(BadCallable):
def __str__(self):
return "%s does not allow for **kwargs" % self.format_callable()
class UnexpectedKwargs(BadCallable):
def __str__(self):
return "%s should not allow for **kwargs" % self.format_callable()
class NotCallable(BadCallable):
"""
The provided 'callable' is not actually a callable.
"""
def __init__(self, callable_):
self.callable_ = callable_
def __str__(self):
return "%s is not callable" % self.format_callable()
def format_callable(self):
try:
return self.callable_.__name__
except AttributeError:
return str(self.callable_)
class NotEnoughArguments(BadCallable):
"""
The callback does not accept enough arguments.
"""
def __init__(self, callable_, args, starargs, kwargs, missing_args):
super(NotEnoughArguments, self).__init__(callable_, args, starargs, kwargs)
self.missing_args = missing_args
def __str__(self):
missing_args = list(map(str, self.missing_args))
return "%s is missing argument%s: %s" % (
self.format_callable(),
"s" if len(missing_args) > 1 else "",
", ".join(missing_args),
)
class TooManyArguments(BadCallable):
"""
The callback cannot be called by passing the expected number of arguments.
"""
def __str__(self):
return "%s accepts too many arguments" % self.format_callable()
class MismatchedArguments(BadCallable):
"""
The argument lists are of the same lengths, but not in the correct order.
"""
def __str__(self):
return "%s accepts mismatched parameters" % self.format_callable() | zipline-tej | /zipline_tej-0.0.48-cp38-cp38-win_amd64.whl/zipline/utils/argcheck.py | argcheck.py |
from pytz import UTC
import pandas as pd
PANDAS_VERSION = pd.__version__
# NOTE:
# trading-calendars is no longer maintained and does not support pandas > 1.2.5.
# exchange-calendars is a fork that retained the same functionalities,
# but dropped support for zipline's 1-minute delay in market open and changed some default settings in calendars.
#
# We resort here to monkey patching the `_fabricate` function of the ExchangeCalendarDispatcher
# and importing `ExchangeCalendar as TradingCalendar` to get as close as possible to the
# behavior expected by zipline, while also maintaining the possibility to revert back
# to pandas==1.2.5 and trading-calendars in case something breaks heavily.
#
# In order to avoid problems, especially when using the exchange-calendars,
# all imports should be done via `calendar_utils`, e.g:
# `from zipline.utils.calendar_utils import get_calendar, register_calendar, ...`
#
# Some calendars, such as the Korean exchange, have been extensively updated and might no longer
# work as expected.
try:
from exchange_calendars import ExchangeCalendar as TradingCalendar
from exchange_calendars.calendar_utils import (
ExchangeCalendarDispatcher,
_default_calendar_factories,
_default_calendar_aliases,
)
from exchange_calendars.errors import InvalidCalendarName
from exchange_calendars.utils.memoize import lazyval
from exchange_calendars.utils.pandas_utils import days_at_time # noqa: reexport
def _fabricate(self, name: str, **kwargs):
"""Fabricate calendar with `name` and `**kwargs`."""
try:
factory = self._calendar_factories[name]
except KeyError as e:
raise InvalidCalendarName(calendar_name=name) from e
if name in ["us_futures", "CMES", "XNYS"]:
            # exchange_calendars has a different default start date
# that we need to overwrite in order to pass the legacy tests
setattr(factory, "default_start", pd.Timestamp("1990-01-01", tz=UTC))
# kwargs["start"] = pd.Timestamp("1990-01-01", tz="UTC")
if name not in ["us_futures", "24/7", "24/5", "CMES"]:
# Zipline had default open time of t+1min
factory.open_times = [
(d, t.replace(minute=t.minute + 1)) for d, t in factory.open_times
]
calendar = factory(**kwargs)
self._factory_output_cache[name] = (calendar, kwargs)
return calendar
# Yay! Monkey patching
ExchangeCalendarDispatcher._fabricate = _fabricate
global_calendar_dispatcher = ExchangeCalendarDispatcher(
calendars={},
calendar_factories=_default_calendar_factories,
aliases=_default_calendar_aliases,
)
get_calendar = global_calendar_dispatcher.get_calendar
get_calendar_names = global_calendar_dispatcher.get_calendar_names
clear_calendars = global_calendar_dispatcher.clear_calendars
deregister_calendar = global_calendar_dispatcher.deregister_calendar
register_calendar = global_calendar_dispatcher.register_calendar
register_calendar_type = global_calendar_dispatcher.register_calendar_type
register_calendar_alias = global_calendar_dispatcher.register_calendar_alias
resolve_alias = global_calendar_dispatcher.resolve_alias
aliases_to_names = global_calendar_dispatcher.aliases_to_names
names_to_aliases = global_calendar_dispatcher.names_to_aliases
except ImportError:
if PANDAS_VERSION > "1.2.5":
raise ImportError("For pandas >= 1.3 YOU MUST INSTALL exchange-calendars")
else:
from trading_calendars import (
register_calendar,
TradingCalendar,
get_calendar,
register_calendar_alias,
)
from trading_calendars.calendar_utils import global_calendar_dispatcher
from trading_calendars.utils.memoize import lazyval
from trading_calendars.utils.pandas_utils import days_at_time # noqa: reexport | zipline-tej | /zipline_tej-0.0.48-cp38-cp38-win_amd64.whl/zipline/utils/calendar_utils.py | calendar_utils.py |
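# Illustrative usage sketch: regardless of which backend was importable above,
# the re-exported names behave the same way for callers.
#   >>> nyse = get_calendar('XNYS')
#   >>> nyse.first_session, nyse.last_session                      # calendar bounds
#   >>> nyse.sessions_in_range(nyse.first_session, nyse.last_session)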
@object.__new__
class nop_context(object):
"""A nop context manager."""
def __enter__(self):
pass
def __exit__(self, *excinfo):
pass
def _nop(*args, **kwargs):
pass
class CallbackManager(object):
"""Create a context manager from a pre-execution callback and a
post-execution callback.
Parameters
----------
pre : (...) -> any, optional
A pre-execution callback. This will be passed ``*args`` and
``**kwargs``.
post : (...) -> any, optional
A post-execution callback. This will be passed ``*args`` and
``**kwargs``.
Notes
-----
The enter value of this context manager will be the result of calling
``pre(*args, **kwargs)``
Examples
--------
>>> def pre(where):
... print('entering %s block' % where)
>>> def post(where):
... print('exiting %s block' % where)
>>> manager = CallbackManager(pre, post)
>>> with manager('example'):
... print('inside example block')
entering example block
inside example block
exiting example block
These are reusable with different args:
>>> with manager('another'):
... print('inside another block')
entering another block
inside another block
exiting another block
"""
def __init__(self, pre=None, post=None):
self.pre = pre if pre is not None else _nop
self.post = post if post is not None else _nop
def __call__(self, *args, **kwargs):
return _ManagedCallbackContext(self.pre, self.post, args, kwargs)
# special case, if no extra args are passed make this a context manager
# which forwards no args to pre and post
def __enter__(self):
return self.pre()
def __exit__(self, *excinfo):
self.post()
class _ManagedCallbackContext(object):
def __init__(self, pre, post, args, kwargs):
self._pre = pre
self._post = post
self._args = args
self._kwargs = kwargs
def __enter__(self):
return self._pre(*self._args, **self._kwargs)
def __exit__(self, *excinfo):
self._post(*self._args, **self._kwargs) | zipline-tej | /zipline_tej-0.0.48-cp38-cp38-win_amd64.whl/zipline/utils/context_tricks.py | context_tricks.py |
import operator as op
from toolz import peek
from zipline.utils.functional import foldr
def from_tuple(tup):
"""Convert a tuple into a range with error handling.
Parameters
----------
tup : tuple (len 2 or 3)
The tuple to turn into a range.
Returns
-------
range : range
The range from the tuple.
Raises
------
ValueError
Raised when the tuple length is not 2 or 3.
"""
if len(tup) not in (2, 3):
raise ValueError(
"tuple must contain 2 or 3 elements, not: %d (%r"
% (
len(tup),
tup,
),
)
return range(*tup)
def maybe_from_tuple(tup_or_range):
"""Convert a tuple into a range but pass ranges through silently.
This is useful to ensure that input is a range so that attributes may
be accessed with `.start`, `.stop` or so that containment checks are
constant time.
Parameters
----------
tup_or_range : tuple or range
A tuple to pass to from_tuple or a range to return.
Returns
-------
range : range
The input to convert to a range.
Raises
------
ValueError
Raised when the input is not a tuple or a range. ValueError is also
raised if the input is a tuple whose length is not 2 or 3.
"""
if isinstance(tup_or_range, tuple):
return from_tuple(tup_or_range)
elif isinstance(tup_or_range, range):
return tup_or_range
raise ValueError(
"maybe_from_tuple expects a tuple or range, got %r: %r"
% (
type(tup_or_range).__name__,
tup_or_range,
),
)
def _check_steps(a, b):
"""Check that the steps of ``a`` and ``b`` are both 1.
Parameters
----------
a : range
The first range to check.
b : range
The second range to check.
Raises
------
ValueError
Raised when either step is not 1.
"""
if a.step != 1:
raise ValueError("a.step must be equal to 1, got: %s" % a.step)
if b.step != 1:
raise ValueError("b.step must be equal to 1, got: %s" % b.step)
def overlap(a, b):
"""Check if two ranges overlap.
Parameters
----------
a : range
The first range.
b : range
The second range.
Returns
-------
overlaps : bool
Do these ranges overlap.
Notes
-----
This function does not support ranges with step != 1.
"""
_check_steps(a, b)
return a.stop >= b.start and b.stop >= a.start
def merge(a, b):
"""Merge two ranges with step == 1.
Parameters
----------
a : range
The first range.
b : range
The second range.
"""
_check_steps(a, b)
return range(min(a.start, b.start), max(a.stop, b.stop))
def _combine(n, rs):
"""helper for ``_group_ranges``"""
try:
r, rs = peek(rs)
except StopIteration:
yield n
return
if overlap(n, r):
yield merge(n, r)
next(rs)
for r in rs:
yield r
else:
yield n
for r in rs:
yield r
def group_ranges(ranges):
"""Group any overlapping ranges into a single range.
Parameters
----------
ranges : iterable[ranges]
A sorted sequence of ranges to group.
Returns
-------
grouped : iterable[ranges]
A sorted sequence of ranges with overlapping ranges merged together.
"""
return foldr(_combine, ranges, ())
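# Illustrative usage sketch:
#   >>> list(group_ranges([range(0, 5), range(4, 10), range(12, 15)]))
#   [range(0, 10), range(12, 15)]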
def sorted_diff(rs, ss):
try:
r, rs = peek(rs)
except StopIteration:
return
try:
s, ss = peek(ss)
except StopIteration:
for r in rs:
yield r
return
rtup = (r.start, r.stop)
stup = (s.start, s.stop)
if rtup == stup:
next(rs)
next(ss)
elif rtup < stup:
yield next(rs)
else:
next(ss)
for t in sorted_diff(rs, ss):
yield t
def intersecting_ranges(ranges):
"""Return any ranges that intersect.
Parameters
----------
ranges : iterable[ranges]
A sequence of ranges to check for intersections.
Returns
-------
intersections : iterable[ranges]
A sequence of all of the ranges that intersected in ``ranges``.
Examples
--------
>>> ranges = [range(0, 1), range(2, 5), range(4, 7)]
>>> list(intersecting_ranges(ranges))
[range(2, 5), range(4, 7)]
>>> ranges = [range(0, 1), range(2, 3)]
>>> list(intersecting_ranges(ranges))
[]
>>> ranges = [range(0, 1), range(1, 2)]
>>> list(intersecting_ranges(ranges))
[range(0, 1), range(1, 2)]
"""
ranges = sorted(ranges, key=op.attrgetter("start"))
return sorted_diff(ranges, group_ranges(ranges)) | zipline-tej | /zipline_tej-0.0.48-cp38-cp38-win_amd64.whl/zipline/utils/range.py | range.py |
import click
import os
import sys
import warnings
import re #20230206 (by MRC)
try:
from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import TerminalFormatter
PYGMENTS = True
except ImportError:
PYGMENTS = False
import logbook
import pandas as pd
from toolz import concatv
from zipline.utils.calendar_utils import get_calendar
from zipline.data import bundles
from zipline.data.benchmarks import get_benchmark_returns_from_file
from zipline.data.treasury import get_treasury_returns_from_file #20230210 (by MRC)
from zipline.data.data_portal import DataPortal
from zipline.finance import metrics
from zipline.finance.trading import SimulationParameters
from zipline.pipeline.data import USEquityPricing
from zipline.pipeline.loaders import USEquityPricingLoader
import zipline.utils.paths as pth
from zipline.utils.input_validation import expect_types #20230809 (by MRC)
from zipline.extensions import load
from zipline.errors import SymbolNotFound
from zipline.errors import (IllegalDTypeException,IllegalColumnException) #20230206 (by MRC)
from zipline.algorithm import TradingAlgorithm, NoBenchmark
from zipline.algorithm import NoTreasury #20230210 (by MRC)
from zipline.finance.blotter import Blotter
import zipline #20230325 (by MRC)
log = logbook.Logger(__name__)
class _RunAlgoError(click.ClickException, ValueError):
"""Signal an error that should have a different message if invoked from
the cli.
Parameters
----------
pyfunc_msg : str
The message that will be shown when called as a python function.
cmdline_msg : str, optional
The message that will be shown on the command line. If not provided,
        this will be the same as ``pyfunc_msg``.
"""
exit_code = 1
def __init__(self, pyfunc_msg, cmdline_msg=None):
if cmdline_msg is None:
cmdline_msg = pyfunc_msg
super(_RunAlgoError, self).__init__(cmdline_msg)
self.pyfunc_msg = pyfunc_msg
def __str__(self):
return self.pyfunc_msg
# TODO: simplify
# flake8: noqa: C901
def _run(
handle_data,
initialize,
before_trading_start,
analyze,
algofile,
algotext,
defines,
data_frequency,
capital_base,
bundle,
bundle_timestamp,
start,
end,
output,
trading_calendar,
print_algo,
metrics_set,
local_namespace,
environ,
blotter,
custom_loader,
benchmark_spec,
treasury_spec #20230209 (by MRC) for treasury return column in perf
):
"""Run a backtest for the given algorithm.
This is shared between the cli and :func:`zipline.run_algo`.
"""
bundle_data = bundles.load(
bundle,
environ,
bundle_timestamp,
)
if trading_calendar is None:
#trading_calendar = get_calendar("XNYS") #20230208 (by MRC) modify default value
trading_calendar = get_calendar("TEJ") #20230516 (by MRC) modify default value
# date parameter validation
if trading_calendar.session_distance(start, end) < 1:
raise _RunAlgoError(
"There are no trading days between %s and %s"
% (
start.date(),
end.date(),
),
)
benchmark_sid, benchmark_returns = benchmark_spec.resolve(
asset_finder=bundle_data.asset_finder,
start_date=start,
end_date=end,
)
#--------------------------------------------------------------------
#20230209 (by MRC) for treasury return column in perf #start#
#Note:other source of treasury returns(not available now)
treasury_sid, treasury_returns = treasury_spec.resolve(
asset_finder=bundle_data.asset_finder,
start_date=start,
end_date=end,
)
#20230209 (by MRC) for treasury return column in perf #end#
#--------------------------------------------------------------------
if algotext is not None:
if local_namespace:
ip = get_ipython() # noqa
namespace = ip.user_ns
else:
namespace = {}
for assign in defines:
try:
name, value = assign.split("=", 2)
except ValueError:
raise ValueError(
"invalid define %r, should be of the form name=value" % assign,
)
try:
# evaluate in the same namespace so names may refer to
                # each other
namespace[name] = eval(value, namespace)
except Exception as e:
raise ValueError(
"failed to execute definition for name %r: %s" % (name, e),
)
elif defines:
raise _RunAlgoError(
"cannot pass define without `algotext`",
"cannot pass '-D' / '--define' without '-t' / '--algotext'",
)
else:
namespace = {}
if algofile is not None:
algotext = algofile.read()
if print_algo:
if PYGMENTS:
highlight(
algotext,
PythonLexer(),
TerminalFormatter(),
outfile=sys.stdout,
)
else:
click.echo(algotext)
first_trading_day = bundle_data.equity_minute_bar_reader.first_trading_day
data = DataPortal(
bundle_data.asset_finder,
trading_calendar=trading_calendar,
first_trading_day=first_trading_day,
equity_minute_reader=bundle_data.equity_minute_bar_reader,
equity_daily_reader=bundle_data.equity_daily_bar_reader,
adjustment_reader=bundle_data.adjustment_reader,
future_minute_reader=bundle_data.equity_minute_bar_reader,
future_daily_reader=bundle_data.equity_daily_bar_reader,
)
pipeline_loader = USEquityPricingLoader.without_fx(
bundle_data.equity_daily_bar_reader,
bundle_data.adjustment_reader,
)
def choose_loader(column):
if column in USEquityPricing.columns:
return pipeline_loader
try:
return custom_loader.get(column)
except KeyError:
raise ValueError("No PipelineLoader registered for column %s." % column)
if isinstance(metrics_set, str):
try:
metrics_set = metrics.load(metrics_set)
except ValueError as e:
raise _RunAlgoError(str(e))
if isinstance(blotter, str):
try:
blotter = load(Blotter, blotter)
except ValueError as e:
raise _RunAlgoError(str(e))
try:
perf = TradingAlgorithm(
namespace=namespace,
data_portal=data,
get_pipeline_loader=choose_loader,
trading_calendar=trading_calendar,
sim_params=SimulationParameters(
start_session=start,
end_session=end,
trading_calendar=trading_calendar,
capital_base=capital_base,
data_frequency=data_frequency,
),
metrics_set=metrics_set,
blotter=blotter,
benchmark_returns=benchmark_returns,
benchmark_sid=benchmark_sid,
treasury_returns=treasury_returns, #20230209 (by MRC) for treasury return column in perf
treasury_sid=treasury_sid, #20230209 (by MRC) for treasury return column in perf
**{
"initialize": initialize,
"handle_data": handle_data,
"before_trading_start": before_trading_start,
"analyze": analyze,
}
if algotext is None
else {
"algo_filename": getattr(algofile, "name", "<algorithm>"),
"script": algotext,
},
).run()
except NoBenchmark:
raise _RunAlgoError(
(
"No ``benchmark_spec`` was provided, and"
" ``zipline.api.set_benchmark`` was not called in"
" ``initialize``."
),
(
"Neither '--benchmark-symbol' nor '--benchmark-sid' was"
" provided, and ``zipline.api.set_benchmark`` was not called"
" in ``initialize``. Did you mean to pass '--no-benchmark'?"
),
)
#20230210-------------------------------------------------------------------------
#Note:generally useless
except NoTreasury:
raise _RunAlgoError(
(
"No ``treasury_spec`` was provided."
),
(
"Neither '--treasury-symbol' nor '--treasury-sid' was"
" provided. Did you mean to pass '--no-benchmark'?"
),
)
#20230210-------------------------------------------------------------------------
if output == "-":
click.echo(str(perf))
elif output != os.devnull: # make the zipline magic not write any data
perf.to_pickle(output)
return perf
# All of the loaded extensions. We don't want to load an extension twice.
_loaded_extensions = set()
def load_extensions(default, extensions, strict, environ, reload=False):
"""Load all of the given extensions. This should be called by run_algo
or the cli.
Parameters
----------
default : bool
        Load the default extension (~/.zipline/extension.py)?
    extensions : iterable[str]
The paths to the extensions to load. If the path ends in ``.py`` it is
treated as a script and executed. If it does not end in ``.py`` it is
treated as a module to be imported.
strict : bool
        Should a failure to load an extension raise an error. If this is false,
        a warning will be issued instead.
environ : mapping
The environment to use to find the default extension path.
reload : bool, optional
Reload any extensions that have already been loaded.
"""
if default:
default_extension_path = pth.default_extension(environ=environ)
pth.ensure_file(default_extension_path)
# put the default extension first so other extensions can depend on
# the order they are loaded
extensions = concatv([default_extension_path], extensions)
for ext in extensions:
if ext in _loaded_extensions and not reload:
continue
try:
            # load all of the zipline extensions
if ext.endswith(".py"):
# with open(ext) as f: # 20221222 (by LDW)
with open(ext , encoding="utf-8") as f: # 20221222 (by LDW)
ns = {}
exec(compile(f.read(), ext, "exec"), ns, ns)
else:
__import__(ext)
except Exception as e:
if strict:
# if `strict` we should raise the actual exception and fail
raise
# without `strict` we should just log the failure
warnings.warn("Failed to load extension: %r\n%s" % (ext, e), stacklevel=2)
else:
_loaded_extensions.add(ext)
def run_algorithm(
start,
end,
initialize,
capital_base,
handle_data=None,
before_trading_start=None,
analyze=None,
data_frequency="daily",
bundle="quantopian-quandl",
bundle_timestamp=None,
trading_calendar=None,
metrics_set="default",
benchmark_returns=None,
treasury_returns=None, #20230209 (by MRC) for treasury return column in perf
default_extension=True,
extensions=(),
strict_extensions=True,
environ=os.environ,
custom_loader=None,
blotter="default",
):
"""
Run a trading algorithm.
Parameters
----------
start : datetime
The start date of the backtest.
end : datetime
        The end date of the backtest.
initialize : callable[context -> None]
The initialize function to use for the algorithm. This is called once
        at the very beginning of the backtest and should be used to set up
any state needed by the algorithm.
capital_base : float
The starting capital for the backtest.
handle_data : callable[(context, BarData) -> None], optional
The handle_data function to use for the algorithm. This is called
every minute when ``data_frequency == 'minute'`` or every day
when ``data_frequency == 'daily'``.
before_trading_start : callable[(context, BarData) -> None], optional
The before_trading_start function for the algorithm. This is called
once before each trading day (after initialize on the first day).
analyze : callable[(context, pd.DataFrame) -> None], optional
The analyze function to use for the algorithm. This function is called
once at the end of the backtest and is passed the context and the
performance data.
data_frequency : {'daily', 'minute'}, optional
The data frequency to run the algorithm at.
bundle : str, optional
The name of the data bundle to use to load the data to run the backtest
with. This defaults to 'quantopian-quandl'.
bundle_timestamp : datetime, optional
The datetime to lookup the bundle data for. This defaults to the
current time.
trading_calendar : TradingCalendar, optional
The trading calendar to use for your backtest.
metrics_set : iterable[Metric] or str, optional
The set of metrics to compute in the simulation. If a string is passed,
resolve the set with :func:`zipline.finance.metrics.load`.
benchmark_returns : pd.Series, optional
Series of returns to use as the benchmark.
treasury_returns : pd.Series, optional
Series of returns to use as the treasury.
default_extension : bool, optional
Should the default zipline extension be loaded. This is found at
``$ZIPLINE_ROOT/extension.py``
extensions : iterable[str], optional
The names of any other extensions to load. Each element may either be
a dotted module path like ``a.b.c`` or a path to a python file ending
in ``.py`` like ``a/b/c.py``.
strict_extensions : bool, optional
Should the run fail if any extensions fail to load. If this is false,
a warning will be raised instead.
environ : mapping[str -> str], optional
The os environment to use. Many extensions use this to get parameters.
This defaults to ``os.environ``.
blotter : str or zipline.finance.blotter.Blotter, optional
Blotter to use with this algorithm. If passed as a string, we look for
a blotter construction function registered with
``zipline.extensions.register`` and call it with no parameters.
Default is a :class:`zipline.finance.blotter.SimulationBlotter` that
never cancels orders.
Returns
-------
perf : pd.DataFrame
The daily performance of the algorithm.
See Also
--------
zipline.data.bundles.bundles : The available data bundles.
"""
load_extensions(default_extension, extensions, strict_extensions, environ)
benchmark_spec = BenchmarkSpec.from_returns(benchmark_returns)
treasury_spec = TreasurySpec.from_returns(treasury_returns) #20230210(by MRC) for treasury return column in perf
return _run(
handle_data=handle_data,
initialize=initialize,
before_trading_start=before_trading_start,
analyze=analyze,
algofile=None,
algotext=None,
defines=(),
data_frequency=data_frequency,
capital_base=capital_base,
bundle=bundle,
bundle_timestamp=bundle_timestamp,
start=start,
end=end,
output=os.devnull,
trading_calendar=trading_calendar,
print_algo=False,
metrics_set=metrics_set,
local_namespace=False,
environ=environ,
blotter=blotter,
custom_loader=custom_loader,
benchmark_spec=benchmark_spec,
treasury_spec=treasury_spec #20230209 (by MRC) for treasury return column in perf
)
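# Illustrative usage sketch for ``run_algorithm``: the bundle name 'tquant', the
# symbol '2330', and the date range are hypothetical and assume that bundle has
# already been ingested.
#   >>> import pandas as pd
#   >>> from zipline.api import order_target, symbol
#   >>> def initialize(context):
#   ...     context.asset = symbol('2330')
#   >>> def handle_data(context, data):
#   ...     order_target(context.asset, 100)
#   >>> perf = run_algorithm(
#   ...     start=pd.Timestamp('2020-01-02', tz='utc'),
#   ...     end=pd.Timestamp('2020-12-31', tz='utc'),
#   ...     initialize=initialize,
#   ...     handle_data=handle_data,
#   ...     capital_base=1e6,
#   ...     bundle='tquant',
#   ... )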
def _transform_perf_table_for_record(perf, column):
'''
    20230206 (by MRC)
    Transform price, factor, and group data returned by zipline.TradingAlgorithm.run().
    Parameters
    ----------
    perf : pd.DataFrame
        DataFrame returned by zipline.TradingAlgorithm.run().
    column : str
        The name of the column to transform.
    References
    ----------
    1. ML4T 04_alpha_factor_research/06_performance_eval_alphalens.ipynb
    2. pyfolio.utils.extract_rets_pos_txn_from_zipline(backtest)
    TODO
    ----
    This implementation could be optimized.
'''
df1 = []
df_YN = 'N'
    # The following processing assumes that every item in perf[column] has the same type.
for d, df in perf[column].dropna().items():
if isinstance(df, pd.Series):
df1.append(df.to_frame(d))
elif isinstance(df, list):
df1.append(pd.DataFrame(df, columns=[d]))
elif isinstance(df, int):
df1.append(pd.DataFrame([df], columns=[d]))
elif isinstance(df, float):
df1.append(pd.DataFrame([df], columns=[d]))
elif isinstance(df, set):
df1.append(pd.DataFrame(list(df), columns=[d]))
        elif isinstance(df, pd.DataFrame):  # for results produced by data.history()
df = df.reset_index()
df['date'] = d
df = df.set_index('date')
df1.append(df)
df_YN = 'Y'
else:
raise IllegalDTypeException(parameter =f'"results[{column}].items()"',
dtype ='"pd.Series or list or int or float or set"')
# Concatenate the individual DataFrames
if df_YN=='N':
data = pd.concat(df1, axis=1).T
else:
data = pd.DataFrame()
for i in df1:
data = i.append(data)
# transform sid to symbol
for i in data.columns:
if type(i)==zipline.assets._assets.Equity:
data.rename({i:i.symbol}, axis=1,inplace=True)
else:
pass
# Convert times to midnight
# data.index=data.index.tz_convert(tz='UTC')
data.index = data.index.normalize()
return data, df_YN
@expect_types(perf=pd.DataFrame,
columns=list)
def get_record_vars(perf,
columns):
'''
20230325 (by MRC)
Export columns recorded by zipline.api.record()
and returned by zipline.TradingAlgorithm.run().
Parameters
----------
perf : pd.DataFrame
DataFrame returned by zipline.TradingAlgorithm.run()
    columns : list
        A list of the column names.
    TODO
    ----
    This implementation could be optimized.
'''
dict_data = {}
for i in columns:
if i in perf.columns:
data, df_YN = _transform_perf_table_for_record(perf,i)
if df_YN=='N':
dict_data[i] = (data.stack()
.reset_index()
.rename(columns={'level_0':'date',
'level_1':'symbol',
0:i})
)
            else:  # for results produced by data.history()
column_names = data.reset_index().columns
dict_data[i] = (data.reset_index()
.set_index([column_names[0],column_names[1]])
.stack()
.reset_index()
.rename(columns={'level_2':'symbol',
0:i})
.sort_values(by=[column_names[0],column_names[1]])
)
#dict_data[i].index.names = ['date',i]
if type(dict_data[i].values.all())==zipline.assets._assets.Equity:
dict_data[i] = dict_data[i].apply(lambda x:x.symbol)
else:
pass
else:
            log.info(i + ' does not exist')
return dict_data
def transform_perf_table(perf, column):
'''
20230206 (by MRC)
transform positions/transactions/orders data returned by zipline.TradingAlgorithm.run().
Parameters
----------
perf : pd.DataFrame
The daily performance of the algorithm.
column : str
The name of the positions/transactions/orders column.
    References
    ----------
    1. ML4T 04_alpha_factor_research/06_performance_eval_alphalens.ipynb
    2. pyfolio.utils.extract_rets_pos_txn_from_zipline(backtest)
'''
if column not in perf.columns:
raise IllegalColumnException(column = column)
raw = []
for dt, row in perf[column].iteritems():
df = pd.DataFrame(row)
df.index = [dt] * len(df)
raw.append(df)
out = pd.concat(raw)
if len(out)==0:
print('The backtest does not have any', str(column))
else:
out.rename(columns={'sid':'asset'},inplace=True)
out.insert(0, 'sid', out['asset'].apply(lambda x: x.sid))
out.insert(1, 'symbol', out['asset'].apply(lambda x: x.symbol))
return out
@expect_types(perf=pd.DataFrame)
def get_transaction_detail(perf):
'''
20230206 (by MRC)
get positions/transactions/orders data returned by zipline.TradingAlgorithm.run().
Parameters
----------
perf : pd.DataFrame
The daily performance of the algorithm.
Note
----------
similar to pyfolio.utils.extract_rets_pos_txn_from_zipline(backtest)
'''
positions = transform_perf_table(perf,'positions')
transactions = transform_perf_table(perf,'transactions')
orders = transform_perf_table(perf,'orders')
return positions,transactions,orders
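# Illustrative usage sketch: given a ``perf`` DataFrame returned by
# ``run_algorithm`` (hypothetical here), split out the detail tables and the
# variables captured via ``zipline.api.record``; the recorded column names are
# hypothetical.
#   >>> positions, transactions, orders = get_transaction_detail(perf)
#   >>> transactions[['symbol', 'amount', 'price']].head()
#   >>> recorded = get_record_vars(perf, ['short_mavg', 'long_mavg'])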
class BenchmarkSpec(object):
"""
    Helper for the different ways we can get benchmark data for the Zipline CLI and
    zipline.utils.run_algo.run_algorithm.
Parameters
----------
benchmark_returns : pd.Series, optional
Series of returns to use as the benchmark.
benchmark_file : str or file
File containing a csv with `date` and `return` columns, to be read as
the benchmark.
benchmark_sid : int, optional
Sid of the asset to use as a benchmark.
benchmark_symbol : str, optional
Symbol of the asset to use as a benchmark. Symbol will be looked up as
of the end date of the backtest.
no_benchmark : bool
Flag indicating that no benchmark is configured. Benchmark-dependent
metrics will be calculated using a dummy benchmark of all-zero returns.
"""
def __init__(
self,
benchmark_returns,
benchmark_file,
benchmark_sid,
benchmark_symbol,
no_benchmark,
):
self.benchmark_returns = benchmark_returns
self.benchmark_file = benchmark_file
self.benchmark_sid = benchmark_sid
self.benchmark_symbol = benchmark_symbol
self.no_benchmark = no_benchmark
@classmethod
def from_cli_params(
cls, benchmark_sid, benchmark_symbol, benchmark_file, no_benchmark
):
return cls(
benchmark_returns=None,
benchmark_sid=benchmark_sid,
benchmark_symbol=benchmark_symbol,
benchmark_file=benchmark_file,
no_benchmark=no_benchmark,
)
@classmethod
def from_returns(cls, benchmark_returns):
return cls(
benchmark_returns=benchmark_returns,
benchmark_file=None,
benchmark_sid=None,
benchmark_symbol=None,
no_benchmark=benchmark_returns is None,
)
    # input priority: benchmark_returns > benchmark_file > benchmark_sid > benchmark_symbol > no_benchmark
def resolve(self, asset_finder, start_date, end_date):
"""
Resolve inputs into values to be passed to TradingAlgorithm.
Returns a pair of ``(benchmark_sid, benchmark_returns)`` with at most
one non-None value. Both values may be None if no benchmark source has
been configured.
Parameters
----------
asset_finder : zipline.assets.AssetFinder
Asset finder for the algorithm to be run.
start_date : pd.Timestamp
Start date of the algorithm to be run.
end_date : pd.Timestamp
End date of the algorithm to be run.
Returns
-------
benchmark_sid : int
Sid to use as benchmark.
benchmark_returns : pd.Series
Series of returns to use as benchmark.
"""
if self.benchmark_returns is not None:
benchmark_sid = None
benchmark_returns = self.benchmark_returns
elif self.benchmark_file is not None:
benchmark_sid = None
benchmark_returns = get_benchmark_returns_from_file(
self.benchmark_file,
)
elif self.benchmark_sid is not None:
benchmark_sid = self.benchmark_sid
benchmark_returns = None
elif self.benchmark_symbol is not None:
try:
asset = asset_finder.lookup_symbol(
self.benchmark_symbol,
as_of_date=end_date,
)
benchmark_sid = asset.sid
benchmark_returns = None
except SymbolNotFound:
raise _RunAlgoError(
"Symbol %r as a benchmark not found in this bundle."
% self.benchmark_symbol
)
elif self.no_benchmark:
benchmark_sid = None
benchmark_returns = self._zero_benchmark_returns(
start_date=start_date,
end_date=end_date,
)
else:
log.warn(
"No benchmark configured. " "Assuming algorithm calls set_benchmark."
)
log.warn(
"Pass --benchmark-sid, --benchmark-symbol, or"
" --benchmark-file to set a source of benchmark returns."
)
log.warn(
"Pass --no-benchmark to use a dummy benchmark " "of zero returns.",
)
benchmark_sid = None
benchmark_returns = None
return benchmark_sid, benchmark_returns
@staticmethod
def _zero_benchmark_returns(start_date, end_date):
return pd.Series(
index=pd.date_range(start_date, end_date, tz="utc"),
data=0.0,
)
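# Illustrative usage sketch: how ``run_algorithm`` builds a spec when a returns
# series is passed directly; ``my_returns`` is a hypothetical pd.Series of daily
# returns, and ``start``/``end`` are hypothetical timestamps.
#   >>> spec = BenchmarkSpec.from_returns(my_returns)
#   >>> sid, returns = spec.resolve(asset_finder=None, start_date=start, end_date=end)
#   ... # with explicit returns, neither the asset_finder nor the dates are consulted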
#--------------------------------------------------------------------
#20230210 (by MRC) for treasury return column in perf #start#
class TreasurySpec(object):
"""
Helper for different ways we can get treasury data for the Zipline CLI and
zipline.utils.run_algo.run_algorithm.
Parameters
----------
treasury_returns : pd.Series, optional
Series of returns to use as the treasury.
treasury_file : str or file
File containing a csv with `date` and `return` columns, to be read as
the treasury.
    treasury_sid : int, optional
        Sid of the asset to use as the treasury. (Not currently supported.)
    treasury_symbol : str, optional
        Symbol of the asset to use as the treasury, looked up as of the end
        date of the backtest. (Not currently supported.)
    no_treasury : bool
        Flag indicating that no treasury is configured. Treasury-dependent
        metrics will be calculated using a dummy treasury of all-zero returns.
    See Also
    --------
    BenchmarkSpec
"""
def __init__(
self,
treasury_returns,
treasury_file,
treasury_sid,
treasury_symbol,
no_treasury,
):
self.treasury_returns = treasury_returns
self.treasury_file = treasury_file
self.treasury_sid = treasury_sid
self.treasury_symbol = treasury_symbol
self.no_treasury = no_treasury
@classmethod
def from_cli_params(
cls, treasury_sid, treasury_symbol, treasury_file, no_treasury
):
        # The CLI path never receives an explicit treasury_returns series.
return cls(
treasury_returns=None,
treasury_sid=treasury_sid,
treasury_symbol=treasury_symbol,
treasury_file=treasury_file,
no_treasury=no_treasury,
)
@classmethod
def from_returns(cls, treasury_returns):
        # Programmatic path: an explicit treasury_returns series may be supplied.
return cls(
treasury_returns=treasury_returns,
treasury_file=None,
treasury_sid=None,
treasury_symbol=None,
no_treasury=treasury_returns is None,
)
def resolve(self, asset_finder, start_date, end_date):
"""
Resolve inputs into values to be passed to TradingAlgorithm.
Returns a pair of ``(treasury_sid, treasury_returns)`` with at most
one non-None value. Both values may be None if no treasury source has
been configured.
Parameters
----------
asset_finder : zipline.assets.AssetFinder
Asset finder for the algorithm to be run.
start_date : pd.Timestamp
Start date of the algorithm to be run.
end_date : pd.Timestamp
End date of the algorithm to be run.
Returns
-------
treasury_sid : int
Sid to use as treasury.
treasury_returns : pd.Series
Series of returns to use as treasury.
"""
        # Input priority: treasury_returns > treasury_file > treasury_sid
        # > treasury_symbol > no_treasury.
if self.treasury_returns is not None:
treasury_sid = None
treasury_returns = self.treasury_returns
elif self.treasury_file is not None:
treasury_sid = None
treasury_returns = get_treasury_returns_from_file(
self.treasury_file,
)
elif self.treasury_sid is not None:
treasury_sid = self.treasury_sid
treasury_returns = None
elif self.treasury_symbol is not None:
try:
asset = asset_finder.lookup_symbol(
self.treasury_symbol,
as_of_date=end_date,
)
treasury_sid = asset.sid
treasury_returns = None
except SymbolNotFound:
                raise _RunAlgoError(
                    "Treasury symbol %r was not found in this bundle."
% self.treasury_symbol
)
elif self.no_treasury:
treasury_sid = None
treasury_returns = self._zero_treasury_returns(
start_date=start_date,
end_date=end_date,
)
else:
log.warn(
"No treasury configured. " "Assuming algorithm calls set_treasury."
)
log.warn(
"Pass --treasury-sid, --treasury-symbol, or"
" --treasury-file to set a source of treasury returns."
)
log.warn(
"Pass --no-treasury to use a dummy treasury " "of zero returns.",
)
treasury_sid = None
treasury_returns = None
return treasury_sid, treasury_returns
@staticmethod
def _zero_treasury_returns(start_date, end_date):
return pd.Series(
index=pd.date_range(start_date, end_date, tz="utc"),
data=0.0,
)
#20230210 (by MRC) for treasury return column in perf #end#
#-------------------------------------------------------------------- | zipline-tej | /zipline_tej-0.0.48-cp38-cp38-win_amd64.whl/zipline/utils/run_algo.py | run_algo.py |
from datetime import tzinfo
from functools import partial
from operator import attrgetter
from numpy import dtype
import pandas as pd
from pytz import timezone
from toolz import valmap, complement, compose
import toolz.curried.operator as op
from zipline.utils.compat import wraps
from zipline.utils.functional import getattrs
from zipline.utils.preprocess import call, preprocess
_qualified_name = attrgetter("__qualname__")
def verify_indices_all_unique(obj):
"""
Check that all axes of a pandas object are unique.
Parameters
----------
obj : pd.Series / pd.DataFrame / pd.Panel
The object to validate.
Returns
-------
obj : pd.Series / pd.DataFrame / pd.Panel
The validated object, unchanged.
Raises
------
ValueError
If any axis has duplicate entries.
"""
axis_names = [
("index",), # Series
("index", "columns"), # DataFrame
("items", "major_axis", "minor_axis"), # Panel
][
obj.ndim - 1
    ]  # ndim = 1 should go to entry 0, ndim = 2 to entry 1, and so on.
for axis_name, index in zip(axis_names, obj.axes):
if index.is_unique:
continue
raise ValueError(
"Duplicate entries in {type}.{axis}: {dupes}.".format(
type=type(obj).__name__,
axis=axis_name,
dupes=sorted(index[index.duplicated()]),
)
)
return obj
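# Illustrative usage sketch (hypothetical helper; nothing calls it): a unique
# index passes through unchanged, while duplicate entries raise the ValueError
# documented above.
def _example_verify_indices_all_unique():
    unique = pd.Series([1.0, 2.0], index=["a", "b"])
    assert verify_indices_all_unique(unique) is unique
    try:
        verify_indices_all_unique(pd.Series([1.0, 2.0], index=["a", "a"]))
    except ValueError:
        pass  # duplicate index entries are rejected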
def optionally(preprocessor):
"""Modify a preprocessor to explicitly allow `None`.
Parameters
----------
preprocessor : callable[callable, str, any -> any]
A preprocessor to delegate to when `arg is not None`.
Returns
-------
optional_preprocessor : callable[callable, str, any -> any]
A preprocessor that delegates to `preprocessor` when `arg is not None`.
Examples
--------
>>> def preprocessor(func, argname, arg):
... if not isinstance(arg, int):
... raise TypeError('arg must be int')
... return arg
...
>>> @preprocess(a=optionally(preprocessor))
... def f(a):
... return a
...
>>> f(1) # call with int
1
>>> f('a') # call with not int
Traceback (most recent call last):
...
TypeError: arg must be int
>>> f(None) is None # call with explicit None
True
"""
@wraps(preprocessor)
def wrapper(func, argname, arg):
return arg if arg is None else preprocessor(func, argname, arg)
return wrapper
def ensure_upper_case(func, argname, arg):
if isinstance(arg, str):
return arg.upper()
else:
raise TypeError(
"{0}() expected argument '{1}' to"
" be a string, but got {2} instead.".format(
func.__name__,
argname,
arg,
),
)
def ensure_dtype(func, argname, arg):
"""
Argument preprocessor that converts the input into a numpy dtype.
Examples
--------
>>> import numpy as np
>>> from zipline.utils.preprocess import preprocess
>>> @preprocess(dtype=ensure_dtype)
... def foo(dtype):
... return dtype
...
>>> foo(float)
dtype('float64')
"""
try:
return dtype(arg)
except TypeError:
raise TypeError(
"{func}() couldn't convert argument "
"{argname}={arg!r} to a numpy dtype.".format(
func=_qualified_name(func),
argname=argname,
arg=arg,
),
)
def ensure_timezone(func, argname, arg):
"""Argument preprocessor that converts the input into a tzinfo object.
Examples
--------
>>> from zipline.utils.preprocess import preprocess
>>> @preprocess(tz=ensure_timezone)
... def foo(tz):
... return tz
>>> foo('utc')
<UTC>
"""
if isinstance(arg, tzinfo):
return arg
if isinstance(arg, str):
return timezone(arg)
raise TypeError(
"{func}() couldn't convert argument "
"{argname}={arg!r} to a timezone.".format(
func=_qualified_name(func),
argname=argname,
arg=arg,
),
)
def ensure_timestamp(func, argname, arg):
"""Argument preprocessor that converts the input into a pandas Timestamp
object.
Examples
--------
>>> from zipline.utils.preprocess import preprocess
>>> @preprocess(ts=ensure_timestamp)
... def foo(ts):
... return ts
>>> foo('2014-01-01')
Timestamp('2014-01-01 00:00:00')
"""
try:
return pd.Timestamp(arg)
except ValueError as e:
raise TypeError(
"{func}() couldn't convert argument "
"{argname}={arg!r} to a pandas Timestamp.\n"
"Original error was: {t}: {e}".format(
func=_qualified_name(func),
argname=argname,
arg=arg,
t=_qualified_name(type(e)),
e=e,
),
)
def expect_dtypes(__funcname=_qualified_name, **named):
"""
Preprocessing decorator that verifies inputs have expected numpy dtypes.
Examples
--------
>>> from numpy import dtype, arange, int8, float64
>>> @expect_dtypes(x=dtype(int8))
... def foo(x, y):
... return x, y
...
>>> foo(arange(3, dtype=int8), 'foo')
(array([0, 1, 2], dtype=int8), 'foo')
>>> foo(arange(3, dtype=float64), 'foo') # doctest: +NORMALIZE_WHITESPACE
... # doctest: +ELLIPSIS
Traceback (most recent call last):
...
TypeError: ...foo() expected a value with dtype 'int8' for argument 'x',
but got 'float64' instead.
"""
for name, type_ in named.items():
if not isinstance(type_, (dtype, tuple)):
raise TypeError(
"expect_dtypes() expected a numpy dtype or tuple of dtypes"
" for argument {name!r}, but got {dtype} instead.".format(
name=name,
                    dtype=type_,
)
)
if isinstance(__funcname, str):
def get_funcname(_):
return __funcname
else:
get_funcname = __funcname
@preprocess(dtypes=call(lambda x: x if isinstance(x, tuple) else (x,)))
def _expect_dtype(dtypes):
"""
Factory for dtype-checking functions that work with the @preprocess
decorator.
"""
def error_message(func, argname, value):
# If the bad value has a dtype, but it's wrong, show the dtype
# name. Otherwise just show the value.
try:
value_to_show = value.dtype.name
except AttributeError:
value_to_show = value
return (
"{funcname}() expected a value with dtype {dtype_str} "
"for argument {argname!r}, but got {value!r} instead."
).format(
funcname=get_funcname(func),
dtype_str=" or ".join(repr(d.name) for d in dtypes),
argname=argname,
value=value_to_show,
)
def _actual_preprocessor(func, argname, argvalue):
if getattr(argvalue, "dtype", object()) not in dtypes:
raise TypeError(error_message(func, argname, argvalue))
return argvalue
return _actual_preprocessor
return preprocess(**valmap(_expect_dtype, named))
def expect_kinds(**named):
"""
Preprocessing decorator that verifies inputs have expected dtype kinds.
Examples
--------
>>> from numpy import int64, int32, float32
>>> @expect_kinds(x='i')
... def foo(x):
... return x
...
>>> foo(int64(2))
2
>>> foo(int32(2))
2
>>> foo(float32(2)) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
Traceback (most recent call last):
...
TypeError: ...foo() expected a numpy object of kind 'i' for argument 'x',
but got 'f' instead.
"""
for name, kind in named.items():
if not isinstance(kind, (str, tuple)):
            raise TypeError(
                "expect_kinds() expected a string or tuple of strings"
                " for argument {name!r}, but got {kind} instead.".format(
                    name=name,
                    kind=kind,
)
)
@preprocess(kinds=call(lambda x: x if isinstance(x, tuple) else (x,)))
def _expect_kind(kinds):
"""
Factory for kind-checking functions that work the @preprocess
decorator.
"""
def error_message(func, argname, value):
# If the bad value has a dtype, but it's wrong, show the dtype
# kind. Otherwise just show the value.
try:
value_to_show = value.dtype.kind
except AttributeError:
value_to_show = value
return (
"{funcname}() expected a numpy object of kind {kinds} "
"for argument {argname!r}, but got {value!r} instead."
).format(
funcname=_qualified_name(func),
kinds=" or ".join(map(repr, kinds)),
argname=argname,
value=value_to_show,
)
def _actual_preprocessor(func, argname, argvalue):
if getattrs(argvalue, ("dtype", "kind"), object()) not in kinds:
raise TypeError(error_message(func, argname, argvalue))
return argvalue
return _actual_preprocessor
return preprocess(**valmap(_expect_kind, named))
def expect_types(__funcname=_qualified_name, **named):
"""
Preprocessing decorator that verifies inputs have expected types.
Examples
--------
>>> @expect_types(x=int, y=str)
... def foo(x, y):
... return x, y
...
>>> foo(2, '3')
(2, '3')
>>> foo(2.0, '3') # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
Traceback (most recent call last):
...
TypeError: ...foo() expected a value of type int for argument 'x',
but got float instead.
Notes
-----
A special argument, __funcname, can be provided as a string to override the
function name shown in error messages. This is most often used on __init__
or __new__ methods to make errors refer to the class name instead of the
function name.
"""
for name, type_ in named.items():
if not isinstance(type_, (type, tuple)):
raise TypeError(
"expect_types() expected a type or tuple of types for "
"argument '{name}', but got {type_} instead.".format(
name=name,
type_=type_,
)
)
def _expect_type(type_):
# Slightly different messages for type and tuple of types.
_template = (
"%(funcname)s() expected a value of type {type_or_types} "
"for argument '%(argname)s', but got %(actual)s instead."
)
if isinstance(type_, tuple):
template = _template.format(
type_or_types=" or ".join(map(_qualified_name, type_))
)
else:
template = _template.format(type_or_types=_qualified_name(type_))
return make_check(
exc_type=TypeError,
template=template,
pred=lambda v: not isinstance(v, type_),
actual=compose(_qualified_name, type),
funcname=__funcname,
)
return preprocess(**valmap(_expect_type, named))
def make_check(exc_type, template, pred, actual, funcname):
"""
Factory for making preprocessing functions that check a predicate on the
input value.
Parameters
----------
exc_type : Exception
The exception type to raise if the predicate fails.
template : str
A template string to use to create error messages.
Should have %-style named template parameters for 'funcname',
'argname', and 'actual'.
pred : function[object -> bool]
A function to call on the argument being preprocessed. If the
predicate returns `True`, we raise an instance of `exc_type`.
actual : function[object -> object]
A function to call on bad values to produce the value to display in the
error message.
funcname : str or callable
Name to use in error messages, or function to call on decorated
functions to produce a name. Passing an explicit name is useful when
creating checks for __init__ or __new__ methods when you want the error
to refer to the class name instead of the method name.
"""
if isinstance(funcname, str):
def get_funcname(_):
return funcname
else:
get_funcname = funcname
def _check(func, argname, argvalue):
if pred(argvalue):
raise exc_type(
template
% {
"funcname": get_funcname(func),
"argname": argname,
"actual": actual(argvalue),
},
)
return argvalue
return _check
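# Illustrative usage sketch (hypothetical helper and message; nothing calls
# it): building a one-off positivity check with make_check and wiring it up
# through @preprocess, the same way expect_types, expect_element and the
# bounds checks do internally.
def _example_make_check():
    check_positive = make_check(
        exc_type=ValueError,
        template="%(funcname)s() expected a positive value for argument"
        " '%(argname)s', but got %(actual)s instead.",
        pred=lambda value: value <= 0,
        actual=repr,
        funcname="example",
    )

    @preprocess(x=check_positive)
    def half(x):
        return x / 2

    assert half(4) == 2
    try:
        half(-4)
    except ValueError:
        pass  # rejected before the function body runs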
def optional(type_):
"""
Helper for use with `expect_types` when an input can be `type_` or `None`.
Returns an object such that both `None` and instances of `type_` pass
checks of the form `isinstance(obj, optional(type_))`.
Parameters
----------
type_ : type
Type for which to produce an option.
Examples
--------
>>> isinstance({}, optional(dict))
True
>>> isinstance(None, optional(dict))
True
>>> isinstance(1, optional(dict))
False
"""
return (type_, type(None))
def expect_element(__funcname=_qualified_name, **named):
"""
Preprocessing decorator that verifies inputs are elements of some
expected collection.
Examples
--------
>>> @expect_element(x=('a', 'b'))
... def foo(x):
... return x.upper()
...
>>> foo('a')
'A'
>>> foo('b')
'B'
>>> foo('c') # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
Traceback (most recent call last):
...
ValueError: ...foo() expected a value in ('a', 'b') for argument 'x',
but got 'c' instead.
Notes
-----
A special argument, __funcname, can be provided as a string to override the
function name shown in error messages. This is most often used on __init__
or __new__ methods to make errors refer to the class name instead of the
function name.
This uses the `in` operator (__contains__) to make the containment check.
This allows us to use any custom container as long as the object supports
the container protocol.
"""
def _expect_element(collection):
if isinstance(collection, (set, frozenset)):
# Special case the error message for set and frozen set to make it
# less verbose.
collection_for_error_message = tuple(sorted(collection))
else:
collection_for_error_message = collection
template = (
"%(funcname)s() expected a value in {collection} "
"for argument '%(argname)s', but got %(actual)s instead."
).format(collection=collection_for_error_message)
return make_check(
ValueError,
template,
complement(op.contains(collection)),
repr,
funcname=__funcname,
)
return preprocess(**valmap(_expect_element, named))
def expect_bounded(__funcname=_qualified_name, **named):
"""
Preprocessing decorator verifying that inputs fall INCLUSIVELY between
bounds.
Bounds should be passed as a pair of ``(min_value, max_value)``.
``None`` may be passed as ``min_value`` or ``max_value`` to signify that
the input is only bounded above or below.
Examples
--------
>>> @expect_bounded(x=(1, 5))
... def foo(x):
... return x + 1
...
>>> foo(1)
2
>>> foo(5)
6
>>> foo(6) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
Traceback (most recent call last):
...
ValueError: ...foo() expected a value inclusively between 1 and 5 for
argument 'x', but got 6 instead.
>>> @expect_bounded(x=(2, None))
... def foo(x):
... return x
...
>>> foo(100000)
100000
>>> foo(1) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
Traceback (most recent call last):
...
ValueError: ...foo() expected a value greater than or equal to 2 for
argument 'x', but got 1 instead.
>>> @expect_bounded(x=(None, 5))
... def foo(x):
... return x
...
>>> foo(6) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
Traceback (most recent call last):
...
ValueError: ...foo() expected a value less than or equal to 5 for
argument 'x', but got 6 instead.
"""
def _make_bounded_check(bounds):
(lower, upper) = bounds
if lower is None:
def should_fail(value):
return value > upper
predicate_descr = "less than or equal to " + str(upper)
elif upper is None:
def should_fail(value):
return value < lower
predicate_descr = "greater than or equal to " + str(lower)
else:
def should_fail(value):
return not (lower <= value <= upper)
predicate_descr = "inclusively between %s and %s" % bounds
template = (
"%(funcname)s() expected a value {predicate}"
" for argument '%(argname)s', but got %(actual)s instead."
).format(predicate=predicate_descr)
return make_check(
exc_type=ValueError,
template=template,
pred=should_fail,
actual=repr,
funcname=__funcname,
)
return _expect_bounded(_make_bounded_check, __funcname=__funcname, **named)
def expect_strictly_bounded(__funcname=_qualified_name, **named):
"""
Preprocessing decorator verifying that inputs fall EXCLUSIVELY between
bounds.
Bounds should be passed as a pair of ``(min_value, max_value)``.
``None`` may be passed as ``min_value`` or ``max_value`` to signify that
the input is only bounded above or below.
Examples
--------
>>> @expect_strictly_bounded(x=(1, 5))
... def foo(x):
... return x + 1
...
>>> foo(2)
3
>>> foo(4)
5
>>> foo(5) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
Traceback (most recent call last):
...
ValueError: ...foo() expected a value exclusively between 1 and 5 for
argument 'x', but got 5 instead.
>>> @expect_strictly_bounded(x=(2, None))
... def foo(x):
... return x
...
>>> foo(100000)
100000
>>> foo(2) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
Traceback (most recent call last):
...
ValueError: ...foo() expected a value strictly greater than 2 for
argument 'x', but got 2 instead.
>>> @expect_strictly_bounded(x=(None, 5))
... def foo(x):
... return x
...
>>> foo(5) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
Traceback (most recent call last):
...
ValueError: ...foo() expected a value strictly less than 5 for
argument 'x', but got 5 instead.
"""
def _make_bounded_check(bounds):
(lower, upper) = bounds
if lower is None:
def should_fail(value):
return value >= upper
predicate_descr = "strictly less than " + str(upper)
elif upper is None:
def should_fail(value):
return value <= lower
predicate_descr = "strictly greater than " + str(lower)
else:
def should_fail(value):
return not (lower < value < upper)
predicate_descr = "exclusively between %s and %s" % bounds
template = (
"%(funcname)s() expected a value {predicate}"
" for argument '%(argname)s', but got %(actual)s instead."
).format(predicate=predicate_descr)
return make_check(
exc_type=ValueError,
template=template,
pred=should_fail,
actual=repr,
funcname=__funcname,
)
return _expect_bounded(_make_bounded_check, __funcname=__funcname, **named)
def _expect_bounded(make_bounded_check, __funcname, **named):
def valid_bounds(t):
return isinstance(t, tuple) and len(t) == 2 and t != (None, None)
for name, bounds in named.items():
if not valid_bounds(bounds):
raise TypeError(
"expect_bounded() expected a tuple of bounds for"
" argument '{name}', but got {bounds} instead.".format(
name=name,
bounds=bounds,
)
)
return preprocess(**valmap(make_bounded_check, named))
def expect_dimensions(__funcname=_qualified_name, **dimensions):
"""
Preprocessing decorator that verifies inputs are numpy arrays with a
specific dimensionality.
Examples
--------
>>> from numpy import array
>>> @expect_dimensions(x=1, y=2)
... def foo(x, y):
... return x[0] + y[0, 0]
...
>>> foo(array([1, 1]), array([[1, 1], [2, 2]]))
2
>>> foo(array([1, 1]), array([1, 1])) # doctest: +NORMALIZE_WHITESPACE
... # doctest: +ELLIPSIS
Traceback (most recent call last):
...
ValueError: ...foo() expected a 2-D array for argument 'y',
but got a 1-D array instead.
"""
if isinstance(__funcname, str):
def get_funcname(_):
return __funcname
else:
get_funcname = __funcname
def _expect_dimension(expected_ndim):
def _check(func, argname, argvalue):
actual_ndim = argvalue.ndim
if actual_ndim != expected_ndim:
if actual_ndim == 0:
actual_repr = "scalar"
else:
actual_repr = "%d-D array" % actual_ndim
raise ValueError(
"{func}() expected a {expected:d}-D array"
" for argument {argname!r}, but got a {actual}"
" instead.".format(
func=get_funcname(func),
expected=expected_ndim,
argname=argname,
actual=actual_repr,
)
)
return argvalue
return _check
return preprocess(**valmap(_expect_dimension, dimensions))
def coerce(from_, to, **to_kwargs):
"""
A preprocessing decorator that coerces inputs of a given type by passing
them to a callable.
Parameters
----------
    from_ : type or tuple of types
        Input types on which to call ``to``.
to : function
Coercion function to call on inputs.
**to_kwargs
Additional keywords to forward to every call to ``to``.
Examples
--------
>>> @preprocess(x=coerce(float, int), y=coerce(float, int))
... def floordiff(x, y):
... return x - y
...
>>> floordiff(3.2, 2.5)
1
>>> @preprocess(x=coerce(str, int, base=2), y=coerce(str, int, base=2))
... def add_binary_strings(x, y):
... return bin(x + y)[2:]
...
>>> add_binary_strings('101', '001')
'110'
"""
def preprocessor(func, argname, arg):
if isinstance(arg, from_):
return to(arg, **to_kwargs)
return arg
return preprocessor
def coerce_types(**kwargs):
"""
Preprocessing decorator that applies type coercions.
Parameters
----------
**kwargs : dict[str -> (type, callable)]
Keyword arguments mapping function parameter names to pairs of
(from_type, to_type).
Examples
--------
>>> @coerce_types(x=(float, int), y=(int, str))
... def func(x, y):
... return (x, y)
...
>>> func(1.0, 3)
(1, '3')
"""
def _coerce(types):
return coerce(*types)
return preprocess(**valmap(_coerce, kwargs))
class error_keywords(object):
def __init__(self, *args, **kwargs):
self.messages = kwargs
def __call__(self, func):
@wraps(func)
def assert_keywords_and_call(*args, **kwargs):
for field, message in self.messages.items():
if field in kwargs:
raise TypeError(message)
return func(*args, **kwargs)
return assert_keywords_and_call
coerce_string = partial(coerce, str)
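# Illustrative usage sketch (hypothetical helper and keyword name; nothing
# calls it): error_keywords turns the presence of a given keyword argument
# into a custom TypeError, which is useful for steering callers away from
# unsupported parameters.
def _example_error_keywords():
    @error_keywords(universe="'universe' is not supported; pass assets instead.")
    def rebalance(assets, **kwargs):
        return assets

    assert rebalance([1, 2]) == [1, 2]
    try:
        rebalance([1, 2], universe="everything")
    except TypeError:
        pass  # the configured message is raised before rebalance runs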
def validate_keys(dict_, expected, funcname):
"""Validate that a dictionary has an expected set of keys."""
expected = set(expected)
received = set(dict_)
missing = expected - received
if missing:
raise ValueError(
"Missing keys in {}:\n"
"Expected Keys: {}\n"
"Received Keys: {}".format(
funcname,
sorted(expected),
sorted(received),
)
)
unexpected = received - expected
if unexpected:
raise ValueError(
"Unexpected keys in {}:\n"
"Expected Keys: {}\n"
"Received Keys: {}".format(
funcname,
sorted(expected),
sorted(received),
)
) | zipline-tej | /zipline_tej-0.0.48-cp38-cp38-win_amd64.whl/zipline/utils/input_validation.py | input_validation.py |
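# Illustrative usage sketch (hypothetical helper; nothing calls it):
# validate_keys demands an exact key set and reports both missing and
# unexpected keys.
def _example_validate_keys():
    validate_keys({"open": 1.0, "close": 2.0}, {"open", "close"}, "example")
    try:
        validate_keys({"open": 1.0}, {"open", "close"}, "example")
    except ValueError:
        pass  # 'close' is missing from the received keys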
import sys
from textwrap import dedent
from types import CodeType
from uuid import uuid4
from toolz.curried.operator import getitem
from zipline.utils.compat import getargspec, wraps
if sys.version_info[0:2] >= (3, 8):
_code_argorder_head = (
"co_argcount",
"co_posonlyargcount",
"co_kwonlyargcount",
)
else:
_code_argorder_head = ("co_argcount", "co_kwonlyargcount")
_code_argorder = (_code_argorder_head) + (
"co_nlocals",
"co_stacksize",
"co_flags",
"co_code",
"co_consts",
"co_names",
"co_varnames",
"co_filename",
"co_name",
"co_firstlineno",
"co_lnotab",
"co_freevars",
"co_cellvars",
)
NO_DEFAULT = object()
def preprocess(*_unused, **processors):
"""
Decorator that applies pre-processors to the arguments of a function before
calling the function.
Parameters
----------
**processors : dict
Map from argument name -> processor function.
A processor function takes three arguments: (func, argname, argvalue).
        `func` is the function for which we're processing args.
`argname` is the name of the argument we're processing.
`argvalue` is the value of the argument we're processing.
Examples
--------
>>> def _ensure_tuple(func, argname, arg):
... if isinstance(arg, tuple):
    ...         return arg
... try:
... return tuple(arg)
... except TypeError:
... raise TypeError(
... "%s() expected argument '%s' to"
... " be iterable, but got %s instead." % (
... func.__name__, argname, arg,
... )
... )
...
>>> @preprocess(arg=_ensure_tuple)
... def foo(arg):
... return arg
...
>>> foo([1, 2, 3])
(1, 2, 3)
>>> foo("a")
('a',)
>>> foo(2)
Traceback (most recent call last):
...
TypeError: foo() expected argument 'arg' to be iterable, but got 2 instead.
"""
if _unused:
raise TypeError("preprocess() doesn't accept positional arguments")
def _decorator(f):
args, varargs, varkw, defaults = argspec = getargspec(f)
if defaults is None:
defaults = ()
no_defaults = (NO_DEFAULT,) * (len(args) - len(defaults))
args_defaults = list(zip(args, no_defaults + defaults))
if varargs:
args_defaults.append((varargs, NO_DEFAULT))
if varkw:
args_defaults.append((varkw, NO_DEFAULT))
        argset = set(args) | ({varargs, varkw} - {None})
# Arguments can be declared as tuples in Python 2.
if not all(isinstance(arg, str) for arg in args):
raise TypeError(
"Can't validate functions using tuple unpacking: %s" % (argspec,)
)
# Ensure that all processors map to valid names.
bad_names = processors.keys() - argset
if bad_names:
raise TypeError("Got processors for unknown arguments: %s." % bad_names)
return _build_preprocessed_function(
f,
processors,
args_defaults,
varargs,
varkw,
)
return _decorator
def call(f):
"""
Wrap a function in a processor that calls `f` on the argument before
passing it along.
Useful for creating simple arguments to the `@preprocess` decorator.
Parameters
----------
f : function
Function accepting a single argument and returning a replacement.
Examples
--------
>>> @preprocess(x=call(lambda x: x + 1))
... def foo(x):
... return x
...
>>> foo(1)
2
"""
@wraps(f)
def processor(func, argname, arg):
return f(arg)
return processor
def _build_preprocessed_function(func, processors, args_defaults, varargs, varkw):
"""
Build a preprocessed function with the same signature as `func`.
Uses `exec` internally to build a function that actually has the same
    signature as `func`.
"""
format_kwargs = {"func_name": func.__name__}
def mangle(name):
return "a" + uuid4().hex + name
format_kwargs["mangled_func"] = mangled_funcname = mangle(func.__name__)
def make_processor_assignment(arg, processor_name):
template = "{arg} = {processor}({func}, '{arg}', {arg})"
return template.format(
arg=arg,
processor=processor_name,
func=mangled_funcname,
)
exec_globals = {mangled_funcname: func, "wraps": wraps}
defaults_seen = 0
default_name_template = "a" + uuid4().hex + "_%d"
signature = []
call_args = []
assignments = []
star_map = {
varargs: "*",
varkw: "**",
}
def name_as_arg(arg):
return star_map.get(arg, "") + arg
for arg, default in args_defaults:
if default is NO_DEFAULT:
signature.append(name_as_arg(arg))
else:
default_name = default_name_template % defaults_seen
exec_globals[default_name] = default
signature.append("=".join([name_as_arg(arg), default_name]))
defaults_seen += 1
if arg in processors:
procname = mangle("_processor_" + arg)
exec_globals[procname] = processors[arg]
assignments.append(make_processor_assignment(arg, procname))
call_args.append(name_as_arg(arg))
exec_str = dedent(
"""\
@wraps({wrapped_funcname})
def {func_name}({signature}):
{assignments}
return {wrapped_funcname}({call_args})
"""
).format(
func_name=func.__name__,
signature=", ".join(signature),
assignments="\n ".join(assignments),
wrapped_funcname=mangled_funcname,
call_args=", ".join(call_args),
)
compiled = compile(
exec_str,
func.__code__.co_filename,
mode="exec",
)
exec_locals = {}
exec(compiled, exec_globals, exec_locals)
new_func = exec_locals[func.__name__]
code = new_func.__code__
args = {attr: getattr(code, attr) for attr in dir(code) if attr.startswith("co_")}
# Copy the firstlineno out of the underlying function so that exceptions
# get raised with the correct traceback.
# This also makes dynamic source inspection (like IPython `??` operator)
# work as intended.
try:
# Try to get the pycode object from the underlying function.
original_code = func.__code__
except AttributeError:
try:
# The underlying callable was not a function, try to grab the
# `__func__.__code__` which exists on method objects.
original_code = func.__func__.__code__
except AttributeError:
# The underlying callable does not have a `__code__`. There is
# nothing for us to correct.
return new_func
args["co_firstlineno"] = original_code.co_firstlineno
new_func.__code__ = CodeType(*map(getitem(args), _code_argorder))
return new_func | zipline-tej | /zipline_tej-0.0.48-cp38-cp38-win_amd64.whl/zipline/utils/preprocess.py | preprocess.py |
from abc import ABCMeta, abstractmethod
from collections import namedtuple
import inspect
import warnings
import datetime
import numpy as np
import pandas as pd
import pytz
from toolz import curry
from zipline.utils.input_validation import preprocess
from zipline.utils.memoize import lazyval
from zipline.utils.sentinel import sentinel
from .context_tricks import nop_context
__all__ = [
"EventManager",
"Event",
"EventRule",
"StatelessRule",
"ComposedRule",
"Always",
"Never",
"AfterOpen",
"BeforeClose",
"NotHalfDay",
"NthTradingDayOfWeek",
"NDaysBeforeLastTradingDayOfWeek",
"NthTradingDayOfMonth",
"NDaysBeforeLastTradingDayOfMonth",
"StatefulRule",
"OncePerDay",
# Factory API
"date_rules",
"time_rules",
"calendars",
"make_eventrule",
]
MAX_MONTH_RANGE = 23
MAX_WEEK_RANGE = 5
def ensure_utc(time, tz="UTC"):
"""
Normalize a time. If the time is tz-naive, assume it is UTC.
"""
if not time.tzinfo:
time = time.replace(tzinfo=pytz.timezone(tz))
return time.replace(tzinfo=pytz.utc)
def _out_of_range_error(a, b=None, var="offset"):
start = 0
if b is None:
end = a - 1
else:
start = a
end = b - 1
return ValueError(
"{var} must be in between {start} and {end} inclusive".format(
var=var,
start=start,
end=end,
)
)
def _td_check(td):
seconds = td.total_seconds()
# 43200 seconds = 12 hours
if 60 <= seconds <= 43200:
return td
else:
raise ValueError(
"offset must be in between 1 minute and 12 hours, " "inclusive."
)
def _build_offset(offset, kwargs, default):
"""
Builds the offset argument for event rules.
"""
# Filter down to just kwargs that were actually passed.
kwargs = {k: v for k, v in kwargs.items() if v is not None}
if offset is None:
if not kwargs:
return default # use the default.
else:
return _td_check(datetime.timedelta(**kwargs))
elif kwargs:
raise ValueError("Cannot pass kwargs and an offset")
elif isinstance(offset, datetime.timedelta):
return _td_check(offset)
else:
raise TypeError("Must pass 'hours' and/or 'minutes' as keywords")
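# Illustrative usage sketch (hypothetical helper; nothing calls it): the ways
# an offset can reach the event rules. Mixing an explicit timedelta with
# hour/minute keywords raises, and offsets outside [1 minute, 12 hours] are
# rejected by _td_check.
def _example_build_offset():
    default = datetime.timedelta(minutes=1)
    assert _build_offset(None, {}, default) == default
    assert _build_offset(None, {"minutes": 30}, default) == datetime.timedelta(
        minutes=30
    )
    assert _build_offset(
        datetime.timedelta(hours=2), {}, default
    ) == datetime.timedelta(hours=2)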
def _build_date(date, kwargs):
"""
Builds the date argument for event rules.
"""
if date is None:
if not kwargs:
raise ValueError("Must pass a date or kwargs")
else:
return datetime.date(**kwargs)
elif kwargs:
raise ValueError("Cannot pass kwargs and a date")
else:
return date
# TODO: only used in tests
def _build_time(time, kwargs):
"""
Builds the time argument for event rules.
"""
tz = kwargs.pop("tz", "UTC")
if time:
if kwargs:
raise ValueError("Cannot pass kwargs and a time")
else:
return ensure_utc(time, tz)
elif not kwargs:
raise ValueError("Must pass a time or kwargs")
else:
return datetime.time(**kwargs)
@curry
def lossless_float_to_int(funcname, func, argname, arg):
"""
A preprocessor that coerces integral floats to ints.
Receipt of non-integral floats raises a TypeError.
"""
if not isinstance(arg, float):
return arg
arg_as_int = int(arg)
if arg == arg_as_int:
warnings.warn(
"{f} expected an int for argument {name!r}, but got float {arg}."
" Coercing to int.".format(
f=funcname,
name=argname,
arg=arg,
),
)
return arg_as_int
raise TypeError(arg)
class EventManager(object):
"""Manages a list of Event objects.
This manages the logic for checking the rules and dispatching to the
handle_data function of the Events.
Parameters
----------
create_context : (BarData) -> context manager, optional
An optional callback to produce a context manager to wrap the calls
to handle_data. This will be passed the current BarData.
"""
def __init__(self, create_context=None):
self._events = []
self._create_context = (
create_context
if create_context is not None
else lambda *_: nop_context
)
def add_event(self, event, prepend=False):
"""
Adds an event to the manager.
"""
if prepend:
self._events.insert(0, event)
else:
self._events.append(event)
def handle_data(self, context, data, dt):
with self._create_context(data):
for event in self._events:
event.handle_data(
context,
data,
dt,
)
class Event(namedtuple("Event", ["rule", "callback"])):
"""
An event is a pairing of an EventRule and a callable that will be invoked
with the current algorithm context, data, and datetime only when the rule
is triggered.
"""
def __new__(cls, rule, callback=None):
callback = callback or (lambda *args, **kwargs: None)
return super(cls, cls).__new__(cls, rule=rule, callback=callback)
def handle_data(self, context, data, dt):
"""
Calls the callable only when the rule is triggered.
"""
if self.rule.should_trigger(dt):
self.callback(context, data)
class EventRule(metaclass=ABCMeta):
"""A rule defining when a scheduled function should execute."""
# Instances of EventRule are assigned a calendar instance when scheduling
# a function.
_cal = None
@property
def cal(self):
return self._cal
@cal.setter
def cal(self, value):
self._cal = value
@abstractmethod
def should_trigger(self, dt):
"""
Checks if the rule should trigger with its current state.
This method should be pure and NOT mutate any state on the object.
"""
raise NotImplementedError("should_trigger")
class StatelessRule(EventRule):
"""
A stateless rule has no observable side effects.
This is reentrant and will always give the same result for the
same datetime.
Because these are pure, they can be composed to create new rules.
"""
def and_(self, rule):
"""
Logical and of two rules, triggers only when both rules trigger.
This follows the short circuiting rules for normal and.
"""
return ComposedRule(self, rule, ComposedRule.lazy_and)
__and__ = and_
class ComposedRule(StatelessRule):
"""
A rule that composes the results of two rules with some composing function.
    The composer is called with the two rules' ``should_trigger`` functions
    and the datetime, rather than with their results, so it can decide
    whether each rule needs to be evaluated at all. This is how the ``&``
    operator gets the expected short-circuit behavior; see ``lazy_and``.
"""
def __init__(self, first, second, composer):
if not (
isinstance(first, StatelessRule)
and isinstance(second, StatelessRule)
):
raise ValueError("Only two StatelessRules can be composed")
self.first = first
self.second = second
self.composer = composer
def should_trigger(self, dt):
"""
Composes the two rules with a lazy composer.
"""
return self.composer(
self.first.should_trigger, self.second.should_trigger, dt
)
@staticmethod
def lazy_and(first_should_trigger, second_should_trigger, dt):
"""
Lazily ands the two rules. This will NOT call the should_trigger of the
second rule if the first one returns False.
"""
return first_should_trigger(dt) and second_should_trigger(dt)
@property
def cal(self):
return self.first.cal
@cal.setter
def cal(self, value):
# Thread the calendar through to the underlying rules.
self.first.cal = self.second.cal = value
class Always(StatelessRule):
"""
A rule that always triggers.
"""
@staticmethod
def always_trigger(dt):
"""
A should_trigger implementation that will always trigger.
"""
return True
should_trigger = always_trigger
class Never(StatelessRule):
"""
A rule that never triggers.
"""
@staticmethod
def never_trigger(dt):
"""
A should_trigger implementation that will never trigger.
"""
return False
should_trigger = never_trigger
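# Illustrative usage sketch (helper name, callbacks, and timestamp are
# arbitrary; nothing calls this): an EventManager simply walks its events and
# lets each rule gate its callback, so Always fires and Never does not.
def _example_event_manager_dispatch():
    fired = []
    dt = pd.Timestamp("2020-01-02 14:31", tz="UTC")
    manager = EventManager()
    manager.add_event(Event(Always(), lambda context, data: fired.append("always")))
    manager.add_event(Event(Never(), lambda context, data: fired.append("never")))
    manager.handle_data(context=None, data=None, dt=dt)
    assert fired == ["always"]  # only the Always-gated callback ran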
class AfterOpen(StatelessRule):
"""
A rule that triggers for some offset after the market opens.
    Example that triggers 30 minutes after the market opens:
>>> AfterOpen(minutes=30) # doctest: +ELLIPSIS
<zipline.utils.events.AfterOpen object at ...>
"""
def __init__(self, offset=None, **kwargs):
self.offset = _build_offset(
offset,
kwargs,
datetime.timedelta(minutes=1), # Defaults to the first minute.
)
self._period_start = None
self._period_end = None
self._period_close = None
self._one_minute = datetime.timedelta(minutes=1)
def calculate_dates(self, dt):
"""
Given a date, find that day's open and period end (open + offset).
"""
period_start, period_close = self.cal.open_and_close_for_session(
self.cal.minute_to_session_label(dt),
)
# Align the market open and close times here with the execution times
# used by the simulation clock. This ensures that scheduled functions
# trigger at the correct times.
self._period_start = self.cal.execution_time_from_open(period_start)
self._period_close = self.cal.execution_time_from_close(period_close)
self._period_end = self._period_start + self.offset - self._one_minute
def should_trigger(self, dt):
# There are two reasons why we might want to recalculate the dates.
# One is the first time we ever call should_trigger, when
# self._period_start is none. The second is when we're on a new day,
# and need to recalculate the dates. For performance reasons, we rely
# on the fact that our clock only ever ticks forward, since it's
# cheaper to do dt1 <= dt2 than dt1.date() != dt2.date(). This means
# that we will NOT correctly recognize a new date if we go backwards
        # in time (which should never happen in a simulation, or in live
        # trading).
if self._period_start is None or self._period_close <= dt:
self.calculate_dates(dt)
return dt == self._period_end
class BeforeClose(StatelessRule):
"""
A rule that triggers for some offset time before the market closes.
    Example that triggers 30 minutes before the market closes:
>>> BeforeClose(minutes=30) # doctest: +ELLIPSIS
<zipline.utils.events.BeforeClose object at ...>
"""
def __init__(self, offset=None, **kwargs):
self.offset = _build_offset(
offset,
kwargs,
datetime.timedelta(minutes=1), # Defaults to the last minute.
)
self._period_start = None
self._period_close = None
self._period_end = None
self._one_minute = datetime.timedelta(minutes=1)
def calculate_dates(self, dt):
"""
Given a dt, find that day's close and period start (close - offset).
"""
period_end = self.cal.open_and_close_for_session(
self.cal.minute_to_session_label(dt),
)[1]
# Align the market close time here with the execution time used by the
# simulation clock. This ensures that scheduled functions trigger at
# the correct times.
self._period_end = self.cal.execution_time_from_close(period_end)
self._period_start = self._period_end - self.offset
self._period_close = self._period_end
def should_trigger(self, dt):
# There are two reasons why we might want to recalculate the dates.
# One is the first time we ever call should_trigger, when
# self._period_start is none. The second is when we're on a new day,
# and need to recalculate the dates. For performance reasons, we rely
# on the fact that our clock only ever ticks forward, since it's
# cheaper to do dt1 <= dt2 than dt1.date() != dt2.date(). This means
# that we will NOT correctly recognize a new date if we go backwards
        # in time (which should never happen in a simulation, or in live
        # trading).
if self._period_start is None or self._period_close <= dt:
self.calculate_dates(dt)
return self._period_start == dt
class NotHalfDay(StatelessRule):
"""
A rule that only triggers when it is not a half day.
"""
def should_trigger(self, dt):
return self.cal.minute_to_session_label(dt) not in self.cal.early_closes
class TradingDayOfWeekRule(StatelessRule, metaclass=ABCMeta):
@preprocess(n=lossless_float_to_int("TradingDayOfWeekRule"))
def __init__(self, n, invert):
if not 0 <= n < MAX_WEEK_RANGE:
raise _out_of_range_error(MAX_WEEK_RANGE)
self.td_delta = (-n - 1) if invert else n
def should_trigger(self, dt):
# is this market minute's period in the list of execution periods?
val = self.cal.minute_to_session_label(dt, direction="none").value
return val in self.execution_period_values
@lazyval
def execution_period_values(self):
# calculate the list of periods that match the given criteria
sessions = self.cal.all_sessions
return set(
pd.Series(data=sessions)
# Group by ISO year (0) and week (1)
.groupby(sessions.map(lambda x: x.isocalendar()[0:2]))
.nth(self.td_delta)
.astype(np.int64)
)
class NthTradingDayOfWeek(TradingDayOfWeekRule):
"""
A rule that triggers on the nth trading day of the week.
This is zero-indexed, n=0 is the first trading day of the week.
"""
def __init__(self, n):
super(NthTradingDayOfWeek, self).__init__(n, invert=False)
class NDaysBeforeLastTradingDayOfWeek(TradingDayOfWeekRule):
"""
A rule that triggers n days before the last trading day of the week.
"""
def __init__(self, n):
super(NDaysBeforeLastTradingDayOfWeek, self).__init__(n, invert=True)
class TradingDayOfMonthRule(StatelessRule, metaclass=ABCMeta):
@preprocess(n=lossless_float_to_int("TradingDayOfMonthRule"))
def __init__(self, n, invert):
if not 0 <= n < MAX_MONTH_RANGE:
raise _out_of_range_error(MAX_MONTH_RANGE)
if invert:
self.td_delta = -n - 1
else:
self.td_delta = n
def should_trigger(self, dt):
# is this market minute's period in the list of execution periods?
value = self.cal.minute_to_session_label(dt, direction="none").value
return value in self.execution_period_values
@lazyval
def execution_period_values(self):
# calculate the list of periods that match the given criteria
sessions = self.cal.all_sessions
return set(
pd.Series(data=sessions)
.groupby([sessions.year, sessions.month])
.nth(self.td_delta)
.astype(np.int64)
)
class NthTradingDayOfMonth(TradingDayOfMonthRule):
"""
A rule that triggers on the nth trading day of the month.
This is zero-indexed, n=0 is the first trading day of the month.
"""
def __init__(self, n):
super(NthTradingDayOfMonth, self).__init__(n, invert=False)
class NDaysBeforeLastTradingDayOfMonth(TradingDayOfMonthRule):
"""
A rule that triggers n days before the last trading day of the month.
"""
def __init__(self, n):
super(NDaysBeforeLastTradingDayOfMonth, self).__init__(n, invert=True)
# Stateful rules
class StatefulRule(EventRule):
"""
A stateful rule has state.
This rule will give different results for the same datetimes depending
on the internal state that this holds.
StatefulRules wrap other rules as state transformers.
"""
def __init__(self, rule=None):
self.rule = rule or Always()
@property
def cal(self):
return self.rule.cal
@cal.setter
def cal(self, value):
# Thread the calendar through to the underlying rule.
self.rule.cal = value
class OncePerDay(StatefulRule):
def __init__(self, rule=None):
self.triggered = False
self.date = None
self.next_date = None
super(OncePerDay, self).__init__(rule)
def should_trigger(self, dt):
if self.date is None or dt >= self.next_date:
# initialize or reset for new date
self.triggered = False
self.date = dt
# record the timestamp for the next day, so that we can use it
# to know if we've moved to the next day
self.next_date = dt + pd.Timedelta(1, unit="d")
if not self.triggered and self.rule.should_trigger(dt):
self.triggered = True
return True
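# Illustrative usage sketch (hypothetical helper; nothing calls it): OncePerDay
# lets the wrapped rule fire at most once per day, even though Always would
# otherwise trigger on every minute.
def _example_once_per_day():
    rule = OncePerDay(rule=Always())
    first = pd.Timestamp("2020-01-02 14:31", tz="UTC")
    assert rule.should_trigger(first) is True
    # Later the same day: the rule has already fired, so nothing triggers.
    assert not rule.should_trigger(first + pd.Timedelta(minutes=1))
    # A new day resets the state.
    assert rule.should_trigger(first + pd.Timedelta(days=1)) is True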
# Factory API
class date_rules(object):
"""
Factories for date-based :func:`~zipline.api.schedule_function` rules.
See Also
--------
:func:`~zipline.api.schedule_function`
"""
@staticmethod
def every_day():
"""Create a rule that triggers every day.
Returns
-------
rule : zipline.utils.events.EventRule
"""
return Always()
@staticmethod
def month_start(days_offset=0):
"""
Create a rule that triggers a fixed number of trading days after the
start of each month.
Parameters
----------
days_offset : int, optional
Number of trading days to wait before triggering each
month. Default is 0, i.e., trigger on the first trading day of the
month.
Returns
-------
rule : zipline.utils.events.EventRule
"""
return NthTradingDayOfMonth(n=days_offset)
@staticmethod
def month_end(days_offset=0):
"""
Create a rule that triggers a fixed number of trading days before the
end of each month.
Parameters
----------
days_offset : int, optional
Number of trading days prior to month end to trigger. Default is 0,
i.e., trigger on the last day of the month.
Returns
-------
rule : zipline.utils.events.EventRule
"""
return NDaysBeforeLastTradingDayOfMonth(n=days_offset)
@staticmethod
def week_start(days_offset=0):
"""
Create a rule that triggers a fixed number of trading days after the
start of each week.
Parameters
----------
days_offset : int, optional
Number of trading days to wait before triggering each week. Default
is 0, i.e., trigger on the first trading day of the week.
"""
return NthTradingDayOfWeek(n=days_offset)
@staticmethod
def week_end(days_offset=0):
"""
Create a rule that triggers a fixed number of trading days before the
end of each week.
Parameters
----------
days_offset : int, optional
Number of trading days prior to week end to trigger. Default is 0,
i.e., trigger on the last trading day of the week.
"""
return NDaysBeforeLastTradingDayOfWeek(n=days_offset)
class time_rules(object):
"""Factories for time-based :func:`~zipline.api.schedule_function` rules.
See Also
--------
:func:`~zipline.api.schedule_function`
"""
@staticmethod
def market_open(offset=None, hours=None, minutes=None):
"""
Create a rule that triggers at a fixed offset from market open.
The offset can be specified either as a :class:`datetime.timedelta`, or
as a number of hours and minutes.
Parameters
----------
offset : datetime.timedelta, optional
If passed, the offset from market open at which to trigger. Must be
at least 1 minute.
hours : int, optional
If passed, number of hours to wait after market open.
minutes : int, optional
If passed, number of minutes to wait after market open.
Returns
-------
rule : zipline.utils.events.EventRule
Notes
-----
If no arguments are passed, the default offset is one minute after
market open.
If ``offset`` is passed, ``hours`` and ``minutes`` must not be
passed. Conversely, if either ``hours`` or ``minutes`` are passed,
``offset`` must not be passed.
"""
return AfterOpen(offset=offset, hours=hours, minutes=minutes)
@staticmethod
def market_close(offset=None, hours=None, minutes=None):
"""
Create a rule that triggers at a fixed offset from market close.
The offset can be specified either as a :class:`datetime.timedelta`, or
as a number of hours and minutes.
Parameters
----------
offset : datetime.timedelta, optional
If passed, the offset from market close at which to trigger. Must
be at least 1 minute.
hours : int, optional
If passed, number of hours to wait before market close.
minutes : int, optional
If passed, number of minutes to wait before market close.
Returns
-------
rule : zipline.utils.events.EventRule
Notes
-----
If no arguments are passed, the default offset is one minute before
market close.
If ``offset`` is passed, ``hours`` and ``minutes`` must not be
passed. Conversely, if either ``hours`` or ``minutes`` are passed,
``offset`` must not be passed.
"""
return BeforeClose(offset=offset, hours=hours, minutes=minutes)
every_minute = Always
class calendars(object):
US_EQUITIES = sentinel("US_EQUITIES")
US_FUTURES = sentinel("US_FUTURES")
def _invert(d):
return dict(zip(d.values(), d.keys()))
_uncalled_rules = _invert(vars(date_rules))
_uncalled_rules.update(_invert(vars(time_rules)))
def _check_if_not_called(v):
try:
name = _uncalled_rules[v]
except KeyError:
if not (inspect.isclass(v) and issubclass(v, EventRule)):
return
name = getattr(v, "__name__", None)
msg = "invalid rule: %r" % (v,)
if name is not None:
msg += " (hint: did you mean %s())" % name
raise TypeError(msg)
def make_eventrule(date_rule, time_rule, cal, half_days=True):
"""
Constructs an event rule from the factory api.
"""
_check_if_not_called(date_rule)
_check_if_not_called(time_rule)
if half_days:
inner_rule = date_rule & time_rule
else:
inner_rule = date_rule & time_rule & NotHalfDay()
opd = OncePerDay(rule=inner_rule)
# This is where a scheduled function's rule is associated with a calendar.
opd.cal = cal
return opd | zipline-tej | /zipline_tej-0.0.48-cp38-cp38-win_amd64.whl/zipline/utils/events.py | events.py |
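# Illustrative usage sketch (hypothetical helper; nothing calls it): roughly
# what schedule_function does with its date_rule/time_rule arguments. It
# assumes an "NYSE" calendar is registered in this environment.
def _example_make_eventrule():
    from zipline.utils.calendar_utils import get_calendar

    rule = make_eventrule(
        date_rules.week_start(days_offset=0),
        time_rules.market_open(minutes=30),
        cal=get_calendar("NYSE"),
        half_days=True,
    )
    # ``rule`` is an OncePerDay wrapping NthTradingDayOfWeek & AfterOpen.
    return rule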
from collections.abc import Sequence
from collections import OrderedDict
from itertools import compress
from weakref import WeakKeyDictionary, ref
from _thread import allocate_lock as Lock
from toolz.sandbox import unzip
from zipline.utils.calendar_utils import lazyval
from zipline.utils.compat import wraps
class classlazyval(lazyval):
"""Decorator that marks that an attribute of a class should not be
computed until needed, and that the value should be memoized.
Example
-------
>>> from zipline.utils.memoize import classlazyval
>>> class C(object):
... count = 0
... @classlazyval
... def val(cls):
... cls.count += 1
... return "val"
...
>>> C.count
0
>>> C.val, C.count
('val', 1)
>>> C.val, C.count
('val', 1)
"""
# We don't reassign the name on the class to implement the caching because
# then we would need to use a metaclass to track the name of the
# descriptor.
def __get__(self, instance, owner):
return super(classlazyval, self).__get__(owner, owner)
def _weak_lru_cache(maxsize=100):
"""
Users should only access the lru_cache through its public API:
cache_info, cache_clear
The internals of the lru_cache are encapsulated for thread safety and
to allow the implementation to change.
"""
def decorating_function(
user_function, tuple=tuple, sorted=sorted, len=len, KeyError=KeyError
):
hits, misses = [0], [0]
kwd_mark = (object(),) # separates positional and keyword args
lock = Lock() # needed because OrderedDict isn't threadsafe
if maxsize is None:
cache = _WeakArgsDict() # cache without ordering or size limit
@wraps(user_function)
def wrapper(*args, **kwds):
key = args
if kwds:
key += kwd_mark + tuple(sorted(kwds.items()))
try:
result = cache[key]
hits[0] += 1
return result
except KeyError:
pass
result = user_function(*args, **kwds)
cache[key] = result
misses[0] += 1
return result
else:
# ordered least recent to most recent
cache = _WeakArgsOrderedDict()
cache_popitem = cache.popitem
cache_renew = cache.move_to_end
@wraps(user_function)
def wrapper(*args, **kwds):
key = args
if kwds:
key += kwd_mark + tuple(sorted(kwds.items()))
with lock:
try:
result = cache[key]
cache_renew(key) # record recent use of this key
hits[0] += 1
return result
except KeyError:
pass
result = user_function(*args, **kwds)
with lock:
cache[key] = result # record recent use of this key
misses[0] += 1
if len(cache) > maxsize:
# purge least recently used cache entry
cache_popitem(False)
return result
def cache_info():
"""Report cache statistics"""
with lock:
return hits[0], misses[0], maxsize, len(cache)
def cache_clear():
"""Clear the cache and cache statistics"""
with lock:
cache.clear()
hits[0] = misses[0] = 0
wrapper.cache_info = cache_info
wrapper.cache_clear = cache_clear
return wrapper
return decorating_function
class _WeakArgs(Sequence):
"""
Works with _WeakArgsDict to provide a weak cache for function args.
When any of those args are gc'd, the pair is removed from the cache.
"""
def __init__(self, items, dict_remove=None):
def remove(k, selfref=ref(self), dict_remove=dict_remove):
self = selfref()
if self is not None and dict_remove is not None:
dict_remove(self)
self._items, self._selectors = unzip(
self._try_ref(item, remove) for item in items
)
self._items = tuple(self._items)
self._selectors = tuple(self._selectors)
def __getitem__(self, index):
return self._items[index]
def __len__(self):
return len(self._items)
@staticmethod
def _try_ref(item, callback):
try:
return ref(item, callback), True
except TypeError:
return item, False
@property
def alive(self):
return all(
item() is not None for item in compress(self._items, self._selectors)
)
def __eq__(self, other):
return self._items == other._items
def __hash__(self):
try:
return self.__hash
except AttributeError:
h = self.__hash = hash(self._items)
return h
class _WeakArgsDict(WeakKeyDictionary, object):
def __delitem__(self, key):
del self.data[_WeakArgs(key)]
def __getitem__(self, key):
return self.data[_WeakArgs(key)]
def __repr__(self):
return "%s(%r)" % (type(self).__name__, self.data)
def __setitem__(self, key, value):
self.data[_WeakArgs(key, self._remove)] = value
def __contains__(self, key):
try:
wr = _WeakArgs(key)
except TypeError:
return False
return wr in self.data
def pop(self, key, *args):
return self.data.pop(_WeakArgs(key), *args)
class _WeakArgsOrderedDict(_WeakArgsDict, object):
def __init__(self):
super(_WeakArgsOrderedDict, self).__init__()
self.data = OrderedDict()
def popitem(self, last=True):
while True:
key, value = self.data.popitem(last)
if key.alive:
return tuple(key), value
def move_to_end(self, key):
"""Move an existing element to the end.
Raises KeyError if the element does not exist.
"""
self[key] = self.pop(key)
def weak_lru_cache(maxsize=100):
"""Weak least-recently-used cache decorator.
If *maxsize* is set to None, the LRU features are disabled and the cache
can grow without bound.
Arguments to the cached function must be hashable. Any that are weak-
referenceable will be stored by weak reference. Once any of the args have
been garbage collected, the entry will be removed from the cache.
View the cache statistics named tuple (hits, misses, maxsize, currsize)
with f.cache_info(). Clear the cache and statistics with f.cache_clear().
See: https://en.wikipedia.org/wiki/Cache_algorithms#Least_Recently_Used
"""
class desc(lazyval):
def __get__(self, instance, owner):
if instance is None:
return self
try:
return self._cache[instance]
except KeyError:
inst = ref(instance)
@_weak_lru_cache(maxsize)
@wraps(self._get)
def wrapper(*args, **kwargs):
return self._get(inst(), *args, **kwargs)
self._cache[instance] = wrapper
return wrapper
@_weak_lru_cache(maxsize)
def __call__(self, *args, **kwargs):
return self._get(*args, **kwargs)
return desc
remember_last = weak_lru_cache(1) | zipline-tej | /zipline_tej-0.0.48-cp38-cp38-win_amd64.whl/zipline/utils/memoize.py | memoize.py |
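# Illustrative usage sketch (hypothetical helper; nothing calls it, and it
# assumes the lazyval imported above exposes the standard ``(get)`` constructor
# that this module already relies on): the decorator caches per instance and
# per argument tuple, and the bound wrapper exposes cache_info().
def _example_weak_lru_cache():
    class Loader(object):
        @weak_lru_cache(maxsize=10)
        def load(self, key):
            return (self, key)

    loader = Loader()
    first = loader.load("prices")
    assert loader.load("prices") is first  # served from the per-instance cache
    hits, misses, maxsize, currsize = loader.load.cache_info()
    assert (hits, misses, maxsize, currsize) == (1, 1, 10, 1)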
import zipline.api
from zipline.utils.compat import wraps
from zipline.utils.algo_instance import get_algo_instance, set_algo_instance
class ZiplineAPI(object):
"""
Context manager for making an algorithm instance available to zipline API
functions within a scoped block.
"""
def __init__(self, algo_instance):
self.algo_instance = algo_instance
def __enter__(self):
"""
Set the given algo instance, storing any previously-existing instance.
"""
self.old_algo_instance = get_algo_instance()
set_algo_instance(self.algo_instance)
def __exit__(self, _type, _value, _tb):
"""
Restore the algo instance stored in __enter__.
"""
set_algo_instance(self.old_algo_instance)
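# Illustrative usage sketch (hypothetical helper; nothing calls it): any object
# can stand in for the algorithm. Inside the block the zipline.api functions
# resolve against it, and the previous instance is restored on exit.
def _example_zipline_api_scope():
    previous = get_algo_instance()
    algo = object()
    with ZiplineAPI(algo):
        assert get_algo_instance() is algo
    assert get_algo_instance() is previous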
def api_method(f):
# Decorator that adds the decorated class method as a callable
# function (wrapped) to zipline.api
@wraps(f)
def wrapped(*args, **kwargs):
# Get the instance and call the method
algo_instance = get_algo_instance()
if algo_instance is None:
raise RuntimeError(
"zipline api method %s must be called during a simulation." % f.__name__
)
return getattr(algo_instance, f.__name__)(*args, **kwargs)
# Add functor to zipline.api
setattr(zipline.api, f.__name__, wrapped)
zipline.api.__all__.append(f.__name__)
f.is_api_method = True
return f
def require_not_initialized(exception):
"""
Decorator for API methods that should only be called during or before
TradingAlgorithm.initialize. `exception` will be raised if the method is
called after initialize.
Examples
--------
@require_not_initialized(SomeException("Don't do that!"))
def method(self):
# Do stuff that should only be allowed during initialize.
"""
def decorator(method):
@wraps(method)
def wrapped_method(self, *args, **kwargs):
if self.initialized:
raise exception
return method(self, *args, **kwargs)
return wrapped_method
return decorator
def require_initialized(exception):
"""
Decorator for API methods that should only be called after
TradingAlgorithm.initialize. `exception` will be raised if the method is
called before initialize has completed.
Examples
--------
@require_initialized(SomeException("Don't do that!"))
def method(self):
# Do stuff that should only be allowed after initialize.
"""
def decorator(method):
@wraps(method)
def wrapped_method(self, *args, **kwargs):
if not self.initialized:
raise exception
return method(self, *args, **kwargs)
return wrapped_method
return decorator
def disallowed_in_before_trading_start(exception):
"""
Decorator for API methods that cannot be called from within
TradingAlgorithm.before_trading_start. `exception` will be raised if the
method is called inside `before_trading_start`.
Examples
--------
@disallowed_in_before_trading_start(SomeException("Don't do that!"))
def method(self):
# Do stuff that is not allowed inside before_trading_start.
"""
def decorator(method):
@wraps(method)
def wrapped_method(self, *args, **kwargs):
if self._in_before_trading_start:
raise exception
return method(self, *args, **kwargs)
return wrapped_method
return decorator | zipline-tej | /zipline_tej-0.0.48-cp38-cp38-win_amd64.whl/zipline/utils/api_support.py | api_support.py |
import pandas as pd
import numpy as np
from datetime import timedelta, datetime
from zipline.utils.calendar_utils import get_calendar
from zipline.sources import SpecificEquityTrades
from zipline.finance.trading import SimulationParameters
from zipline.sources.test_source import create_trade
def create_simulation_parameters(
year=2006,
start=None,
end=None,
capital_base=float("1.0e5"),
num_days=None,
data_frequency="daily",
emission_rate="daily",
trading_calendar=None,
):
if not trading_calendar:
trading_calendar = get_calendar("NYSE")
if start is None:
start = pd.Timestamp("{0}-01-01".format(year), tz="UTC")
elif type(start) == datetime:
start = pd.Timestamp(start)
if end is None:
if num_days:
start_index = trading_calendar.all_sessions.searchsorted(start)
end = trading_calendar.all_sessions[start_index + num_days - 1]
else:
end = pd.Timestamp("{0}-12-31".format(year), tz="UTC")
elif type(end) == datetime:
end = pd.Timestamp(end)
sim_params = SimulationParameters(
start_session=start,
end_session=end,
capital_base=capital_base,
data_frequency=data_frequency,
emission_rate=emission_rate,
trading_calendar=trading_calendar,
)
return sim_params
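# Editor's sketch (illustrative only): parameters spanning the first ten NYSE
# sessions of 2006; the exact attribute values depend on the installed
# trading calendar.
#
#     sim_params = create_simulation_parameters(year=2006, num_days=10)
#     sim_params.first_open, sim_params.last_close, len(sim_params.sessions)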
def get_next_trading_dt(current, interval, trading_calendar):
next_dt = pd.Timestamp(current).tz_convert(trading_calendar.tz)
while True:
        # Convert the timestamp to naive before adding the interval; otherwise
        # an extra hour is added when stepping over the EDT transition.
next_dt = pd.Timestamp(next_dt.replace(tzinfo=None))
next_dt = next_dt + interval
next_dt = pd.Timestamp(next_dt, tz=trading_calendar.tz)
next_dt_utc = next_dt.tz_convert("UTC")
if trading_calendar.is_open_on_minute(next_dt_utc):
break
next_dt = next_dt_utc.tz_convert(trading_calendar.tz)
return next_dt_utc
def create_trade_history(
sid,
prices,
amounts,
interval,
sim_params,
trading_calendar,
source_id="test_factory",
):
trades = []
current = sim_params.first_open
oneday = timedelta(days=1)
use_midnight = interval >= oneday
for price, amount in zip(prices, amounts):
if use_midnight:
trade_dt = current.replace(hour=0, minute=0)
else:
trade_dt = current
trade = create_trade(sid, price, amount, trade_dt, source_id)
trades.append(trade)
current = get_next_trading_dt(current, interval, trading_calendar)
assert len(trades) == len(prices)
return trades
def create_returns_from_range(sim_params):
return pd.Series(
index=sim_params.sessions, data=np.random.rand(len(sim_params.sessions))
)
def create_returns_from_list(returns, sim_params):
return pd.Series(index=sim_params.sessions[: len(returns)], data=returns)
def create_daily_trade_source(sids, sim_params, asset_finder, trading_calendar):
"""
    Creates trade_count trades for each sid in the sids list.
    The first trade will be on sim_params.start_session, and daily
thereafter for each sid. Thus, two sids should result in two trades per
day.
"""
return create_trade_source(
sids,
timedelta(days=1),
sim_params,
asset_finder,
trading_calendar=trading_calendar,
)
def create_trade_source(
sids, trade_time_increment, sim_params, asset_finder, trading_calendar
):
# If the sim_params define an end that is during market hours, that will be
# used as the end of the data source
if trading_calendar.is_open_on_minute(sim_params.end_session):
end = sim_params.end_session
# Otherwise, the last_close after the end_session is used as the end of the
# data source
else:
end = sim_params.last_close
args = tuple()
kwargs = {
"sids": sids,
"start": sim_params.first_open,
"end": end,
"delta": trade_time_increment,
"trading_calendar": trading_calendar,
"asset_finder": asset_finder,
}
source = SpecificEquityTrades(*args, **kwargs)
return source | zipline-tej | /zipline_tej-0.0.48-cp38-cp38-win_amd64.whl/zipline/utils/factory.py | factory.py |
import click
import pandas as pd
from .context_tricks import CallbackManager
def maybe_show_progress(it, show_progress, **kwargs):
"""Optionally show a progress bar for the given iterator.
Parameters
----------
it : iterable
The underlying iterator.
show_progress : bool
Should progress be shown.
**kwargs
Forwarded to the click progress bar.
Returns
-------
itercontext : context manager
A context manager whose enter is the actual iterator to use.
Examples
--------
.. code-block:: python
with maybe_show_progress([1, 2, 3], True) as ns:
for n in ns:
...
"""
if show_progress:
return click.progressbar(it, **kwargs)
    # context manager that just returns `it` when we enter it
return CallbackManager(lambda it=it: it)
class _DatetimeParam(click.ParamType):
def __init__(self, tz=None):
self.tz = tz
def parser(self, value):
return pd.Timestamp(value, tz=self.tz)
@property
def name(self):
return type(self).__name__.upper()
def convert(self, value, param, ctx):
try:
return self.parser(value)
except ValueError:
self.fail(
"%s is not a valid %s" % (value, self.name.lower()),
param,
ctx,
)
class Timestamp(_DatetimeParam):
"""A click parameter that parses the value into pandas.Timestamp objects.
Parameters
----------
tz : timezone-coercable, optional
The timezone to parse the string as.
        By default the timezone will be inferred from the string, or the
        result will be naive.
"""
class Date(_DatetimeParam):
"""A click parameter that parses the value into datetime.date objects.
Parameters
----------
tz : timezone-coercable, optional
The timezone to parse the string as.
        By default the timezone will be inferred from the string, or the
        result will be naive.
as_timestamp : bool, optional
If True, return the value as a pd.Timestamp object normalized to
midnight.
"""
def __init__(self, tz=None, as_timestamp=False):
super(Date, self).__init__(tz=tz)
self.as_timestamp = as_timestamp
def parser(self, value):
ts = super(Date, self).parser(value)
return ts.normalize() if self.as_timestamp else ts.date()
class Time(_DatetimeParam):
"""A click parameter that parses the value into timetime.time objects.
Parameters
----------
tz : timezone-coercable, optional
The timezone to parse the string as.
        By default the timezone will be inferred from the string, or the
        result will be naive.
"""
def parser(self, value):
return super(Time, self).parser(value).time()
class Timedelta(_DatetimeParam):
"""A click parameter that parses values into pd.Timedelta objects.
Parameters
----------
unit : {'D', 'h', 'm', 's', 'ms', 'us', 'ns'}, optional
Denotes the unit of the input if the input is an integer.
"""
def __init__(self, unit="ns"):
self.unit = unit
def parser(self, value):
return pd.Timedelta(value, unit=self.unit) | zipline-tej | /zipline_tej-0.0.48-cp38-cp38-win_amd64.whl/zipline/utils/cli.py | cli.py |
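# Editor's sketch (illustrative only): attaching these parameter types to
# click options. The command and option names are hypothetical.
#
#     @click.command()
#     @click.option("--start", type=Date(tz="utc", as_timestamp=True))
#     @click.option("--end", type=Date(tz="utc", as_timestamp=True))
#     @click.option("--timeout", type=Timedelta(unit="s"))
#     def backfill(start, end, timeout):
#         ...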
import abc
import logbook
from datetime import datetime
import pandas as pd
from zipline.errors import (
AccountControlViolation,
TradingControlViolation,
)
from zipline.utils.input_validation import (
expect_bounded,
expect_types,
)
from zipline.sources.TEJ_Api_Data import LIQUIDITY_RISK_COLUMNS # 20230804 (by MRC)
log = logbook.Logger("TradingControl")
class TradingControl(metaclass=abc.ABCMeta):
"""
Abstract base class representing a fail-safe control on the behavior of any
algorithm.
"""
def __init__(self, on_error, **kwargs):
"""
Track any arguments that should be printed in the error message
        generated by self.handle_violation.
"""
self.on_error = on_error
self.__fail_args = kwargs
@abc.abstractmethod
def validate(self, asset, amount, portfolio, algo_datetime, algo_current_data):
"""
Before any order is executed by TradingAlgorithm, this method should be
called *exactly once* on each registered TradingControl object.
If the specified asset and amount do not violate this TradingControl's
restraint given the information in `portfolio`, this method should
return None and have no externally-visible side-effects.
        If the desired order violates this TradingControl's constraint, this
        method should call self.handle_violation(asset, amount, datetime).
"""
raise NotImplementedError
def _constraint_msg(self, metadata):
constraint = repr(self)
if metadata:
constraint = "{constraint} (Metadata: {metadata})".format(
constraint=constraint, metadata=metadata
)
return constraint
def handle_violation(self, asset, amount, datetime, metadata=None):
"""
        Handle a TradingControlViolation, either by raising or logging an
        error with information about the failure.
If dynamic information should be displayed as well, pass it in via
`metadata`.
"""
constraint = self._constraint_msg(metadata)
if self.on_error == "fail":
raise TradingControlViolation(
asset=asset, amount=amount, datetime=datetime, constraint=constraint
)
elif self.on_error == "log":
log.error(
"Order for {amount} shares of {asset} at {dt} "
"violates trading constraint {constraint}",
amount=amount,
asset=asset,
dt=datetime,
constraint=constraint,
)
def __repr__(self):
return "{name}({attrs})".format(
name=self.__class__.__name__, attrs=self.__fail_args
)
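# Editor's note (illustrative sketch only): the concrete TradingControl
# subclasses below are normally installed from an algorithm's ``initialize``
# via the corresponding ``zipline.api`` calls rather than constructed
# directly, e.g.
#
#     def initialize(context):
#         set_max_order_count(10)
#         set_max_order_size(max_shares=1000, max_notional=100000.0)
#         set_long_only()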
class MaxOrderCount(TradingControl):
"""
TradingControl representing a limit on the number of orders that can be
placed in a given trading day.
"""
def __init__(self, on_error, max_count):
super(MaxOrderCount, self).__init__(on_error, max_count=max_count)
self.orders_placed = 0
self.max_count = max_count
self.current_date = None
def validate(self, asset, amount, portfolio, algo_datetime, algo_current_data):
"""
Fail if we've already placed self.max_count orders today.
"""
algo_date = algo_datetime.date()
# Reset order count if it's a new day.
if self.current_date and self.current_date != algo_date:
self.orders_placed = 0
self.current_date = algo_date
if self.orders_placed >= self.max_count:
self.handle_violation(asset, amount, algo_datetime)
self.orders_placed += 1
class RestrictedListOrder(TradingControl):
"""TradingControl representing a restricted list of assets that
cannot be ordered by the algorithm.
Parameters
----------
restrictions : zipline.finance.asset_restrictions.Restrictions
Object representing restrictions of a group of assets.
"""
def __init__(self, on_error, restrictions):
super(RestrictedListOrder, self).__init__(on_error)
self.restrictions = restrictions
def validate(self, asset, amount, portfolio, algo_datetime, algo_current_data):
"""
Fail if the asset is in the restricted_list.
"""
if self.restrictions.is_restricted(asset, algo_datetime):
self.handle_violation(asset, amount, algo_datetime)
class MaxOrderSize(TradingControl):
"""
TradingControl representing a limit on the magnitude of any single order
placed with the given asset. Can be specified by share or by dollar
value.
"""
def __init__(self, on_error, asset=None, max_shares=None, max_notional=None):
super(MaxOrderSize, self).__init__(
on_error, asset=asset, max_shares=max_shares, max_notional=max_notional
)
self.asset = asset
self.max_shares = max_shares
self.max_notional = max_notional
if max_shares is None and max_notional is None:
raise ValueError("Must supply at least one of max_shares and max_notional")
if max_shares and max_shares < 0:
raise ValueError("max_shares cannot be negative.")
if max_notional and max_notional < 0:
raise ValueError("max_notional must be positive.")
def validate(self, asset, amount, portfolio, algo_datetime, algo_current_data):
"""
Fail if the magnitude of the given order exceeds either self.max_shares
or self.max_notional.
"""
if self.asset is not None and self.asset != asset:
return
if self.max_shares is not None and abs(amount) > self.max_shares:
self.handle_violation(asset, amount, algo_datetime)
current_asset_price = algo_current_data.current(asset, "price")
order_value = amount * current_asset_price
too_much_value = (
self.max_notional is not None and abs(order_value) > self.max_notional
)
if too_much_value:
self.handle_violation(asset, amount, algo_datetime)
class MaxPositionSize(TradingControl):
"""
TradingControl representing a limit on the maximum position size that can
be held by an algo for a given asset.
"""
def __init__(self, on_error, asset=None, max_shares=None, max_notional=None):
super(MaxPositionSize, self).__init__(
on_error, asset=asset, max_shares=max_shares, max_notional=max_notional
)
self.asset = asset
self.max_shares = max_shares
self.max_notional = max_notional
if max_shares is None and max_notional is None:
raise ValueError("Must supply at least one of max_shares and max_notional")
if max_shares and max_shares < 0:
raise ValueError("max_shares cannot be negative.")
if max_notional and max_notional < 0:
raise ValueError("max_notional must be positive.")
def validate(self, asset, amount, portfolio, algo_datetime, algo_current_data):
"""
Fail if the given order would cause the magnitude of our position to be
greater in shares than self.max_shares or greater in dollar value than
self.max_notional.
"""
if self.asset is not None and self.asset != asset:
return
current_share_count = portfolio.positions[asset].amount
shares_post_order = current_share_count + amount
too_many_shares = (
self.max_shares is not None and abs(shares_post_order) > self.max_shares
)
if too_many_shares:
self.handle_violation(asset, amount, algo_datetime)
current_price = algo_current_data.current(asset, "price")
value_post_order = shares_post_order * current_price
too_much_value = (
self.max_notional is not None and abs(value_post_order) > self.max_notional
)
if too_much_value:
self.handle_violation(asset, amount, algo_datetime)
class LongOnly(TradingControl):
"""
TradingControl representing a prohibition against holding short positions.
"""
def __init__(self, on_error):
super(LongOnly, self).__init__(on_error)
def validate(self, asset, amount, portfolio, algo_datetime, algo_current_data):
"""
Fail if we would hold negative shares of asset after completing this
order.
"""
if portfolio.positions[asset].amount + amount < 0:
self.handle_violation(asset, amount, algo_datetime)
class AssetDateBounds(TradingControl):
"""
TradingControl representing a prohibition against ordering an asset before
its start_date, or after its end_date.
"""
def __init__(self, on_error):
super(AssetDateBounds, self).__init__(on_error)
def validate(self, asset, amount, portfolio, algo_datetime, algo_current_data):
"""
        Fail if the algo has passed this Asset's end_date, or has not yet
        reached the Asset's start_date.
"""
# If the order is for 0 shares, then silently pass through.
if amount == 0:
return
normalized_algo_dt = pd.Timestamp(algo_datetime).normalize()
# Fail if the algo is before this Asset's start_date
if asset.start_date:
normalized_start = pd.Timestamp(asset.start_date).normalize()
if normalized_algo_dt < normalized_start:
metadata = {"asset_start_date": normalized_start}
self.handle_violation(asset, amount, algo_datetime, metadata=metadata)
# Fail if the algo has passed this Asset's end_date
if asset.end_date:
normalized_end = pd.Timestamp(asset.end_date).normalize()
if normalized_algo_dt > normalized_end:
metadata = {"asset_end_date": normalized_end}
self.handle_violation(asset, amount, algo_datetime, metadata=metadata)
class AccountControl(metaclass=abc.ABCMeta):
"""
Abstract base class representing a fail-safe control on the behavior of any
algorithm.
"""
def __init__(self, **kwargs):
"""
Track any arguments that should be printed in the error message
generated by self.fail.
"""
self.__fail_args = kwargs
@abc.abstractmethod
def validate(self, _portfolio, _account, _algo_datetime, _algo_current_data):
"""
        On each call to handle_data by TradingAlgorithm, this method should be
called *exactly once* on each registered AccountControl object.
If the check does not violate this AccountControl's restraint given
the information in `portfolio` and `account`, this method should
return None and have no externally-visible side-effects.
        If the desired order violates this AccountControl's constraint, this
method should call self.fail().
"""
raise NotImplementedError
def fail(self):
"""
Raise an AccountControlViolation with information about the failure.
"""
raise AccountControlViolation(constraint=repr(self))
def __repr__(self):
return "{name}({attrs})".format(
name=self.__class__.__name__, attrs=self.__fail_args
)
class MaxLeverage(AccountControl):
"""
AccountControl representing a limit on the maximum leverage allowed
by the algorithm.
"""
def __init__(self, max_leverage):
"""
max_leverage is the gross leverage in decimal form. For example,
        2 limits an algorithm to trading at most double the account value.
"""
super(MaxLeverage, self).__init__(max_leverage=max_leverage)
self.max_leverage = max_leverage
if max_leverage is None:
raise ValueError("Must supply max_leverage")
if max_leverage < 0:
raise ValueError("max_leverage must be positive")
def validate(self, _portfolio, _account, _algo_datetime, _algo_current_data):
"""
Fail if the leverage is greater than the allowed leverage.
"""
if _account.leverage > self.max_leverage:
self.fail()
class MinLeverage(AccountControl):
"""AccountControl representing a limit on the minimum leverage allowed
by the algorithm after a threshold period of time.
Parameters
----------
min_leverage : float
The gross leverage in decimal form.
deadline : datetime
The date the min leverage must be achieved by.
    For example, min_leverage=2 requires an algorithm to have achieved a gross
    leverage of at least 2 by the deadline date.
"""
@expect_types(
__funcname="MinLeverage", min_leverage=(int, float), deadline=datetime
)
@expect_bounded(__funcname="MinLeverage", min_leverage=(0, None))
def __init__(self, min_leverage, deadline):
super(MinLeverage, self).__init__(min_leverage=min_leverage, deadline=deadline)
self.min_leverage = min_leverage
self.deadline = deadline
def validate(self, _portfolio, account, algo_datetime, _algo_current_data):
"""
Make validation checks if we are after the deadline.
Fail if the leverage is less than the min leverage.
"""
if algo_datetime > self.deadline and account.leverage < self.min_leverage:
self.fail()
####################
# Trading Policy #
####################
# 20230804 (by MRC) Added the Trading Policy feature
class TradingPolicy(metaclass=abc.ABCMeta):
"""
Abstract base class representing a fail-safe control on the behavior of any
algorithm.
"""
def __init__(self, **kwargs):
"""
Track any arguments that should be printed in the error message
generated by self.handle_violation.
"""
self.__fail_args = kwargs
@abc.abstractmethod
def validate(self, asset, algo_datetime, algo_current_data):
"""
        Before any transaction is executed by TradingAlgorithm, this method
        should be called *exactly once* on each registered TradingPolicy
        object.
        If the specified asset does not violate this TradingPolicy's
        restraint, this method should return True.
        If the desired transaction violates this TradingPolicy's constraint,
        this method should call self.handle_violation and return False.
"""
raise NotImplementedError
def _constraint_msg(self, metadata, constraint=None):
if not constraint:
constraint = repr(self)
if metadata:
constraint = "{constraint} (Metadata: {metadata})".format(
constraint=constraint, metadata=metadata
)
return constraint
def handle_violation(self, asset, datetime, metadata=None, constraint=None):
"""
        Handle a TradingPolicyViolation by logging information about the violation.
If dynamic information should be displayed as well, pass it in via `metadata`.
"""
constraint = self._constraint_msg(metadata, constraint)
log.info(
"{asset} violates trading constraint {constraint} at {dt} ",
asset=asset,
constraint=constraint,
dt=datetime,
)
def __repr__(self):
return "{name}({attrs})".format(
name=self.__class__.__name__, attrs=self.__fail_args
)
class ExcludeIlliquidAsset(TradingPolicy):
"""TradingPolicy representing a limit on the illiquid assets allowed
    by the algorithm. (See also: zipline.sources.TEJ_Api_Data.LIQUIDITY_RISK_COLUMNS.)
Parameters
----------
rules : list
see:zipline.sources.TEJ_Api_Data.LIQUIDITY_RISK_COLUMNS
log : bool
        Whether logging of violations is active.
    For example, rules=['Full_Delivery'] prevents the algorithm from trading
    assets that are flagged as full-delivery stocks.
"""
def __init__(self, rules, log):
super(ExcludeIlliquidAsset, self).__init__(rules=rules,
log=log)
self.log = log
self.rules = rules
def validate(self, asset, algo_datetime, algo_current_data):
"""
        Temporarily suspend trading if data.current(asset, LIQUIDITY_RISK_COLUMNS.get(rule)) == 'Y'.
"""
# If the order is for 0 shares, then silently pass through.
# if amount == 0:
# return
normalized_algo_dt = pd.Timestamp(algo_datetime).normalize()
for rule in self.rules:
if algo_current_data.current(asset, LIQUIDITY_RISK_COLUMNS.get(rule))=='Y':
metadata = {"asset":asset}
if self.log:
self.handle_violation(asset, normalized_algo_dt, metadata=metadata, constraint=str('"{}"').format(rule))
return False
else:
continue
return True | zipline-tej | /zipline_tej-0.0.48-cp38-cp38-win_amd64.whl/zipline/finance/controls.py | controls.py |
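# Editor's sketch (illustrative only): constructing the policy above. The
# 'Full_Delivery' rule name follows the class docstring; the exact keys
# accepted are whatever zipline.sources.TEJ_Api_Data.LIQUIDITY_RISK_COLUMNS
# defines, so treat this value as a placeholder.
#
#     policy = ExcludeIlliquidAsset(rules=["Full_Delivery"], log=True)
#
# During simulation, any asset whose corresponding flag is 'Y' on a bar is
# skipped by the slippage loop for that bar (see SlippageModel.simulate).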
TRADING_DAYS_IN_YEAR = 250
TRADING_HOURS_IN_DAY = 6.5
MINUTES_IN_HOUR = 60
ANNUALIZER = {
"daily": TRADING_DAYS_IN_YEAR,
"hourly": TRADING_DAYS_IN_YEAR * TRADING_HOURS_IN_DAY,
"minute": TRADING_DAYS_IN_YEAR * TRADING_HOURS_IN_DAY * MINUTES_IN_HOUR,
}
# NOTE: It may be worth revisiting how the keys for this dictionary are
# specified, for instance making them ContinuousFuture objects instead of
# static strings.
FUTURE_EXCHANGE_FEES_BY_SYMBOL = {
"AD": 1.60, # AUD
"AI": 0.96, # Bloomberg Commodity Index
"BD": 1.50, # Big Dow
"BO": 1.95, # Soybean Oil
"BP": 1.60, # GBP
"CD": 1.60, # CAD
"CL": 1.50, # Crude Oil
"CM": 1.03, # Corn e-mini
"CN": 1.95, # Corn
"DJ": 1.50, # Dow Jones
"EC": 1.60, # Euro FX
"ED": 1.25, # Eurodollar
"EE": 1.50, # Euro FX e-mini
"EI": 1.50, # MSCI Emerging Markets mini
"EL": 1.50, # Eurodollar NYSE LIFFE
"ER": 0.65, # Russell2000 e-mini
"ES": 1.18, # SP500 e-mini
"ET": 1.50, # Ethanol
"EU": 1.50, # Eurodollar e-micro
"FC": 2.03, # Feeder Cattle
"FF": 0.96, # 3-Day Federal Funds
"FI": 0.56, # Deliverable Interest Rate Swap 5y
"FS": 1.50, # Interest Rate Swap 5y
"FV": 0.65, # US 5y
"GC": 1.50, # Gold
"HG": 1.50, # Copper
"HO": 1.50, # Heating Oil
"HU": 1.50, # Unleaded Gasoline
"JE": 0.16, # JPY e-mini
"JY": 1.60, # JPY
"LB": 2.03, # Lumber
"LC": 2.03, # Live Cattle
"LH": 2.03, # Lean Hogs
"MB": 1.50, # Municipal Bonds
"MD": 1.50, # SP400 Midcap
"ME": 1.60, # MXN
"MG": 1.50, # MSCI EAFE mini
"MI": 1.18, # SP400 Midcap e-mini
"MS": 1.03, # Soybean e-mini
"MW": 1.03, # Wheat e-mini
"ND": 1.50, # Nasdaq100
"NG": 1.50, # Natural Gas
"NK": 2.15, # Nikkei225
"NQ": 1.18, # Nasdaq100 e-mini
"NZ": 1.60, # NZD
"OA": 1.95, # Oats
"PA": 1.50, # Palladium
"PB": 1.50, # Pork Bellies
"PL": 1.50, # Platinum
"QG": 0.50, # Natural Gas e-mini
"QM": 1.20, # Crude Oil e-mini
"RM": 1.50, # Russell1000 e-mini
"RR": 1.95, # Rough Rice
"SB": 2.10, # Sugar
"SF": 1.60, # CHF
"SM": 1.95, # Soybean Meal
"SP": 2.40, # SP500
"SV": 1.50, # Silver
"SY": 1.95, # Soybean
"TB": 1.50, # Treasury Bills
"TN": 0.56, # Deliverable Interest Rate Swap 10y
"TS": 1.50, # Interest Rate Swap 10y
"TU": 1.50, # US 2y
"TY": 0.75, # US 10y
"UB": 0.85, # Ultra Tbond
"US": 0.80, # US 30y
"VX": 1.50, # VIX
"WC": 1.95, # Wheat
"XB": 1.50, # RBOB Gasoline
"XG": 0.75, # Gold e-mini
"YM": 1.50, # Dow Jones e-mini
"YS": 0.75, # Silver e-mini
}
# See `zipline.finance.slippage.VolatilityVolumeShare` for more information on
# how these constants are used.
DEFAULT_ETA = 0.049018143225019836
ROOT_SYMBOL_TO_ETA = {
"AD": DEFAULT_ETA, # AUD
"AI": DEFAULT_ETA, # Bloomberg Commodity Index
"BD": 0.050346811117733474, # Big Dow
"BO": 0.054930995070046298, # Soybean Oil
"BP": 0.047841544238716338, # GBP
"CD": 0.051124420640250717, # CAD
"CL": 0.04852544628414196, # Crude Oil
"CM": 0.052683478163348625, # Corn e-mini
"CN": 0.053499718390037809, # Corn
"DJ": 0.02313009072076987, # Dow Jones
"EC": 0.04885131067661861, # Euro FX
"ED": 0.094184297090245755, # Eurodollar
"EE": 0.048713151357687556, # Euro FX e-mini
"EI": 0.031712708439692663, # MSCI Emerging Markets mini
"EL": 0.044207422018209361, # Eurodollar NYSE LIFFE
"ER": 0.045930567737711307, # Russell2000 e-mini
"ES": 0.047304418321993502, # SP500 e-mini
"ET": DEFAULT_ETA, # Ethanol
"EU": 0.049750396084029064, # Eurodollar e-micro
"FC": 0.058728734202178494, # Feeder Cattle
"FF": 0.048970591527624042, # 3-Day Federal Funds
"FI": 0.033477176738170772, # Deliverable Interest Rate Swap 5y
"FS": 0.034557788010453824, # Interest Rate Swap 5y
"FV": 0.046544427716056963, # US 5y
"GC": 0.048933313546125207, # Gold
"HG": 0.052238417524987799, # Copper
"HO": 0.045061318412156062, # Heating Oil
"HU": 0.017154313062463938, # Unleaded Gasoline
"JE": 0.013948949613401812, # JPY e-mini
"JY": DEFAULT_ETA, # JPY
"LB": 0.06146586386903994, # Lumber
"LC": 0.055853801862858619, # Live Cattle
"LH": 0.057557004630219781, # Lean Hogs
"MB": DEFAULT_ETA, # Municipal Bonds
"MD": DEFAULT_ETA, # SP400 Midcap
"ME": 0.030383767727818548, # MXN
"MG": 0.029579261656151684, # MSCI EAFE mini
"MI": 0.041026288873007355, # SP400 Midcap e-mini
"MS": DEFAULT_ETA, # Soybean e-mini
"MW": 0.052579919663880245, # Wheat e-mini
"ND": DEFAULT_ETA, # Nasdaq100
"NG": 0.047897809233755716, # Natural Gas
"NK": 0.044555435054791433, # Nikkei225
"NQ": 0.044772425085977945, # Nasdaq100 e-mini
"NZ": 0.049170418073872041, # NZD
"OA": 0.056973267232775522, # Oats
"PA": DEFAULT_ETA, # Palladium
"PB": DEFAULT_ETA, # Pork Bellies
"PL": 0.054579379665647493, # Platinum
"QG": DEFAULT_ETA, # Natural Gas e-mini
"QM": DEFAULT_ETA, # Crude Oil e-mini
"RM": 0.037425041244579654, # Russell1000 e-mini
"RR": DEFAULT_ETA, # Rough Rice
"SB": 0.057388160345668134, # Sugar
"SF": 0.047784825569615726, # CHF
"SM": 0.048552860559844223, # Soybean Meal
"SP": DEFAULT_ETA, # SP500
"SV": 0.052691435039931109, # Silver
"SY": 0.052041703657281613, # Soybean
"TB": DEFAULT_ETA, # Treasury Bills
"TN": 0.033363465365262503, # Deliverable Interest Rate Swap 10y
"TS": 0.032908878455069152, # Interest Rate Swap 10y
"TU": 0.063867646063840794, # US 2y
"TY": 0.050586988554700826, # US 10y
"UB": DEFAULT_ETA, # Ultra Tbond
"US": 0.047984179873590722, # US 30y
"VX": DEFAULT_ETA, # VIX
"WC": 0.052636542119329242, # Wheat
"XB": 0.044444916388854484, # RBOB Gasoline
"XG": DEFAULT_ETA, # Gold e-mini
"YM": DEFAULT_ETA, # Dow Jones e-mini
"YS": DEFAULT_ETA, # Silver e-mini
} | zipline-tej | /zipline_tej-0.0.48-cp38-cp38-win_amd64.whl/zipline/finance/constants.py | constants.py |
import abc
from numpy import vectorize
from functools import partial, reduce
import operator
import pandas as pd
from collections import namedtuple
from toolz import groupby
from enum import IntEnum
from zipline.utils.numpy_utils import vectorized_is_element
from zipline.assets import Asset
Restriction = namedtuple("Restriction", ["asset", "effective_date", "state"])
RESTRICTION_STATES = IntEnum(
"RESTRICTION_STATES",
[
"ALLOWED",
"FROZEN",
],
start=0,
)
class Restrictions(metaclass=abc.ABCMeta):
"""
Abstract restricted list interface, representing a set of assets that an
algorithm is restricted from trading.
"""
@abc.abstractmethod
def is_restricted(self, assets, dt):
"""
Is the asset restricted (RestrictionStates.FROZEN) on the given dt?
Parameters
----------
asset : Asset of iterable of Assets
The asset(s) for which we are querying a restriction
dt : pd.Timestamp
The timestamp of the restriction query
Returns
-------
is_restricted : bool or pd.Series[bool] indexed by asset
Is the asset or assets restricted on this dt?
"""
raise NotImplementedError("is_restricted")
def __or__(self, other_restriction):
"""Base implementation for combining two restrictions."""
# If the right side is a _UnionRestrictions, defers to the
# _UnionRestrictions implementation of `|`, which intelligently
# flattens restricted lists
if isinstance(other_restriction, _UnionRestrictions):
return other_restriction | self
return _UnionRestrictions([self, other_restriction])
class _UnionRestrictions(Restrictions):
"""
A union of a number of sub restrictions.
Parameters
----------
sub_restrictions : iterable of Restrictions (but not _UnionRestrictions)
The Restrictions to be added together
Notes
-----
- Consumers should not construct instances of this class directly, but
instead use the `|` operator to combine restrictions
"""
def __new__(cls, sub_restrictions):
# Filter out NoRestrictions and deal with resulting cases involving
# one or zero sub_restrictions
sub_restrictions = [
r for r in sub_restrictions if not isinstance(r, NoRestrictions)
]
if len(sub_restrictions) == 0:
return NoRestrictions()
elif len(sub_restrictions) == 1:
return sub_restrictions[0]
new_instance = super(_UnionRestrictions, cls).__new__(cls)
new_instance.sub_restrictions = sub_restrictions
return new_instance
def __or__(self, other_restriction):
"""
Overrides the base implementation for combining two restrictions, of
which the left side is a _UnionRestrictions.
"""
# Flatten the underlying sub restrictions of _UnionRestrictions
if isinstance(other_restriction, _UnionRestrictions):
new_sub_restrictions = (
self.sub_restrictions + other_restriction.sub_restrictions
)
else:
new_sub_restrictions = self.sub_restrictions + [other_restriction]
return _UnionRestrictions(new_sub_restrictions)
def is_restricted(self, assets, dt):
if isinstance(assets, Asset):
return any(
r.is_restricted(assets, dt) for r in self.sub_restrictions
)
return reduce(
operator.or_,
(r.is_restricted(assets, dt) for r in self.sub_restrictions),
)
class NoRestrictions(Restrictions):
"""
A no-op restrictions that contains no restrictions.
"""
def is_restricted(self, assets, dt):
if isinstance(assets, Asset):
return False
return pd.Series(index=pd.Index(assets), data=False)
class StaticRestrictions(Restrictions):
"""
Static restrictions stored in memory that are constant regardless of dt
for each asset.
Parameters
----------
restricted_list : iterable of assets
The assets to be restricted
"""
def __init__(self, restricted_list):
self._restricted_set = frozenset(restricted_list)
def is_restricted(self, assets, dt):
"""
An asset is restricted for all dts if it is in the static list.
"""
if isinstance(assets, Asset):
return assets in self._restricted_set
return pd.Series(
index=pd.Index(assets),
data=vectorized_is_element(assets, self._restricted_set),
)
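# Editor's sketch (illustrative only): restricting a fixed set of assets and
# combining restriction sources with ``|``. ``asset_a`` and ``asset_b`` stand
# in for real Asset objects.
#
#     static = StaticRestrictions([asset_a, asset_b])
#     combined = static | NoRestrictions()      # collapses back to ``static``
#     combined.is_restricted(asset_a, pd.Timestamp("2017-01-03"))  # -> True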
class HistoricalRestrictions(Restrictions):
"""
Historical restrictions stored in memory with effective dates for each
asset.
Parameters
----------
restrictions : iterable of namedtuple Restriction
The restrictions, each defined by an asset, effective date and state
"""
def __init__(self, restrictions):
# A dict mapping each asset to its restrictions, which are sorted by
# ascending order of effective_date
self._restrictions_by_asset = {
asset: sorted(
restrictions_for_asset, key=lambda x: x.effective_date
)
for asset, restrictions_for_asset in groupby(
lambda x: x.asset, restrictions
).items()
}
def is_restricted(self, assets, dt):
"""
Returns whether or not an asset or iterable of assets is restricted
on a dt.
"""
if isinstance(assets, Asset):
return self._is_restricted_for_asset(assets, dt)
is_restricted = partial(self._is_restricted_for_asset, dt=dt)
return pd.Series(
index=pd.Index(assets),
data=vectorize(is_restricted, otypes=[bool])(assets),
)
def _is_restricted_for_asset(self, asset, dt):
state = RESTRICTION_STATES.ALLOWED
for r in self._restrictions_by_asset.get(asset, ()):
if r.effective_date > dt:
break
state = r.state
return state == RESTRICTION_STATES.FROZEN
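# Editor's sketch (illustrative only): an asset frozen on one date and
# released on a later one. ``asset`` stands in for a real Asset object.
#
#     restrictions = HistoricalRestrictions([
#         Restriction(asset, pd.Timestamp("2017-01-05", tz="UTC"),
#                     RESTRICTION_STATES.FROZEN),
#         Restriction(asset, pd.Timestamp("2017-01-10", tz="UTC"),
#                     RESTRICTION_STATES.ALLOWED),
#     ])
#     # FROZEN is the most recent state in effect on 2017-01-07:
#     restrictions.is_restricted(asset, pd.Timestamp("2017-01-07", tz="UTC"))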
class SecurityListRestrictions(Restrictions):
"""
Restrictions based on a security list.
Parameters
----------
restrictions : zipline.utils.security_list.SecurityList
The restrictions defined by a SecurityList
"""
def __init__(self, security_list_by_dt):
self.current_securities = security_list_by_dt.current_securities
def is_restricted(self, assets, dt):
securities_in_list = self.current_securities(dt)
if isinstance(assets, Asset):
return assets in securities_in_list
return pd.Series(
index=pd.Index(assets),
data=vectorized_is_element(assets, securities_in_list),
) | zipline-tej | /zipline_tej-0.0.48-cp38-cp38-win_amd64.whl/zipline/finance/asset_restrictions.py | asset_restrictions.py |
from abc import abstractmethod
from collections import defaultdict
from toolz import merge
from zipline.assets import Equity, Future
from zipline.finance.constants import FUTURE_EXCHANGE_FEES_BY_SYMBOL
from zipline.finance.shared import AllowedAssetMarker, FinancialModelMeta
from zipline.utils.dummy import DummyMapping
DEFAULT_PER_SHARE_COST = 0.001 # 0.1 cents per share
DEFAULT_PER_CONTRACT_COST = 0.85 # $0.85 per future contract
DEFAULT_PER_DOLLAR_COST = 0.0015 # 0.15 cents per dollar
DEFAULT_MINIMUM_COST_PER_EQUITY_TRADE = 0.0 # $0 per trade
DEFAULT_MINIMUM_COST_PER_FUTURE_TRADE = 0.0 # $0 per trade
class CommissionModel(metaclass=FinancialModelMeta):
"""Abstract base class for commission models.
Commission models are responsible for accepting order/transaction pairs and
calculating how much commission should be charged to an algorithm's account
on each transaction.
To implement a new commission model, create a subclass of
:class:`~zipline.finance.commission.CommissionModel` and implement
:meth:`calculate`.
"""
# Asset types that are compatible with the given model.
allowed_asset_types = (Equity, Future)
@abstractmethod
def calculate(self, order, transaction):
"""
Calculate the amount of commission to charge on ``order`` as a result
of ``transaction``.
Parameters
----------
order : zipline.finance.order.Order
The order being processed.
The ``commission`` field of ``order`` is a float indicating the
amount of commission already charged on this order.
transaction : zipline.finance.transaction.Transaction
The transaction being processed. A single order may generate
multiple transactions if there isn't enough volume in a given bar
to fill the full amount requested in the order.
Returns
-------
amount_charged : float
The additional commission, in dollars, that we should attribute to
this order.
"""
raise NotImplementedError("calculate")
class NoCommission(CommissionModel):
"""Model commissions as free.
Notes
-----
This is primarily used for testing.
"""
@staticmethod
def calculate(order, transaction):
return 0.0
# todo: update to Python3
class EquityCommissionModel(CommissionModel, metaclass=AllowedAssetMarker):
"""
Base class for commission models which only support equities.
"""
allowed_asset_types = (Equity,)
# todo: update to Python3
class FutureCommissionModel(CommissionModel, metaclass=AllowedAssetMarker):
"""
Base class for commission models which only support futures.
"""
allowed_asset_types = (Future,)
def calculate_per_unit_commission(
order, transaction, cost_per_unit, initial_commission, min_trade_cost
):
"""
If there is a minimum commission:
If the order hasn't had a commission paid yet, pay the minimum
commission.
If the order has paid a commission, start paying additional
commission once the minimum commission has been reached.
If there is no minimum commission:
Pay commission based on number of units in the transaction.
"""
additional_commission = abs(transaction.amount * cost_per_unit)
if order.commission == 0:
# no commission paid yet, pay at least the minimum plus a one-time
# exchange fee.
return max(min_trade_cost, additional_commission + initial_commission)
else:
# we've already paid some commission, so figure out how much we
# would be paying if we only counted per unit.
per_unit_total = (
abs(order.filled * cost_per_unit)
+ additional_commission
+ initial_commission
)
if per_unit_total < min_trade_cost:
# if we haven't hit the minimum threshold yet, don't pay
# additional commission
return 0
else:
# we've exceeded the threshold, so pay more commission.
return per_unit_total - order.commission
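# Editor's worked example (illustrative numbers only): with cost_per_unit of
# $0.01 per share, no initial commission and a $1.00 minimum, an order that
# fills 50 shares per bar is charged as follows:
#   - first fill:  max($1.00, 50 * $0.01) = $1.00 (the minimum is charged).
#   - second fill: cumulative per-unit total is 100 * $0.01 = $1.00, which is
#     already covered by the $1.00 paid, so $0.00 additional is charged.
#   - third fill:  cumulative per-unit total is 150 * $0.01 = $1.50, so an
#     additional $1.50 - $1.00 = $0.50 is charged.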
class PerShare(EquityCommissionModel):
"""
Calculates a commission for a transaction based on a per share cost with
an optional minimum cost per trade.
Parameters
----------
cost : float, optional
The amount of commissions paid per share traded. Default is one tenth
of a cent per share.
min_trade_cost : float, optional
The minimum amount of commissions paid per trade. Default is no
minimum.
Notes
-----
This is zipline's default commission model for equities.
"""
def __init__(
self,
cost=DEFAULT_PER_SHARE_COST,
min_trade_cost=DEFAULT_MINIMUM_COST_PER_EQUITY_TRADE,
):
self.cost_per_share = float(cost)
self.min_trade_cost = min_trade_cost or 0
def __repr__(self):
return (
"{class_name}(cost_per_share={cost_per_share}, "
"min_trade_cost={min_trade_cost})".format(
class_name=self.__class__.__name__,
cost_per_share=self.cost_per_share,
min_trade_cost=self.min_trade_cost,
)
)
def calculate(self, order, transaction):
return calculate_per_unit_commission(
order=order,
transaction=transaction,
cost_per_unit=self.cost_per_share,
initial_commission=0,
min_trade_cost=self.min_trade_cost,
)
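# Editor's sketch (illustrative only): choosing this model from an algorithm's
# ``initialize`` with ``set_commission`` from ``zipline.api``.
#
#     def initialize(context):
#         set_commission(us_equities=PerShare(cost=0.001, min_trade_cost=1.0))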
class PerContract(FutureCommissionModel):
"""
Calculates a commission for a transaction based on a per contract cost with
an optional minimum cost per trade.
Parameters
----------
cost : float or dict
The amount of commissions paid per contract traded. If given a float,
the commission for all futures contracts is the same. If given a
dictionary, it must map root symbols to the commission cost for
contracts of that symbol.
exchange_fee : float or dict
A flat-rate fee charged by the exchange per trade. This value is a
constant, one-time charge no matter how many contracts are being
traded. If given a float, the fee for all contracts is the same. If
given a dictionary, it must map root symbols to the fee for contracts
of that symbol.
min_trade_cost : float, optional
The minimum amount of commissions paid per trade.
"""
def __init__(
self,
cost,
exchange_fee,
min_trade_cost=DEFAULT_MINIMUM_COST_PER_FUTURE_TRADE,
):
# If 'cost' or 'exchange fee' are constants, use a dummy mapping to
# treat them as a dictionary that always returns the same value.
        # NOTE: These dictionaries do not handle unknown root symbols, so it
# may be worth revisiting this behavior.
if isinstance(cost, (int, float)):
self._cost_per_contract = DummyMapping(float(cost))
else:
# Cost per contract is a dictionary. If the user's dictionary does
# not provide a commission cost for a certain contract, fall back
# on the pre-defined cost values per root symbol.
self._cost_per_contract = defaultdict(
lambda: DEFAULT_PER_CONTRACT_COST, **cost
)
if isinstance(exchange_fee, (int, float)):
self._exchange_fee = DummyMapping(float(exchange_fee))
else:
# Exchange fee is a dictionary. If the user's dictionary does not
# provide an exchange fee for a certain contract, fall back on the
# pre-defined exchange fees per root symbol.
self._exchange_fee = merge(
FUTURE_EXCHANGE_FEES_BY_SYMBOL,
exchange_fee,
)
self.min_trade_cost = min_trade_cost or 0
def __repr__(self):
if isinstance(self._cost_per_contract, DummyMapping):
# Cost per contract is a constant, so extract it.
cost_per_contract = self._cost_per_contract["dummy key"]
else:
cost_per_contract = "<varies>"
if isinstance(self._exchange_fee, DummyMapping):
# Exchange fee is a constant, so extract it.
exchange_fee = self._exchange_fee["dummy key"]
else:
exchange_fee = "<varies>"
return "{class_name}(cost_per_contract={cost_per_contract}, " "exchange_fee={exchange_fee}, min_trade_cost={min_trade_cost})".format(
class_name=self.__class__.__name__,
cost_per_contract=cost_per_contract,
exchange_fee=exchange_fee,
min_trade_cost=self.min_trade_cost,
)
def calculate(self, order, transaction):
root_symbol = order.asset.root_symbol
cost_per_contract = self._cost_per_contract[root_symbol]
exchange_fee = self._exchange_fee[root_symbol]
return calculate_per_unit_commission(
order=order,
transaction=transaction,
cost_per_unit=cost_per_contract,
initial_commission=exchange_fee,
min_trade_cost=self.min_trade_cost,
)
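# Editor's sketch (illustrative only): per-contract commissions that vary by
# root symbol. Symbols missing from ``cost`` fall back to
# DEFAULT_PER_CONTRACT_COST, and symbols missing from ``exchange_fee`` fall
# back to FUTURE_EXCHANGE_FEES_BY_SYMBOL.
#
#     model = PerContract(
#         cost={"ES": 0.85, "CL": 1.05},
#         exchange_fee={"ES": 1.18},
#     )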
class PerTrade(CommissionModel):
"""
Calculates a commission for a transaction based on a per trade cost.
For orders that require multiple fills, the full commission is charged to
the first fill.
Parameters
----------
cost : float, optional
The flat amount of commissions paid per equity trade.
"""
def __init__(self, cost=DEFAULT_MINIMUM_COST_PER_EQUITY_TRADE):
"""
Cost parameter is the cost of a trade, regardless of share count.
$5.00 per trade is fairly typical of discount brokers.
"""
# Cost needs to be floating point so that calculation using division
# logic does not floor to an integer.
self.cost = float(cost)
def __repr__(self):
return "{class_name}(cost_per_trade={cost})".format(
class_name=self.__class__.__name__,
cost=self.cost,
)
def calculate(self, order, transaction):
"""
If the order hasn't had a commission paid yet, pay the fixed
commission.
"""
if order.commission == 0:
# if the order hasn't had a commission attributed to it yet,
# that's what we need to pay.
return self.cost
else:
# order has already had commission attributed, so no more
# commission.
return 0.0
class PerFutureTrade(PerContract):
"""
Calculates a commission for a transaction based on a per trade cost.
Parameters
----------
cost : float or dict
The flat amount of commissions paid per trade, regardless of the number
of contracts being traded. If given a float, the commission for all
futures contracts is the same. If given a dictionary, it must map root
symbols to the commission cost for trading contracts of that symbol.
"""
def __init__(self, cost=DEFAULT_MINIMUM_COST_PER_FUTURE_TRADE):
# The per-trade cost can be represented as the exchange fee in a
# per-contract model because the exchange fee is just a one time cost
# incurred on the first fill.
super(PerFutureTrade, self).__init__(
cost=0,
exchange_fee=cost,
min_trade_cost=0,
)
self._cost_per_trade = self._exchange_fee
def __repr__(self):
if isinstance(self._cost_per_trade, DummyMapping):
# Cost per trade is a constant, so extract it.
cost_per_trade = self._cost_per_trade["dummy key"]
else:
cost_per_trade = "<varies>"
return "{class_name}(cost_per_trade={cost_per_trade})".format(
class_name=self.__class__.__name__,
cost_per_trade=cost_per_trade,
)
class PerDollar(EquityCommissionModel):
"""
Model commissions by applying a fixed cost per dollar transacted.
Parameters
----------
cost : float, optional
The flat amount of commissions paid per dollar of equities
traded. Default is a commission of $0.0015 per dollar transacted.
"""
def __init__(self, cost=DEFAULT_PER_DOLLAR_COST):
"""
Cost parameter is the cost of a trade per-dollar. 0.0015
on $1 million means $1,500 commission (=1M * 0.0015)
"""
self.cost_per_dollar = float(cost)
def __repr__(self):
return "{class_name}(cost_per_dollar={cost})".format(
class_name=self.__class__.__name__, cost=self.cost_per_dollar
)
def calculate(self, order, transaction):
"""
Pay commission based on dollar value of shares.
"""
cost_per_share = transaction.price * self.cost_per_dollar
return abs(transaction.amount) * cost_per_share | zipline-tej | /zipline_tej-0.0.48-cp38-cp38-win_amd64.whl/zipline/finance/commission.py | commission.py |
import abc
from sys import float_info
from numpy import isfinite
import zipline.utils.math_utils as zp_math
from zipline.errors import BadOrderParameters
from zipline.utils.compat import consistent_round
class ExecutionStyle(metaclass=abc.ABCMeta):
"""Base class for order execution styles."""
_exchange = None
@abc.abstractmethod
def get_limit_price(self, is_buy):
"""
Get the limit price for this order.
Returns either None or a numerical value >= 0.
"""
raise NotImplementedError
@abc.abstractmethod
def get_stop_price(self, is_buy):
"""
Get the stop price for this order.
Returns either None or a numerical value >= 0.
"""
raise NotImplementedError
@property
def exchange(self):
"""
The exchange to which this order should be routed.
"""
return self._exchange
class MarketOrder(ExecutionStyle):
"""
Execution style for orders to be filled at current market price.
This is the default for orders placed with :func:`~zipline.api.order`.
"""
def __init__(self, exchange=None):
self._exchange = exchange
def get_limit_price(self, _is_buy):
return None
def get_stop_price(self, _is_buy):
return None
class LimitOrder(ExecutionStyle):
"""
Execution style for orders to be filled at a price equal to or better than
a specified limit price.
Parameters
----------
limit_price : float
Maximum price for buys, or minimum price for sells, at which the order
should be filled.
"""
def __init__(self, limit_price, asset=None, exchange=None):
check_stoplimit_prices(limit_price, "limit")
self.limit_price = limit_price
self._exchange = exchange
self.asset = asset
def get_limit_price(self, is_buy):
return asymmetric_round_price(
self.limit_price,
is_buy,
tick_size=(0.01 if self.asset is None else self.asset.tick_size),
)
def get_stop_price(self, _is_buy):
return None
class StopOrder(ExecutionStyle):
"""
Execution style representing a market order to be placed if market price
reaches a threshold.
Parameters
----------
stop_price : float
Price threshold at which the order should be placed. For sells, the
order will be placed if market price falls below this value. For buys,
the order will be placed if market price rises above this value.
"""
def __init__(self, stop_price, asset=None, exchange=None):
check_stoplimit_prices(stop_price, "stop")
self.stop_price = stop_price
self._exchange = exchange
self.asset = asset
def get_limit_price(self, _is_buy):
return None
def get_stop_price(self, is_buy):
return asymmetric_round_price(
self.stop_price,
not is_buy,
tick_size=(0.01 if self.asset is None else self.asset.tick_size),
)
class StopLimitOrder(ExecutionStyle):
"""
Execution style representing a limit order to be placed if market price
reaches a threshold.
Parameters
----------
limit_price : float
Maximum price for buys, or minimum price for sells, at which the order
should be filled, if placed.
stop_price : float
Price threshold at which the order should be placed. For sells, the
order will be placed if market price falls below this value. For buys,
the order will be placed if market price rises above this value.
"""
def __init__(self, limit_price, stop_price, asset=None, exchange=None):
check_stoplimit_prices(limit_price, "limit")
check_stoplimit_prices(stop_price, "stop")
self.limit_price = limit_price
self.stop_price = stop_price
self._exchange = exchange
self.asset = asset
def get_limit_price(self, is_buy):
return asymmetric_round_price(
self.limit_price,
is_buy,
tick_size=(0.01 if self.asset is None else self.asset.tick_size),
)
def get_stop_price(self, is_buy):
return asymmetric_round_price(
self.stop_price,
not is_buy,
tick_size=(0.01 if self.asset is None else self.asset.tick_size),
)
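# Editor's sketch (illustrative only): execution styles are passed to
# ``zipline.api.order`` through the ``style`` argument; ``asset`` stands in
# for an Asset looked up elsewhere.
#
#     order(asset, 100, style=LimitOrder(42.50))
#     order(asset, -100, style=StopLimitOrder(limit_price=41.80, stop_price=42.00))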
def asymmetric_round_price(price, prefer_round_down, tick_size, diff=0.95):
"""
Asymmetric rounding function for adjusting prices to the specified number
of places in a way that "improves" the price. For limit prices, this means
preferring to round down on buys and preferring to round up on sells.
For stop prices, it means the reverse.
If prefer_round_down == True:
When .05 below to .95 above a specified decimal place, use it.
If prefer_round_down == False:
When .95 below to .05 above a specified decimal place, use it.
In math-speak:
If prefer_round_down: [<X-1>.0095, X.0195) -> round to X.01.
If not prefer_round_down: (<X-1>.0005, X.0105] -> round to X.01.
"""
precision = zp_math.number_of_decimal_places(tick_size)
multiplier = int(tick_size * (10 ** precision))
diff -= 0.5 # shift the difference down
diff *= 10 ** -precision # adjust diff to precision of tick size
diff *= multiplier # adjust diff to value of tick_size
# Subtracting an epsilon from diff to enforce the open-ness of the upper
# bound on buys and the lower bound on sells. Using the actual system
# epsilon doesn't quite get there, so use a slightly less epsilon-ey value.
epsilon = float_info.epsilon * 10
diff = diff - epsilon
# relies on rounding half away from zero, unlike numpy's bankers' rounding
rounded = tick_size * consistent_round(
(price - (diff if prefer_round_down else -diff)) / tick_size
)
if zp_math.tolerant_equals(rounded, 0.0):
return 0.0
return rounded
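# Editor's worked example (illustrative values, not captured output): with a
# $0.01 tick size, a buy limit is rounded in the buyer's favor and a sell
# limit in the seller's favor:
#
#     asymmetric_round_price(3.2185, prefer_round_down=True, tick_size=0.01)
#     # ~ 3.21 (buy limit rounds down)
#     asymmetric_round_price(3.2185, prefer_round_down=False, tick_size=0.01)
#     # ~ 3.22 (sell limit rounds up)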
def check_stoplimit_prices(price, label):
"""
Check to make sure the stop/limit prices are reasonable and raise
a BadOrderParameters exception if not.
"""
try:
if not isfinite(price):
raise BadOrderParameters(
msg="Attempted to place an order with a {} price "
"of {}.".format(label, price)
)
# This catches arbitrary objects
except TypeError:
raise BadOrderParameters(
msg="Attempted to place an order with a {} price "
"of {}.".format(label, type(price))
)
if price < 0:
raise BadOrderParameters(
msg="Can't place a {} order with a negative price.".format(label)
) | zipline-tej | /zipline_tej-0.0.48-cp38-cp38-win_amd64.whl/zipline/finance/execution.py | execution.py |
import logbook
import pandas as pd
from zipline.utils.memoize import remember_last
from zipline.utils.pandas_utils import normalize_date
log = logbook.Logger("Trading")
DEFAULT_CAPITAL_BASE = 1e5
class SimulationParameters(object):
def __init__(
self,
start_session,
end_session,
trading_calendar,
capital_base=DEFAULT_CAPITAL_BASE,
emission_rate="daily",
data_frequency="daily",
arena="backtest",
):
assert type(start_session) == pd.Timestamp
assert type(end_session) == pd.Timestamp
assert trading_calendar is not None, "Must pass in trading calendar!"
assert start_session <= end_session, "Period start falls after period end."
assert (
start_session <= trading_calendar.last_trading_session
), "Period start falls after the last known trading day."
assert (
end_session >= trading_calendar.first_trading_session
), "Period end falls before the first known trading day."
# chop off any minutes or hours on the given start and end dates,
# as we only support session labels here (and we represent session
# labels as midnight UTC).
self._start_session = normalize_date(start_session)
self._end_session = normalize_date(end_session)
self._capital_base = capital_base
self._emission_rate = emission_rate
self._data_frequency = data_frequency
# copied to algorithm's environment for runtime access
self._arena = arena
self._trading_calendar = trading_calendar
if not trading_calendar.is_session(self._start_session):
# if the start date is not a valid session in this calendar,
# push it forward to the first valid session
self._start_session = trading_calendar.minute_to_session_label(
self._start_session
)
if not trading_calendar.is_session(self._end_session):
# if the end date is not a valid session in this calendar,
# pull it backward to the last valid session before the given
# end date.
self._end_session = trading_calendar.minute_to_session_label(
self._end_session, direction="previous"
)
self._first_open = trading_calendar.open_and_close_for_session(
self._start_session
)[0]
self._last_close = trading_calendar.open_and_close_for_session(
self._end_session
)[1]
@property
def capital_base(self):
return self._capital_base
@property
def emission_rate(self):
return self._emission_rate
@property
def data_frequency(self):
return self._data_frequency
@data_frequency.setter
def data_frequency(self, val):
self._data_frequency = val
@property
def arena(self):
return self._arena
@arena.setter
def arena(self, val):
self._arena = val
@property
def start_session(self):
return self._start_session
@property
def end_session(self):
return self._end_session
@property
def first_open(self):
return self._first_open
@property
def last_close(self):
return self._last_close
@property
def trading_calendar(self):
return self._trading_calendar
@property
@remember_last
def sessions(self):
return self._trading_calendar.sessions_in_range(
self.start_session, self.end_session
)
def create_new(self, start_session, end_session, data_frequency=None):
if data_frequency is None:
data_frequency = self.data_frequency
return SimulationParameters(
start_session,
end_session,
self._trading_calendar,
capital_base=self.capital_base,
emission_rate=self.emission_rate,
data_frequency=data_frequency,
arena=self.arena,
)
def __repr__(self):
return """
{class_name}(
start_session={start_session},
end_session={end_session},
capital_base={capital_base},
data_frequency={data_frequency},
emission_rate={emission_rate},
first_open={first_open},
last_close={last_close},
trading_calendar={trading_calendar}
)\
""".format(
class_name=self.__class__.__name__,
start_session=self.start_session,
end_session=self.end_session,
capital_base=self.capital_base,
data_frequency=self.data_frequency,
emission_rate=self.emission_rate,
first_open=self.first_open,
last_close=self.last_close,
trading_calendar=self._trading_calendar,
) | zipline-tej | /zipline_tej-0.0.48-cp38-cp38-win_amd64.whl/zipline/finance/trading.py | trading.py |
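# Editor's sketch (illustrative only): constructing SimulationParameters
# directly; in practice they are usually built by zipline.utils.factory or by
# run_algorithm. ``get_calendar`` is assumed to come from
# zipline.utils.calendar_utils.
#
#     cal = get_calendar("NYSE")
#     params = SimulationParameters(
#         start_session=pd.Timestamp("2006-01-03", tz="UTC"),
#         end_session=pd.Timestamp("2006-06-30", tz="UTC"),
#         trading_calendar=cal,
#         capital_base=1e6,
#     )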
from abc import abstractmethod
import math
import numpy as np
from pandas import isnull
from toolz import merge
from zipline.assets import Equity, Future
from zipline.errors import HistoryWindowStartsBeforeData
from zipline.finance.constants import ROOT_SYMBOL_TO_ETA, DEFAULT_ETA
from zipline.finance.shared import AllowedAssetMarker, FinancialModelMeta
from zipline.finance.transaction import create_transaction
from zipline.utils.cache import ExpiringCache
from zipline.utils.dummy import DummyMapping
from zipline.utils.input_validation import (
expect_bounded,
expect_strictly_bounded,
)
SELL = 1 << 0
BUY = 1 << 1
STOP = 1 << 2
LIMIT = 1 << 3
SQRT_252 = math.sqrt(252)
DEFAULT_EQUITY_VOLUME_SLIPPAGE_BAR_LIMIT = 0.025
DEFAULT_FUTURE_VOLUME_SLIPPAGE_BAR_LIMIT = 0.05
class LiquidityExceeded(Exception):
pass
def fill_price_worse_than_limit_price(fill_price, order):
"""
Checks whether the fill price is worse than the order's limit price.
Parameters
----------
fill_price: float
The price to check.
order: zipline.finance.order.Order
The order whose limit price to check.
Returns
-------
bool: Whether the fill price is above the limit price (for a buy) or below
the limit price (for a sell).
"""
if order.limit:
# this is tricky! if an order with a limit price has reached
# the limit price, we will try to fill the order. do not fill
# these shares if the impacted price is worse than the limit
# price. return early to avoid creating the transaction.
# buy order is worse if the impacted price is greater than
# the limit price. sell order is worse if the impacted price
# is less than the limit price
if (order.direction > 0 and fill_price > order.limit) or (
order.direction < 0 and fill_price < order.limit
):
return True
return False
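# Editor's worked example (illustrative only): for a buy order with a limit of
# $10.00, an impacted fill price of $10.05 is worse than the limit (returns
# True, so no transaction is created), while a fill price of $9.98 is at least
# as good as the limit (returns False, so the fill proceeds).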
class SlippageModel(metaclass=FinancialModelMeta):
"""
Abstract base class for slippage models.
Slippage models are responsible for the rates and prices at which orders
fill during a simulation.
To implement a new slippage model, create a subclass of
:class:`~zipline.finance.slippage.SlippageModel` and implement
:meth:`process_order`.
Methods
-------
process_order(data, order)
Attributes
----------
volume_for_bar : int
Number of shares that have already been filled for the
currently-filling asset in the current minute. This attribute is
maintained automatically by the base class. It can be used by
subclasses to keep track of the total amount filled if there are
multiple open orders for a single asset.
Notes
-----
Subclasses that define their own constructors should call
``super(<subclass name>, self).__init__()`` before performing other
initialization.
"""
# Asset types that are compatible with the given model.
allowed_asset_types = (Equity, Future)
def __init__(self):
self._volume_for_bar = 0
@property
def volume_for_bar(self):
return self._volume_for_bar
@abstractmethod
def process_order(self, data, order):
"""
Compute the number of shares and price to fill for ``order`` in the
current minute.
Parameters
----------
data : zipline.protocol.BarData
The data for the given bar.
order : zipline.finance.order.Order
The order to simulate.
Returns
-------
execution_price : float
The price of the fill.
execution_volume : int
The number of shares that should be filled. Must be between ``0``
and ``order.amount - order.filled``. If the amount filled is less
than the amount remaining, ``order`` will remain open and will be
passed again to this method in the next minute.
Raises
------
zipline.finance.slippage.LiquidityExceeded
May be raised if no more orders should be processed for the current
asset during the current bar.
Notes
-----
Before this method is called, :attr:`volume_for_bar` will be set to the
number of shares that have already been filled for ``order.asset`` in
the current minute.
:meth:`process_order` is not called by the base class on bars for which
there was no historical volume.
"""
raise NotImplementedError("process_order")
    def simulate(self, data, asset, orders_for_asset, trading_policy):  # 20230804 (by MRC) Added trading_policy support
self._volume_for_bar = 0
volume = data.current(asset, "volume")
if volume == 0:
return
# can use the close price, since we verified there's volume in this
# bar.
price = data.current(asset, "close")
# BEGIN
#
# Remove this block after fixing data to ensure volume always has
# corresponding price.
if isnull(price):
return
# END
dt = data.current_dt
        # 20230804 (by MRC) Added trading_policy check: skip trading for this
        # asset when any policy's validate() returns False.
        if trading_policy is not None:
            for policy in trading_policy:
                if not policy.validate(asset, dt, data):
                    return
for order in orders_for_asset:
if order.open_amount == 0:
continue
order.check_triggers(price, dt)
if not order.triggered:
continue
txn = None
try:
execution_price, execution_volume = self.process_order(
data, order
)
if execution_price is not None:
txn = create_transaction(
order,
data.current_dt,
execution_price,
execution_volume,
)
except LiquidityExceeded:
break
if txn:
self._volume_for_bar += abs(txn.amount)
yield order, txn
def asdict(self):
return self.__dict__
class NoSlippage(SlippageModel):
"""A slippage model where all orders fill immediately and completely at the
current close price.
Notes
-----
This is primarily used for testing.
"""
@staticmethod
def process_order(data, order):
return (
data.current(order.asset, "close"),
order.amount,
)
class EquitySlippageModel(SlippageModel, metaclass=AllowedAssetMarker):
"""
Base class for slippage models which only support equities.
"""
allowed_asset_types = (Equity,)
class FutureSlippageModel(SlippageModel, metaclass=AllowedAssetMarker):
"""
Base class for slippage models which only support futures.
"""
allowed_asset_types = (Future,)
class VolumeShareSlippage(SlippageModel):
"""
Model slippage as a quadratic function of percentage of historical volume.
Orders to buy will be filled at::
price * (1 + price_impact * (volume_share ** 2))
Orders to sell will be filled at::
price * (1 - price_impact * (volume_share ** 2))
where ``price`` is the close price for the bar, and ``volume_share`` is the
percentage of minutely volume filled, up to a max of ``volume_limit``.
Parameters
----------
volume_limit : float, optional
Maximum percent of historical volume that can fill in each bar. 0.5
means 50% of historical volume. 1.0 means 100%. Default is 0.025 (i.e.,
2.5%).
price_impact : float, optional
Scaling coefficient for price impact. Larger values will result in more
simulated price impact. Smaller values will result in less simulated
price impact. Default is 0.1.
"""
def __init__(
self,
volume_limit=DEFAULT_EQUITY_VOLUME_SLIPPAGE_BAR_LIMIT,
price_impact=0.1,
):
super(VolumeShareSlippage, self).__init__()
self.volume_limit = volume_limit
self.price_impact = price_impact
def __repr__(self):
return """
{class_name}(
volume_limit={volume_limit},
price_impact={price_impact})
""".strip().format(
class_name=self.__class__.__name__,
volume_limit=self.volume_limit,
price_impact=self.price_impact,
)
def process_order(self, data, order):
volume = data.current(order.asset, "volume")
max_volume = self.volume_limit * volume
# price impact accounts for the total volume of transactions
# created against the current minute bar
remaining_volume = max_volume - self.volume_for_bar
if remaining_volume < 1:
# we can't fill any more transactions
raise LiquidityExceeded()
# the current order amount will be the min of the
# volume available in the bar or the open amount.
cur_volume = int(min(remaining_volume, abs(order.open_amount)))
if cur_volume < 1:
return None, None
# tally the current amount into our total amount ordered.
# total amount will be used to calculate price impact
total_volume = self.volume_for_bar + cur_volume
volume_share = min(total_volume / volume, self.volume_limit)
price = data.current(order.asset, "close")
# BEGIN
#
# Remove this block after fixing data to ensure volume always has
# corresponding price.
if isnull(price):
            return None, None
# END
simulated_impact = (
volume_share ** 2
* math.copysign(self.price_impact, order.direction)
* price
)
impacted_price = price + simulated_impact
if fill_price_worse_than_limit_price(impacted_price, order):
return None, None
return (impacted_price, math.copysign(cur_volume, order.direction))
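# Worked example (illustrative, with made-up bar data): for a buy order under
# the defaults volume_limit=0.025 and price_impact=0.1, a bar with close=100.0
# and volume=10,000 allows at most 0.025 * 10,000 = 250 shares to fill, and the
# fill price is pushed up by the squared volume share:
#
#     volume_share     = min(250 / 10000, 0.025)        # -> 0.025
#     simulated_impact = 0.025 ** 2 * 0.1 * 100.0       # -> 0.00625
#     impacted_price   = 100.0 + 0.00625                # buys fill above close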
class FixedSlippage(SlippageModel):
"""
Simple model assuming a fixed-size spread for all assets.
Parameters
----------
spread : float, optional
Size of the assumed spread for all assets.
Orders to buy will be filled at ``close + (spread / 2)``.
Orders to sell will be filled at ``close - (spread / 2)``.
Notes
-----
This model does not impose limits on the size of fills. An order for an
asset will always be filled as soon as any trading activity occurs in the
order's asset, even if the size of the order is greater than the historical
volume.
"""
def __init__(self, spread=0.0):
super(FixedSlippage, self).__init__()
self.spread = spread
def __repr__(self):
return "{class_name}(spread={spread})".format(
class_name=self.__class__.__name__,
spread=self.spread,
)
def process_order(self, data, order):
price = data.current(order.asset, "close")
return (price + (self.spread / 2.0 * order.direction), order.amount)
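# Worked example (illustrative): with FixedSlippage(spread=0.05) and a bar
# close of 50.00, a buy order fills at 50.00 + 0.05 / 2 = 50.025 and a sell
# order fills at 50.00 - 0.05 / 2 = 49.975, always for the full order amount.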
class MarketImpactBase(SlippageModel):
"""
Base class for slippage models which compute a simulated price impact
according to a history lookback.
"""
NO_DATA_VOLATILITY_SLIPPAGE_IMPACT = 10.0 / 10000
def __init__(self):
super(MarketImpactBase, self).__init__()
self._window_data_cache = ExpiringCache()
@abstractmethod
def get_txn_volume(self, data, order):
"""
Return the number of shares we would like to order in this minute.
Parameters
----------
data : BarData
order : Order
Return
------
int : the number of shares
"""
raise NotImplementedError("get_txn_volume")
@abstractmethod
def get_simulated_impact(
self,
order,
current_price,
current_volume,
txn_volume,
mean_volume,
volatility,
):
"""
Calculate simulated price impact.
Parameters
----------
order : The order being processed.
current_price : Current price of the asset being ordered.
current_volume : Volume of the asset being ordered for the current bar.
txn_volume : Number of shares/contracts being ordered.
mean_volume : Trailing ADV of the asset.
volatility : Annualized daily volatility of returns.
Return
------
int : impact on the current price.
"""
raise NotImplementedError("get_simulated_impact")
def process_order(self, data, order):
if order.open_amount == 0:
return None, None
minute_data = data.current(order.asset, ["volume", "high", "low"])
mean_volume, volatility = self._get_window_data(data, order.asset, 20)
        # Price to use is the average of the minute bar's high and low.
price = np.mean([minute_data["high"], minute_data["low"]])
volume = minute_data["volume"]
if not volume:
return None, None
txn_volume = int(
min(self.get_txn_volume(data, order), abs(order.open_amount))
)
# If the computed transaction volume is zero or a decimal value, 'int'
# will round it down to zero. In that case just bail.
if txn_volume == 0:
return None, None
if mean_volume == 0 or np.isnan(volatility):
# If this is the first day the contract exists or there is no
# volume history, default to a conservative estimate of impact.
simulated_impact = price * self.NO_DATA_VOLATILITY_SLIPPAGE_IMPACT
else:
simulated_impact = self.get_simulated_impact(
order=order,
current_price=price,
current_volume=volume,
txn_volume=txn_volume,
mean_volume=mean_volume,
volatility=volatility,
)
impacted_price = price + math.copysign(
simulated_impact, order.direction
)
if fill_price_worse_than_limit_price(impacted_price, order):
return None, None
return impacted_price, math.copysign(txn_volume, order.direction)
def _get_window_data(self, data, asset, window_length):
"""
Internal utility method to return the trailing mean volume over the
past 'window_length' days, and volatility of close prices for a
specific asset.
Parameters
----------
data : The BarData from which to fetch the daily windows.
asset : The Asset whose data we are fetching.
window_length : Number of days of history used to calculate the mean
volume and close price volatility.
Returns
-------
(mean volume, volatility)
"""
try:
values = self._window_data_cache.get(asset, data.current_session)
except KeyError:
try:
# Add a day because we want 'window_length' complete days,
# excluding the current day.
volume_history = data.history(
asset,
"volume",
window_length + 1,
"1d",
)
close_history = data.history(
asset,
"close",
window_length + 1,
"1d",
)
except HistoryWindowStartsBeforeData:
# If there is not enough data to do a full history call, return
# values as if there was no data.
return 0, np.NaN
# Exclude the first value of the percent change array because it is
# always just NaN.
close_volatility = (
close_history[:-1]
.pct_change()[1:]
.std(
skipna=False,
)
)
values = {
"volume": volume_history[:-1].mean(),
"close": close_volatility * SQRT_252,
}
self._window_data_cache.set(asset, values, data.current_session)
return values["volume"], values["close"]
class VolatilityVolumeShare(MarketImpactBase):
"""
Model slippage for futures contracts according to the following formula:
new_price = price + (price * MI / 10000),
where 'MI' is market impact, which is defined as:
MI = eta * sigma * sqrt(psi)
- ``eta`` is a constant which varies by root symbol.
- ``sigma`` is 20-day annualized volatility.
- ``psi`` is the volume traded in the given bar divided by 20-day ADV.
Parameters
----------
volume_limit : float
Maximum percentage (as a decimal) of a bar's total volume that can be
traded.
eta : float or dict
Constant used in the market impact formula. If given a float, the eta
for all futures contracts is the same. If given a dictionary, it must
map root symbols to the eta for contracts of that symbol.
"""
NO_DATA_VOLATILITY_SLIPPAGE_IMPACT = 7.5 / 10000
allowed_asset_types = (Future,)
def __init__(self, volume_limit, eta=ROOT_SYMBOL_TO_ETA):
super(VolatilityVolumeShare, self).__init__()
self.volume_limit = volume_limit
# If 'eta' is a constant, use a dummy mapping to treat it as a
# dictionary that always returns the same value.
# NOTE: This dictionary does not handle unknown root symbols, so it may
# be worth revisiting this behavior.
if isinstance(eta, (int, float)):
self._eta = DummyMapping(float(eta))
else:
# Eta is a dictionary. If the user's dictionary does not provide a
# value for a certain contract, fall back on the pre-defined eta
# values per root symbol.
self._eta = merge(ROOT_SYMBOL_TO_ETA, eta)
def __repr__(self):
if isinstance(self._eta, DummyMapping):
# Eta is a constant, so extract it.
eta = self._eta["dummy key"]
else:
eta = "<varies>"
return "{class_name}(volume_limit={volume_limit}, eta={eta})".format(
class_name=self.__class__.__name__,
volume_limit=self.volume_limit,
eta=eta,
)
def get_simulated_impact(
self,
order,
current_price,
current_volume,
txn_volume,
mean_volume,
volatility,
):
try:
eta = self._eta[order.asset.root_symbol]
except Exception:
eta = DEFAULT_ETA
psi = txn_volume / mean_volume
market_impact = eta * volatility * math.sqrt(psi)
# We divide by 10,000 because this model computes to basis points.
# To convert from bps to % we need to divide by 100, then again to
# convert from % to fraction.
return (current_price * market_impact) / 10000
def get_txn_volume(self, data, order):
volume = data.current(order.asset, "volume")
return volume * self.volume_limit
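# Worked example (illustrative; the eta value below is hypothetical, real etas
# are looked up per root symbol): with eta=0.05, a 20-day annualized volatility
# of 0.20 and a fill equal to 10% of the 20-day ADV (psi=0.1),
#
#     market_impact = 0.05 * 0.20 * math.sqrt(0.1)      # ~0.00316 basis points
#     price_impact  = current_price * market_impact / 10000
#
# i.e. roughly 0.0000316 currency units on a price of 100.0.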
class FixedBasisPointsSlippage(SlippageModel):
"""
Model slippage as a fixed percentage difference from historical minutely
close price, limiting the size of fills to a fixed percentage of historical
minutely volume.
Orders to buy are filled at::
historical_price * (1 + (basis_points * 0.0001))
Orders to sell are filled at::
historical_price * (1 - (basis_points * 0.0001))
Fill sizes are capped at::
historical_volume * volume_limit
Parameters
----------
basis_points : float, optional
Number of basis points of slippage to apply for each fill. Default
is 5 basis points.
volume_limit : float, optional
Fraction of trading volume that can be filled each minute. Default is
10% of trading volume.
Notes
-----
- A basis point is one one-hundredth of a percent.
- This class, default-constructed, is zipline's default slippage model for
equities.
"""
@expect_bounded(
basis_points=(0, None),
__funcname="FixedBasisPointsSlippage",
)
@expect_strictly_bounded(
volume_limit=(0, None),
__funcname="FixedBasisPointsSlippage",
)
def __init__(self, basis_points=5.0, volume_limit=0.1):
super(FixedBasisPointsSlippage, self).__init__()
self.basis_points = basis_points
self.percentage = self.basis_points / 10000.0
self.volume_limit = volume_limit
def __repr__(self):
return """
{class_name}(
basis_points={basis_points},
volume_limit={volume_limit},
)
""".strip().format(
class_name=self.__class__.__name__,
basis_points=self.basis_points,
volume_limit=self.volume_limit,
)
def process_order(self, data, order):
volume = data.current(order.asset, "volume")
max_volume = int(self.volume_limit * volume)
price = data.current(order.asset, "close")
shares_to_fill = min(
abs(order.open_amount), max_volume - self.volume_for_bar
)
if shares_to_fill == 0:
raise LiquidityExceeded()
return (
price + price * (self.percentage * order.direction),
shares_to_fill * order.direction,
)
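# Worked example (illustrative): with the defaults basis_points=5.0 and
# volume_limit=0.1, a bar with close=20.00 and volume=5,000 caps fills at 500
# shares, and a buy order fills at 20.00 * (1 + 5 * 0.0001) = 20.01 while a
# sell order fills at 20.00 * (1 - 5 * 0.0001) = 19.99.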
if __name__ == "__main__":
    # EquitySlippageModel is abstract (process_order is not implemented), so
    # instantiate a concrete model for this quick smoke test instead.
    f = FixedBasisPointsSlippage()
    print(f.__class__)
import math
import uuid
from enum import IntEnum
import zipline.protocol as zp
from zipline.assets import Asset
from zipline.utils.input_validation import expect_types
ORDER_STATUS = IntEnum(
"ORDER_STATUS",
[
"OPEN",
"FILLED",
"CANCELLED",
"REJECTED",
"HELD",
],
start=0,
)
SELL = 1 << 0
BUY = 1 << 1
STOP = 1 << 2
LIMIT = 1 << 3
ORDER_FIELDS_TO_IGNORE = {"type", "direction", "_status", "asset"}
class Order(object):
# using __slots__ to save on memory usage. Simulations can create many
# Order objects and we keep them all in memory, so it's worthwhile trying
# to cut down on the memory footprint of this object.
__slots__ = [
"id",
"dt",
"reason",
"created",
"asset",
"amount",
"filled",
"commission",
"_status",
"stop",
"limit",
"stop_reached",
"limit_reached",
"direction",
"type",
"broker_order_id",
]
@expect_types(asset=Asset)
def __init__(
self,
dt,
asset,
amount,
stop=None,
limit=None,
filled=0,
commission=0,
id=None,
):
"""
@dt - datetime.datetime that the order was placed
@asset - asset for the order.
@amount - the number of shares to buy/sell
a positive sign indicates a buy
a negative sign indicates a sell
@filled - how many shares of the order have been filled so far
"""
# get a string representation of the uuid.
self.id = self.make_id() if id is None else id
self.dt = dt
self.reason = None
self.created = dt
self.asset = asset
self.amount = amount
self.filled = filled
self.commission = commission
self._status = ORDER_STATUS.OPEN
self.stop = stop
self.limit = limit
self.stop_reached = False
self.limit_reached = False
self.direction = math.copysign(1, self.amount)
self.type = zp.DATASOURCE_TYPE.ORDER
self.broker_order_id = None
@staticmethod
def make_id():
return uuid.uuid4().hex
def to_dict(self):
dct = {
name: getattr(self, name)
for name in self.__slots__
if name not in ORDER_FIELDS_TO_IGNORE
}
if self.broker_order_id is None:
del dct["broker_order_id"]
# Adding 'sid' for backwards compatibility with downstream consumers.
dct["sid"] = self.asset
dct["status"] = self.status
return dct
@property
def sid(self):
# For backwards compatibility because we pass this object to
# custom slippage models.
return self.asset
def to_api_obj(self):
pydict = self.to_dict()
obj = zp.Order(initial_values=pydict)
return obj
def check_triggers(self, price, dt):
"""
Update internal state based on price triggers and the
trade event's price.
"""
(
stop_reached,
limit_reached,
sl_stop_reached,
) = self.check_order_triggers(price)
if (stop_reached, limit_reached) != (
self.stop_reached,
self.limit_reached,
):
self.dt = dt
self.stop_reached = stop_reached
self.limit_reached = limit_reached
if sl_stop_reached:
# Change the STOP LIMIT order into a LIMIT order
self.stop = None
# TODO: simplify
# flake8: noqa: C901
def check_order_triggers(self, current_price):
"""
Given an order and a trade event, return a tuple of
(stop_reached, limit_reached).
For market orders, will return (False, False).
For stop orders, limit_reached will always be False.
For limit orders, stop_reached will always be False.
For stop limit orders a Boolean is returned to flag
that the stop has been reached.
Orders that have been triggered already (price targets reached),
the order's current values are returned.
"""
if self.triggered:
return (self.stop_reached, self.limit_reached, False)
stop_reached = False
limit_reached = False
sl_stop_reached = False
order_type = 0
if self.amount > 0:
order_type |= BUY
else:
order_type |= SELL
if self.stop is not None:
order_type |= STOP
if self.limit is not None:
order_type |= LIMIT
if order_type == BUY | STOP | LIMIT:
if current_price >= self.stop:
sl_stop_reached = True
if current_price <= self.limit:
limit_reached = True
elif order_type == SELL | STOP | LIMIT:
if current_price <= self.stop:
sl_stop_reached = True
if current_price >= self.limit:
limit_reached = True
elif order_type == BUY | STOP:
if current_price >= self.stop:
stop_reached = True
elif order_type == SELL | STOP:
if current_price <= self.stop:
stop_reached = True
elif order_type == BUY | LIMIT:
if current_price <= self.limit:
limit_reached = True
elif order_type == SELL | LIMIT:
# This is a SELL LIMIT order
if current_price >= self.limit:
limit_reached = True
return (stop_reached, limit_reached, sl_stop_reached)
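    # Illustrative sketch (not part of the original class): for a buy
    # stop-limit order with stop=105.00 and limit=105.50, a trade at 106.00
    # sets sl_stop_reached (the stop leg fired) but not limit_reached, so
    # check_triggers() will clear ``self.stop`` and the order then behaves as
    # a plain limit order waiting for the price to fall to 105.50 or below.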
def handle_split(self, ratio):
# update the amount, limit_price, and stop_price
# by the split's ratio
# info here: http://finra.complinet.com/en/display/display_plain.html?
# rbid=2403&element_id=8950&record_id=12208&print=1
# new_share_amount = old_share_amount / ratio
# new_price = old_price * ratio
self.amount = int(self.amount / ratio)
if self.limit is not None:
self.limit = round(self.limit * ratio, 2)
if self.stop is not None:
self.stop = round(self.stop * ratio, 2)
@property
def status(self):
if not self.open_amount:
return ORDER_STATUS.FILLED
elif self._status == ORDER_STATUS.HELD and self.filled:
return ORDER_STATUS.OPEN
else:
return self._status
@status.setter
def status(self, status):
self._status = status
def cancel(self):
self.status = ORDER_STATUS.CANCELLED
def reject(self, reason=""):
self.status = ORDER_STATUS.REJECTED
self.reason = reason
def hold(self, reason=""):
self.status = ORDER_STATUS.HELD
self.reason = reason
@property
def open(self):
return self.status in [ORDER_STATUS.OPEN, ORDER_STATUS.HELD]
@property
def triggered(self):
"""
For a market order, True.
For a stop order, True IFF stop_reached.
For a limit order, True IFF limit_reached.
"""
if self.stop is not None and not self.stop_reached:
return False
if self.limit is not None and not self.limit_reached:
return False
return True
@property
def open_amount(self):
return self.amount - self.filled
def __repr__(self):
"""
String representation for this object.
"""
return "Order(%s)" % self.to_dict().__repr__()
def __unicode__(self):
"""
Unicode representation for this object.
"""
return str(repr(self)) | zipline-tej | /zipline_tej-0.0.48-cp38-cp38-win_amd64.whl/zipline/finance/order.py | order.py |
from math import copysign
import numpy as np
import logbook
from zipline.assets import Future
import zipline.protocol as zp
log = logbook.Logger("Performance")
class Position(object):
__slots__ = "inner_position", "protocol_position"
def __init__(
self, asset, amount=0, cost_basis=0.0, last_sale_price=0.0, last_sale_date=None
):
inner = zp.InnerPosition(
asset=asset,
amount=amount,
cost_basis=cost_basis,
last_sale_price=last_sale_price,
last_sale_date=last_sale_date,
)
object.__setattr__(self, "inner_position", inner)
object.__setattr__(self, "protocol_position", zp.Position(inner))
def __getattr__(self, attr):
return getattr(self.inner_position, attr)
def __setattr__(self, attr, value):
setattr(self.inner_position, attr, value)
def earn_dividend(self, dividend):
"""
Register the number of shares we held at this dividend's ex date so
that we can pay out the correct amount on the dividend's pay date.
"""
return {"amount": self.amount * dividend.amount}
def earn_stock_dividend(self, stock_dividend):
"""
Register the number of shares we held at this dividend's ex date so
that we can pay out the correct amount on the dividend's pay date.
"""
return {
"payment_asset": stock_dividend.payment_asset,
"share_count": np.floor(self.amount * float(stock_dividend.ratio)),
}
def handle_split(self, asset, ratio):
"""
Update the position by the split ratio, and return the resulting
fractional share that will be converted into cash.
Returns the unused cash.
"""
if self.asset != asset:
raise Exception("updating split with the wrong asset!")
# adjust the # of shares by the ratio
# (if we had 100 shares, and the ratio is 3,
# we now have 33 shares)
# (old_share_count / ratio = new_share_count)
# (old_price * ratio = new_price)
# e.g., 33.333
raw_share_count = self.amount / float(ratio)
# e.g., 33
full_share_count = np.floor(raw_share_count)
# e.g., 0.333
fractional_share_count = raw_share_count - full_share_count
# adjust the cost basis to the nearest cent, e.g., 60.0
new_cost_basis = round(self.cost_basis * ratio, 2)
self.cost_basis = new_cost_basis
self.amount = full_share_count
return_cash = round(float(fractional_share_count * new_cost_basis), 2)
log.info("after split: " + str(self))
log.info("returning cash: " + str(return_cash))
# return the leftover cash, which will be converted into cash
# (rounded to the nearest cent)
return return_cash
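    # Worked example (illustrative): a position of 100 shares with a cost
    # basis of 60.00 and a split ratio of 3.0 becomes 33 whole shares with a
    # cost basis of 180.00; the 0.33... fractional share is returned as cash,
    # round(0.3333... * 180.00, 2) == 60.0, and credited back to the ledger.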
def update(self, txn):
if self.asset != txn.asset:
raise Exception("updating position with txn for a " "different asset")
total_shares = self.amount + txn.amount
if total_shares == 0:
self.cost_basis = 0.0
else:
prev_direction = copysign(1, self.amount)
txn_direction = copysign(1, txn.amount)
if prev_direction != txn_direction:
# we're covering a short or closing a position
if abs(txn.amount) > abs(self.amount):
# we've closed the position and gone short
# or covered the short position and gone long
self.cost_basis = txn.price
else:
prev_cost = self.cost_basis * self.amount
txn_cost = txn.amount * txn.price
total_cost = prev_cost + txn_cost
self.cost_basis = total_cost / total_shares
# Update the last sale price if txn is
# best data we have so far
if self.last_sale_date is None or txn.dt > self.last_sale_date:
self.last_sale_price = txn.price
self.last_sale_date = txn.dt
self.amount = total_shares
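    # Worked example (illustrative): holding 10 shares with a cost basis of
    # 100.00 and buying 10 more at 110.00 gives
    # (10 * 100.00 + 10 * 110.00) / 20 == 105.00 as the new cost basis. If a
    # transaction flips the position through zero (e.g. selling 25 shares
    # against a 10-share long), the cost basis resets to the transaction price.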
def adjust_commission_cost_basis(self, asset, cost):
"""
A note about cost-basis in zipline: all positions are considered
to share a cost basis, even if they were executed in different
transactions with different commission costs, different prices, etc.
Due to limitations about how zipline handles positions, zipline will
currently spread an externally-delivered commission charge across
all shares in a position.
"""
if asset != self.asset:
raise Exception("Updating a commission for a different asset?")
if cost == 0.0:
return
# If we no longer hold this position, there is no cost basis to
# adjust.
if self.amount == 0:
return
# We treat cost basis as the share price where we have broken even.
# For longs, commissions cause a relatively straight forward increase
# in the cost basis.
#
# For shorts, you actually want to decrease the cost basis because you
# break even and earn a profit when the share price decreases.
#
# Shorts are represented as having a negative `amount`.
#
# The multiplication and division by `amount` cancel out leaving the
# cost_basis positive, while subtracting the commission.
prev_cost = self.cost_basis * self.amount
if isinstance(asset, Future):
cost_to_use = cost / asset.price_multiplier
else:
cost_to_use = cost
new_cost = prev_cost + cost_to_use
self.cost_basis = new_cost / self.amount
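    # Worked example (illustrative): a long position of 100 shares with a cost
    # basis of 50.00 charged a 10.00 commission moves to
    # (100 * 50.00 + 10.00) / 100 == 50.10, while a short position of -100
    # shares moves to (-100 * 50.00 + 10.00) / -100 == 49.90, i.e. the
    # break-even price is lowered as described above.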
def __repr__(self):
template = "asset: {asset}, amount: {amount}, cost_basis: {cost_basis}, \
last_sale_price: {last_sale_price}"
return template.format(
asset=self.asset,
amount=self.amount,
cost_basis=self.cost_basis,
last_sale_price=self.last_sale_price,
)
def to_dict(self):
"""
Creates a dictionary representing the state of this position.
Returns a dict object of the form:
"""
return {
"sid": self.asset,
"amount": self.amount,
"cost_basis": self.cost_basis,
"last_sale_price": self.last_sale_price,
} | zipline-tej | /zipline_tej-0.0.48-cp38-cp38-win_amd64.whl/zipline/finance/position.py | position.py |
from collections import namedtuple, OrderedDict
from functools import partial
from math import isnan
import logbook
import numpy as np
import pandas as pd
from zipline.assets import Future
from zipline.finance.transaction import Transaction
import zipline.protocol as zp
from zipline.utils.sentinel import sentinel
from .position import Position
from ._finance_ext import (
PositionStats,
calculate_position_tracker_stats,
update_position_last_sale_prices,
)
log = logbook.Logger("Performance")
class PositionTracker(object):
"""The current state of the positions held.
Parameters
----------
data_frequency : {'daily', 'minute'}
The data frequency of the simulation.
"""
def __init__(self, data_frequency):
self.positions = OrderedDict()
self._unpaid_dividends = {}
self._unpaid_stock_dividends = {}
self._positions_store = zp.Positions()
self.data_frequency = data_frequency
# cache the stats until something alters our positions
self._dirty_stats = True
self._stats = PositionStats.new()
def update_position(
self,
asset,
amount=None,
last_sale_price=None,
last_sale_date=None,
cost_basis=None,
):
self._dirty_stats = True
if asset not in self.positions:
position = Position(asset)
self.positions[asset] = position
else:
position = self.positions[asset]
if amount is not None:
position.amount = amount
if last_sale_price is not None:
position.last_sale_price = last_sale_price
if last_sale_date is not None:
position.last_sale_date = last_sale_date
if cost_basis is not None:
position.cost_basis = cost_basis
def execute_transaction(self, txn):
self._dirty_stats = True
asset = txn.asset
if asset not in self.positions:
position = Position(asset)
self.positions[asset] = position
else:
position = self.positions[asset]
position.update(txn)
if position.amount == 0:
del self.positions[asset]
try:
# if this position exists in our user-facing dictionary,
# remove it as well.
del self._positions_store[asset]
except KeyError:
pass
def handle_commission(self, asset, cost):
# Adjust the cost basis of the stock if we own it
if asset in self.positions:
self._dirty_stats = True
self.positions[asset].adjust_commission_cost_basis(asset, cost)
def handle_splits(self, splits):
"""Processes a list of splits by modifying any positions as needed.
Parameters
----------
splits: list
A list of splits. Each split is a tuple of (asset, ratio).
Returns
-------
int: The leftover cash from fractional shares after modifying each
position.
"""
total_leftover_cash = 0
for asset, ratio in splits:
if asset in self.positions:
self._dirty_stats = True
# Make the position object handle the split. It returns the
# leftover cash from a fractional share, if there is any.
position = self.positions[asset]
leftover_cash = position.handle_split(asset, ratio)
total_leftover_cash += leftover_cash
return total_leftover_cash
def earn_dividends(self, cash_dividends, stock_dividends):
"""Given a list of dividends whose ex_dates are all the next trading
day, calculate and store the cash and/or stock payments to be paid on
each dividend's pay date.
Parameters
----------
cash_dividends : iterable of (asset, amount, pay_date) namedtuples
stock_dividends: iterable of (asset, payment_asset, ratio, pay_date)
namedtuples.
"""
for cash_dividend in cash_dividends:
self._dirty_stats = True # only mark dirty if we pay a dividend
# Store the earned dividends so that they can be paid on the
# dividends' pay_dates.
div_owed = self.positions[cash_dividend.asset].earn_dividend(
cash_dividend,
)
            # 20230525 (by MRC) Log dividend payment information.
            if len(div_owed) != 0 and not pd.isnull(cash_dividend.pay_date):
                log.info(
                    str(cash_dividend.asset)
                    + ", cash_dividend amount: " + str(cash_dividend.amount)
                    + ", pay_date: " + cash_dividend.pay_date.strftime("%Y-%m-%d")
                    + ", div_owed: " + str(div_owed["amount"])
                )
try:
self._unpaid_dividends[cash_dividend.pay_date].append(div_owed)
except KeyError:
self._unpaid_dividends[cash_dividend.pay_date] = [div_owed]
for stock_dividend in stock_dividends:
self._dirty_stats = True # only mark dirty if we pay a dividend
div_owed = self.positions[stock_dividend.asset].earn_stock_dividend(
stock_dividend
)
try:
self._unpaid_stock_dividends[stock_dividend.pay_date].append(
div_owed,
)
except KeyError:
self._unpaid_stock_dividends[stock_dividend.pay_date] = [
div_owed,
]
def pay_dividends(self, next_trading_day):
"""
Returns a cash payment based on the dividends that should be paid out
according to the accumulated bookkeeping of earned, unpaid, and stock
dividends.
"""
net_cash_payment = 0.0
try:
payments = self._unpaid_dividends[next_trading_day]
            # Mark these dividends as paid by dropping them from our unpaid table.
del self._unpaid_dividends[next_trading_day]
except KeyError:
payments = []
        # Add cash equal to each dividend payment. The amounts may be negative
        # for short positions, representing the fact that we're required to
        # reimburse the owner of the stock for any dividends paid while
        # borrowing.
for payment in payments:
net_cash_payment += payment["amount"]
# Add stock for any stock dividends paid. Again, the values here may
# be negative in the case of short positions.
try:
stock_payments = self._unpaid_stock_dividends[next_trading_day]
except KeyError:
stock_payments = []
for stock_payment in stock_payments:
payment_asset = stock_payment["payment_asset"]
share_count = stock_payment["share_count"]
# note we create a Position for stock dividend if we don't
# already own the asset
if payment_asset in self.positions:
position = self.positions[payment_asset]
else:
position = self.positions[payment_asset] = Position(
payment_asset,
)
position.amount += share_count
return net_cash_payment
def maybe_create_close_position_transaction(self, asset, dt, data_portal):
if not self.positions.get(asset):
return None
amount = self.positions.get(asset).amount
price = data_portal.get_spot_value(asset, "price", dt, self.data_frequency)
# Get the last traded price if price is no longer available
if isnan(price):
price = self.positions.get(asset).last_sale_price
return Transaction(
asset=asset,
amount=-amount,
dt=dt,
price=price,
order_id=None,
)
def get_positions(self):
positions = self._positions_store
for asset, pos in self.positions.items():
# Adds the new position if we didn't have one before, or overwrite
# one we have currently
positions[asset] = pos.protocol_position
return positions
def get_position_list(self):
return [
pos.to_dict() for asset, pos in self.positions.items() if pos.amount != 0
]
def sync_last_sale_prices(self, dt, data_portal, handle_non_market_minutes=False):
self._dirty_stats = True
if handle_non_market_minutes:
previous_minute = data_portal.trading_calendar.previous_minute(dt)
get_price = partial(
data_portal.get_adjusted_value,
field="price",
dt=previous_minute,
perspective_dt=dt,
data_frequency=self.data_frequency,
)
else:
get_price = partial(
data_portal.get_scalar_asset_spot_value,
field="price",
dt=dt,
data_frequency=self.data_frequency,
)
update_position_last_sale_prices(self.positions, get_price, dt)
@property
def stats(self):
"""The current status of the positions.
Returns
-------
stats : PositionStats
            The current position stats.
Notes
-----
This is cached, repeated access will not recompute the stats until
the stats may have changed.
"""
if self._dirty_stats:
calculate_position_tracker_stats(self.positions, self._stats)
self._dirty_stats = False
return self._stats
move_to_end = OrderedDict.move_to_end
PeriodStats = namedtuple(
"PeriodStats",
"net_liquidation gross_leverage net_leverage",
)
not_overridden = sentinel(
"not_overridden",
"Mark that an account field has not been overridden",
)
class Ledger(object):
"""The ledger tracks all orders and transactions as well as the current
state of the portfolio and positions.
Attributes
----------
portfolio : zipline.protocol.Portfolio
The updated portfolio being managed.
account : zipline.protocol.Account
The updated account being managed.
position_tracker : PositionTracker
The current set of positions.
todays_returns : float
The current day's returns. In minute emission mode, this is the partial
day's returns. In daily emission mode, this is
``daily_returns[session]``.
daily_returns_series : pd.Series
The daily returns series. Days that have not yet finished will hold
a value of ``np.nan``.
daily_returns_array : np.ndarray
The daily returns as an ndarray. Days that have not yet finished will
hold a value of ``np.nan``.
"""
def __init__(self, trading_sessions, capital_base, data_frequency):
if len(trading_sessions):
start = trading_sessions[0]
else:
start = None
# Have some fields of the portfolio changed? This should be accessed
# through ``self._dirty_portfolio``
self.__dirty_portfolio = False
self._immutable_portfolio = zp.Portfolio(start, capital_base)
self._portfolio = zp.MutableView(self._immutable_portfolio)
self.daily_returns_series = pd.Series(
np.nan,
index=trading_sessions,
)
# Get a view into the storage of the returns series. Metrics
# can access this directly in minute mode for performance reasons.
self.daily_returns_array = self.daily_returns_series.values
self._previous_total_returns = 0
# this is a component of the cache key for the account
self._position_stats = None
# Have some fields of the account changed?
self._dirty_account = True
self._immutable_account = zp.Account()
self._account = zp.MutableView(self._immutable_account)
        # The broker blotter can override some fields on the account. This is
        # way too tangled up at the moment, but we aren't fixing it today.
self._account_overrides = {}
self.position_tracker = PositionTracker(data_frequency)
self._processed_transactions = {}
self._orders_by_modified = {}
self._orders_by_id = OrderedDict()
# Keyed by asset, the previous last sale price of positions with
# payouts on price differences, e.g. Futures.
#
        # This is not simply the price from the previous minute: it is the
        # last sale price recorded either before the period start or at the
        # most recent execution.
self._payout_last_sale_prices = {}
@property
def todays_returns(self):
# compute today's returns in returns space instead of portfolio-value
# space to work even when we have capital changes
return (self.portfolio.returns + 1) / (self._previous_total_returns + 1) - 1
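    # Worked example (illustrative): if cumulative returns were 0.10 at
    # yesterday's close and are 0.12 now, today's partial return is
    # (1.12 / 1.10) - 1 ~= 0.01818, which stays correct across capital changes
    # because it is computed in returns space rather than portfolio-value space.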
@property
def _dirty_portfolio(self):
return self.__dirty_portfolio
@_dirty_portfolio.setter
def _dirty_portfolio(self, value):
if value:
# marking the portfolio as dirty also marks the account as dirty
self.__dirty_portfolio = self._dirty_account = value
else:
self.__dirty_portfolio = value
def start_of_session(self, session_label):
self._processed_transactions.clear()
self._orders_by_modified.clear()
self._orders_by_id.clear()
# Save the previous day's total returns so that ``todays_returns``
# produces returns since yesterday. This does not happen in
# ``end_of_session`` because we want ``todays_returns`` to produce the
# correct value in metric ``end_of_session`` handlers.
self._previous_total_returns = self.portfolio.returns
def end_of_bar(self, session_ix):
# make daily_returns hold the partial returns, this saves many
# metrics from doing a concat and copying all of the previous
# returns
self.daily_returns_array[session_ix] = self.todays_returns
def end_of_session(self, session_ix):
# save the daily returns time-series
self.daily_returns_series[session_ix] = self.todays_returns
def sync_last_sale_prices(self, dt, data_portal, handle_non_market_minutes=False):
self.position_tracker.sync_last_sale_prices(
dt,
data_portal,
handle_non_market_minutes=handle_non_market_minutes,
)
self._dirty_portfolio = True
@staticmethod
def _calculate_payout(multiplier, amount, old_price, price):
return (price - old_price) * multiplier * amount
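    # Worked example (illustrative): for a futures position of 5 contracts
    # with a price multiplier of 1000, a move from 50.00 to 50.25 produces a
    # cash payout of (50.25 - 50.00) * 1000 * 5 == 1250.0.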
def _cash_flow(self, amount):
self._dirty_portfolio = True
p = self._portfolio
p.cash_flow += amount
p.cash += amount
def process_transaction(self, transaction):
"""Add a transaction to ledger, updating the current state as needed.
Parameters
----------
transaction : zp.Transaction
The transaction to execute.
"""
asset = transaction.asset
if isinstance(asset, Future):
try:
old_price = self._payout_last_sale_prices[asset]
except KeyError:
self._payout_last_sale_prices[asset] = transaction.price
else:
position = self.position_tracker.positions[asset]
amount = position.amount
price = transaction.price
self._cash_flow(
self._calculate_payout(
asset.price_multiplier,
amount,
old_price,
price,
),
)
if amount + transaction.amount == 0:
del self._payout_last_sale_prices[asset]
else:
self._payout_last_sale_prices[asset] = price
else:
self._cash_flow(-(transaction.price * transaction.amount))
self.position_tracker.execute_transaction(transaction)
# we only ever want the dict form from now on
transaction_dict = transaction.to_dict()
try:
self._processed_transactions[transaction.dt].append(
transaction_dict,
)
except KeyError:
self._processed_transactions[transaction.dt] = [transaction_dict]
def process_splits(self, splits):
"""Processes a list of splits by modifying any positions as needed.
Parameters
----------
splits: list[(Asset, float)]
A list of splits. Each split is a tuple of (asset, ratio).
"""
leftover_cash = self.position_tracker.handle_splits(splits)
if leftover_cash > 0:
self._cash_flow(leftover_cash)
def process_order(self, order):
"""Keep track of an order that was placed.
Parameters
----------
order : zp.Order
The order to record.
"""
try:
dt_orders = self._orders_by_modified[order.dt]
except KeyError:
self._orders_by_modified[order.dt] = OrderedDict(
[
(order.id, order),
]
)
self._orders_by_id[order.id] = order
else:
self._orders_by_id[order.id] = dt_orders[order.id] = order
# to preserve the order of the orders by modified date
move_to_end(dt_orders, order.id, last=True)
move_to_end(self._orders_by_id, order.id, last=True)
def process_commission(self, commission):
"""Process the commission.
Parameters
----------
commission : zp.Event
The commission being paid.
"""
asset = commission["asset"]
cost = commission["cost"]
self.position_tracker.handle_commission(asset, cost)
self._cash_flow(-cost)
def close_position(self, asset, dt, data_portal):
txn = self.position_tracker.maybe_create_close_position_transaction(
asset,
dt,
data_portal,
)
if txn is not None:
self.process_transaction(txn)
def process_dividends(self, next_session, asset_finder, adjustment_reader):
"""Process dividends for the next session.
This will earn us any dividends whose ex-date is the next session as
well as paying out any dividends whose pay-date is the next session
"""
position_tracker = self.position_tracker
# Earn dividends whose ex_date is the next trading day. We need to
# check if we own any of these stocks so we know to pay them out when
# the pay date comes.
held_sids = set(position_tracker.positions)
if held_sids:
cash_dividends = adjustment_reader.get_dividends_with_ex_date(
held_sids, next_session, asset_finder
)
stock_dividends = adjustment_reader.get_stock_dividends_with_ex_date(
held_sids, next_session, asset_finder
)
# Earning a dividend just marks that we need to get paid out on
# the dividend's pay-date. This does not affect our cash yet.
position_tracker.earn_dividends(
cash_dividends,
stock_dividends,
)
# Pay out the dividends whose pay-date is the next session. This does
        # affect our cash.
self._cash_flow(
position_tracker.pay_dividends(
next_session,
),
)
def capital_change(self, change_amount):
self.update_portfolio()
portfolio = self._portfolio
# we update the cash and total value so this is not dirty
portfolio.portfolio_value += change_amount
portfolio.cash += change_amount
def transactions(self, dt=None):
"""Retrieve the dict-form of all of the transactions in a given bar or
for the whole simulation.
Parameters
----------
dt : pd.Timestamp or None, optional
The particular datetime to look up transactions for. If not passed,
or None is explicitly passed, all of the transactions will be
returned.
Returns
-------
transactions : list[dict]
The transaction information.
"""
if dt is None:
# flatten the by-day transactions
return [
txn
for by_day in self._processed_transactions.values()
for txn in by_day
]
return self._processed_transactions.get(dt, [])
def orders(self, dt=None):
"""Retrieve the dict-form of all of the orders in a given bar or for
the whole simulation.
Parameters
----------
dt : pd.Timestamp or None, optional
The particular datetime to look up order for. If not passed, or
None is explicitly passed, all of the orders will be returned.
Returns
-------
orders : list[dict]
The order information.
"""
if dt is None:
# orders by id is already flattened
return [o.to_dict() for o in self._orders_by_id.values()]
return [o.to_dict() for o in self._orders_by_modified.get(dt, {}).values()]
@property
def positions(self):
return self.position_tracker.get_position_list()
def _get_payout_total(self, positions):
calculate_payout = self._calculate_payout
payout_last_sale_prices = self._payout_last_sale_prices
total = 0
for asset, old_price in payout_last_sale_prices.items():
position = positions[asset]
payout_last_sale_prices[asset] = price = position.last_sale_price
amount = position.amount
total += calculate_payout(
asset.price_multiplier,
amount,
old_price,
price,
)
return total
def update_portfolio(self):
"""Force a computation of the current portfolio state."""
if not self._dirty_portfolio:
return
portfolio = self._portfolio
pt = self.position_tracker
portfolio.positions = pt.get_positions()
position_stats = pt.stats
portfolio.positions_value = position_value = position_stats.net_value
portfolio.positions_exposure = position_stats.net_exposure
self._cash_flow(self._get_payout_total(pt.positions))
start_value = portfolio.portfolio_value
        # update the portfolio value to the new end-of-bar value
portfolio.portfolio_value = end_value = portfolio.cash + position_value
pnl = end_value - start_value
if start_value != 0:
returns = pnl / start_value
else:
returns = 0.0
portfolio.pnl += pnl
portfolio.returns = (1 + portfolio.returns) * (1 + returns) - 1
# the portfolio has been fully synced
self._dirty_portfolio = False
@property
def portfolio(self):
"""Compute the current portfolio.
Notes
-----
This is cached, repeated access will not recompute the portfolio until
the portfolio may have changed.
"""
self.update_portfolio()
return self._immutable_portfolio
def calculate_period_stats(self):
position_stats = self.position_tracker.stats
portfolio_value = self.portfolio.portfolio_value
if portfolio_value == 0:
gross_leverage = net_leverage = np.inf
else:
gross_leverage = position_stats.gross_exposure / portfolio_value
net_leverage = position_stats.net_exposure / portfolio_value
return portfolio_value, gross_leverage, net_leverage
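    # Worked example (illustrative): with a portfolio value of 100,000, a
    # gross exposure of 150,000 and a net exposure of 50,000, gross leverage
    # is 1.5 and net leverage is 0.5; a zero portfolio value maps both to inf.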
def override_account_fields(
self,
settled_cash=not_overridden,
accrued_interest=not_overridden,
buying_power=not_overridden,
equity_with_loan=not_overridden,
total_positions_value=not_overridden,
total_positions_exposure=not_overridden,
regt_equity=not_overridden,
regt_margin=not_overridden,
initial_margin_requirement=not_overridden,
maintenance_margin_requirement=not_overridden,
available_funds=not_overridden,
excess_liquidity=not_overridden,
cushion=not_overridden,
day_trades_remaining=not_overridden,
leverage=not_overridden,
net_leverage=not_overridden,
net_liquidation=not_overridden,
):
"""Override fields on ``self.account``."""
# mark that the portfolio is dirty to override the fields again
self._dirty_account = True
self._account_overrides = kwargs = {
k: v for k, v in locals().items() if v is not not_overridden
}
del kwargs["self"]
@property
def account(self):
if self._dirty_account:
portfolio = self.portfolio
account = self._account
# If no attribute is found in the ``_account_overrides`` resort to
# the following default values. If an attribute is found use the
# existing value. For instance, a broker may provide updates to
# these attributes. In this case we do not want to over write the
# broker values with the default values.
account.settled_cash = portfolio.cash
account.accrued_interest = 0.0
account.buying_power = np.inf
account.equity_with_loan = portfolio.portfolio_value
account.total_positions_value = portfolio.portfolio_value - portfolio.cash
account.total_positions_exposure = portfolio.positions_exposure
account.regt_equity = portfolio.cash
account.regt_margin = np.inf
account.initial_margin_requirement = 0.0
account.maintenance_margin_requirement = 0.0
account.available_funds = portfolio.cash
account.excess_liquidity = portfolio.cash
account.cushion = (
(portfolio.cash / portfolio.portfolio_value)
if portfolio.portfolio_value
else np.nan
)
account.day_trades_remaining = np.inf
(
account.net_liquidation,
account.gross_leverage,
account.net_leverage,
) = self.calculate_period_stats()
account.leverage = account.gross_leverage
# apply the overrides
for k, v in self._account_overrides.items():
setattr(account, k, v)
# the account has been fully synced
self._dirty_account = False
return self._immutable_account | zipline-tej | /zipline_tej-0.0.48-cp38-cp38-win_amd64.whl/zipline/finance/ledger.py | ledger.py |
from abc import ABCMeta, abstractmethod
from zipline.extensions import extensible
from zipline.finance.cancel_policy import NeverCancel
@extensible
class Blotter(metaclass=ABCMeta):
    def __init__(self, cancel_policy=None, trading_policy=None):  # 20230802 (by MRC) added trading_policy support
        self.cancel_policy = cancel_policy if cancel_policy else NeverCancel()
        self.trading_policy = trading_policy  # 20230802 (by MRC) added trading_policy support
self.current_dt = None
def set_date(self, dt):
self.current_dt = dt
@abstractmethod
def order(self, asset, amount, style, order_id=None):
"""Place an order.
Parameters
----------
asset : zipline.assets.Asset
The asset that this order is for.
amount : int
The amount of shares to order. If ``amount`` is positive, this is
the number of shares to buy or cover. If ``amount`` is negative,
this is the number of shares to sell or short.
style : zipline.finance.execution.ExecutionStyle
The execution style for the order.
order_id : str, optional
The unique identifier for this order.
Returns
-------
order_id : str or None
The unique identifier for this order, or None if no order was
placed.
Notes
-----
amount > 0 :: Buy/Cover
amount < 0 :: Sell/Short
Market order: order(asset, amount)
Limit order: order(asset, amount, style=LimitOrder(limit_price))
Stop order: order(asset, amount, style=StopOrder(stop_price))
StopLimit order: order(asset, amount, style=StopLimitOrder(limit_price,
stop_price))
"""
raise NotImplementedError("order")
def batch_order(self, order_arg_lists):
"""Place a batch of orders.
Parameters
----------
order_arg_lists : iterable[tuple]
Tuples of args that `order` expects.
Returns
-------
order_ids : list[str or None]
The unique identifier (or None) for each of the orders placed
(or not placed).
Notes
-----
This is required for `Blotter` subclasses to be able to place a batch
of orders, instead of being passed the order requests one at a time.
"""
return [self.order(*order_args) for order_args in order_arg_lists]
@abstractmethod
def cancel(self, order_id, relay_status=True):
"""Cancel a single order
Parameters
----------
order_id : int
The id of the order
relay_status : bool
Whether or not to record the status of the order
"""
raise NotImplementedError("cancel")
@abstractmethod
def cancel_all_orders_for_asset(self, asset, warn=False, relay_status=True):
"""
Cancel all open orders for a given asset.
"""
raise NotImplementedError("cancel_all_orders_for_asset")
@abstractmethod
def execute_cancel_policy(self, event):
raise NotImplementedError("execute_cancel_policy")
@abstractmethod
def reject(self, order_id, reason=""):
"""
Mark the given order as 'rejected', which is functionally similar to
cancelled. The distinction is that rejections are involuntary (and
usually include a message from a broker indicating why the order was
rejected) while cancels are typically user-driven.
"""
raise NotImplementedError("reject")
@abstractmethod
def hold(self, order_id, reason=""):
"""
Mark the order with order_id as 'held'. Held is functionally similar
to 'open'. When a fill (full or partial) arrives, the status
will automatically change back to open/filled as necessary.
"""
raise NotImplementedError("hold")
@abstractmethod
def process_splits(self, splits):
"""
Processes a list of splits by modifying any open orders as needed.
Parameters
----------
splits: list
A list of splits. Each split is a tuple of (asset, ratio).
Returns
-------
None
"""
raise NotImplementedError("process_splits")
@abstractmethod
def get_transactions(self, bar_data):
"""
Creates a list of transactions based on the current open orders,
slippage model, and commission model.
Parameters
----------
bar_data: zipline._protocol.BarData
Notes
-----
This method book-keeps the blotter's open_orders dictionary, so that
it is accurate by the time we're done processing open orders.
Returns
-------
transactions_list: List
transactions_list: list of transactions resulting from the current
open orders. If there were no open orders, an empty list is
returned.
commissions_list: List
commissions_list: list of commissions resulting from filling the
open orders. A commission is an object with "asset" and "cost"
parameters.
closed_orders: List
closed_orders: list of all the orders that have filled.
"""
raise NotImplementedError("get_transactions")
@abstractmethod
def prune_orders(self, closed_orders):
"""
Removes all given orders from the blotter's open_orders list.
Parameters
----------
closed_orders: iterable of orders that are closed.
Returns
-------
None
"""
raise NotImplementedError("prune_orders") | zipline-tej | /zipline_tej-0.0.48-cp38-cp38-win_amd64.whl/zipline/finance/blotter/blotter.py | blotter.py |
from logbook import Logger
from collections import defaultdict
from copy import copy
from zipline.assets import Equity, Future, Asset
from .blotter import Blotter
from zipline.extensions import register
from zipline.finance.order import Order
from zipline.finance.slippage import (
DEFAULT_FUTURE_VOLUME_SLIPPAGE_BAR_LIMIT,
VolatilityVolumeShare,
FixedBasisPointsSlippage,
)
from zipline.finance.commission import (
DEFAULT_PER_CONTRACT_COST,
FUTURE_EXCHANGE_FEES_BY_SYMBOL,
PerContract,
PerShare,
)
from zipline.utils.input_validation import expect_types
log = Logger("Blotter")
warning_logger = Logger("AlgoWarning")
@register(Blotter, "default")
class SimulationBlotter(Blotter):
def __init__(
self,
equity_slippage=None,
future_slippage=None,
equity_commission=None,
future_commission=None,
cancel_policy=None,
        trading_policy=None,  # 20230804 (by MRC) added trading_policy support
):
        super().__init__(cancel_policy=cancel_policy,
                         trading_policy=trading_policy)  # 20230804 (by MRC) added trading_policy support
# these orders are aggregated by asset
self.open_orders = defaultdict(list)
# keep a dict of orders by their own id
self.orders = {}
# holding orders that have come in since the last event.
self.new_orders = []
self.max_shares = int(1e11)
self.slippage_models = {
Equity: equity_slippage or FixedBasisPointsSlippage(),
Future: future_slippage
or VolatilityVolumeShare(
volume_limit=DEFAULT_FUTURE_VOLUME_SLIPPAGE_BAR_LIMIT,
),
}
self.commission_models = {
Equity: equity_commission or PerShare(),
Future: future_commission
or PerContract(
cost=DEFAULT_PER_CONTRACT_COST,
exchange_fee=FUTURE_EXCHANGE_FEES_BY_SYMBOL,
),
}
def __repr__(self):
return """
{class_name}(
slippage_models={slippage_models},
commission_models={commission_models},
open_orders={open_orders},
orders={orders},
new_orders={new_orders},
current_dt={current_dt})
""".strip().format(
class_name=self.__class__.__name__,
slippage_models=self.slippage_models,
commission_models=self.commission_models,
open_orders=self.open_orders,
orders=self.orders,
new_orders=self.new_orders,
current_dt=self.current_dt,
)
@expect_types(asset=Asset)
def order(self, asset, amount, style, order_id=None):
"""Place an order.
Parameters
----------
asset : zipline.assets.Asset
The asset that this order is for.
amount : int
The amount of shares to order. If ``amount`` is positive, this is
the number of shares to buy or cover. If ``amount`` is negative,
this is the number of shares to sell or short.
style : zipline.finance.execution.ExecutionStyle
The execution style for the order.
order_id : str, optional
The unique identifier for this order.
Returns
-------
order_id : str or None
The unique identifier for this order, or None if no order was
placed.
Notes
-----
amount > 0 :: Buy/Cover
amount < 0 :: Sell/Short
Market order: order(asset, amount)
Limit order: order(asset, amount, style=LimitOrder(limit_price))
Stop order: order(asset, amount, style=StopOrder(stop_price))
StopLimit order: order(asset, amount, style=StopLimitOrder(limit_price,
stop_price))
"""
# something could be done with amount to further divide
# between buy by share count OR buy shares up to a dollar amount
# numeric == share count AND "$dollar.cents" == cost amount
if amount == 0:
# Don't bother placing orders for 0 shares.
return None
elif amount > self.max_shares:
# Arbitrary limit of 100 billion (US) shares will never be
# exceeded except by a buggy algorithm.
raise OverflowError(
"Can't order more than %d shares" % self.max_shares
)
is_buy = amount > 0
order = Order(
dt=self.current_dt,
asset=asset,
amount=amount,
stop=style.get_stop_price(is_buy),
limit=style.get_limit_price(is_buy),
id=order_id,
)
self.open_orders[order.asset].append(order)
self.orders[order.id] = order
self.new_orders.append(order)
return order.id
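    # Usage sketch (illustrative; ``asset`` is assumed to be an already-resolved
    # Equity and the styles come from zipline.finance.execution):
    #
    #     blotter.order(asset, 100, MarketOrder())                  # buy 100
    #     blotter.order(asset, -100, LimitOrder(limit_price=50.0))  # sell 100
    #
    # Each call returns the new order's id, or None for a zero-share order.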
def cancel(self, order_id, relay_status=True):
if order_id not in self.orders:
return
cur_order = self.orders[order_id]
if cur_order.open:
order_list = self.open_orders[cur_order.asset]
if cur_order in order_list:
order_list.remove(cur_order)
if cur_order in self.new_orders:
self.new_orders.remove(cur_order)
cur_order.cancel()
cur_order.dt = self.current_dt
if relay_status:
# we want this order's new status to be relayed out
# along with newly placed orders.
self.new_orders.append(cur_order)
def cancel_all_orders_for_asset(self, asset, warn=False, relay_status=True):
"""
Cancel all open orders for a given asset.
"""
# (sadly) open_orders is a defaultdict, so this will always succeed.
orders = self.open_orders[asset]
# We're making a copy here because `cancel` mutates the list of open
# orders in place. The right thing to do here would be to make
# self.open_orders no longer a defaultdict. If we do that, then we
# should just remove the orders once here and be done with the matter.
for order in orders[:]:
self.cancel(order.id, relay_status)
if warn:
# Message appropriately depending on whether there's
# been a partial fill or not.
if order.filled > 0:
warning_logger.warn(
"Your order for {order_amt} shares of "
"{order_sym} has been partially filled. "
"{order_filled} shares were successfully "
"purchased. {order_failed} shares were not "
"filled by the end of day and "
"were canceled.".format(
order_amt=order.amount,
order_sym=order.asset.symbol,
order_filled=order.filled,
order_failed=order.amount - order.filled,
)
)
elif order.filled < 0:
warning_logger.warn(
"Your order for {order_amt} shares of "
"{order_sym} has been partially filled. "
"{order_filled} shares were successfully "
"sold. {order_failed} shares were not "
"filled by the end of day and "
"were canceled.".format(
order_amt=order.amount,
order_sym=order.asset.symbol,
order_filled=-1 * order.filled,
order_failed=-1 * (order.amount - order.filled),
)
)
else:
warning_logger.warn(
"Your order for {order_amt} shares of "
"{order_sym} failed to fill by the end of day "
"and was canceled.".format(
order_amt=order.amount,
order_sym=order.asset.symbol,
)
)
assert not orders
del self.open_orders[asset]
# End of day cancel for daily frequency
def execute_daily_cancel_policy(self, event):
if self.cancel_policy.should_cancel(event):
warn = self.cancel_policy.warn_on_cancel
for asset in copy(self.open_orders):
orders = self.open_orders[asset]
if len(orders) > 1:
order = orders[0]
self.cancel(order.id, relay_status=True)
if warn:
if order.filled > 0:
warning_logger.warn(
"Your order for {order_amt} shares of "
"{order_sym} has been partially filled. "
"{order_filled} shares were successfully "
"purchased. {order_failed} shares were not "
"filled by the end of day and "
"were canceled.".format(
order_amt=order.amount,
order_sym=order.asset.symbol,
order_filled=order.filled,
order_failed=order.amount - order.filled,
)
)
elif order.filled < 0:
warning_logger.warn(
"Your order for {order_amt} shares of "
"{order_sym} has been partially filled. "
"{order_filled} shares were successfully "
"sold. {order_failed} shares were not "
"filled by the end of day and "
"were canceled.".format(
order_amt=order.amount,
order_sym=order.asset.symbol,
order_filled=-1 * order.filled,
order_failed=-1
* (order.amount - order.filled),
)
)
else:
warning_logger.warn(
"Your order for {order_amt} shares of "
"{order_sym} failed to fill by the end of day "
"and was canceled.".format(
order_amt=order.amount,
order_sym=order.asset.symbol,
)
)
def execute_cancel_policy(self, event):
if self.cancel_policy.should_cancel(event):
warn = self.cancel_policy.warn_on_cancel
for asset in copy(self.open_orders):
self.cancel_all_orders_for_asset(
asset, warn, relay_status=False
)
def reject(self, order_id, reason=""):
"""
Mark the given order as 'rejected', which is functionally similar to
cancelled. The distinction is that rejections are involuntary (and
usually include a message from a broker indicating why the order was
rejected) while cancels are typically user-driven.
"""
if order_id not in self.orders:
return
cur_order = self.orders[order_id]
order_list = self.open_orders[cur_order.asset]
if cur_order in order_list:
order_list.remove(cur_order)
if cur_order in self.new_orders:
self.new_orders.remove(cur_order)
cur_order.reject(reason=reason)
cur_order.dt = self.current_dt
# we want this order's new status to be relayed out
# along with newly placed orders.
self.new_orders.append(cur_order)
def hold(self, order_id, reason=""):
"""
Mark the order with order_id as 'held'. Held is functionally similar
to 'open'. When a fill (full or partial) arrives, the status
will automatically change back to open/filled as necessary.
"""
if order_id not in self.orders:
return
cur_order = self.orders[order_id]
if cur_order.open:
if cur_order in self.new_orders:
self.new_orders.remove(cur_order)
cur_order.hold(reason=reason)
cur_order.dt = self.current_dt
# we want this order's new status to be relayed out
# along with newly placed orders.
self.new_orders.append(cur_order)
def process_splits(self, splits):
"""
Processes a list of splits by modifying any open orders as needed.
Parameters
----------
splits: list
A list of splits. Each split is a tuple of (asset, ratio).
Returns
-------
None
"""
for asset, ratio in splits:
if asset not in self.open_orders:
continue
orders_to_modify = self.open_orders[asset]
for order in orders_to_modify:
order.handle_split(ratio)
def get_transactions(self, bar_data):
"""
Creates a list of transactions based on the current open orders,
slippage model, and commission model.
Parameters
----------
bar_data: zipline._protocol.BarData
Notes
-----
This method book-keeps the blotter's open_orders dictionary, so that
it is accurate by the time we're done processing open orders.
Returns
-------
transactions_list: List
transactions_list: list of transactions resulting from the current
open orders. If there were no open orders, an empty list is
returned.
commissions_list: List
commissions_list: list of commissions resulting from filling the
open orders. A commission is an object with "asset" and "cost"
parameters.
closed_orders: List
closed_orders: list of all the orders that have filled.
"""
closed_orders = []
transactions = []
commissions = []
if self.open_orders:
for asset, asset_orders in self.open_orders.items():
slippage = self.slippage_models[type(asset)]
for order, txn in slippage.simulate(
                    bar_data, asset, asset_orders, self.trading_policy  # 20230804 (by MRC) added trading_policy support
):
commission = self.commission_models[type(asset)]
additional_commission = commission.calculate(order, txn)
if additional_commission > 0:
commissions.append(
{
"asset": order.asset,
"order": order,
"cost": additional_commission,
}
)
order.filled += txn.amount
order.commission += additional_commission
order.dt = txn.dt
transactions.append(txn)
if not order.open:
closed_orders.append(order)
return transactions, commissions, closed_orders
def prune_orders(self, closed_orders):
"""
Removes all given orders from the blotter's open_orders list.
Parameters
----------
closed_orders: iterable of orders that are closed.
Returns
-------
None
"""
# remove all closed orders from our open_orders dict
for order in closed_orders:
asset = order.asset
asset_orders = self.open_orders[asset]
try:
asset_orders.remove(order)
except ValueError:
continue
# now clear out the assets from our open_orders dict that have
# zero open orders
for asset in list(self.open_orders.keys()):
if len(self.open_orders[asset]) == 0:
del self.open_orders[asset] | zipline-tej | /zipline_tej-0.0.48-cp38-cp38-win_amd64.whl/zipline/finance/blotter/simulation_blotter.py | simulation_blotter.py |
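# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original source): how a simulation loop
# might drive the blotter once per bar. ``blotter`` and ``bar_data`` are
# assumed to be a SimulationBlotter and a BarData instance supplied by the
# caller; the ``on_*`` callbacks stand in for whatever consumes the results
# (e.g. the metrics tracker).
def _demo_process_one_bar(blotter, bar_data, on_transaction=print, on_commission=print):
    """Hedged example of the per-bar blotter cycle used by the simulator."""
    # 1. Turn open orders into fills and commissions for this bar.
    transactions, commissions, closed_orders = blotter.get_transactions(bar_data)
    # 2. Hand the results to whatever tracks performance.
    for txn in transactions:
        on_transaction(txn)
    for commission in commissions:
        on_commission(commission)
    # 3. Drop fully-filled (closed) orders from the open-order book.
    blotter.prune_orders(closed_orders)
    return transactions, commissions, closed_orders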
import logbook
from ..ledger import Ledger
from zipline.utils.exploding_object import NamedExplodingObject
log = logbook.Logger(__name__)
class MetricsTracker(object):
"""The algorithm's interface to the registered risk and performance
metrics.
Parameters
----------
    trading_calendar : TradingCalendar
The trading calendar used in the simulation.
first_session : pd.Timestamp
The label of the first trading session in the simulation.
last_session : pd.Timestamp
The label of the last trading session in the simulation.
capital_base : float
The starting capital for the simulation.
emission_rate : {'daily', 'minute'}
How frequently should a performance packet be generated?
data_frequency : {'daily', 'minute'}
The data frequency of the data portal.
asset_finder : AssetFinder
The asset finder used in the simulation.
metrics : list[Metric]
The metrics to track.
"""
_hooks = (
"start_of_simulation",
"end_of_simulation",
"start_of_session",
"end_of_session",
"end_of_bar",
)
@staticmethod
def _execution_open_and_close(calendar, session):
open_, close = calendar.open_and_close_for_session(session)
execution_open = calendar.execution_time_from_open(open_)
execution_close = calendar.execution_time_from_close(close)
return execution_open, execution_close
def __init__(
self,
trading_calendar,
first_session,
last_session,
capital_base,
emission_rate,
data_frequency,
asset_finder,
metrics,
):
self.emission_rate = emission_rate
self._trading_calendar = trading_calendar
self._first_session = first_session
self._last_session = last_session
self._capital_base = capital_base
self._asset_finder = asset_finder
self._current_session = first_session
self._market_open, self._market_close = self._execution_open_and_close(
trading_calendar,
first_session,
)
self._session_count = 0
self._sessions = sessions = trading_calendar.sessions_in_range(
first_session,
last_session,
)
self._total_session_count = len(sessions)
self._ledger = Ledger(sessions, capital_base, data_frequency)
self._benchmark_source = NamedExplodingObject(
"self._benchmark_source",
"_benchmark_source is not set until ``handle_start_of_simulation``"
" is called",
)
self._treasury_source = NamedExplodingObject(
"self._treasury_source",
"_treasury_source is not set until ``handle_start_of_simulation``"
" is called",
) #20230209 (by MRC) for treasury return column in perf
if emission_rate == "minute":
def progress(self):
return 1.0 # a fake value
else:
def progress(self):
return self._session_count / self._total_session_count
# don't compare these strings over and over again!
self._progress = progress
# bind all of the hooks from the passed metric objects.
for hook in self._hooks:
registered = []
for metric in metrics:
try:
registered.append(getattr(metric, hook))
except AttributeError:
pass
def closing_over_loop_variables_is_hard(registered=registered):
def hook_implementation(*args, **kwargs):
for impl in registered:
impl(*args, **kwargs)
return hook_implementation
hook_implementation = closing_over_loop_variables_is_hard()
hook_implementation.__name__ = hook
setattr(self, hook, hook_implementation)
def handle_start_of_simulation(self,
benchmark_source,
treasury_source): #20230209 (by MRC) for treasury return column in perf
self._benchmark_source = benchmark_source
self._treasury_source = treasury_source #20230209 (by MRC) for treasury return column in perf
self.start_of_simulation(
self._ledger,
self.emission_rate,
self._trading_calendar,
self._sessions,
benchmark_source,
treasury_source #20230209 (by MRC) for treasury return column in perf
)
@property
def portfolio(self):
return self._ledger.portfolio
@property
def account(self):
return self._ledger.account
@property
def positions(self):
return self._ledger.position_tracker.positions
def update_position(
self,
asset,
amount=None,
last_sale_price=None,
last_sale_date=None,
cost_basis=None,
):
self._ledger.position_tracker.update_position(
asset,
amount,
last_sale_price,
last_sale_date,
cost_basis,
)
def override_account_fields(self, **kwargs):
self._ledger.override_account_fields(**kwargs)
def process_transaction(self, transaction):
self._ledger.process_transaction(transaction)
def handle_splits(self, splits):
self._ledger.process_splits(splits)
def process_order(self, event):
self._ledger.process_order(event)
def process_commission(self, commission):
self._ledger.process_commission(commission)
def process_close_position(self, asset, dt, data_portal):
self._ledger.close_position(asset, dt, data_portal)
def capital_change(self, amount):
self._ledger.capital_change(amount)
def sync_last_sale_prices(self, dt, data_portal, handle_non_market_minutes=False):
self._ledger.sync_last_sale_prices(
dt,
data_portal,
handle_non_market_minutes=handle_non_market_minutes,
)
def handle_minute_close(self, dt, data_portal):
"""
Handles the close of the given minute in minute emission.
Parameters
----------
dt : Timestamp
The minute that is ending
Returns
-------
A minute perf packet.
"""
self.sync_last_sale_prices(dt, data_portal)
packet = {
"period_start": self._first_session,
"period_end": self._last_session,
"capital_base": self._capital_base,
"minute_perf": {
"period_open": self._market_open,
"period_close": dt,
},
"cumulative_perf": {
"period_open": self._first_session,
"period_close": self._last_session,
},
"progress": self._progress(self),
"cumulative_risk_metrics": {},
}
ledger = self._ledger
ledger.end_of_bar(self._session_count)
self.end_of_bar(
packet,
ledger,
dt,
self._session_count,
data_portal,
)
return packet
def handle_market_open(self, session_label, data_portal):
"""Handles the start of each session.
Parameters
----------
session_label : Timestamp
The label of the session that is about to begin.
data_portal : DataPortal
The current data portal.
"""
ledger = self._ledger
ledger.start_of_session(session_label)
adjustment_reader = data_portal.adjustment_reader
if adjustment_reader is not None:
# this is None when running with a dataframe source
ledger.process_dividends(
session_label,
self._asset_finder,
adjustment_reader,
)
self._current_session = session_label
cal = self._trading_calendar
self._market_open, self._market_close = self._execution_open_and_close(
cal,
session_label,
)
self.start_of_session(ledger, session_label, data_portal)
def handle_market_close(self, dt, data_portal):
"""Handles the close of the given day.
Parameters
----------
dt : Timestamp
The most recently completed simulation datetime.
data_portal : DataPortal
The current data portal.
Returns
-------
A daily perf packet.
"""
completed_session = self._current_session
if self.emission_rate == "daily":
# this method is called for both minutely and daily emissions, but
# this chunk of code here only applies for daily emissions. (since
# it's done every minute, elsewhere, for minutely emission).
self.sync_last_sale_prices(dt, data_portal)
session_ix = self._session_count
# increment the day counter before we move markers forward.
self._session_count += 1
packet = {
"period_start": self._first_session,
"period_end": self._last_session,
"capital_base": self._capital_base,
"daily_perf": {
"period_open": self._market_open,
"period_close": dt,
},
"cumulative_perf": {
"period_open": self._first_session,
"period_close": self._last_session,
},
"progress": self._progress(self),
"cumulative_risk_metrics": {},
}
ledger = self._ledger
ledger.end_of_session(session_ix)
self.end_of_session(
packet,
ledger,
completed_session,
session_ix,
data_portal,
)
return packet
def handle_simulation_end(self, data_portal):
"""
When the simulation is complete, run the full period risk report
and send it out on the results socket.
"""
log.info(
"Simulated {} trading days\n" "first open: {}\n" "last close: {}",
self._session_count,
self._trading_calendar.session_open(self._first_session),
self._trading_calendar.session_close(self._last_session),
)
packet = {}
self.end_of_simulation(
packet,
self._ledger,
self._trading_calendar,
self._sessions,
data_portal,
self._benchmark_source,
self._treasury_source #20230209 (by MRC) for treasury return column in perf
)
return packet | zipline-tej | /zipline_tej-0.0.48-cp38-cp38-win_amd64.whl/zipline/finance/metrics/tracker.py | tracker.py |
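# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original source): the daily-emission
# call sequence that the simulation loop is expected to drive. ``tracker``,
# ``sessions`` and ``data_portal`` are assumed to be a MetricsTracker, the
# list of session labels, and a DataPortal supplied by the caller; passing the
# session label as the close ``dt`` is a simplification for daily emission.
def _demo_daily_emission_loop(tracker, sessions, data_portal):
    """Hedged example: one perf packet per session plus the final risk packet."""
    daily_packets = []
    for session in sessions:
        tracker.handle_market_open(session, data_portal)
        # ... transactions/commissions for the session would be fed in here via
        # tracker.process_transaction / tracker.process_commission ...
        daily_packets.append(tracker.handle_market_close(session, data_portal))
    end_packet = tracker.handle_simulation_end(data_portal)
    return daily_packets, end_packet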
from functools import partial
from zipline.utils.compat import mappingproxy
def _make_metrics_set_core():
"""Create a family of metrics sets functions that read from the same
metrics set mapping.
Returns
-------
metrics_sets : mappingproxy
The mapping of metrics sets to load functions.
register : callable
The function which registers new metrics sets in the ``metrics_sets``
mapping.
unregister : callable
The function which deregisters metrics sets from the ``metrics_sets``
mapping.
load : callable
        The function which constructs an instance of a registered metrics set.
"""
_metrics_sets = {}
# Expose _metrics_sets through a proxy so that users cannot mutate this
# accidentally. Users may go through `register` to update this which will
# warn when trampling another metrics set.
metrics_sets = mappingproxy(_metrics_sets)
def register(name, function=None):
"""Register a new metrics set.
Parameters
----------
name : str
The name of the metrics set
function : callable
The callable which produces the metrics set.
Notes
-----
This may be used as a decorator if only ``name`` is passed.
See Also
--------
zipline.finance.metrics.get_metrics_set
zipline.finance.metrics.unregister_metrics_set
"""
if function is None:
# allow as decorator with just name.
return partial(register, name)
if name in _metrics_sets:
raise ValueError("metrics set %r is already registered" % name)
_metrics_sets[name] = function
return function
def unregister(name):
"""Unregister an existing metrics set.
Parameters
----------
name : str
The name of the metrics set
See Also
--------
zipline.finance.metrics.register_metrics_set
"""
try:
del _metrics_sets[name]
except KeyError:
raise ValueError(
"metrics set %r was not already registered" % name,
)
def load(name):
"""Return an instance of the metrics set registered with the given name.
Returns
-------
metrics : set[Metric]
A new instance of the metrics set.
Raises
------
ValueError
Raised when no metrics set is registered to ``name``
"""
try:
function = _metrics_sets[name]
except KeyError:
raise ValueError(
"no metrics set registered as %r, options are: %r"
% (
name,
sorted(_metrics_sets),
),
)
return function()
return metrics_sets, register, unregister, load
metrics_sets, register, unregister, load = _make_metrics_set_core() | zipline-tej | /zipline_tej-0.0.48-cp38-cp38-win_amd64.whl/zipline/finance/metrics/core.py | core.py |
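# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original source): how the registry
# created above is typically used. Wrapped in a function so nothing runs at
# import time; the "minimal" set name is hypothetical.
def _demo_metrics_set_registry():
    """Hedged example of registering, loading and unregistering a metrics set."""
    @register("minimal")
    def minimal_metrics():
        # An empty metrics set; real sets return a set of Metric objects.
        return set()

    assert "minimal" in metrics_sets  # read-only mappingproxy view
    instance = load("minimal")        # calls minimal_metrics()
    unregister("minimal")             # remove it again
    return instance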
import empyrical
from zipline.utils.deprecate import deprecated
from .core import (
metrics_sets,
register,
unregister,
load,
)
from .metric import (
AlphaBeta,
BenchmarkReturnsAndVolatility,
TreasuryReturns, #20230209 (by MRC)
CashFlow,
DailyLedgerField,
MaxLeverage,
NumTradingDays,
Orders,
PeriodLabel,
PNL,
Returns,
ReturnsStatistic,
SimpleLedgerField,
StartOfPeriodLedgerField,
Transactions,
#_ConstantCumulativeRiskMetric, #20230209 (by MRC)
#_ClassicRiskMetrics, #20230209 (by MRC)
)
from .tracker import MetricsTracker
__all__ = ["MetricsTracker", "unregister", "metrics_sets", "load"]
register("none", set)
@register("default")
def default_metrics():
return {
Returns(),
ReturnsStatistic(empyrical.annual_volatility, "algo_volatility"),
BenchmarkReturnsAndVolatility(),
TreasuryReturns(), #20230209 (by MRC) for treasury return column
PNL(),
CashFlow(),
Orders(),
Transactions(),
SimpleLedgerField("positions"),
StartOfPeriodLedgerField(
"portfolio.positions_exposure",
"starting_exposure",
),
DailyLedgerField(
"portfolio.positions_exposure",
"ending_exposure",
),
StartOfPeriodLedgerField("portfolio.positions_value", "starting_value"),
DailyLedgerField("portfolio.positions_value", "ending_value"),
StartOfPeriodLedgerField("portfolio.cash", "starting_cash"),
DailyLedgerField("portfolio.cash", "ending_cash"),
DailyLedgerField("portfolio.portfolio_value"),
DailyLedgerField("position_tracker.stats.longs_count"),
DailyLedgerField("position_tracker.stats.shorts_count"),
DailyLedgerField("position_tracker.stats.long_value"),
DailyLedgerField("position_tracker.stats.short_value"),
DailyLedgerField("position_tracker.stats.long_exposure"),
DailyLedgerField("position_tracker.stats.short_exposure"),
DailyLedgerField("account.gross_leverage"),
DailyLedgerField("account.net_leverage"),
AlphaBeta(),
ReturnsStatistic(empyrical.sharpe_ratio, "sharpe"),
ReturnsStatistic(empyrical.sortino_ratio, "sortino"),
ReturnsStatistic(empyrical.max_drawdown),
MaxLeverage(),
# Please kill these!
#_ConstantCumulativeRiskMetric("excess_return", 0.0), #20230210 (by MRC) modify excess_return
#_ConstantCumulativeRiskMetric("treasury_period_return", 0.0), #20230210 (by MRC) modify treasury_period_return
NumTradingDays(),
PeriodLabel(),
}
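# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original source): registering a custom
# metrics set that extends the default one. The "default_plus_positions" name
# is hypothetical; Positions is defined in .metric but not re-exported above,
# so it is imported locally here.
def _demo_register_custom_metrics_set():
    """Hedged example: build and register a metrics set on top of default_metrics()."""
    from .metric import Positions

    @register("default_plus_positions")  # hypothetical set name
    def default_plus_positions_metrics():
        metrics = default_metrics()
        metrics.add(Positions())
        return metrics

    instance = load("default_plus_positions")
    unregister("default_plus_positions")  # keep the registry clean after the demo
    return instance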
@register("classic")
@deprecated(
"The original risk packet has been deprecated and will be removed in a "
'future release. Please use "default" metrics instead.'
)
def classic_metrics():
    # _ClassicRiskMetrics is no longer imported at module level (see the
    # commented-out import above), so import it locally to keep this
    # deprecated metrics set working.
    from .metric import _ClassicRiskMetrics

    metrics = default_metrics()
    metrics.add(_ClassicRiskMetrics())
    return metrics
import datetime
from functools import partial
import operator as op
from dateutil.relativedelta import relativedelta
import empyrical as ep
import numpy as np
import pandas as pd
from zipline.utils.exploding_object import NamedExplodingObject
from zipline.finance._finance_ext import minute_annual_volatility
class SimpleLedgerField(object):
"""Emit the current value of a ledger field every bar or every session.
Parameters
----------
ledger_field : str
The ledger field to read.
packet_field : str, optional
The name of the field to populate in the packet. If not provided,
``ledger_field`` will be used.
"""
def __init__(self, ledger_field, packet_field=None):
self._get_ledger_field = op.attrgetter(ledger_field)
if packet_field is None:
self._packet_field = ledger_field.rsplit(".", 1)[-1]
else:
self._packet_field = packet_field
def end_of_bar(self, packet, ledger, dt, session_ix, data_portal):
packet["minute_perf"][self._packet_field] = self._get_ledger_field(
ledger,
)
def end_of_session(self, packet, ledger, session, session_ix, data_portal):
packet["daily_perf"][self._packet_field] = self._get_ledger_field(
ledger,
)
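# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original source): the dotted-path
# lookup that the ledger-field metrics above rely on. ``operator.attrgetter``
# resolves e.g. "portfolio.cash" against the ledger, and the packet field name
# defaults to the last path component. The fake ledger below is a stand-in.
def _demo_ledger_field_lookup():
    """Hedged example of SimpleLedgerField's field resolution."""
    from types import SimpleNamespace

    fake_ledger = SimpleNamespace(portfolio=SimpleNamespace(cash=123.0))
    get_cash = op.attrgetter("portfolio.cash")
    packet_field = "portfolio.cash".rsplit(".", 1)[-1]  # -> "cash"
    return packet_field, get_cash(fake_ledger)          # -> ("cash", 123.0)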
class DailyLedgerField(object):
"""Like :class:`~zipline.finance.metrics.metric.SimpleLedgerField` but
also puts the current value in the ``cumulative_perf`` section.
Parameters
----------
ledger_field : str
The ledger field to read.
packet_field : str, optional
The name of the field to populate in the packet. If not provided,
``ledger_field`` will be used.
"""
def __init__(self, ledger_field, packet_field=None):
self._get_ledger_field = op.attrgetter(ledger_field)
if packet_field is None:
self._packet_field = ledger_field.rsplit(".", 1)[-1]
else:
self._packet_field = packet_field
def end_of_bar(self, packet, ledger, dt, session_ix, data_portal):
field = self._packet_field
packet["cumulative_perf"][field] = packet["minute_perf"][
field
] = self._get_ledger_field(ledger)
def end_of_session(self, packet, ledger, session, session_ix, data_portal):
field = self._packet_field
packet["cumulative_perf"][field] = packet["daily_perf"][
field
] = self._get_ledger_field(ledger)
class StartOfPeriodLedgerField(object):
"""Keep track of the value of a ledger field at the start of the period.
Parameters
----------
ledger_field : str
The ledger field to read.
packet_field : str, optional
The name of the field to populate in the packet. If not provided,
``ledger_field`` will be used.
"""
def __init__(self, ledger_field, packet_field=None):
self._get_ledger_field = op.attrgetter(ledger_field)
if packet_field is None:
self._packet_field = ledger_field.rsplit(".", 1)[-1]
else:
self._packet_field = packet_field
def start_of_simulation(
self, ledger, emission_rate, trading_calendar, sessions, benchmark_source,treasury_source #20230209 (by MRC)
):
self._start_of_simulation = self._get_ledger_field(ledger)
def start_of_session(self, ledger, session, data_portal):
self._previous_day = self._get_ledger_field(ledger)
def _end_of_period(self, sub_field, packet, ledger):
packet_field = self._packet_field
packet["cumulative_perf"][packet_field] = self._start_of_simulation
packet[sub_field][packet_field] = self._previous_day
def end_of_bar(self, packet, ledger, dt, session_ix, data_portal):
self._end_of_period("minute_perf", packet, ledger)
def end_of_session(self, packet, ledger, session, session_ix, data_portal):
self._end_of_period("daily_perf", packet, ledger)
class Returns(object):
"""Tracks the daily and cumulative returns of the algorithm."""
def _end_of_period(field, packet, ledger, dt, session_ix, data_portal):
packet[field]["returns"] = ledger.todays_returns
packet["cumulative_perf"]["returns"] = ledger.portfolio.returns
packet["cumulative_risk_metrics"][
"algorithm_period_return"
] = ledger.portfolio.returns
end_of_bar = partial(_end_of_period, "minute_perf")
end_of_session = partial(_end_of_period, "daily_perf")
class BenchmarkReturnsAndVolatility(object):
"""Tracks daily and cumulative returns for the benchmark as well as the
volatility of the benchmark returns.
"""
def start_of_simulation(
self, ledger, emission_rate, trading_calendar, sessions, benchmark_source,treasury_source #20230209 (by MRC)
):
daily_returns_series = benchmark_source.daily_returns(
sessions[0],
sessions[-1],
)
self._daily_returns = daily_returns_array = daily_returns_series.values
        #self._daily_cumulative_returns = np.cumprod(1 + daily_returns_array) - 1  # 20230223 (by MRC) np.cumprod propagates NaNs in the first periods to every later value
        self._daily_cumulative_returns = np.nancumprod(1 + daily_returns_array) - 1  # 20230223 (by MRC) np.nancumprod keeps later cumulative returns valid when early values are NaN
self._daily_annual_volatility = (
daily_returns_series.expanding(2).std(ddof=1) * np.sqrt(252)
).values
if emission_rate == "daily":
self._minute_cumulative_returns = NamedExplodingObject(
"self._minute_cumulative_returns",
"does not exist in daily emission rate",
)
self._minute_annual_volatility = NamedExplodingObject(
"self._minute_annual_volatility",
"does not exist in daily emission rate",
)
else:
open_ = trading_calendar.session_open(sessions[0])
close = trading_calendar.session_close(sessions[-1])
returns = benchmark_source.get_range(open_, close)
self._minute_cumulative_returns = (1 + returns).cumprod() - 1
self._minute_annual_volatility = pd.Series(
minute_annual_volatility(
returns.index.normalize().view("int64"),
returns.values,
daily_returns_array,
),
index=returns.index,
)
def end_of_bar(self, packet, ledger, dt, session_ix, data_portal):
r = self._minute_cumulative_returns[dt]
if np.isnan(r):
r = None
        s = self._daily_returns[session_ix] #20230211 (by MRC) for benchmark_return in perf
if np.isnan(s):
s = None #20230211 (by MRC) for benchmark_return in perf
packet["cumulative_risk_metrics"]["benchmark_period_return"] = r
if (ledger.portfolio.returns is None) or (r is None): #20230222 (by MRC) an error raise when r is None
packet["cumulative_risk_metrics"]["excess_return"] = np.nan #20230222 (by MRC) an error raise when r is None
else:
packet["cumulative_risk_metrics"]["excess_return"] = ledger.portfolio.returns-r #20230210 (by MRC) for excess return in perf
packet["daily_perf"]["benchmark_return"] = s #20230211 (by MRC) for benchmark_return in perf
v = self._minute_annual_volatility[dt]
if np.isnan(v):
v = None
packet["cumulative_risk_metrics"]["benchmark_volatility"] = v
def end_of_session(self, packet, ledger, session, session_ix, data_portal):
r = self._daily_cumulative_returns[session_ix]
if np.isnan(r):
r = None
s = self._daily_returns[session_ix] #20230211 (by MRC) for benchmark_return in perf
if np.isnan(s):
s = None #20230211 (by MRC) for benchmark_return in perf
packet["cumulative_risk_metrics"]["benchmark_period_return"] = r
if (ledger.portfolio.returns is None) or (r is None): #20230222 (by MRC) an error raise when r is None
packet["cumulative_risk_metrics"]["excess_return"] = np.nan #20230222 (by MRC) an error raise when r is None
else:
packet["cumulative_risk_metrics"]["excess_return"] = ledger.portfolio.returns-r #20230210 (by MRC) for excess return in perf
packet["daily_perf"]["benchmark_return"] = s #20230211 (by MRC) for benchmark_return in perf
v = self._daily_annual_volatility[session_ix]
if np.isnan(v):
v = None
packet["cumulative_risk_metrics"]["benchmark_volatility"] = v
#--------------------------------------------------------------------
#20230209 (by MRC) for treasury return column in perf #start#
class TreasuryReturns(object):
"""Tracks daily and cumulative returns for the treasury.
Reference
----------
BenchmarkReturnsAndVolatility(object)
"""
def start_of_simulation(
self, ledger, emission_rate, trading_calendar, sessions, benchmark_source ,treasury_source
):
daily_returns_series = treasury_source.daily_returns(
sessions[0],
sessions[-1],
)
self._daily_returns = daily_returns_array = daily_returns_series.values
self._daily_cumulative_returns = np.cumprod(1 + daily_returns_array) - 1
if emission_rate == "daily":
self._minute_cumulative_returns = NamedExplodingObject(
"self._minute_cumulative_returns",
"does not exist in daily emission rate",
)
else:
open_ = trading_calendar.session_open(sessions[0])
close = trading_calendar.session_close(sessions[-1])
returns = treasury_source.get_range(open_, close)
self._minute_cumulative_returns = (1 + returns).cumprod() - 1 #skipna=True
def end_of_bar(self, packet, ledger, dt, session_ix, data_portal):
r = self._minute_cumulative_returns[dt]
if np.isnan(r):
r = None
        s = self._daily_returns[session_ix] #20230211 (by MRC)
if np.isnan(s):
s = None #20230211 (by MRC)
packet["cumulative_risk_metrics"]["treasury_period_return"] = r
#packet["cumulative_risk_metrics"]["excess_return(relative_to_treasury)"]=ledger.portfolio.returns-r #20230210 (by MRC)
packet["daily_perf"]["treasury_return"] = s #20230211 (by MRC)
'''
v = self._minute_annual_volatility[dt]
if np.isnan(v):
v = None
packet["cumulative_risk_metrics"]["benchmark_volatility"] = v
'''
def end_of_session(self, packet, ledger, session, session_ix, data_portal):
r = self._daily_cumulative_returns[session_ix]
if np.isnan(r):
r = None
s = self._daily_returns[session_ix] #20230211 (by MRC)
if np.isnan(s):
s = None #20230211 (by MRC)
packet["cumulative_risk_metrics"]["treasury_period_return"] = r
#packet["cumulative_risk_metrics"]["excess_return(relative_to_treasury)"]=ledger.portfolio.returns-r #20230210 (by MRC)
packet["daily_perf"]["treasury_return"] = s #20230211 (by MRC)
'''
v = self._daily_annual_volatility[session_ix]
if np.isnan(v):
v = None
packet["cumulative_risk_metrics"]["benchmark_volatility"] = v
'''
#20230209 (by MRC) for treasury return column in perf #end#
#--------------------------------------------------------------------
class PNL(object):
"""Tracks daily and cumulative PNL."""
def start_of_simulation(
self, ledger, emission_rate, trading_calendar, sessions, benchmark_source,treasury_source #20230209 (by MRC)
):
self._previous_pnl = 0.0
def start_of_session(self, ledger, session, data_portal):
self._previous_pnl = ledger.portfolio.pnl
def _end_of_period(self, field, packet, ledger):
pnl = ledger.portfolio.pnl
packet[field]["pnl"] = pnl - self._previous_pnl
packet["cumulative_perf"]["pnl"] = ledger.portfolio.pnl
def end_of_bar(self, packet, ledger, dt, session_ix, data_portal):
self._end_of_period("minute_perf", packet, ledger)
def end_of_session(self, packet, ledger, session, session_ix, data_portal):
self._end_of_period("daily_perf", packet, ledger)
class CashFlow(object):
"""Tracks daily and cumulative cash flow.
Notes
-----
For historical reasons, this field is named 'capital_used' in the packets.
"""
def start_of_simulation(
self, ledger, emission_rate, trading_calendar, sessions, benchmark_source,treasury_source #20230209 (by MRC)
):
self._previous_cash_flow = 0.0
def end_of_bar(self, packet, ledger, dt, session_ix, data_portal):
cash_flow = ledger.portfolio.cash_flow
packet["minute_perf"]["capital_used"] = cash_flow - self._previous_cash_flow
packet["cumulative_perf"]["capital_used"] = cash_flow
def end_of_session(self, packet, ledger, session, session_ix, data_portal):
cash_flow = ledger.portfolio.cash_flow
packet["daily_perf"]["capital_used"] = cash_flow - self._previous_cash_flow
packet["cumulative_perf"]["capital_used"] = cash_flow
self._previous_cash_flow = cash_flow
class Orders(object):
"""Tracks daily orders."""
def end_of_bar(self, packet, ledger, dt, session_ix, data_portal):
packet["minute_perf"]["orders"] = ledger.orders(dt)
def end_of_session(self, packet, ledger, dt, session_ix, data_portal):
packet["daily_perf"]["orders"] = ledger.orders()
class Transactions(object):
"""Tracks daily transactions."""
def end_of_bar(self, packet, ledger, dt, session_ix, data_portal):
packet["minute_perf"]["transactions"] = ledger.transactions(dt)
def end_of_session(self, packet, ledger, dt, session_ix, data_portal):
packet["daily_perf"]["transactions"] = ledger.transactions()
class Positions(object):
"""Tracks daily positions."""
def end_of_bar(self, packet, ledger, dt, session_ix, data_portal):
packet["minute_perf"]["positions"] = ledger.positions(dt)
def end_of_session(self, packet, ledger, dt, session_ix, data_portal):
packet["daily_perf"]["positions"] = ledger.positions()
class ReturnsStatistic(object):
"""A metric that reports an end of simulation scalar or time series
computed from the algorithm returns.
Parameters
----------
function : callable
The function to call on the daily returns.
field_name : str, optional
The name of the field. If not provided, it will be
``function.__name__``.
"""
def __init__(self, function, field_name=None):
if field_name is None:
field_name = function.__name__
self._function = function
self._field_name = field_name
#--------------------------------------------------------------------
#20230210 (by MRC) for risk free rate in sharpe and sortino #start#
def start_of_simulation(
self, ledger, emission_rate, trading_calendar, sessions, benchmark_source,treasury_source
):
daily_returns_series = treasury_source.daily_returns(
sessions[0],
sessions[-1],
)
self._daily_returns = daily_returns_array = daily_returns_series.values
    #20230210 (by MRC) for risk free rate in sharpe and sortino #end#
#--------------------------------------------------------------------
def end_of_bar(self, packet, ledger, dt, session_ix, data_portal):
#res = self._function(ledger.daily_returns_array[: session_ix + 1]) #20230210 (by MRC)
#--------------------------------------------------------------------
#20230210 (by MRC) for risk free rate in sharpe and sortino #start#
#pyfolio.timeseries.perf_func()
self._mean_risk_free = np.nanmean(self._daily_returns[: session_ix + 1])
if self._field_name in ['sharpe']:
res = self._function(returns=ledger.daily_returns_array[: session_ix + 1],
risk_free=0)
#risk_free=self._mean_risk_free)
elif self._field_name in ['sortino']:
res = self._function(returns=ledger.daily_returns_array[: session_ix + 1],
required_return=0)
#required_return=self._mean_risk_free)
elif self._field_name in ['max_drawdown','algo_volatility']:
res = self._function(ledger.daily_returns_array[: session_ix + 1])
else:
res = np.inf
        #20230210 (by MRC) for risk free rate in sharpe and sortino #end#
#--------------------------------------------------------------------
if not np.isfinite(res):
res = None
packet["cumulative_risk_metrics"][self._field_name] = res
end_of_session = end_of_bar
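# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original source): how a ReturnsStatistic
# is constructed and what it computes. The field name defaults to the wrapped
# empyrical function's name; the returns array below is made up.
def _demo_returns_statistic():
    """Hedged example of the ReturnsStatistic wrapper."""
    metric = ReturnsStatistic(ep.max_drawdown)  # field name -> "max_drawdown"
    returns = np.array([0.01, -0.03, 0.02, -0.01])
    # In the simulation, end_of_bar/end_of_session feed
    # ledger.daily_returns_array[: session_ix + 1]; the computation reduces to:
    return metric._field_name, ep.max_drawdown(returns)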
class AlphaBeta(object):
"""End of simulation alpha and beta to the benchmark."""
def start_of_simulation(
self, ledger, emission_rate, trading_calendar, sessions, benchmark_source,treasury_source #20230209 (by MRC) for treasury return column in perf
):
self._daily_returns_array = benchmark_source.daily_returns(
sessions[0],
sessions[-1],
).values
def end_of_bar(self, packet, ledger, dt, session_ix, data_portal):
risk = packet["cumulative_risk_metrics"]
alpha, beta = ep.alpha_beta_aligned(
ledger.daily_returns_array[: session_ix + 1],
self._daily_returns_array[: session_ix + 1],
)
if not np.isfinite(alpha):
alpha = None
if np.isnan(beta):
beta = None
risk["alpha"] = alpha
risk["beta"] = beta
end_of_session = end_of_bar
class MaxLeverage(object):
"""Tracks the maximum account leverage."""
def start_of_simulation(self, *args):
self._max_leverage = 0.0
def end_of_bar(self, packet, ledger, dt, session_ix, data_portal):
self._max_leverage = max(self._max_leverage, ledger.account.leverage)
packet["cumulative_risk_metrics"]["max_leverage"] = self._max_leverage
end_of_session = end_of_bar
class NumTradingDays(object):
"""Report the number of trading days."""
def start_of_simulation(self, *args):
self._num_trading_days = 0
def start_of_session(self, *args):
self._num_trading_days += 1
def end_of_bar(self, packet, ledger, dt, session_ix, data_portal):
packet["cumulative_risk_metrics"]["trading_days"] = self._num_trading_days
end_of_session = end_of_bar
class _ConstantCumulativeRiskMetric(object):
"""
    Note: currently unused.
A metric which does not change, ever.
Notes
-----
This exists to maintain the existing structure of the perf packets. We
should kill this as soon as possible.
"""
def __init__(self, field, value):
self._field = field
self._value = value
def end_of_bar(self, packet, *args):
packet["cumulative_risk_metrics"][self._field] = self._value
def end_of_session(self, packet, *args):
packet["cumulative_risk_metrics"][self._field] = self._value
class PeriodLabel(object):
"""Backwards compat, please kill me."""
def start_of_session(self, ledger, session, data_portal):
self._label = session.strftime("%Y-%m")
def end_of_bar(self, packet, *args):
packet["cumulative_risk_metrics"]["period_label"] = self._label
end_of_session = end_of_bar
class _ClassicRiskMetrics(object):
"""
    Note: appears to be unused.
Produces original risk packet.
"""
def start_of_simulation(
self, ledger, emission_rate, trading_calendar, sessions, benchmark_source
):
self._leverages = np.full_like(sessions, np.nan, dtype="float64")
def end_of_session(self, packet, ledger, dt, session_ix, data_portal):
self._leverages[session_ix] = ledger.account.leverage
@classmethod
def risk_metric_period(
cls,
start_session,
end_session,
algorithm_returns,
benchmark_returns,
algorithm_leverages,
):
"""
Creates a dictionary representing the state of the risk report.
Parameters
----------
start_session : pd.Timestamp
Start of period (inclusive) to produce metrics on
end_session : pd.Timestamp
End of period (inclusive) to produce metrics on
algorithm_returns : pd.Series(pd.Timestamp -> float)
Series of algorithm returns as of the end of each session
benchmark_returns : pd.Series(pd.Timestamp -> float)
Series of benchmark returns as of the end of each session
algorithm_leverages : pd.Series(pd.Timestamp -> float)
Series of algorithm leverages as of the end of each session
Returns
-------
risk_metric : dict[str, any]
Dict of metrics that with fields like:
{
'algorithm_period_return': 0.0,
'benchmark_period_return': 0.0,
'treasury_period_return': 0,
'excess_return': 0.0,
'alpha': 0.0,
'beta': 0.0,
'sharpe': 0.0,
'sortino': 0.0,
'period_label': '1970-01',
'trading_days': 0,
'algo_volatility': 0.0,
'benchmark_volatility': 0.0,
'max_drawdown': 0.0,
'max_leverage': 0.0,
}
"""
algorithm_returns = algorithm_returns[
(algorithm_returns.index >= start_session)
& (algorithm_returns.index <= end_session)
]
# Benchmark needs to be masked to the same dates as the algo returns
benchmark_returns = benchmark_returns[
(benchmark_returns.index >= start_session)
& (benchmark_returns.index <= algorithm_returns.index[-1])
]
benchmark_period_returns = ep.cum_returns(benchmark_returns).iloc[-1]
algorithm_period_returns = ep.cum_returns(algorithm_returns).iloc[-1]
alpha, beta = ep.alpha_beta_aligned(
algorithm_returns.values,
benchmark_returns.values,
)
benchmark_volatility = ep.annual_volatility(benchmark_returns)
sharpe = ep.sharpe_ratio(algorithm_returns)
# The consumer currently expects a 0.0 value for sharpe in period,
# this differs from cumulative which was np.nan.
# When factoring out the sharpe_ratio, the different return types
# were collapsed into `np.nan`.
# TODO: Either fix consumer to accept `np.nan` or make the
# `sharpe_ratio` return type configurable.
# In the meantime, convert nan values to 0.0
if pd.isnull(sharpe):
sharpe = 0.0
sortino = ep.sortino_ratio(
algorithm_returns.values,
_downside_risk=ep.downside_risk(algorithm_returns.values),
)
rval = {
"algorithm_period_return": algorithm_period_returns,
"benchmark_period_return": benchmark_period_returns,
"treasury_period_return": 0,
"excess_return": algorithm_period_returns,
"alpha": alpha,
"beta": beta,
"sharpe": sharpe,
"sortino": sortino,
"period_label": end_session.strftime("%Y-%m"),
"trading_days": len(benchmark_returns),
"algo_volatility": ep.annual_volatility(algorithm_returns),
"benchmark_volatility": benchmark_volatility,
"max_drawdown": ep.max_drawdown(algorithm_returns.values),
"max_leverage": algorithm_leverages.max(),
}
# check if a field in rval is nan or inf, and replace it with None
# except period_label which is always a str
return {
k: (None if k != "period_label" and not np.isfinite(v) else v)
for k, v in rval.items()
}
@classmethod
def _periods_in_range(
cls,
months,
end_session,
end_date,
algorithm_returns,
benchmark_returns,
algorithm_leverages,
months_per,
):
if months.size < months_per:
return
tzinfo = end_date.tzinfo
end_date = end_date.tz_convert(None)
for period_timestamp in months:
period = period_timestamp.tz_localize(None).to_period(
freq="%dM" % months_per
)
if period.end_time > end_date:
break
yield cls.risk_metric_period(
start_session=period.start_time.tz_localize(tzinfo),
end_session=min(period.end_time, end_session).tz_localize(tzinfo),
algorithm_returns=algorithm_returns,
benchmark_returns=benchmark_returns,
algorithm_leverages=algorithm_leverages,
)
@classmethod
def risk_report(cls, algorithm_returns, benchmark_returns, algorithm_leverages):
start_session = algorithm_returns.index[0]
end_session = algorithm_returns.index[-1]
end = end_session.replace(day=1) + relativedelta(months=1)
months = pd.date_range(
start=start_session,
# Ensure we have at least one month
end=end - datetime.timedelta(days=1),
freq="M",
tz="utc",
)
periods_in_range = partial(
cls._periods_in_range,
months=months,
end_session=end_session.tz_convert(None),
end_date=end,
algorithm_returns=algorithm_returns,
benchmark_returns=benchmark_returns,
algorithm_leverages=algorithm_leverages,
)
return {
"one_month": list(periods_in_range(months_per=1)),
"three_month": list(periods_in_range(months_per=3)),
"six_month": list(periods_in_range(months_per=6)),
"twelve_month": list(periods_in_range(months_per=12)),
}
def end_of_simulation(
self, packet, ledger, trading_calendar, sessions, data_portal, benchmark_source
):
packet.update(
self.risk_report(
algorithm_returns=ledger.daily_returns_series,
benchmark_returns=benchmark_source.daily_returns(
sessions[0],
sessions[-1],
),
algorithm_leverages=self._leverages,
)
) | zipline-tej | /zipline_tej-0.0.48-cp38-cp38-win_amd64.whl/zipline/finance/metrics/metric.py | metric.py |
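# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original source): computing a single
# classic risk packet directly from synthetic return series. The dates,
# returns and leverages below are made up purely to show the expected input
# shapes (tz-aware session index, aligned Series, leverage array).
def _demo_classic_risk_metric_period():
    """Hedged example of _ClassicRiskMetrics.risk_metric_period on toy data."""
    sessions = pd.date_range("2023-01-02", periods=5, freq="B", tz="utc")
    algo = pd.Series([0.01, -0.005, 0.002, 0.0, 0.003], index=sessions)
    bench = pd.Series([0.008, -0.004, 0.001, 0.001, 0.002], index=sessions)
    leverages = np.array([1.0, 1.1, 1.05, 1.0, 0.95])
    return _ClassicRiskMetrics.risk_metric_period(
        start_session=sessions[0],
        end_session=sessions[-1],
        algorithm_returns=algo,
        benchmark_returns=bench,
        algorithm_leverages=leverages,
    )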
from textwrap import dedent
from functools import partial
from numpy import (
bool_,
dtype,
float32,
float64,
int32,
int64,
int16,
uint16,
ndarray,
uint32,
uint8,
)
from toolz import merge_with
from zipline.errors import (
WindowLengthNotPositive,
WindowLengthTooLong,
)
from zipline.lib.labelarray import LabelArray
from zipline.utils.numpy_utils import (
datetime64ns_dtype,
float64_dtype,
int64_dtype,
uint8_dtype,
)
from zipline.utils.memoize import lazyval
# These class names are all the same because of our bootleg templating system.
from ._float64window import AdjustedArrayWindow as Float64Window
from ._int64window import AdjustedArrayWindow as Int64Window
from ._labelwindow import AdjustedArrayWindow as LabelWindow
from ._uint8window import AdjustedArrayWindow as UInt8Window
BOOL_DTYPES = frozenset(
map(dtype, [bool_, uint8]),
)
FLOAT_DTYPES = frozenset(
map(dtype, [float32, float64]),
)
INT_DTYPES = frozenset(
# NOTE: uint64 not supported because it can't be safely cast to int64.
map(dtype, [int16, uint16, int32, int64, uint32]),
)
DATETIME_DTYPES = frozenset(
map(dtype, ["datetime64[ns]", "datetime64[D]"]),
)
# We use object arrays for strings.
OBJECT_DTYPES = frozenset(map(dtype, ["O"]))
STRING_KINDS = frozenset(["S", "U"])
REPRESENTABLE_DTYPES = BOOL_DTYPES.union(
FLOAT_DTYPES,
INT_DTYPES,
DATETIME_DTYPES,
OBJECT_DTYPES,
)
def can_represent_dtype(dtype):
"""
    Can we build an AdjustedArray for a baseline of ``dtype``?
"""
return dtype in REPRESENTABLE_DTYPES or dtype.kind in STRING_KINDS
def is_categorical(dtype):
"""
Do we represent this dtype with LabelArrays rather than ndarrays?
"""
return dtype in OBJECT_DTYPES or dtype.kind in STRING_KINDS
CONCRETE_WINDOW_TYPES = {
float64_dtype: Float64Window,
int64_dtype: Int64Window,
uint8_dtype: UInt8Window,
}
def _normalize_array(data, missing_value):
"""
Coerce buffer data for an AdjustedArray into a standard scalar
    representation, returning the coerced array and a dict of arguments to pass
    to ``ndarray.view`` when providing a user-facing view of the underlying data.
- float* data is coerced to float64 with viewtype float64.
    - int16, uint16, int32, int64, and uint32 are converted to int64 with viewtype int64.
- datetime[*] data is coerced to int64 with a viewtype of datetime64[ns].
- bool_ data is coerced to uint8 with a viewtype of bool_.
Parameters
----------
data : np.ndarray
Returns
-------
    coerced, view_kwargs : (np.ndarray, dict)
The input ``data`` array coerced to the appropriate pipeline type.
This may return the original array or a view over the same data.
"""
if isinstance(data, LabelArray):
return data, {}
data_dtype = data.dtype
if data_dtype in BOOL_DTYPES:
return data.astype(uint8, copy=False), {"dtype": dtype(bool_)}
elif data_dtype in FLOAT_DTYPES:
return data.astype(float64, copy=False), {"dtype": dtype(float64)}
elif data_dtype in INT_DTYPES:
return data.astype(int64, copy=False), {"dtype": dtype(int64)}
elif is_categorical(data_dtype):
if not isinstance(missing_value, LabelArray.SUPPORTED_SCALAR_TYPES):
raise TypeError(
"Invalid missing_value for categorical array.\n"
"Expected None, bytes or unicode. Got %r." % missing_value,
)
return LabelArray(data, missing_value), {}
elif data_dtype.kind == "M":
try:
outarray = data.astype("datetime64[ns]", copy=False).view("int64")
return outarray, {"dtype": datetime64ns_dtype}
except OverflowError:
raise ValueError(
"AdjustedArray received a datetime array "
"not representable as datetime64[ns].\n"
"Min Date: %s\n"
"Max Date: %s\n" % (data.min(), data.max())
)
else:
raise TypeError(
"Don't know how to construct AdjustedArray "
"on data of type %s." % data_dtype
)
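# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original source): the coercions that
# _normalize_array performs for a few common input dtypes. numpy is imported
# locally because this module only imports individual numpy names.
def _demo_normalize_array():
    """Hedged example of _normalize_array on int, bool and datetime baselines."""
    import numpy as np

    # int32 baselines are widened to int64 and viewed as int64.
    ints, int_view = _normalize_array(np.array([[1, 2], [3, 4]], dtype="int32"), 0)
    # bool baselines are stored as uint8 but viewed as bool.
    bools, bool_view = _normalize_array(np.array([[True, False]]), False)
    # datetime baselines are stored as int64 and viewed as datetime64[ns].
    dts, dt_view = _normalize_array(
        np.array(["2023-01-02", "2023-01-03"], dtype="datetime64[D]"), None
    )
    return (ints.dtype, int_view), (bools.dtype, bool_view), (dts.dtype, dt_view)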
def _merge_simple(adjustment_lists, front_idx, back_idx):
"""
Merge lists of new and existing adjustments for a given index by appending
or prepending new adjustments to existing adjustments.
Notes
-----
This method is meant to be used with ``toolz.merge_with`` to merge
adjustment mappings. In case of a collision ``adjustment_lists`` contains
two lists, existing adjustments at index 0 and new adjustments at index 1.
When there are no collisions, ``adjustment_lists`` contains a single list.
Parameters
----------
adjustment_lists : list[list[Adjustment]]
List(s) of new and/or existing adjustments for a given index.
front_idx : int
Index of list in ``adjustment_lists`` that should be used as baseline
in case of a collision.
back_idx : int
Index of list in ``adjustment_lists`` that should extend baseline list
in case of a collision.
Returns
-------
adjustments : list[Adjustment]
List of merged adjustments for a given index.
"""
if len(adjustment_lists) == 1:
return list(adjustment_lists[0])
else:
return adjustment_lists[front_idx] + adjustment_lists[back_idx]
_merge_methods = {
"append": partial(_merge_simple, front_idx=0, back_idx=1),
"prepend": partial(_merge_simple, front_idx=1, back_idx=0),
}
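# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original source): how toolz.merge_with
# combines per-row adjustment mappings through _merge_simple. Plain strings
# stand in for Adjustment objects to keep the example small; on a collision
# (row 2 below) the front list comes first, then the back list.
def _demo_merge_adjustments():
    """Hedged example of the 'append' and 'prepend' merge methods."""
    existing = {2: ["old"], 5: ["kept"]}
    new = {2: ["new"], 7: ["fresh"]}
    appended = merge_with(_merge_methods["append"], existing, new)
    # -> {2: ['old', 'new'], 5: ['kept'], 7: ['fresh']}
    prepended = merge_with(_merge_methods["prepend"], existing, new)
    # -> {2: ['new', 'old'], 5: ['kept'], 7: ['fresh']}
    return appended, prepended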
class AdjustedArray(object):
"""
An array that can be iterated with a variable-length window, and which can
provide different views on data from different perspectives.
Parameters
----------
data : np.ndarray
The baseline data values. This array may be mutated by
``traverse(..., copy=False)`` calls.
adjustments : dict[int -> list[Adjustment]]
A dict mapping row indices to lists of adjustments to apply when we
reach that row.
missing_value : object
A value to use to fill missing data in yielded windows.
Should be a value coercible to `data.dtype`.
"""
__slots__ = (
"_data",
"_view_kwargs",
"adjustments",
"missing_value",
"_invalidated",
"__weakref__",
)
def __init__(self, data, adjustments, missing_value):
self._data, self._view_kwargs = _normalize_array(data, missing_value)
self.adjustments = adjustments
self.missing_value = missing_value
self._invalidated = False
def copy(self):
"""Copy an adjusted array, deep-copying the ``data`` array."""
if self._invalidated:
raise ValueError("cannot copy invalidated AdjustedArray")
return type(self)(
self.data.copy(order="F"),
self.adjustments,
self.missing_value,
)
def update_adjustments(self, adjustments, method):
"""
Merge ``adjustments`` with existing adjustments, handling index
collisions according to ``method``.
Parameters
----------
adjustments : dict[int -> list[Adjustment]]
The mapping of row indices to lists of adjustments that should be
appended to existing adjustments.
method : {'append', 'prepend'}
How to handle index collisions. If 'append', new adjustments will
be applied after previously-existing adjustments. If 'prepend', new
adjustments will be applied before previously-existing adjustments.
"""
try:
merge_func = _merge_methods[method]
except KeyError:
raise ValueError(
"Invalid merge method %s\n"
"Valid methods are: %s" % (method, ", ".join(_merge_methods))
)
self.adjustments = merge_with(
merge_func,
self.adjustments,
adjustments,
)
@property
def data(self):
"""
The data stored in this array.
"""
return self._data.view(**self._view_kwargs)
@lazyval
def dtype(self):
"""
The dtype of the data stored in this array.
"""
return self._view_kwargs.get("dtype") or self._data.dtype
@lazyval
def _iterator_type(self):
"""
The iterator produced when `traverse` is called on this Array.
"""
if isinstance(self._data, LabelArray):
return LabelWindow
return CONCRETE_WINDOW_TYPES[self._data.dtype]
def traverse(self, window_length, offset=0, perspective_offset=0, copy=True):
"""
        Produce an iterator of rolling windows over our data.
Each emitted window will have `window_length` rows.
Parameters
----------
window_length : int
The number of rows in each emitted window.
offset : int, optional
Number of rows to skip before the first window. Default is 0.
perspective_offset : int, optional
Number of rows past the end of the current window from which to
"view" the underlying data.
copy : bool, optional
Copy the underlying data. If ``copy=False``, the adjusted array
will be invalidated and cannot be traversed again.
"""
if self._invalidated:
raise ValueError("cannot traverse invalidated AdjustedArray")
data = self._data
if copy:
data = data.copy(order="F")
else:
self._invalidated = True
_check_window_params(data, window_length)
return self._iterator_type(
data,
self._view_kwargs,
self.adjustments,
offset,
window_length,
perspective_offset,
rounding_places=None,
)
def inspect(self):
"""
Return a string representation of the data stored in this array.
"""
return dedent(
"""\
Adjusted Array ({dtype}):
Data:
{data!r}
Adjustments:
{adjustments}
"""
).format(
dtype=self.dtype.name,
data=self.data,
adjustments=self.adjustments,
)
def update_labels(self, func):
"""
Map a function over baseline and adjustment values in place.
Note that the baseline data values must be a LabelArray.
"""
if not isinstance(self.data, LabelArray):
raise TypeError(
"update_labels only supported if data is of type LabelArray."
)
# Map the baseline values.
self._data = self._data.map(func)
# Map each of the adjustments.
for _, row_adjustments in self.adjustments.items():
for adjustment in row_adjustments:
adjustment.value = func(adjustment.value)
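# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original source): building an
# AdjustedArray over a small float baseline and iterating 2-row windows.
# The adjustments dict is left empty to keep the example minimal; entries such
# as a zipline.lib.adjustment.Float64Multiply keyed by row index could be
# supplied there instead.
def _demo_adjusted_array_windows():
    """Hedged example of AdjustedArray.traverse on a 4x2 float baseline."""
    import numpy as np

    baseline = np.arange(8, dtype=float).reshape(4, 2)
    arr = AdjustedArray(baseline, adjustments={}, missing_value=np.nan)
    windows = list(arr.traverse(window_length=2))
    # Three windows: rows 0-1, 1-2 and 2-3 of the baseline.
    return windows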
def ensure_adjusted_array(ndarray_or_adjusted_array, missing_value):
if isinstance(ndarray_or_adjusted_array, AdjustedArray):
return ndarray_or_adjusted_array
elif isinstance(ndarray_or_adjusted_array, ndarray):
return AdjustedArray(
ndarray_or_adjusted_array,
{},
missing_value,
)
else:
raise TypeError(
"Can't convert %s to AdjustedArray"
% type(ndarray_or_adjusted_array).__name__
)
def ensure_ndarray(ndarray_or_adjusted_array):
"""
Return the input as a numpy ndarray.
This is a no-op if the input is already an ndarray. If the input is an
adjusted_array, this extracts a read-only view of its internal data buffer.
Parameters
----------
ndarray_or_adjusted_array : numpy.ndarray | zipline.data.adjusted_array
Returns
-------
out : The input, converted to an ndarray.
"""
if isinstance(ndarray_or_adjusted_array, ndarray):
return ndarray_or_adjusted_array
elif isinstance(ndarray_or_adjusted_array, AdjustedArray):
return ndarray_or_adjusted_array.data
else:
raise TypeError(
"Can't convert %s to ndarray" % type(ndarray_or_adjusted_array).__name__
)
def _check_window_params(data, window_length):
"""
Check that a window of length `window_length` is well-defined on `data`.
Parameters
----------
data : np.ndarray[ndim=2]
The array of data to check.
window_length : int
Length of the desired window.
Returns
-------
None
Raises
------
WindowLengthNotPositive
If window_length < 1.
WindowLengthTooLong
If window_length is greater than the number of rows in `data`.
"""
if window_length < 1:
raise WindowLengthNotPositive(window_length=window_length)
if window_length > data.shape[0]:
raise WindowLengthTooLong(
nrows=data.shape[0],
window_length=window_length,
) | zipline-tej | /zipline_tej-0.0.48-cp38-cp38-win_amd64.whl/zipline/lib/adjusted_array.py | adjusted_array.py |
from functools import partial, total_ordering
from operator import eq, ne
import re
import numpy as np
from numpy import ndarray
import pandas as pd
from toolz import compose
from zipline.utils.compat import unicode
from zipline.utils.functional import instance
from zipline.utils.preprocess import preprocess
from zipline.utils.sentinel import sentinel
from zipline.utils.input_validation import (
coerce,
expect_kinds,
expect_types,
optional,
)
from zipline.utils.numpy_utils import (
bool_dtype,
unsigned_int_dtype_with_size_in_bytes,
is_object,
object_dtype,
)
from zipline.utils.pandas_utils import ignore_pandas_nan_categorical_warning
from ._factorize import (
factorize_strings,
factorize_strings_known_categories,
smallest_uint_that_can_hold,
)
def compare_arrays(left, right):
"Eq check with a short-circuit for identical objects."
return left is right or (
(left.shape == right.shape) and (left == right).all()
)
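# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original source): compare_arrays
# short-circuits on object identity (and on mismatched shapes) before falling
# back to an elementwise comparison.
def _demo_compare_arrays():
    """Hedged example of compare_arrays semantics."""
    left = np.arange(4)
    assert compare_arrays(left, left)              # identical object: True immediately
    assert compare_arrays(left, np.arange(4))      # equal contents: True
    assert not compare_arrays(left, np.arange(5))  # different shapes: False
    return True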
def _make_unsupported_method(name):
def method(*args, **kwargs):
raise NotImplementedError(
"Method %s is not supported on LabelArrays." % name
)
method.__name__ = name
method.__doc__ = "Unsupported LabelArray Method: %s" % name
return method
class MissingValueMismatch(ValueError):
"""
Error raised on attempt to perform operations between LabelArrays with
mismatched missing_values.
"""
def __init__(self, left, right):
super(MissingValueMismatch, self).__init__(
"LabelArray missing_values don't match:"
" left={}, right={}".format(left, right)
)
class CategoryMismatch(ValueError):
"""
Error raised on attempt to perform operations between LabelArrays with
mismatched category arrays.
"""
def __init__(self, left, right):
(mismatches,) = np.where(left != right)
assert len(mismatches), "Not actually a mismatch!"
super(CategoryMismatch, self).__init__(
"LabelArray categories don't match:\n"
"Mismatched Indices: {mismatches}\n"
"Left: {left}\n"
"Right: {right}".format(
mismatches=mismatches,
left=left[mismatches],
right=right[mismatches],
)
)
_NotPassed = sentinel("_NotPassed")
class LabelArray(ndarray):
"""
An ndarray subclass for working with arrays of strings.
Factorizes the input array into integers, but overloads equality on strings
to check against the factor label.
Parameters
----------
values : array-like
Array of values that can be passed to np.asarray with dtype=object.
missing_value : str
Scalar value to treat as 'missing' for operations on ``self``.
categories : list[str], optional
List of values to use as categories. If not supplied, categories will
be inferred as the unique set of entries in ``values``.
sort : bool, optional
Whether to sort categories. If sort is False and categories is
supplied, they are left in the order provided. If sort is False and
categories is None, categories will be constructed in a random order.
Attributes
----------
categories : ndarray[str]
An array containing the unique labels of self.
reverse_categories : dict[str -> int]
Reverse lookup table for ``categories``. Stores the index in
        ``categories`` at which each unique entry is found.
missing_value : str or None
A sentinel missing value with NaN semantics for comparisons.
Notes
-----
Consumers should be cautious when passing instances of LabelArray to numpy
functions. We attempt to disallow as many meaningless operations as
possible, but since a LabelArray is just an ndarray of ints with some
additional metadata, many numpy functions (for example, trigonometric) will
happily accept a LabelArray and treat its values as though they were
integers.
In a future change, we may be able to disallow more numerical operations by
creating a wrapper dtype which doesn't register an implementation for most
numpy ufuncs. Until that change is made, consumers of LabelArray should
assume that it is undefined behavior to pass a LabelArray to any numpy
ufunc that operates on semantically-numerical data.
See Also
--------
https://docs.scipy.org/doc/numpy-1.11.0/user/basics.subclassing.html
"""
SUPPORTED_SCALAR_TYPES = (bytes, unicode, type(None))
SUPPORTED_NON_NONE_SCALAR_TYPES = (bytes, unicode)
@preprocess(
values=coerce(list, partial(np.asarray, dtype=object)),
# Coerce ``list`` to ``list`` to make a copy. Code internally may call
# ``categories.insert(0, missing_value)`` which will mutate this list
# in place.
categories=coerce((list, np.ndarray, set), list),
)
@expect_types(
values=np.ndarray,
missing_value=SUPPORTED_SCALAR_TYPES,
categories=optional(list),
)
@expect_kinds(values=("O", "S", "U"))
def __new__(cls, values, missing_value, categories=None, sort=True):
# Numpy's fixed-width string types aren't very efficient. Working with
# object arrays is faster than bytes or unicode arrays in almost all
# cases.
if not is_object(values):
values = values.astype(object)
if values.flags.f_contiguous:
ravel_order = "F"
else:
ravel_order = "C"
if categories is None:
codes, categories, reverse_categories = factorize_strings(
values.ravel(ravel_order),
missing_value=missing_value,
sort=sort,
)
else:
(
codes,
categories,
reverse_categories,
) = factorize_strings_known_categories(
values.ravel(ravel_order),
categories=categories,
missing_value=missing_value,
sort=sort,
)
categories.setflags(write=False)
return cls.from_codes_and_metadata(
codes=codes.reshape(values.shape, order=ravel_order),
categories=categories,
reverse_categories=reverse_categories,
missing_value=missing_value,
)
@classmethod
def from_codes_and_metadata(
cls, codes, categories, reverse_categories, missing_value
):
"""
Rehydrate a LabelArray from the codes and metadata.
Parameters
----------
codes : np.ndarray[integral]
The codes for the label array.
categories : np.ndarray[object]
The unique string categories.
reverse_categories : dict[str, int]
The mapping from category to its code-index.
missing_value : any
The value used to represent missing data.
"""
ret = codes.view(type=cls, dtype=np.void)
ret._categories = categories
ret._reverse_categories = reverse_categories
ret._missing_value = missing_value
return ret
@classmethod
def from_categorical(cls, categorical, missing_value=None):
"""
Create a LabelArray from a pandas categorical.
Parameters
----------
categorical : pd.Categorical
The categorical object to convert.
missing_value : bytes, unicode, or None, optional
The missing value to use for this LabelArray.
Returns
-------
la : LabelArray
The LabelArray representation of this categorical.
"""
return LabelArray(
categorical,
missing_value,
categorical.categories,
)
@property
def categories(self):
# This is a property because it should be immutable.
return self._categories
@property
def reverse_categories(self):
# This is a property because it should be immutable.
return self._reverse_categories
@property
def missing_value(self):
# This is a property because it should be immutable.
return self._missing_value
@property
def missing_value_code(self):
return self.reverse_categories[self.missing_value]
def has_label(self, value):
return value in self.reverse_categories
def __array_finalize__(self, obj):
"""
Called by Numpy after array construction.
There are three cases where this can happen:
1. Someone tries to directly construct a new array by doing::
>>> ndarray.__new__(LabelArray, ...) # doctest: +SKIP
In this case, obj will be None. We treat this as an error case and
fail.
2. Someone (most likely our own __new__) does::
>>> other_array.view(type=LabelArray) # doctest: +SKIP
In this case, `self` will be the new LabelArray instance, and
           ``obj`` will be the array on which ``view`` is being called.
The caller of ``obj.view`` is responsible for setting category
metadata on ``self`` after we exit.
3. Someone creates a new LabelArray by slicing an existing one.
In this case, ``obj`` will be the original LabelArray. We're
responsible for copying over the parent array's category metadata.
"""
if obj is None:
raise TypeError(
"Direct construction of LabelArrays is not supported."
)
# See docstring for an explanation of when these will or will not be
# set.
self._categories = getattr(obj, "categories", None)
self._reverse_categories = getattr(obj, "reverse_categories", None)
self._missing_value = getattr(obj, "missing_value", None)
def as_int_array(self):
"""
Convert self into a regular ndarray of ints.
This is an O(1) operation. It does not copy the underlying data.
"""
return self.view(
type=ndarray,
dtype=unsigned_int_dtype_with_size_in_bytes(self.itemsize),
)
def as_string_array(self):
"""
Convert self back into an array of strings.
This is an O(N) operation.
"""
return self.categories[self.as_int_array()]
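    # Hedged sketch of the two views above (reusing ``arr`` from the
    # construction sketch): ``as_int_array`` is a zero-copy view of the
    # underlying codes, while ``as_string_array`` materializes an object array
    # of labels by indexing into ``categories``::
    #
    #     >>> codes = arr.as_int_array()        # O(1), unsigned-int view
    #     >>> labels = arr.as_string_array()    # O(N), object array of labels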
def as_categorical(self):
"""
Coerce self into a pandas categorical.
This is only defined on 1D arrays, since that's all pandas supports.
"""
if len(self.shape) > 1:
raise ValueError("Can't convert a 2D array to a categorical.")
        # 20230717 (by MRC): the block below fixes a ValueError raised when a
        # pipeline encounters text-type (string) columns.
cat = self.categories.copy()
cat[pd.isnull(cat)] = 'N/A'
return pd.Categorical.from_codes(
self.as_int_array(),
cat,
ordered=False)
'''
with ignore_pandas_nan_categorical_warning():
return pd.Categorical.from_codes(
self.as_int_array(),
# We need to make a copy because pandas >= 0.17 fails if this
# buffer isn't writeable.
self.categories.copy(),
ordered=False,
)
'''
def as_categorical_frame(self, index, columns, name=None):
"""
Coerce self into a pandas DataFrame of Categoricals.
"""
if len(self.shape) != 2:
raise ValueError(
"Can't convert a non-2D LabelArray into a DataFrame."
)
expected_shape = (len(index), len(columns))
if expected_shape != self.shape:
raise ValueError(
"Can't construct a DataFrame with provided indices:\n\n"
"LabelArray shape is {actual}, but index and columns imply "
"that shape should be {expected}.".format(
actual=self.shape,
expected=expected_shape,
)
)
return pd.Series(
index=pd.MultiIndex.from_product([index, columns]),
data=self.ravel().as_categorical(),
name=name,
).unstack()
def __setitem__(self, indexer, value):
self_categories = self.categories
if isinstance(value, self.SUPPORTED_SCALAR_TYPES):
value_code = self.reverse_categories.get(value, None)
if value_code is None:
raise ValueError("%r is not in LabelArray categories." % value)
self.as_int_array()[indexer] = value_code
elif isinstance(value, LabelArray):
value_categories = value.categories
if compare_arrays(self_categories, value_categories):
return super(LabelArray, self).__setitem__(indexer, value)
elif self.missing_value == value.missing_value and set(
value.categories
) <= set(self.categories):
rhs = LabelArray.from_codes_and_metadata(
*factorize_strings_known_categories(
value.as_string_array().ravel(),
list(self.categories),
self.missing_value,
False,
),
missing_value=self.missing_value,
).reshape(value.shape)
super(LabelArray, self).__setitem__(indexer, rhs)
else:
raise CategoryMismatch(self_categories, value_categories)
else:
raise NotImplementedError(
"Setting into a LabelArray with a value of "
"type {type} is not yet supported.".format(
type=type(value).__name__,
),
)
def set_scalar(self, indexer, value):
"""
Set scalar value into the array.
Parameters
----------
indexer : any
The indexer to set the value at.
value : str
The value to assign at the given locations.
Raises
------
ValueError
            Raised when ``value`` is not an element of this label array's
            categories.
"""
try:
value_code = self.reverse_categories[value]
except KeyError:
raise ValueError("%r is not in LabelArray categories." % value)
self.as_int_array()[indexer] = value_code
def __getitem__(self, indexer):
result = super(LabelArray, self).__getitem__(indexer)
if result.ndim:
# Result is still a LabelArray, so we can just return it.
return result
# Result is a scalar value, which will be an instance of np.void.
# Map it back to one of our category entries.
index = result.view(
unsigned_int_dtype_with_size_in_bytes(self.itemsize),
)
return self.categories[index]
def is_missing(self):
"""
Like isnan, but checks for locations where we store missing values.
"""
return (
self.as_int_array() == self.reverse_categories[self.missing_value]
)
def not_missing(self):
"""
Like ~isnan, but checks for locations where we store missing values.
"""
return (
self.as_int_array() != self.reverse_categories[self.missing_value]
)
def _equality_check(op):
"""
Shared code for __eq__ and __ne__, parameterized on the actual
comparison operator to use.
"""
def method(self, other):
if isinstance(other, LabelArray):
self_mv = self.missing_value
other_mv = other.missing_value
if self_mv != other_mv:
raise MissingValueMismatch(self_mv, other_mv)
self_categories = self.categories
other_categories = other.categories
if not compare_arrays(self_categories, other_categories):
raise CategoryMismatch(self_categories, other_categories)
return (
op(self.as_int_array(), other.as_int_array())
& self.not_missing()
& other.not_missing()
)
elif isinstance(other, ndarray):
# Compare to ndarrays as though we were an array of strings.
# This is fairly expensive, and should generally be avoided.
return op(self.as_string_array(), other) & self.not_missing()
elif isinstance(other, self.SUPPORTED_SCALAR_TYPES):
i = self._reverse_categories.get(other, -1)
return op(self.as_int_array(), i) & self.not_missing()
return op(super(LabelArray, self), other)
return method
__eq__ = _equality_check(eq)
__ne__ = _equality_check(ne)
del _equality_check
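    # Hedged example of the comparison semantics defined above: comparisons
    # against a scalar yield a boolean ndarray, and missing locations always
    # compare unequal -- even to the missing value itself::
    #
    #     >>> arr = LabelArray(
    #     ...     np.array(["a", "b", None], dtype=object), missing_value=None
    #     ... )
    #     >>> (arr == "a")
    #     array([ True, False, False])
    #     >>> (arr == None)          # missing entries never compare equal
    #     array([False, False, False])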
def view(self, dtype=_NotPassed, type=_NotPassed):
if type is _NotPassed and dtype not in (_NotPassed, self.dtype):
raise TypeError("Can't view LabelArray as another dtype.")
# The text signature on ndarray.view makes it look like the default
# values for dtype and type are `None`, but passing None explicitly has
# different semantics than not passing an arg at all, so we reconstruct
# the kwargs dict here to simulate the args not being passed at all.
kwargs = {}
if dtype is not _NotPassed:
kwargs["dtype"] = dtype
if type is not _NotPassed:
kwargs["type"] = type
return super(LabelArray, self).view(**kwargs)
def astype(self, dtype, order="K", casting="unsafe", subok=True, copy=True):
if dtype == self.dtype:
if not subok:
array = self.view(type=np.ndarray)
else:
array = self
if copy:
return array.copy()
return array
if dtype == object_dtype:
return self.as_string_array()
if dtype.kind == "S":
return self.as_string_array().astype(
dtype,
order=order,
casting=casting,
subok=subok,
copy=copy,
)
raise TypeError(
"%s can only be converted into object, string, or void,"
" got: %r"
% (
type(self).__name__,
dtype,
),
)
# In general, we support resizing, slicing, and reshaping methods, but not
# numeric methods.
SUPPORTED_NDARRAY_METHODS = frozenset(
[
"astype",
"base",
"compress",
"copy",
"data",
"diagonal",
"dtype",
"flat",
"flatten",
"item",
"itemset",
"itemsize",
"nbytes",
"ndim",
"ravel",
"repeat",
"reshape",
"resize",
"setflags",
"shape",
"size",
"squeeze",
"strides",
"swapaxes",
"take",
"trace",
"transpose",
"view",
]
)
PUBLIC_NDARRAY_METHODS = frozenset(
[s for s in dir(ndarray) if not s.startswith("_")]
)
# Generate failing wrappers for all unsupported methods.
locals().update(
{
method: _make_unsupported_method(method)
for method in PUBLIC_NDARRAY_METHODS - SUPPORTED_NDARRAY_METHODS
}
)
def __repr__(self):
repr_lines = repr(self.as_string_array()).splitlines()
repr_lines[0] = repr_lines[0].replace("array(", "LabelArray(", 1)
repr_lines[-1] = repr_lines[-1].rsplit(",", 1)[0] + ")"
# The extra spaces here account for the difference in length between
# 'array(' and 'LabelArray('.
return "\n ".join(repr_lines)
def empty_like(self, shape):
"""
Make an empty LabelArray with the same categories as ``self``, filled
with ``self.missing_value``.
"""
return type(self).from_codes_and_metadata(
codes=np.full(
shape,
self.reverse_categories[self.missing_value],
dtype=unsigned_int_dtype_with_size_in_bytes(self.itemsize),
),
categories=self.categories,
reverse_categories=self.reverse_categories,
missing_value=self.missing_value,
)
def map_predicate(self, f):
"""
Map a function from str -> bool element-wise over ``self``.
``f`` will be applied exactly once to each non-missing unique value in
``self``. Missing values will always return False.
"""
# Functions passed to this are of type str -> bool. Don't ever call
# them on None, which is the only non-str value we ever store in
# categories.
if self.missing_value is None:
def f_to_use(x):
return False if x is None else f(x)
else:
f_to_use = f
# Call f on each unique value in our categories.
results = np.vectorize(f_to_use, otypes=[bool_dtype])(self.categories)
# missing_value should produce False no matter what
results[self.reverse_categories[self.missing_value]] = False
        # unpack the results from each unique value into their corresponding
# locations in our indices.
return results[self.as_int_array()]
def map(self, f):
"""
Map a function from str -> str element-wise over ``self``.
``f`` will be applied exactly once to each non-missing unique value in
``self``. Missing values will always map to ``self.missing_value``.
"""
# f() should only return None if None is our missing value.
if self.missing_value is None:
allowed_outtypes = self.SUPPORTED_SCALAR_TYPES
else:
allowed_outtypes = self.SUPPORTED_NON_NONE_SCALAR_TYPES
def f_to_use(
x, missing_value=self.missing_value, otypes=allowed_outtypes
):
# Don't call f on the missing value; those locations don't exist
# semantically. We return _sortable_sentinel rather than None
# because the np.unique call below sorts the categories array,
# which raises an error on Python 3 because None and str aren't
# comparable.
if x == missing_value:
return _sortable_sentinel
ret = f(x)
if not isinstance(ret, otypes):
raise TypeError(
"LabelArray.map expected function {f} to return a string"
" or None, but got {type} instead.\n"
"Value was {value}.".format(
f=f.__name__,
type=type(ret).__name__,
value=ret,
)
)
if ret == missing_value:
return _sortable_sentinel
return ret
new_categories_with_duplicates = np.vectorize(
f_to_use, otypes=[object]
)(self.categories)
# If f() maps multiple inputs to the same output, then we can end up
# with the same code duplicated multiple times. Compress the categories
# by running them through np.unique, and then use the reverse lookup
# table to compress codes as well.
new_categories, bloated_inverse_index = np.unique(
new_categories_with_duplicates, return_inverse=True
)
if new_categories[0] is _sortable_sentinel:
            # f_to_use returns _sortable_sentinel for locations that should be
# missing values in our output. Since np.unique returns the uniques
# in sorted order, and since _sortable_sentinel sorts before any
# string, we only need to check the first array entry.
new_categories[0] = self.missing_value
# `reverse_index` will always be a 64 bit integer even if we can hold a
# smaller array.
reverse_index = bloated_inverse_index.astype(
smallest_uint_that_can_hold(len(new_categories))
)
new_codes = np.take(reverse_index, self.as_int_array())
return self.from_codes_and_metadata(
new_codes,
new_categories,
dict(zip(new_categories, range(len(new_categories)))),
missing_value=self.missing_value,
)
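    # Hedged sketch of ``map``: the function is applied once per unique
    # non-missing category, and missing entries stay missing in the result::
    #
    #     >>> arr.map(lambda s: s.upper())          # doctest: +SKIP
    #     LabelArray(['A', 'B', None, 'A'], ...)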
def startswith(self, prefix):
"""
Element-wise startswith.
Parameters
----------
prefix : str
Returns
-------
matches : np.ndarray[bool]
An array with the same shape as self indicating whether each
element of self started with ``prefix``.
"""
return self.map_predicate(lambda elem: elem.startswith(prefix))
def endswith(self, suffix):
"""
Elementwise endswith.
Parameters
----------
suffix : str
Returns
-------
matches : np.ndarray[bool]
An array with the same shape as self indicating whether each
            element of self ended with ``suffix``.
"""
return self.map_predicate(lambda elem: elem.endswith(suffix))
def has_substring(self, substring):
"""
Elementwise contains.
Parameters
----------
substring : str
Returns
-------
matches : np.ndarray[bool]
An array with the same shape as self indicating whether each
            element of self contains ``substring``.
"""
return self.map_predicate(lambda elem: substring in elem)
@preprocess(pattern=coerce(from_=(bytes, unicode), to=re.compile))
def matches(self, pattern):
"""
Elementwise regex match.
Parameters
----------
pattern : str or compiled regex
Returns
-------
matches : np.ndarray[bool]
An array with the same shape as self indicating whether each
element of self was matched by ``pattern``.
"""
return self.map_predicate(compose(bool, pattern.match))
# These types all implement an O(N) __contains__, so pre-emptively
# coerce to `set`.
@preprocess(container=coerce((list, tuple, np.ndarray), set))
def element_of(self, container):
"""
        Check if each element of self is an element of ``container``.
Parameters
----------
container : object
An object implementing a __contains__ to call on each element of
``self``.
Returns
-------
is_contained : np.ndarray[bool]
An array with the same shape as self indicating whether each
element of self was an element of ``container``.
"""
return self.map_predicate(container.__contains__)
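    # Hedged sketch of the predicate helpers above, all of which reduce to
    # ``map_predicate`` and therefore return False for missing entries
    # (reusing ``arr`` from the construction sketch)::
    #
    #     >>> arr.startswith("a")                   # doctest: +SKIP
    #     array([ True, False, False,  True])
    #     >>> arr.element_of({"a", "c"})            # doctest: +SKIP
    #     array([ True, False, False,  True])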
@instance # This makes _sortable_sentinel a singleton instance.
@total_ordering
class _sortable_sentinel(object):
"""Dummy object that sorts before any other python object."""
def __eq__(self, other):
return self is other
def __lt__(self, other):
return True
@expect_types(trues=LabelArray, falses=LabelArray)
def labelarray_where(cond, trues, falses):
"""LabelArray-aware implementation of np.where."""
if trues.missing_value != falses.missing_value:
raise ValueError(
"Can't compute where on arrays with different missing values."
)
strs = np.where(cond, trues.as_string_array(), falses.as_string_array())
return LabelArray(strs, missing_value=trues.missing_value) | zipline-tej | /zipline_tej-0.0.48-cp38-cp38-win_amd64.whl/zipline/lib/labelarray.py | labelarray.py |
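# Hedged usage sketch for ``labelarray_where`` (hypothetical inputs): both
# branches must share a missing value, and the result is a fresh LabelArray::
#
#     >>> cond = np.array([True, False, True])
#     >>> ups = LabelArray(np.array(["A", "B", "C"], dtype=object), None)
#     >>> downs = LabelArray(np.array(["a", "b", "c"], dtype=object), None)
#     >>> labelarray_where(cond, ups, downs)        # doctest: +SKIP
#     LabelArray(['A', 'b', 'C'], ...)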
import os
import subprocess
import networkx as nx
def debug_mro_failure(name, bases):
graph = build_linearization_graph(name, bases)
cycles = sorted(nx.cycles.simple_cycles(graph), key=len)
cycle = cycles[0]
if os.environ.get("DRAW_MRO_FAILURES"):
output_file = name + ".dot"
else:
output_file = None
# Return a nicely formatted error describing the cycle.
lines = ["Cycle found when trying to compute MRO for {}:\n".format(name)]
for source, dest in list(zip(cycle, cycle[1:])) + [(cycle[-1], cycle[0])]:
label = verbosify_label(graph.get_edge_data(source, dest)["label"])
lines.append("{} comes before {}: cause={}".format(source, dest, label))
    # Either render a GraphViz graph and tell the user where it went, or tell
    # people how to enable that feature.
lines.append("")
if output_file is None:
lines.append(
"Set the DRAW_MRO_FAILURES environment variable to"
" render a GraphViz graph of this cycle."
)
else:
try:
nx.write_dot(graph.subgraph(cycle), output_file)
subprocess.check_call(["dot", "-T", "svg", "-O", output_file])
lines.append("GraphViz rendering written to " + output_file + ".svg")
except Exception as e:
lines.append("Failed to write GraphViz graph. Error was {}".format(e))
return "\n".join(lines)
def build_linearization_graph(child_name, bases):
g = nx.DiGraph()
_build_linearization_graph(g, type(child_name, (object,), {}), bases)
return g
def _build_linearization_graph(g, child, bases):
add_implicit_edges(g, child, bases)
add_direct_edges(g, child, bases)
def add_direct_edges(g, child, bases):
    # Enforce that bases are ordered in the order that they appear in child's
# class declaration.
g.add_path([b.__name__ for b in bases], label=child.__name__ + "(O)")
# Add direct edges.
for base in bases:
g.add_edge(child.__name__, base.__name__, label=child.__name__ + "(D)")
add_direct_edges(g, base, base.__bases__)
def add_implicit_edges(g, child, bases):
# Enforce that bases' previous linearizations are preserved.
for base in bases:
g.add_path(
[b.__name__ for b in base.mro()],
label=base.__name__ + "(L)",
)
VERBOSE_LABELS = {
"(D)": "(Direct Subclass)",
"(O)": "(Parent Class Order)",
"(L)": "(Linearization Order)",
}
def verbosify_label(label):
prefix = label[:-3]
suffix = label[-3:]
return " ".join([prefix, VERBOSE_LABELS[suffix]]) | zipline-tej | /zipline_tej-0.0.48-cp38-cp38-win_amd64.whl/zipline/testing/debug.py | debug.py |
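# Hedged example of when ``debug_mro_failure`` is useful (hypothetical
# classes): a base-order conflict like the one below makes Python raise
# "Cannot create a consistent method resolution order (MRO)", and a metaclass
# (e.g. DebugMROMeta in zipline.testing.fixtures) can catch that TypeError and
# call ``debug_mro_failure("C", (A, B))`` to get a readable description of the
# offending cycle::
#
#     >>> class A(object): pass
#     >>> class B(A): pass
#     >>> class C(A, B): pass                       # doctest: +SKIP
#     TypeError: Cannot create a consistent method resolution order (MRO) ...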
from collections import OrderedDict
from contextlib import contextmanager
import datetime
from functools import partial
import numpy as np
import pandas as pd
from pandas.testing import (
assert_frame_equal,
assert_series_equal,
assert_index_equal,
)
from itertools import zip_longest
from toolz import keyfilter
import toolz.curried.operator as op
from zipline.assets import Asset
from zipline.dispatch import dispatch
from zipline.lib.adjustment import Adjustment
from zipline.lib.labelarray import LabelArray
from zipline.testing.core import ensure_doctest
from zipline.utils.compat import getargspec, mappingproxy
from zipline.utils.formatting import s
from zipline.utils.functional import dzip_exact, instance
from zipline.utils.math_utils import tolerant_equals
from zipline.utils.numpy_utils import (
assert_array_compare,
compare_datetime_arrays,
)
@instance
@ensure_doctest
class wildcard(object):
"""An object that compares equal to any other object.
This is useful when using :func:`~zipline.testing.predicates.assert_equal`
with a large recursive structure and some fields to be ignored.
Examples
--------
>>> wildcard == 5
True
>>> wildcard == 'ayy'
True
# reflected
>>> 5 == wildcard
True
>>> 'ayy' == wildcard
True
"""
@staticmethod
def __eq__(other):
return True
@staticmethod
def __ne__(other):
return False
def __repr__(self):
return "<%s>" % type(self).__name__
class instance_of(object):
"""An object that compares equal to any instance of a given type or types.
Parameters
----------
types : type or tuple[type]
The types to compare equal to.
exact : bool, optional
        If True, only compare equal to exact instances of ``types``, not to
        instances of subclasses.
"""
def __init__(self, types, exact=False):
if not isinstance(types, tuple):
types = (types,)
for type_ in types:
if not isinstance(type_, type):
raise TypeError("types must be a type or tuple of types")
self.types = types
self.exact = exact
def __eq__(self, other):
if self.exact:
return type(other) in self.types
return isinstance(other, self.types)
def __ne__(self, other):
return not self == other
def __repr__(self):
typenames = tuple(t.__name__ for t in self.types)
return "%s(%s%s)" % (
type(self).__name__,
(
typenames[0]
if len(typenames) == 1
else "(%s)" % ", ".join(typenames)
),
", exact=True" if self.exact else "",
)
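# Hedged examples of ``instance_of`` (outputs follow from the semantics
# defined above)::
#
#     >>> instance_of(int) == 5
#     True
#     >>> instance_of(int, exact=True) == True      # bool is a subclass of int
#     False
#     >>> "ayy" == instance_of((str, bytes))        # also works reflected
#     True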
def keywords(func):
"""Get the argument names of a function
>>> def f(x, y=2):
... pass
>>> keywords(f)
['x', 'y']
Notes
-----
Taken from odo.utils
"""
if isinstance(func, type):
return keywords(func.__init__)
elif isinstance(func, partial):
return keywords(func.func)
return getargspec(func).args
def filter_kwargs(f, kwargs):
"""Return a dict of valid kwargs for `f` from a subset of `kwargs`
Examples
--------
>>> def f(a, b=1, c=2):
... return a + b + c
...
>>> raw_kwargs = dict(a=1, b=3, d=4)
>>> f(**raw_kwargs)
Traceback (most recent call last):
...
TypeError: f() got an unexpected keyword argument 'd'
>>> kwargs = filter_kwargs(f, raw_kwargs)
>>> f(**kwargs)
6
Notes
-----
Taken from odo.utils
"""
return keyfilter(op.contains(keywords(f)), kwargs)
def _fmt_path(path):
"""Format the path for final display.
Parameters
----------
path : iterable of str
The path to the values that are not equal.
Returns
-------
fmtd : str
The formatted path to put into the error message.
"""
if not path:
return ""
return "path: _" + "".join(path)
def _fmt_msg(msg):
"""Format the message for final display.
Parameters
----------
msg : str
The message to show to the user to provide additional context.
    Returns
-------
fmtd : str
The formatted message to put into the error message.
"""
if not msg:
return ""
return msg + "\n"
def make_assert_equal_assertion_error(assertion_message, path, msg):
"""Create an assertion error formatted for use in ``assert_equal``.
Parameters
----------
assertion_message : str
The concrete reason for the failure.
path : tuple[str]
The path leading up to the failure.
msg : str
The user supplied message.
Returns
-------
exception_instance : AssertionError
The new exception instance.
Notes
-----
This doesn't raise the exception, it only returns it.
"""
return AssertionError(
"%s%s\n%s"
% (
_fmt_msg(msg),
assertion_message,
_fmt_path(path),
),
)
@dispatch(object, object)
def assert_equal(result, expected, path=(), msg="", **kwargs):
"""Assert that two objects are equal using the ``==`` operator.
Parameters
----------
result : object
The result that came from the function under test.
expected : object
The expected result.
Raises
------
AssertionError
Raised when ``result`` is not equal to ``expected``.
"""
if result != expected:
raise make_assert_equal_assertion_error(
"%s != %s" % (result, expected),
path,
msg,
)
@assert_equal.register(float, float)
def assert_float_equal(
result,
expected,
path=(),
msg="",
float_rtol=10e-7,
float_atol=10e-7,
float_equal_nan=True,
**kwargs,
):
assert tolerant_equals(
result,
expected,
rtol=float_rtol,
atol=float_atol,
equal_nan=float_equal_nan,
), "%s%s != %s with rtol=%s and atol=%s%s\n%s" % (
_fmt_msg(msg),
result,
expected,
float_rtol,
float_atol,
(" (with nan != nan)" if not float_equal_nan else ""),
_fmt_path(path),
)
def _check_sets(result, expected, msg, path, type_):
"""Compare two sets. This is used to check dictionary keys and sets.
Parameters
----------
result : set
expected : set
msg : str
path : tuple
    type_ : str
The type of an element. For dict we use ``'key'`` and for set we use
``'element'``.
"""
if result != expected:
if result > expected:
diff = result - expected
msg = "extra %s in result: %r" % (s(type_, diff), diff)
elif result < expected:
diff = expected - result
msg = "result is missing %s: %r" % (s(type_, diff), diff)
else:
in_result = result - expected
in_expected = expected - result
msg = "%s only in result: %s\n%s only in expected: %s" % (
s(type_, in_result),
in_result,
s(type_, in_expected),
in_expected,
)
raise AssertionError(
"%ss do not match\n%s%s"
% (
type_,
_fmt_msg(msg),
_fmt_path(path),
),
)
@assert_equal.register(dict, dict)
def assert_dict_equal(result, expected, path=(), msg="", **kwargs):
_check_sets(
result.keys(),
expected.keys(),
msg,
path + (".keys()",),
"key",
)
failures = []
for k, (resultv, expectedv) in dzip_exact(result, expected).items():
try:
assert_equal(
resultv,
expectedv,
path=path + ("[%r]" % (k,),),
msg=msg,
**kwargs,
)
except AssertionError as e:
failures.append(str(e))
if failures:
raise AssertionError("\n===\n".join(failures))
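# Hedged sketch of how the recursive dispatch above reports failures: the
# accumulated ``path`` pinpoints the differing element inside the structure::
#
#     >>> assert_equal({"a": 1, "b": [1, 2]}, {"a": 1, "b": [1, 3]})
#     Traceback (most recent call last):
#         ...
#     AssertionError: 2 != 3
#     path: _['b'][1]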
@assert_equal.register(mappingproxy, mappingproxy)
def asssert_mappingproxy_equal(result, expected, path=(), msg="", **kwargs):
# mappingproxies compare like dict but shouldn't compare to dicts
_check_sets(
set(result),
set(expected),
msg,
path + (".keys()",),
"key",
)
failures = []
for k, resultv in result.items():
# we know this exists because of the _check_sets call above
expectedv = expected[k]
try:
assert_equal(
resultv,
expectedv,
path=path + ("[%r]" % (k,),),
msg=msg,
**kwargs,
)
except AssertionError as e:
failures.append(str(e))
if failures:
raise AssertionError("\n".join(failures))
@assert_equal.register(OrderedDict, OrderedDict)
def assert_ordereddict_equal(result, expected, path=(), **kwargs):
assert_sequence_equal(
result.items(), expected.items(), path=path + (".items()",), **kwargs
)
@assert_equal.register(list, list)
@assert_equal.register(tuple, tuple)
def assert_sequence_equal(result, expected, path=(), msg="", **kwargs):
result_len = len(result)
expected_len = len(expected)
assert (
result_len == expected_len
), "%s%s lengths do not match: %d != %d\n%s" % (
_fmt_msg(msg),
type(result).__name__,
result_len,
expected_len,
_fmt_path(path),
)
for n, (resultv, expectedv) in enumerate(zip(result, expected)):
assert_equal(
resultv, expectedv, path=path + ("[%d]" % n,), msg=msg, **kwargs
)
@assert_equal.register(set, set)
def assert_set_equal(result, expected, path=(), msg="", **kwargs):
_check_sets(
result,
expected,
msg,
path,
"element",
)
@assert_equal.register(np.ndarray, np.ndarray)
def assert_array_equal(
result,
expected,
path=(),
msg="",
array_verbose=True,
array_decimal=None,
**kwargs,
):
result_dtype = result.dtype
expected_dtype = expected.dtype
if result_dtype.kind in "mM" and expected_dtype.kind in "mM":
assert result_dtype == expected_dtype, (
"\nType mismatch:\n\n"
"result dtype: %s\n"
"expected dtype: %s\n%s"
% (result_dtype, expected_dtype, _fmt_path(path))
)
f = partial(
assert_array_compare,
compare_datetime_arrays,
header="Arrays are not equal",
)
elif array_decimal is not None and expected_dtype.kind not in {"O", "S"}:
f = partial(
np.testing.assert_array_almost_equal,
decimal=array_decimal,
)
else:
f = np.testing.assert_array_equal
try:
f(
result,
expected,
verbose=array_verbose,
err_msg=msg,
)
except AssertionError as e:
raise AssertionError("\n".join((str(e), _fmt_path(path))))
@assert_equal.register(LabelArray, LabelArray)
def assert_labelarray_equal(result, expected, path=(), **kwargs):
assert_equal(
result.categories,
expected.categories,
path=path + (".categories",),
**kwargs,
)
assert_equal(
result.as_int_array(),
expected.as_int_array(),
path=path + (".as_int_array()",),
**kwargs,
)
def _register_assert_equal_wrapper(type_, assert_eq):
"""Register a new check for an ndframe object.
Parameters
----------
type_ : type
The class to register an ``assert_equal`` dispatch for.
assert_eq : callable[type_, type_]
The function which checks that if the two ndframes are equal.
Returns
-------
assert_ndframe_equal : callable[type_, type_]
The wrapped function registered with ``assert_equal``.
"""
@assert_equal.register(type_, type_)
def assert_ndframe_equal(result, expected, path=(), msg="", **kwargs):
try:
assert_eq(result, expected, **filter_kwargs(assert_eq, kwargs))
except AssertionError as e:
raise AssertionError(
_fmt_msg(msg) + "\n".join((str(e), _fmt_path(path))),
)
return assert_ndframe_equal
assert_frame_equal = _register_assert_equal_wrapper(
pd.DataFrame,
assert_frame_equal,
)
assert_series_equal = _register_assert_equal_wrapper(
pd.Series,
assert_series_equal,
)
assert_index_equal = _register_assert_equal_wrapper(
pd.Index,
assert_index_equal,
)
@assert_equal.register(pd.Categorical, pd.Categorical)
def assert_categorical_equal(result, expected, path=(), msg="", **kwargs):
assert_equal(
result.categories,
expected.categories,
path=path + (".categories",),
msg=msg,
**kwargs,
)
assert_equal(
result.codes, expected.codes, path=path + (".codes",), msg=msg, **kwargs
)
@assert_equal.register(Adjustment, Adjustment)
def assert_adjustment_equal(result, expected, path=(), **kwargs):
for attr in ("first_row", "last_row", "first_col", "last_col", "value"):
assert_equal(
getattr(result, attr),
getattr(expected, attr),
path=path + ("." + attr,),
**kwargs,
)
@assert_equal.register(
(datetime.datetime, np.datetime64),
(datetime.datetime, np.datetime64),
)
def assert_timestamp_and_datetime_equal(
result,
expected,
path=(),
msg="",
allow_datetime_coercions=False,
compare_nat_equal=True,
**kwargs,
):
"""
Branch for comparing python datetime (which includes pandas Timestamp) and
np.datetime64 as equal.
    Raises an AssertionError if the types differ, unless
    ``allow_datetime_coercions`` is passed as True.
"""
assert allow_datetime_coercions or type(result) == type(
expected
), "%sdatetime types (%s, %s) don't match and " "allow_datetime_coercions was not set.\n%s" % (
_fmt_msg(msg),
type(result),
type(expected),
_fmt_path(path),
)
if isinstance(result, pd.Timestamp) and isinstance(expected, pd.Timestamp):
assert_equal(
result.tz, expected.tz, path=path + (".tz",), msg=msg, **kwargs
)
result = pd.Timestamp(result)
expected = pd.Timestamp(expected)
if compare_nat_equal and pd.isnull(result) and pd.isnull(expected):
return
assert_equal.dispatch(object, object)(
result, expected, path=path, msg=msg, **kwargs
)
@assert_equal.register(slice, slice)
def assert_slice_equal(result, expected, path=(), msg=""):
    diff_start = (
        ("starts are not equal: %s != %s" % (result.start, expected.start))
        if result.start != expected.start
        else ""
    )
    diff_stop = (
        ("stops are not equal: %s != %s" % (result.stop, expected.stop))
        if result.stop != expected.stop
        else ""
    )
    diff_step = (
        ("steps are not equal: %s != %s" % (result.step, expected.step))
        if result.step != expected.step
        else ""
    )
diffs = diff_start, diff_stop, diff_step
assert not any(diffs), "%s%s\n%s" % (
_fmt_msg(msg),
"\n".join(filter(None, diffs)),
_fmt_path(path),
)
@assert_equal.register(Asset, Asset)
def assert_asset_equal(result, expected, path=(), msg="", **kwargs):
if type(result) is not type(expected):
        raise AssertionError(
            "%sresult type differs from expected type: %s is not %s\n%s"
            % (
                _fmt_msg(msg),
                type(result).__name__,
                type(expected).__name__,
                _fmt_path(path),
            ),
        )
assert_equal(
result.to_dict(),
expected.to_dict(),
path=path + (".to_dict()",),
msg=msg,
**kwargs,
)
def assert_isidentical(result, expected, msg=""):
assert result.isidentical(expected), "%s%s is not identical to %s" % (
_fmt_msg(msg),
result,
expected,
)
def assert_messages_equal(result, expected, msg=""):
"""Assertion helper for comparing very long strings (e.g. error messages)."""
# The arg here is "keepends" which keeps trailing newlines (which
# matters for checking trailing whitespace). You can't pass keepends by
# name :(.
left_lines = result.splitlines(True)
right_lines = expected.splitlines(True)
iter_lines = enumerate(zip_longest(left_lines, right_lines))
for line, (ll, rl) in iter_lines:
if ll != rl:
col = index_of_first_difference(ll, rl)
raise AssertionError(
"{msg}Messages differ on line {line}, col {col}:"
"\n{ll!r}\n!=\n{rl!r}".format(
msg=_fmt_msg(msg), line=line, col=col, ll=ll, rl=rl
)
)
def index_of_first_difference(left, right):
"""Get the index of the first difference between two strings."""
difflocs = (
i for (i, (lc, rc)) in enumerate(zip_longest(left, right)) if lc != rc
)
try:
return next(difflocs)
except StopIteration:
raise ValueError("Left was equal to right!") | zipline-tej | /zipline_tej-0.0.48-cp38-cp38-win_amd64.whl/zipline/testing/predicates.py | predicates.py |
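# Hedged examples for the message helpers above::
#
#     >>> index_of_first_difference("abcdef", "abXdef")
#     2
#     >>> index_of_first_difference("same", "same")
#     Traceback (most recent call last):
#         ...
#     ValueError: Left was equal to right!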
import os
from pathlib import Path
import sqlite3
from unittest import TestCase
import warnings
from logbook import NullHandler, Logger
import numpy as np
import pandas as pd
from pandas.errors import PerformanceWarning
import responses
from toolz import flip, groupby, merge
from zipline.utils.calendar_utils import (
get_calendar,
register_calendar_alias,
)
import h5py
import zipline
from zipline.algorithm import TradingAlgorithm
from zipline.assets import Equity, Future
from zipline.assets.continuous_futures import CHAIN_PREDICATES
from zipline.data.benchmarks import get_benchmark_returns_from_file
from zipline.data.fx import DEFAULT_FX_RATE
from zipline.finance.asset_restrictions import NoRestrictions
from zipline.utils.memoize import classlazyval
from zipline.pipeline import SimplePipelineEngine
from zipline.pipeline.data import USEquityPricing
from zipline.pipeline.data.testing import TestingDataSet
from zipline.pipeline.domain import GENERIC, US_EQUITIES
from zipline.pipeline.loaders import USEquityPricingLoader
from zipline.pipeline.loaders.testing import make_seeded_random_loader
from zipline.protocol import BarData
from zipline.utils.compat import ExitStack
from zipline.utils.paths import ensure_directory, ensure_directory_containing
from .core import (
create_daily_bar_data,
create_minute_bar_data,
make_simple_equity_info,
tmp_asset_finder,
tmp_dir,
write_hdf5_daily_bars,
)
from .debug import debug_mro_failure
from ..data.adjustments import (
SQLiteAdjustmentReader,
SQLiteAdjustmentWriter,
)
from ..data.bcolz_daily_bars import (
BcolzDailyBarReader,
BcolzDailyBarWriter,
)
from ..data.data_portal import (
DataPortal,
DEFAULT_MINUTE_HISTORY_PREFETCH,
DEFAULT_DAILY_HISTORY_PREFETCH,
)
from ..data.fx import (
InMemoryFXRateReader,
HDF5FXRateReader,
HDF5FXRateWriter,
)
from ..data.hdf5_daily_bars import (
HDF5DailyBarReader,
HDF5DailyBarWriter,
MultiCountryDailyBarReader,
)
from ..data.minute_bars import (
BcolzMinuteBarReader,
BcolzMinuteBarWriter,
US_EQUITIES_MINUTES_PER_DAY,
FUTURES_MINUTES_PER_DAY,
)
from ..data.resample import (
minute_frame_to_session_frame,
MinuteResampleSessionBarReader,
)
from ..finance.trading import SimulationParameters
from ..utils.classproperty import classproperty
from ..utils.final import FinalMeta, final
from ..utils.memoize import remember_last
from ..utils.date_utils import make_utc_aware
zipline_dir = Path(zipline.__file__).parent
class DebugMROMeta(FinalMeta):
"""Metaclass that helps debug MRO resolution errors."""
def __new__(mcls, name, bases, clsdict):
try:
return super(DebugMROMeta, mcls).__new__(mcls, name, bases, clsdict)
except TypeError as e:
if "(MRO)" in str(e):
msg = debug_mro_failure(name, bases)
raise TypeError(msg)
else:
raise
class ZiplineTestCase(TestCase, metaclass=DebugMROMeta):
"""
Shared extensions to core unittest.TestCase.
Overrides the default unittest setUp/tearDown functions with versions that
use ExitStack to correctly clean up resources, even in the face of
exceptions that occur during setUp/setUpClass.
Subclasses **should not override setUp or setUpClass**!
Instead, they should implement `init_instance_fixtures` for per-test-method
resources, and `init_class_fixtures` for per-class resources.
Resources that need to be cleaned up should be registered using
either `enter_{class,instance}_context` or `add_{class,instance}_callback}.
"""
_in_setup = False
@final
@classmethod
def setUpClass(cls):
# Hold a set of all the "static" attributes on the class. These are
# things that are not populated after the class was created like
# methods or other class level attributes.
cls._static_class_attributes = set(vars(cls))
cls._class_teardown_stack = ExitStack()
try:
cls._base_init_fixtures_was_called = False
cls.init_class_fixtures()
assert cls._base_init_fixtures_was_called, (
"ZiplineTestCase.init_class_fixtures() was not called.\n"
"This probably means that you overrode init_class_fixtures"
" without calling super()."
)
except BaseException: # Clean up even on KeyboardInterrupt
cls.tearDownClass()
raise
@classmethod
def init_class_fixtures(cls):
"""
Override and implement this classmethod to register resources that
should be created and/or torn down on a per-class basis.
Subclass implementations of this should always invoke this with super()
to ensure that fixture mixins work properly.
"""
if cls._in_setup:
raise ValueError(
"Called init_class_fixtures from init_instance_fixtures."
" Did you write super(..., self).init_class_fixtures() instead"
" of super(..., self).init_instance_fixtures()?",
)
cls._base_init_fixtures_was_called = True
@final
@classmethod
def tearDownClass(cls):
# We need to get this before it's deleted by the loop.
stack = cls._class_teardown_stack
for name in set(vars(cls)) - cls._static_class_attributes:
# Remove all of the attributes that were added after the class was
# constructed. This cleans up any large test data that is class
# scoped while still allowing subclasses to access class level
# attributes.
delattr(cls, name)
stack.close()
@final
@classmethod
def enter_class_context(cls, context_manager):
"""
Enter a context manager to be exited during the tearDownClass
"""
if cls._in_setup:
raise ValueError(
"Attempted to enter a class context in init_instance_fixtures."
"\nDid you mean to call enter_instance_context?",
)
return cls._class_teardown_stack.enter_context(context_manager)
@final
@classmethod
def add_class_callback(cls, callback, *args, **kwargs):
"""
Register a callback to be executed during tearDownClass.
Parameters
----------
callback : callable
The callback to invoke at the end of the test suite.
"""
if cls._in_setup:
raise ValueError(
"Attempted to add a class callback in init_instance_fixtures."
"\nDid you mean to call add_instance_callback?",
)
return cls._class_teardown_stack.callback(callback, *args, **kwargs)
@final
def setUp(self):
type(self)._in_setup = True
self._pre_setup_attrs = set(vars(self))
self._instance_teardown_stack = ExitStack()
try:
self._init_instance_fixtures_was_called = False
self.init_instance_fixtures()
assert self._init_instance_fixtures_was_called, (
"ZiplineTestCase.init_instance_fixtures() was not"
" called.\n"
"This probably means that you overrode"
" init_instance_fixtures without calling super()."
)
except BaseException: # Clean up even on KeyboardInterrupt
self.tearDown()
raise
finally:
type(self)._in_setup = False
def init_instance_fixtures(self):
self._init_instance_fixtures_was_called = True
@final
def tearDown(self):
# We need to get this before it's deleted by the loop.
stack = self._instance_teardown_stack
for attr in set(vars(self)) - self._pre_setup_attrs:
delattr(self, attr)
stack.close()
@final
def enter_instance_context(self, context_manager):
"""
Enter a context manager that should be exited during tearDown.
"""
return self._instance_teardown_stack.enter_context(context_manager)
@final
def add_instance_callback(self, callback):
"""
Register a callback to be executed during tearDown.
Parameters
----------
callback : callable
The callback to invoke at the end of each test.
"""
return self._instance_teardown_stack.callback(callback)
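# Hedged sketch of the subclassing pattern described in the ZiplineTestCase
# docstring (MyTestCase and its resources are hypothetical): fixtures are
# registered in init_*_fixtures rather than setUp/setUpClass, and cleanup is
# delegated to the ExitStacks::
#
#     class MyTestCase(ZiplineTestCase):
#         @classmethod
#         def init_class_fixtures(cls):
#             super(MyTestCase, cls).init_class_fixtures()
#             # Per-class resource, cleaned up in tearDownClass.
#             cls.tmp = cls.enter_class_context(tmp_dir())
#
#         def init_instance_fixtures(self):
#             super(MyTestCase, self).init_instance_fixtures()
#             # Per-test callback, run during tearDown.
#             self.add_instance_callback(lambda: None)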
def alias(attr_name):
"""Make a fixture attribute an alias of another fixture's attribute by
default.
Parameters
----------
attr_name : str
The name of the attribute to alias.
Returns
-------
p : classproperty
A class property that does the property aliasing.
Examples
--------
>>> class C(object):
... attr = 1
...
>>> class D(C):
... attr_alias = alias('attr')
...
>>> D.attr
1
>>> D.attr_alias
1
>>> class E(D):
... attr_alias = 2
...
>>> E.attr
1
>>> E.attr_alias
2
"""
return classproperty(flip(getattr, attr_name))
class WithDefaultDateBounds(object, metaclass=DebugMROMeta):
"""
ZiplineTestCase mixin which makes it possible to synchronize date bounds
across fixtures.
This fixture should always be the last fixture in bases of any fixture or
test case that uses it.
Attributes
----------
START_DATE : datetime
END_DATE : datetime
The date bounds to be used for fixtures that want to have consistent
dates.
"""
START_DATE = pd.Timestamp("2006-01-03", tz="utc")
END_DATE = pd.Timestamp("2006-12-29", tz="utc")
class WithLogger:
"""
ZiplineTestCase mixin providing cls.log_handler as an instance-level
fixture.
After init_instance_fixtures has been called `self.log_handler` will be a
new ``logbook.NullHandler``.
Methods
-------
make_log_handler() -> logbook.LogHandler
A class method which constructs the new log handler object. By default
this will construct a ``NullHandler``.
"""
make_log_handler = NullHandler
@classmethod
def init_class_fixtures(cls):
super(WithLogger, cls).init_class_fixtures()
cls.log = Logger()
cls.log_handler = cls.enter_class_context(
cls.make_log_handler().applicationbound(),
)
class WithAssetFinder(WithDefaultDateBounds):
"""
ZiplineTestCase mixin providing cls.asset_finder as a class-level fixture.
After init_class_fixtures has been called, `cls.asset_finder` is populated
with an AssetFinder.
Attributes
----------
ASSET_FINDER_EQUITY_SIDS : iterable[int]
The default sids to construct equity data for.
ASSET_FINDER_EQUITY_SYMBOLS : iterable[str]
The default symbols to use for the equities.
ASSET_FINDER_EQUITY_START_DATE : datetime
The default start date to create equity data for. This defaults to
``START_DATE``.
ASSET_FINDER_EQUITY_END_DATE : datetime
The default end date to create equity data for. This defaults to
``END_DATE``.
ASSET_FINDER_EQUITY_NAMES: iterable[str]
The default names to use for the equities.
ASSET_FINDER_EQUITY_EXCHANGE : str
The default exchange to assign each equity.
ASSET_FINDER_COUNTRY_CODE : str
The default country code to assign each exchange.
Methods
-------
make_equity_info() -> pd.DataFrame
A class method which constructs the dataframe of equity info to write
to the class's asset db. By default this is empty.
make_futures_info() -> pd.DataFrame
A class method which constructs the dataframe of futures contract info
to write to the class's asset db. By default this is empty.
make_exchanges_info() -> pd.DataFrame
A class method which constructs the dataframe of exchange information
to write to the class's assets db. By default this is empty.
make_root_symbols_info() -> pd.DataFrame
A class method which constructs the dataframe of root symbols
information to write to the class's assets db. By default this is
empty.
make_asset_finder_db_url() -> string
A class method which returns the URL at which to create the SQLAlchemy
engine. By default provides a URL for an in-memory database.
make_asset_finder() -> pd.DataFrame
A class method which constructs the actual asset finder object to use
for the class. If this method is overridden then the ``make_*_info``
methods may not be respected.
See Also
--------
zipline.testing.make_simple_equity_info
zipline.testing.make_jagged_equity_info
zipline.testing.make_rotating_equity_info
zipline.testing.make_future_info
zipline.testing.make_commodity_future_info
"""
ASSET_FINDER_EQUITY_SIDS = ord("A"), ord("B"), ord("C")
ASSET_FINDER_EQUITY_SYMBOLS = None
ASSET_FINDER_EQUITY_NAMES = None
ASSET_FINDER_EQUITY_EXCHANGE = "TEST"
ASSET_FINDER_EQUITY_START_DATE = alias("START_DATE")
ASSET_FINDER_EQUITY_END_DATE = alias("END_DATE")
ASSET_FINDER_FUTURE_CHAIN_PREDICATES = CHAIN_PREDICATES
ASSET_FINDER_COUNTRY_CODE = "??"
@classmethod
def _make_info(cls, *args):
return None
make_futures_info = _make_info
make_exchanges_info = _make_info
make_root_symbols_info = _make_info
make_equity_supplementary_mappings = _make_info
del _make_info
@classmethod
def make_equity_info(cls):
return make_simple_equity_info(
cls.ASSET_FINDER_EQUITY_SIDS,
cls.ASSET_FINDER_EQUITY_START_DATE,
cls.ASSET_FINDER_EQUITY_END_DATE,
cls.ASSET_FINDER_EQUITY_SYMBOLS,
cls.ASSET_FINDER_EQUITY_NAMES,
cls.ASSET_FINDER_EQUITY_EXCHANGE,
)
@classmethod
def make_asset_finder_db_url(cls):
return "sqlite:///:memory:"
@classmethod
def make_asset_finder(cls):
"""Returns a new AssetFinder
Returns
-------
asset_finder : zipline.assets.AssetFinder
"""
equities = cls.make_equity_info()
futures = cls.make_futures_info()
root_symbols = cls.make_root_symbols_info()
exchanges = cls.make_exchanges_info(equities, futures, root_symbols)
if exchanges is None:
exchange_names = [
df["exchange"]
for df in (equities, futures, root_symbols)
if df is not None
]
if exchange_names:
exchanges = pd.DataFrame(
{
"exchange": pd.concat(exchange_names).unique(),
"country_code": cls.ASSET_FINDER_COUNTRY_CODE,
}
)
return cls.enter_class_context(
tmp_asset_finder(
url=cls.make_asset_finder_db_url(),
equities=equities,
futures=futures,
exchanges=exchanges,
root_symbols=root_symbols,
equity_supplementary_mappings=(
cls.make_equity_supplementary_mappings()
),
future_chain_predicates=cls.ASSET_FINDER_FUTURE_CHAIN_PREDICATES,
)
)
@classmethod
def init_class_fixtures(cls):
super(WithAssetFinder, cls).init_class_fixtures()
cls.asset_finder = cls.make_asset_finder()
@classlazyval
def all_assets(cls):
"""A list of Assets for all sids in cls.asset_finder."""
return cls.asset_finder.retrieve_all(cls.asset_finder.sids)
@classlazyval
def exchange_names(cls):
"""A list of canonical exchange names for all exchanges in this suite."""
infos = cls.asset_finder.exchange_info.values()
return sorted(i.canonical_name for i in infos)
@classlazyval
def assets_by_calendar(cls):
"""A dict from calendar -> list of assets with that calendar."""
return groupby(lambda a: get_calendar(a.exchange), cls.all_assets)
@classlazyval
def all_calendars(cls):
"""A list of all calendars for assets in this test suite."""
return list(cls.assets_by_calendar)
# TODO_SS: The API here doesn't make sense in a multi-country test scenario.
class WithTradingCalendars:
"""
ZiplineTestCase mixin providing cls.trading_calendar,
cls.all_trading_calendars, cls.trading_calendar_for_asset_type as a
class-level fixture.
After ``init_class_fixtures`` has been called:
- `cls.trading_calendar` is populated with a default of the nyse trading
calendar for compatibility with existing tests
- `cls.all_trading_calendars` is populated with the trading calendars
keyed by name,
- `cls.trading_calendar_for_asset_type` is populated with the trading
calendars keyed by the asset type which uses the respective calendar.
Attributes
----------
TRADING_CALENDAR_STRS : iterable
iterable of identifiers of the calendars to use.
TRADING_CALENDAR_FOR_ASSET_TYPE : dict
A dictionary which maps asset type names to the calendar associated
with that asset type.
"""
TRADING_CALENDAR_STRS = ("NYSE",)
TRADING_CALENDAR_FOR_ASSET_TYPE = {Equity: "NYSE", Future: "us_futures"}
    # For backwards compatibility, existing tests and fixtures refer to
# `trading_calendar` with the assumption that the value is the NYSE
# calendar.
TRADING_CALENDAR_PRIMARY_CAL = "NYSE"
@classmethod
def init_class_fixtures(cls):
super(WithTradingCalendars, cls).init_class_fixtures()
cls.trading_calendars = {}
# Silence `pandas.errors.PerformanceWarning: Non-vectorized DateOffset
# being applied to Series or DatetimeIndex` in trading calendar
# construction. This causes nosetest to fail.
with warnings.catch_warnings():
warnings.simplefilter("ignore", PerformanceWarning)
for cal_str in set(cls.TRADING_CALENDAR_STRS) | {
cls.TRADING_CALENDAR_PRIMARY_CAL
}:
# Set name to allow aliasing.
calendar = get_calendar(cal_str)
setattr(cls, "{0}_calendar".format(cal_str.lower()), calendar)
cls.trading_calendars[cal_str] = calendar
type_to_cal = cls.TRADING_CALENDAR_FOR_ASSET_TYPE.items()
for asset_type, cal_str in type_to_cal:
calendar = get_calendar(cal_str)
cls.trading_calendars[asset_type] = calendar
cls.trading_calendar = cls.trading_calendars[cls.TRADING_CALENDAR_PRIMARY_CAL]
STATIC_BENCHMARK_PATH = os.path.join(
zipline_dir,
"resources",
"market_data",
"SPY_benchmark.csv",
)
@remember_last
def read_checked_in_benchmark_data():
return get_benchmark_returns_from_file(STATIC_BENCHMARK_PATH)
class WithBenchmarkReturns(WithDefaultDateBounds, WithTradingCalendars):
"""
ZiplineTestCase mixin providing cls.benchmark_returns as a class-level
attribute.
"""
_default_treasury_curves = None
@classproperty
def BENCHMARK_RETURNS(cls):
benchmark_returns = read_checked_in_benchmark_data()
# Zipline ordinarily uses cached benchmark returns data, but when
# running the zipline tests this cache is not always updated to include
# the appropriate dates required by both the futures and equity
# calendars. In order to create more reliable and consistent data
# throughout the entirety of the tests, we read static benchmark
# returns files from source. If a test using this fixture attempts to
# run outside of the static date range of the csv files, raise an
# exception warning the user to either update the csv files in source
# or to use a date range within the current bounds.
static_start_date = benchmark_returns.index[0].date()
static_end_date = benchmark_returns.index[-1].date()
warning_message = (
"The WithBenchmarkReturns fixture uses static data between "
"{static_start} and {static_end}. To use a start and end date "
"of {given_start} and {given_end} you will have to update the "
"file in {benchmark_path} to include the missing dates.".format(
static_start=static_start_date,
static_end=static_end_date,
given_start=cls.START_DATE.date(),
given_end=cls.END_DATE.date(),
benchmark_path=STATIC_BENCHMARK_PATH,
)
)
if (
cls.START_DATE.date() < static_start_date
or cls.END_DATE.date() > static_end_date
):
raise AssertionError(warning_message)
return benchmark_returns
class WithSimParams(WithDefaultDateBounds):
"""
ZiplineTestCase mixin providing cls.sim_params as a class level fixture.
Attributes
----------
SIM_PARAMS_CAPITAL_BASE : float
SIM_PARAMS_DATA_FREQUENCY : {'daily', 'minute'}
SIM_PARAMS_EMISSION_RATE : {'daily', 'minute'}
Forwarded to ``SimulationParameters``.
SIM_PARAMS_START : datetime
SIM_PARAMS_END : datetime
Forwarded to ``SimulationParameters``. If not
explicitly overridden these will be ``START_DATE`` and ``END_DATE``
Methods
-------
make_simparams(**overrides)
Construct a ``SimulationParameters`` using the defaults defined by
fixture configuration attributes. Any parameters to
``SimulationParameters`` can be overridden by passing them by keyword.
See Also
--------
zipline.finance.trading.SimulationParameters
"""
SIM_PARAMS_CAPITAL_BASE = 1.0e5
SIM_PARAMS_DATA_FREQUENCY = "daily"
SIM_PARAMS_EMISSION_RATE = "daily"
SIM_PARAMS_START = alias("START_DATE")
SIM_PARAMS_END = alias("END_DATE")
@classmethod
def make_simparams(cls, **overrides):
kwargs = dict(
start_session=cls.SIM_PARAMS_START,
end_session=cls.SIM_PARAMS_END,
capital_base=cls.SIM_PARAMS_CAPITAL_BASE,
data_frequency=cls.SIM_PARAMS_DATA_FREQUENCY,
emission_rate=cls.SIM_PARAMS_EMISSION_RATE,
trading_calendar=cls.trading_calendar,
)
kwargs.update(overrides)
return SimulationParameters(**kwargs)
@classmethod
def init_class_fixtures(cls):
super(WithSimParams, cls).init_class_fixtures()
cls.sim_params = cls.make_simparams()
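# Hedged sketch of combining the fixtures above (the class and its overrides
# are hypothetical): attribute defaults feed ``make_simparams``, and
# individual tests can still override parameters by keyword::
#
#     class MySimTests(WithSimParams, WithTradingCalendars, ZiplineTestCase):
#         SIM_PARAMS_CAPITAL_BASE = 1.0e6
#
#         def test_minute_params(self):
#             params = self.make_simparams(data_frequency="minute")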
class WithTradingSessions(WithDefaultDateBounds, WithTradingCalendars):
"""
ZiplineTestCase mixin providing cls.trading_days, cls.all_trading_sessions
as a class-level fixture.
After init_class_fixtures has been called, `cls.all_trading_sessions`
is populated with a dictionary of calendar name to the DatetimeIndex
containing the calendar trading days ranging from:
(DATA_MAX_DAY - (cls.TRADING_DAY_COUNT) -> DATA_MAX_DAY)
`cls.trading_days`, for compatibility with existing tests which make the
assumption that trading days are equity only, defaults to the nyse trading
sessions.
Attributes
----------
DATA_MAX_DAY : datetime
The most recent trading day in the calendar.
TRADING_DAY_COUNT : int
The number of days to put in the calendar. The default value of
``TRADING_DAY_COUNT`` is 126 (half a trading-year). Inheritors can
override TRADING_DAY_COUNT to request more or less data.
"""
DATA_MIN_DAY = alias("START_DATE")
DATA_MAX_DAY = alias("END_DATE")
    # For backwards compatibility, existing tests and fixtures refer to
# `trading_days` with the assumption that the value is days of the NYSE
# calendar.
trading_days = alias("nyse_sessions")
@classmethod
def init_class_fixtures(cls):
super(WithTradingSessions, cls).init_class_fixtures()
cls.trading_sessions = {}
for cal_str in cls.TRADING_CALENDAR_STRS:
trading_calendar = cls.trading_calendars[cal_str]
sessions = trading_calendar.sessions_in_range(
make_utc_aware(cls.DATA_MIN_DAY), make_utc_aware(cls.DATA_MAX_DAY)
)
# Set name for aliasing.
setattr(cls, "{0}_sessions".format(cal_str.lower()), sessions)
cls.trading_sessions[cal_str] = sessions
class WithTmpDir:
"""
    ZiplineTestCase mixin providing cls.tmpdir as a class-level fixture.
After init_class_fixtures has been called, `cls.tmpdir` is populated with
a `testfixtures.TempDirectory` object whose path is `cls.TMP_DIR_PATH`.
Attributes
----------
TMP_DIR_PATH : str
The path to the new directory to create. By default this is None
which will create a unique directory in /tmp.
"""
TMP_DIR_PATH = None
@classmethod
def init_class_fixtures(cls):
super(WithTmpDir, cls).init_class_fixtures()
cls.tmpdir = cls.enter_class_context(
tmp_dir(path=cls.TMP_DIR_PATH),
)
class WithInstanceTmpDir:
"""
    ZiplineTestCase mixin providing self.tmpdir as an instance-level fixture.
After init_instance_fixtures has been called, `self.tmpdir` is populated
with a `testfixtures.TempDirectory` object whose path is
`cls.TMP_DIR_PATH`.
Attributes
----------
INSTANCE_TMP_DIR_PATH : str
The path to the new directory to create. By default this is None
which will create a unique directory in /tmp.
"""
INSTANCE_TMP_DIR_PATH = None
def init_instance_fixtures(self):
super(WithInstanceTmpDir, self).init_instance_fixtures()
self.instance_tmpdir = self.enter_instance_context(
tmp_dir(path=self.INSTANCE_TMP_DIR_PATH),
)
class WithEquityDailyBarData(WithAssetFinder, WithTradingCalendars):
"""
ZiplineTestCase mixin providing cls.make_equity_daily_bar_data.
Attributes
----------
EQUITY_DAILY_BAR_START_DATE : Timestamp
        The date at which to start creating data. This defaults to
``START_DATE``.
    EQUITY_DAILY_BAR_END_DATE : Timestamp
The end date up to which to create data. This defaults to ``END_DATE``.
EQUITY_DAILY_BAR_SOURCE_FROM_MINUTE : bool
If this flag is set, `make_equity_daily_bar_data` will read data from
the minute bars defined by `WithEquityMinuteBarData`.
The current default is `False`, but could be `True` in the future.
EQUITY_DAILY_BAR_COUNTRY_CODES : tuple
        The countries to create data for. By default this is populated
with all of the countries present in the asset finder.
Methods
-------
make_equity_daily_bar_data(country_code, sids)
make_equity_daily_bar_currency_codes(country_code, sids)
See Also
--------
WithEquityMinuteBarData
zipline.testing.create_daily_bar_data
""" # noqa
EQUITY_DAILY_BAR_START_DATE = alias("START_DATE")
EQUITY_DAILY_BAR_END_DATE = alias("END_DATE")
EQUITY_DAILY_BAR_SOURCE_FROM_MINUTE = None
@classproperty
def EQUITY_DAILY_BAR_LOOKBACK_DAYS(cls):
# If we're sourcing from minute data, then we almost certainly want the
# minute bar calendar to be aligned with the daily bar calendar, so
# re-use the same lookback parameter.
if cls.EQUITY_DAILY_BAR_SOURCE_FROM_MINUTE:
return cls.EQUITY_MINUTE_BAR_LOOKBACK_DAYS
else:
return 0
@classproperty
def EQUITY_DAILY_BAR_COUNTRY_CODES(cls):
return cls.asset_finder.country_codes
@classmethod
def _make_equity_daily_bar_from_minute(cls):
assert issubclass(
cls, WithEquityMinuteBarData
), "Can't source daily data from minute without minute data!"
assets = cls.asset_finder.retrieve_all(cls.asset_finder.equities_sids)
minute_data = dict(cls.make_equity_minute_bar_data())
for asset in assets:
yield asset.sid, minute_frame_to_session_frame(
minute_data[asset.sid], cls.trading_calendars[Equity]
)
@classmethod
def make_equity_daily_bar_data(cls, country_code, sids):
"""
Create daily pricing data.
Parameters
----------
country_code : str
An ISO 3166 alpha-2 country code. Data should be created for
this country.
sids : tuple[int]
The sids to include in the data.
Yields
------
(int, pd.DataFrame)
A sid, dataframe pair to be passed to a daily bar writer.
The dataframe should be indexed by date, with columns of
('open', 'high', 'low', 'close', 'volume', 'day', & 'id').
"""
# Requires a WithEquityMinuteBarData to come before in the MRO.
# Resample that data so that daily and minute bar data are aligned.
if cls.EQUITY_DAILY_BAR_SOURCE_FROM_MINUTE:
return cls._make_equity_daily_bar_from_minute()
else:
return create_daily_bar_data(cls.equity_daily_bar_days, sids)
@classmethod
def make_equity_daily_bar_currency_codes(cls, country_code, sids):
"""Create listing currencies.
Default is to list all assets in USD.
Parameters
----------
country_code : str
An ISO 3166 alpha-2 country code. Data should be created for
this country.
sids : tuple[int]
The sids to include in the data.
Returns
-------
currency_codes : pd.Series[int, str]
Map from sids to currency for that sid's prices.
"""
return pd.Series(index=list(sids), data="USD")
@classmethod
def init_class_fixtures(cls):
super(WithEquityDailyBarData, cls).init_class_fixtures()
trading_calendar = cls.trading_calendars[Equity]
if trading_calendar.is_session(cls.EQUITY_DAILY_BAR_START_DATE):
first_session = cls.EQUITY_DAILY_BAR_START_DATE
else:
first_session = trading_calendar.minute_to_session_label(
pd.Timestamp(cls.EQUITY_DAILY_BAR_START_DATE)
)
if cls.EQUITY_DAILY_BAR_LOOKBACK_DAYS > 0:
first_session = trading_calendar.sessions_window(
first_session, -1 * cls.EQUITY_DAILY_BAR_LOOKBACK_DAYS
)[0]
days = trading_calendar.sessions_in_range(
first_session,
cls.EQUITY_DAILY_BAR_END_DATE,
)
cls.equity_daily_bar_days = days
class WithFutureDailyBarData(WithAssetFinder, WithTradingCalendars):
"""
ZiplineTestCase mixin providing cls.make_future_daily_bar_data.
Attributes
----------
FUTURE_DAILY_BAR_START_DATE : Timestamp
The date at which to start creating data. This defaults to
``START_DATE``.
FUTURE_DAILY_BAR_END_DATE : Timestamp
The end date up to which to create data. This defaults to ``END_DATE``.
FUTURE_DAILY_BAR_SOURCE_FROM_MINUTE : bool
If this flag is set, `make_future_daily_bar_data` will read data from
the minute bars defined by `WithFutureMinuteBarData`.
The current default is `False`, but could be `True` in the future.
Methods
-------
make_future_daily_bar_data() -> iterable[(int, pd.DataFrame)]
A class method that returns an iterator of (sid, dataframe) pairs
which will be written to the bcolz files that the class's
``BcolzDailyBarReader`` will read from. By default this creates
some simple synthetic data with
:func:`~zipline.testing.create_daily_bar_data`
See Also
--------
WithFutureMinuteBarData
zipline.testing.create_daily_bar_data
"""
FUTURE_DAILY_BAR_USE_FULL_CALENDAR = False
FUTURE_DAILY_BAR_START_DATE = alias("START_DATE")
FUTURE_DAILY_BAR_END_DATE = alias("END_DATE")
FUTURE_DAILY_BAR_SOURCE_FROM_MINUTE = None
@classproperty
def FUTURE_DAILY_BAR_LOOKBACK_DAYS(cls):
# If we're sourcing from minute data, then we almost certainly want the
# minute bar calendar to be aligned with the daily bar calendar, so
# re-use the same lookback parameter.
if cls.FUTURE_DAILY_BAR_SOURCE_FROM_MINUTE:
return cls.FUTURE_MINUTE_BAR_LOOKBACK_DAYS
else:
return 0
@classmethod
def _make_future_daily_bar_from_minute(cls):
assert issubclass(
cls, WithFutureMinuteBarData
), "Can't source daily data from minute without minute data!"
assets = cls.asset_finder.retrieve_all(cls.asset_finder.futures_sids)
minute_data = dict(cls.make_future_minute_bar_data())
for asset in assets:
yield asset.sid, minute_frame_to_session_frame(
minute_data[asset.sid], cls.trading_calendars[Future]
)
@classmethod
def make_future_daily_bar_data(cls):
# Requires a WithFutureMinuteBarData to come before in the MRO.
# Resample that data so that daily and minute bar data are aligned.
if cls.FUTURE_DAILY_BAR_SOURCE_FROM_MINUTE:
return cls._make_future_daily_bar_from_minute()
else:
return create_daily_bar_data(
cls.future_daily_bar_days,
cls.asset_finder.futures_sids,
)
@classmethod
def init_class_fixtures(cls):
super(WithFutureDailyBarData, cls).init_class_fixtures()
trading_calendar = cls.trading_calendars[Future]
if cls.FUTURE_DAILY_BAR_USE_FULL_CALENDAR:
days = trading_calendar.all_sessions
else:
if trading_calendar.is_session(cls.FUTURE_DAILY_BAR_START_DATE):
first_session = cls.FUTURE_DAILY_BAR_START_DATE
else:
first_session = trading_calendar.minute_to_session_label(
pd.Timestamp(cls.FUTURE_DAILY_BAR_START_DATE)
)
if cls.FUTURE_DAILY_BAR_LOOKBACK_DAYS > 0:
first_session = trading_calendar.sessions_window(
first_session, -1 * cls.FUTURE_DAILY_BAR_LOOKBACK_DAYS
)[0]
days = trading_calendar.sessions_in_range(
first_session,
cls.FUTURE_DAILY_BAR_END_DATE,
)
cls.future_daily_bar_days = days
class WithBcolzEquityDailyBarReader(WithEquityDailyBarData, WithTmpDir):
"""
ZiplineTestCase mixin providing cls.bcolz_daily_bar_path,
cls.bcolz_daily_bar_ctable, and cls.bcolz_equity_daily_bar_reader
class level fixtures.
After init_class_fixtures has been called:
- `cls.bcolz_daily_bar_path` is populated with
`cls.tmpdir.getpath(cls.BCOLZ_DAILY_BAR_PATH)`.
- `cls.bcolz_daily_bar_ctable` is populated with data returned from
`cls.make_equity_daily_bar_data`. By default this calls
:func:`zipline.pipeline.loaders.synthetic.make_equity_daily_bar_data`.
- `cls.bcolz_equity_daily_bar_reader` is a daily bar reader
pointing to the directory that was just written to.
Attributes
----------
BCOLZ_DAILY_BAR_PATH : str
The path inside the tmpdir where this will be written.
EQUITY_DAILY_BAR_LOOKBACK_DAYS : int
The number of days of data to add before the first day. This is used
when a test needs to use history, in which case this should be set to
the largest history window that will be
requested.
EQUITY_DAILY_BAR_USE_FULL_CALENDAR : bool
If this flag is set the ``equity_daily_bar_days`` will be the full
set of trading days from the trading environment. This flag overrides
``EQUITY_DAILY_BAR_LOOKBACK_DAYS``.
BCOLZ_DAILY_BAR_READ_ALL_THRESHOLD : int
If this flag is set, use the value as the `read_all_threshold`
parameter to BcolzDailyBarReader, otherwise use the default
value.
EQUITY_DAILY_BAR_SOURCE_FROM_MINUTE : bool
If this flag is set, `make_equity_daily_bar_data` will read data from
the minute bar reader defined by a `WithBcolzEquityMinuteBarReader`.
Methods
-------
make_bcolz_daily_bar_rootdir_path() -> string
A class method that returns the path for the rootdir of the daily
bars ctable. By default this is a subdirectory BCOLZ_DAILY_BAR_PATH in
the shared temp directory.
See Also
--------
WithBcolzEquityMinuteBarReader
WithDataPortal
zipline.testing.create_daily_bar_data
"""
BCOLZ_DAILY_BAR_PATH = "daily_equity_pricing.bcolz"
BCOLZ_DAILY_BAR_READ_ALL_THRESHOLD = None
BCOLZ_DAILY_BAR_COUNTRY_CODE = None
EQUITY_DAILY_BAR_SOURCE_FROM_MINUTE = False
# allows WithBcolzEquityDailyBarReaderFromCSVs to call the
# `write_csvs` method without needing to reimplement `init_class_fixtures`
_write_method_name = "write"
# What to do when data being written is invalid, e.g. nan, inf, etc.
# options are: 'warn', 'raise', 'ignore'
INVALID_DATA_BEHAVIOR = "warn"
@classproperty
def BCOLZ_DAILY_BAR_COUNTRY_CODE(cls):
return cls.EQUITY_DAILY_BAR_COUNTRY_CODES[0]
@classmethod
def make_bcolz_daily_bar_rootdir_path(cls):
return cls.tmpdir.makedir(cls.BCOLZ_DAILY_BAR_PATH)
@classmethod
def init_class_fixtures(cls):
super(WithBcolzEquityDailyBarReader, cls).init_class_fixtures()
cls.bcolz_daily_bar_path = p = cls.make_bcolz_daily_bar_rootdir_path()
days = cls.equity_daily_bar_days
sids = cls.asset_finder.equities_sids_for_country_code(
cls.BCOLZ_DAILY_BAR_COUNTRY_CODE
)
trading_calendar = cls.trading_calendars[Equity]
cls.bcolz_daily_bar_ctable = t = getattr(
BcolzDailyBarWriter(p, trading_calendar, days[0], days[-1]),
cls._write_method_name,
)(
cls.make_equity_daily_bar_data(
country_code=cls.BCOLZ_DAILY_BAR_COUNTRY_CODE,
sids=sids,
),
invalid_data_behavior=cls.INVALID_DATA_BEHAVIOR,
)
if cls.BCOLZ_DAILY_BAR_READ_ALL_THRESHOLD is not None:
cls.bcolz_equity_daily_bar_reader = BcolzDailyBarReader(
t, cls.BCOLZ_DAILY_BAR_READ_ALL_THRESHOLD
)
else:
cls.bcolz_equity_daily_bar_reader = BcolzDailyBarReader(t)
class WithBcolzFutureDailyBarReader(WithFutureDailyBarData, WithTmpDir):
"""
ZiplineTestCase mixin providing cls.future_bcolz_daily_bar_path,
cls.future_bcolz_daily_bar_ctable, and cls.bcolz_future_daily_bar_reader
class level fixtures.
After init_class_fixtures has been called:
- `cls.future_bcolz_daily_bar_path` is populated with
`cls.tmpdir.makedir(cls.BCOLZ_FUTURE_DAILY_BAR_PATH)`.
- `cls.future_bcolz_daily_bar_ctable` is populated with data returned from
`cls.make_future_daily_bar_data`. By default this calls
:func:`zipline.pipeline.loaders.synthetic.make_future_daily_bar_data`.
- `cls.bcolz_future_daily_bar_reader` is a daily bar reader
pointing to the directory that was just written to.
Attributes
----------
BCOLZ_FUTURE_DAILY_BAR_PATH : str
The path inside the tmpdir where this will be written.
FUTURE_DAILY_BAR_LOOKBACK_DAYS : int
The number of days of data to add before the first day. This is used
when a test needs to use history, in which case this should be set to
the largest history window that will be
requested.
FUTURE_DAILY_BAR_USE_FULL_CALENDAR : bool
If this flag is set the ``future_daily_bar_days`` will be the full
set of trading days from the trading environment. This flag overrides
``FUTURE_DAILY_BAR_LOOKBACK_DAYS``.
BCOLZ_FUTURE_DAILY_BAR_READ_ALL_THRESHOLD : int
If this flag is set, use the value as the `read_all_threshold`
parameter to BcolzDailyBarReader, otherwise use the default
value.
FUTURE_DAILY_BAR_SOURCE_FROM_MINUTE : bool
If this flag is set, `make_future_daily_bar_data` will read data from
the minute bar reader defined by a `WithBcolzFutureMinuteBarReader`.
Methods
-------
make_bcolz_future_daily_bar_rootdir_path() -> string
A class method that returns the path for the rootdir of the daily
bars ctable. By default this is a subdirectory BCOLZ_FUTURE_DAILY_BAR_PATH in
the shared temp directory.
See Also
--------
WithBcolzFutureMinuteBarReader
WithDataPortal
zipline.testing.create_daily_bar_data
"""
BCOLZ_FUTURE_DAILY_BAR_PATH = "daily_future_pricing.bcolz"
BCOLZ_FUTURE_DAILY_BAR_READ_ALL_THRESHOLD = None
FUTURE_DAILY_BAR_SOURCE_FROM_MINUTE = False
# What to do when data being written is invalid, e.g. nan, inf, etc.
# options are: 'warn', 'raise', 'ignore'
BCOLZ_FUTURE_DAILY_BAR_INVALID_DATA_BEHAVIOR = "warn"
BCOLZ_FUTURE_DAILY_BAR_WRITE_METHOD_NAME = "write"
@classmethod
def make_bcolz_future_daily_bar_rootdir_path(cls):
return cls.tmpdir.makedir(cls.BCOLZ_FUTURE_DAILY_BAR_PATH)
@classmethod
def init_class_fixtures(cls):
super(WithBcolzFutureDailyBarReader, cls).init_class_fixtures()
p = cls.make_bcolz_future_daily_bar_rootdir_path()
cls.future_bcolz_daily_bar_path = p
days = cls.future_daily_bar_days
trading_calendar = cls.trading_calendars[Future]
cls.future_bcolz_daily_bar_ctable = t = getattr(
BcolzDailyBarWriter(p, trading_calendar, days[0], days[-1]),
cls.BCOLZ_FUTURE_DAILY_BAR_WRITE_METHOD_NAME,
)(
cls.make_future_daily_bar_data(),
invalid_data_behavior=(cls.BCOLZ_FUTURE_DAILY_BAR_INVALID_DATA_BEHAVIOR),
)
if cls.BCOLZ_FUTURE_DAILY_BAR_READ_ALL_THRESHOLD is not None:
cls.bcolz_future_daily_bar_reader = BcolzDailyBarReader(
t, cls.BCOLZ_FUTURE_DAILY_BAR_READ_ALL_THRESHOLD
)
else:
cls.bcolz_future_daily_bar_reader = BcolzDailyBarReader(t)
class WithBcolzEquityDailyBarReaderFromCSVs(WithBcolzEquityDailyBarReader):
"""
ZiplineTestCase mixin that provides
cls.bcolz_equity_daily_bar_reader from a mapping of sids to CSV
file paths.
"""
_write_method_name = "write_csvs"
def _trading_days_for_minute_bars(calendar, start_date, end_date, lookback_days):
first_session = calendar.minute_to_session_label(start_date)
if lookback_days > 0:
first_session = calendar.sessions_window(first_session, -1 * lookback_days)[0]
return calendar.sessions_in_range(first_session, end_date)
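# Worked illustration of the helper above (added note, not original source):
# per the LOOKBACK_DAYS semantics documented in the fixtures below, passing
# lookback_days=2 starts the returned range two sessions before the session
# containing ``start_date``. On a Monday-Friday calendar,
#
#     _trading_days_for_minute_bars(cal,
#                                   pd.Timestamp("2016-01-06"),  # Wednesday
#                                   pd.Timestamp("2016-01-08"),  # Friday
#                                   2)
#
# would span Monday 2016-01-04 through Friday 2016-01-08.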
# TODO_SS: This currently doesn't define any relationship between country_code
# and calendar, which would be useful downstream.
class WithWriteHDF5DailyBars(WithEquityDailyBarData, WithTmpDir):
"""
Fixture class defining the capability of writing HDF5 daily bars to disk.
Uses cls.make_equity_daily_bar_data (inherited from WithEquityDailyBarData)
to determine the data to write.
Methods
-------
write_hdf5_daily_bars(cls, path, country_codes)
Creates an HDF5 file on disk and populates it with pricing data.
Attributes
----------
HDF5_DAILY_BAR_CHUNK_SIZE : int
The chunk size passed to HDF5DailyBarWriter when writing pricing data.
"""
HDF5_DAILY_BAR_CHUNK_SIZE = 30
@classmethod
def write_hdf5_daily_bars(cls, path, country_codes):
"""
Write HDF5 pricing data using an HDF5DailyBarWriter.
Parameters
----------
path : str
Location (relative to cls.tmpdir) at which to write data.
country_codes : list[str]
List of country codes to write.
Returns
-------
written : h5py.File
A read-only h5py.File pointing at the written data. The returned
file is registered to be closed automatically during class
teardown.
"""
ensure_directory_containing(path)
writer = HDF5DailyBarWriter(path, cls.HDF5_DAILY_BAR_CHUNK_SIZE)
write_hdf5_daily_bars(
writer,
cls.asset_finder,
country_codes,
cls.make_equity_daily_bar_data,
cls.make_equity_daily_bar_currency_codes,
)
# Open the file and mark it for closure during teardown.
return cls.enter_class_context(writer.h5_file(mode="r"))
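# Illustrative sketch (added note, not original source): a subclass could
# write to a file under its tmpdir and inspect the resulting h5py.File. The
# file name and country code are arbitrary choices for illustration.
#
#     h5_file = cls.write_hdf5_daily_bars(
#         cls.tmpdir.getpath("daily_bars.h5"),
#         country_codes=["US"],
#     )
#     # `h5_file` is opened read-only and closed automatically at teardown.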
class WithHDF5EquityMultiCountryDailyBarReader(WithWriteHDF5DailyBars):
"""
Fixture providing cls.hdf5_daily_bar_path and
cls.hdf5_equity_daily_bar_reader class level fixtures.
After init_class_fixtures has been called:
- `cls.hdf5_daily_bar_path` is populated with
`cls.tmpdir.getpath(cls.HDF5_DAILY_BAR_PATH)`.
- The file at `cls.hdf5_daily_bar_path` is populated with data returned
from `cls.make_equity_daily_bar_data`. By default this calls
:func:`zipline.pipeline.loaders.synthetic.make_equity_daily_bar_data`.
- `cls.hdf5_equity_daily_bar_reader` is a daily bar reader pointing
to the file that was just written to.
Attributes
----------
HDF5_DAILY_BAR_PATH : str
The path inside the tmpdir where this will be written.
HDF5_DAILY_BAR_COUNTRY_CODES : tuple
The ISO 3166 alpha-2 country codes for the countries to write/read.
Methods
-------
make_hdf5_daily_bar_path() -> string
A class method that returns the path of the HDF5 daily pricing file.
By default this is the file HDF5_DAILY_BAR_PATH in the shared temp
directory.
See Also
--------
WithDataPortal
zipline.testing.create_daily_bar_data
"""
HDF5_DAILY_BAR_PATH = "daily_equity_pricing.h5"
HDF5_DAILY_BAR_COUNTRY_CODES = alias("EQUITY_DAILY_BAR_COUNTRY_CODES")
@classmethod
def make_hdf5_daily_bar_path(cls):
return cls.tmpdir.getpath(cls.HDF5_DAILY_BAR_PATH)
@classmethod
def init_class_fixtures(cls):
super(
WithHDF5EquityMultiCountryDailyBarReader,
cls,
).init_class_fixtures()
cls.hdf5_daily_bar_path = path = cls.make_hdf5_daily_bar_path()
f = cls.write_hdf5_daily_bars(path, cls.HDF5_DAILY_BAR_COUNTRY_CODES)
cls.single_country_hdf5_equity_daily_bar_readers = {
country_code: HDF5DailyBarReader.from_file(f, country_code)
for country_code in f
}
cls.hdf5_equity_daily_bar_reader = MultiCountryDailyBarReader(
cls.single_country_hdf5_equity_daily_bar_readers
)
class WithEquityMinuteBarData(WithAssetFinder, WithTradingCalendars):
"""
ZiplineTestCase mixin providing cls.equity_minute_bar_days.
After init_class_fixtures has been called:
- `cls.equity_minute_bar_days` has the range over which data has been
generated.
Attributes
----------
EQUITY_MINUTE_BAR_LOOKBACK_DAYS : int
The number of days of data to add before the first day.
This is used when a test needs to use history, in which case this
should be set to the largest history window that will be requested.
EQUITY_MINUTE_BAR_START_DATE : Timestamp
The date at which to start creating data. This defaults to
``START_DATE``.
EQUITY_MINUTE_BAR_END_DATE : Timestamp
The end date up to which to create data. This defaults to ``END_DATE``.
Methods
-------
make_equity_minute_bar_data() -> iterable[(int, pd.DataFrame)]
Classmethod producing an iterator of (sid, minute_data) pairs.
The default implementation invokes
zipline.testing.core.create_minute_bar_data.
See Also
--------
WithEquityDailyBarData
zipline.testing.create_minute_bar_data
"""
EQUITY_MINUTE_BAR_LOOKBACK_DAYS = 0
EQUITY_MINUTE_BAR_START_DATE = alias("START_DATE")
EQUITY_MINUTE_BAR_END_DATE = alias("END_DATE")
@classmethod
def make_equity_minute_bar_data(cls):
trading_calendar = cls.trading_calendars[Equity]
return create_minute_bar_data(
trading_calendar.minutes_for_sessions_in_range(
cls.equity_minute_bar_days[0],
cls.equity_minute_bar_days[-1],
),
cls.asset_finder.equities_sids,
)
@classmethod
def init_class_fixtures(cls):
super(WithEquityMinuteBarData, cls).init_class_fixtures()
trading_calendar = cls.trading_calendars[Equity]
cls.equity_minute_bar_days = _trading_days_for_minute_bars(
trading_calendar,
pd.Timestamp(cls.EQUITY_MINUTE_BAR_START_DATE),
pd.Timestamp(cls.EQUITY_MINUTE_BAR_END_DATE),
cls.EQUITY_MINUTE_BAR_LOOKBACK_DAYS,
)
class WithFutureMinuteBarData(WithAssetFinder, WithTradingCalendars):
"""
ZiplineTestCase mixin providing cls.future_minute_bar_days.
After init_class_fixtures has been called:
- `cls.future_minute_bar_days` has the range over which data has been
generated.
Attributes
----------
FUTURE_MINUTE_BAR_LOOKBACK_DAYS : int
The number of days of data to add before the first day.
This is used when a test needs to use history, in which case this
should be set to the largest history window that will be requested.
FUTURE_MINUTE_BAR_START_DATE : Timestamp
The date at which to start creating data. This defaults to
``START_DATE``.
FUTURE_MINUTE_BAR_END_DATE : Timestamp
The end date up to which to create data. This defaults to ``END_DATE``.
Methods
-------
make_future_minute_bar_data() -> iterable[(int, pd.DataFrame)]
A class method that returns a dict mapping sid to dataframe
which will be written into the format of the inherited
class, which writes the minute bar data for use by a reader.
By default this creates some simple synthetic data with
:func:`~zipline.testing.create_minute_bar_data`
See Also
--------
zipline.testing.create_minute_bar_data
"""
FUTURE_MINUTE_BAR_LOOKBACK_DAYS = 0
FUTURE_MINUTE_BAR_START_DATE = alias("START_DATE")
FUTURE_MINUTE_BAR_END_DATE = alias("END_DATE")
@classmethod
def make_future_minute_bar_data(cls):
trading_calendar = get_calendar("us_futures")
return create_minute_bar_data(
trading_calendar.minutes_for_sessions_in_range(
cls.future_minute_bar_days[0],
cls.future_minute_bar_days[-1],
),
cls.asset_finder.futures_sids,
)
@classmethod
def init_class_fixtures(cls):
super(WithFutureMinuteBarData, cls).init_class_fixtures()
trading_calendar = get_calendar("us_futures")
cls.future_minute_bar_days = _trading_days_for_minute_bars(
trading_calendar,
pd.Timestamp(cls.FUTURE_MINUTE_BAR_START_DATE),
pd.Timestamp(cls.FUTURE_MINUTE_BAR_END_DATE),
cls.FUTURE_MINUTE_BAR_LOOKBACK_DAYS,
)
class WithBcolzEquityMinuteBarReader(WithEquityMinuteBarData, WithTmpDir):
"""
ZiplineTestCase mixin providing cls.bcolz_equity_minute_bar_path
and cls.bcolz_equity_minute_bar_reader class level fixtures.
After init_class_fixtures has been called:
- `cls.bcolz_equity_minute_bar_path` is populated with
`cls.tmpdir.makedir(cls.BCOLZ_EQUITY_MINUTE_BAR_PATH)`.
- The directory at that path is populated with data returned from
`cls.make_equity_minute_bar_data`. By default this calls
:func:`zipline.testing.create_minute_bar_data`.
- `cls.bcolz_equity_minute_bar_reader` is a minute bar reader
pointing to the directory that was just written to.
Attributes
----------
BCOLZ_EQUITY_MINUTE_BAR_PATH : str
The path inside the tmpdir where this will be written.
Methods
-------
make_bcolz_equity_minute_bar_rootdir_path() -> string
A class method that returns the path for the directory that contains
the minute bar ctables. By default this is a subdirectory
BCOLZ_EQUITY_MINUTE_BAR_PATH in the shared temp directory.
See Also
--------
WithBcolzEquityDailyBarReader
WithDataPortal
zipline.testing.create_minute_bar_data
"""
BCOLZ_EQUITY_MINUTE_BAR_PATH = "minute_equity_pricing"
@classmethod
def make_bcolz_equity_minute_bar_rootdir_path(cls):
return cls.tmpdir.makedir(cls.BCOLZ_EQUITY_MINUTE_BAR_PATH)
@classmethod
def init_class_fixtures(cls):
super(WithBcolzEquityMinuteBarReader, cls).init_class_fixtures()
cls.bcolz_equity_minute_bar_path = (
p
) = cls.make_bcolz_equity_minute_bar_rootdir_path()
days = cls.equity_minute_bar_days
writer = BcolzMinuteBarWriter(
p,
cls.trading_calendars[Equity],
days[0],
days[-1],
US_EQUITIES_MINUTES_PER_DAY,
)
writer.write(cls.make_equity_minute_bar_data())
cls.bcolz_equity_minute_bar_reader = BcolzMinuteBarReader(p)
class WithBcolzFutureMinuteBarReader(WithFutureMinuteBarData, WithTmpDir):
"""
ZiplineTestCase mixin providing cls.bcolz_future_minute_bar_path
and cls.bcolz_future_minute_bar_reader class level fixtures.
After init_class_fixtures has been called:
- `cls.bcolz_future_minute_bar_path` is populated with
`cls.tmpdir.makedir(cls.BCOLZ_FUTURE_MINUTE_BAR_PATH)`.
- The directory at that path is populated with data returned from
`cls.make_future_minute_bar_data`. By default this calls
:func:`zipline.testing.create_minute_bar_data`.
- `cls.bcolz_future_minute_bar_reader` is a minute bar reader
pointing to the directory that was just written to.
Attributes
----------
BCOLZ_FUTURE_MINUTE_BAR_PATH : str
The path inside the tmpdir where this will be written.
Methods
-------
make_bcolz_future_minute_bar_rootdir_path() -> string
A class method that returns the path for the directory that contains
the minute bar ctables. By default this is a subdirectory
BCOLZ_FUTURE_MINUTE_BAR_PATH in the shared temp directory.
See Also
--------
WithBcolzEquityDailyBarReader
WithDataPortal
zipline.testing.create_minute_bar_data
"""
BCOLZ_FUTURE_MINUTE_BAR_PATH = "minute_future_pricing"
OHLC_RATIOS_PER_SID = None
@classmethod
def make_bcolz_future_minute_bar_rootdir_path(cls):
return cls.tmpdir.makedir(cls.BCOLZ_FUTURE_MINUTE_BAR_PATH)
@classmethod
def init_class_fixtures(cls):
super(WithBcolzFutureMinuteBarReader, cls).init_class_fixtures()
trading_calendar = get_calendar("us_futures")
cls.bcolz_future_minute_bar_path = (
p
) = cls.make_bcolz_future_minute_bar_rootdir_path()
days = cls.future_minute_bar_days
writer = BcolzMinuteBarWriter(
p,
trading_calendar,
days[0],
days[-1],
FUTURES_MINUTES_PER_DAY,
ohlc_ratios_per_sid=cls.OHLC_RATIOS_PER_SID,
)
writer.write(cls.make_future_minute_bar_data())
cls.bcolz_future_minute_bar_reader = BcolzMinuteBarReader(p)
class WithConstantEquityMinuteBarData(WithEquityMinuteBarData):
EQUITY_MINUTE_CONSTANT_LOW = 3.0
EQUITY_MINUTE_CONSTANT_OPEN = 4.0
EQUITY_MINUTE_CONSTANT_CLOSE = 5.0
EQUITY_MINUTE_CONSTANT_HIGH = 6.0
EQUITY_MINUTE_CONSTANT_VOLUME = 100.0
@classmethod
def make_equity_minute_bar_data(cls):
trading_calendar = cls.trading_calendars[Equity]
sids = cls.asset_finder.equities_sids
minutes = trading_calendar.minutes_for_sessions_in_range(
cls.equity_minute_bar_days[0],
cls.equity_minute_bar_days[-1],
)
frame = pd.DataFrame(
{
"open": cls.EQUITY_MINUTE_CONSTANT_OPEN,
"high": cls.EQUITY_MINUTE_CONSTANT_HIGH,
"low": cls.EQUITY_MINUTE_CONSTANT_LOW,
"close": cls.EQUITY_MINUTE_CONSTANT_CLOSE,
"volume": cls.EQUITY_MINUTE_CONSTANT_VOLUME,
},
index=minutes,
)
return ((sid, frame) for sid in sids)
class WithConstantFutureMinuteBarData(WithFutureMinuteBarData):
FUTURE_MINUTE_CONSTANT_LOW = 3.0
FUTURE_MINUTE_CONSTANT_OPEN = 4.0
FUTURE_MINUTE_CONSTANT_CLOSE = 5.0
FUTURE_MINUTE_CONSTANT_HIGH = 6.0
FUTURE_MINUTE_CONSTANT_VOLUME = 100.0
@classmethod
def make_future_minute_bar_data(cls):
trading_calendar = cls.trading_calendars[Future]
sids = cls.asset_finder.futures_sids
minutes = trading_calendar.minutes_for_sessions_in_range(
cls.future_minute_bar_days[0],
cls.future_minute_bar_days[-1],
)
frame = pd.DataFrame(
{
"open": cls.FUTURE_MINUTE_CONSTANT_OPEN,
"high": cls.FUTURE_MINUTE_CONSTANT_HIGH,
"low": cls.FUTURE_MINUTE_CONSTANT_LOW,
"close": cls.FUTURE_MINUTE_CONSTANT_CLOSE,
"volume": cls.FUTURE_MINUTE_CONSTANT_VOLUME,
},
index=minutes,
)
return ((sid, frame) for sid in sids)
class WithAdjustmentReader(WithBcolzEquityDailyBarReader):
"""
ZiplineTestCase mixin providing cls.adjustment_reader as a class level
fixture.
After init_class_fixtures has been called, `cls.adjustment_reader` will be
populated with a new SQLiteAdjustmentReader object. The data that will be
written can be passed by overriding `make_{field}_data` where field may
be `splits`, `mergers`, `dividends`, or `stock_dividends`.
The daily bar reader used for this adjustment reader may be customized
by overriding `make_adjustment_writer_equity_daily_bar_reader`.
This is useful for providing a `MockDailyBarReader`.
Methods
-------
make_splits_data() -> pd.DataFrame
A class method that returns a dataframe of splits data to write to the
class's adjustment db. By default this is empty.
make_mergers_data() -> pd.DataFrame
A class method that returns a dataframe of mergers data to write to the
class's adjustment db. By default this is empty.
make_dividends_data() -> pd.DataFrame
A class method that returns a dataframe of dividends data to write to
the class's adjustment db. By default this is empty.
make_stock_dividends_data() -> pd.DataFrame
A class method that returns a dataframe of stock dividends data to
write to the class's adjustment db. By default this is empty.
make_adjustment_db_conn_str() -> string
A class method that returns the sqlite3 connection string for the
database into which the adjustments will be written. By default this
is an in-memory database.
make_adjustment_writer_equity_daily_bar_reader() -> pd.DataFrame
A class method that returns the daily bar reader to use for the class's
adjustment writer. By default this is the class's actual
``bcolz_equity_daily_bar_reader`` as inherited from
``WithBcolzEquityDailyBarReader``. This should probably not be
overridden; however, some tests used a ``MockDailyBarReader``
for this.
make_adjustment_writer(conn: sqlite3.Connection) -> AdjustmentWriter
A class method that constructs the adjustment writer which will be used
to write the data into the connection to be used by the class's
adjustment reader.
See Also
--------
zipline.testing.MockDailyBarReader
"""
@classmethod
def _make_data(cls):
return None
make_splits_data = _make_data
make_mergers_data = _make_data
make_dividends_data = _make_data
make_stock_dividends_data = _make_data
del _make_data
@classmethod
def make_adjustment_writer(cls, conn):
return SQLiteAdjustmentWriter(
conn,
cls.make_adjustment_writer_equity_daily_bar_reader(),
)
@classmethod
def make_adjustment_writer_equity_daily_bar_reader(cls):
return cls.bcolz_equity_daily_bar_reader
@classmethod
def make_adjustment_db_conn_str(cls):
return ":memory:"
@classmethod
def init_class_fixtures(cls):
super(WithAdjustmentReader, cls).init_class_fixtures()
conn = sqlite3.connect(cls.make_adjustment_db_conn_str())
# Silence numpy DeprecationWarnings which cause nosetest to fail
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
cls.make_adjustment_writer(conn).write(
splits=cls.make_splits_data(),
mergers=cls.make_mergers_data(),
dividends=cls.make_dividends_data(),
stock_dividends=cls.make_stock_dividends_data(),
)
cls.adjustment_reader = SQLiteAdjustmentReader(conn)
class WithUSEquityPricingPipelineEngine(WithAdjustmentReader, WithTradingSessions):
"""
Mixin providing the following as class-level fixtures.
- cls.data_root_dir
- cls.findata_dir
- cls.pipeline_engine
- cls.adjustments_db_path
"""
@classmethod
def init_class_fixtures(cls):
cls.data_root_dir = cls.enter_class_context(tmp_dir())
cls.findata_dir = cls.data_root_dir.makedir("findata")
super(WithUSEquityPricingPipelineEngine, cls).init_class_fixtures()
loader = USEquityPricingLoader.without_fx(
cls.bcolz_equity_daily_bar_reader,
SQLiteAdjustmentReader(cls.adjustments_db_path),
)
def get_loader(column):
if column in USEquityPricing.columns:
return loader
else:
raise AssertionError("No loader registered for %s" % column)
cls.pipeline_engine = SimplePipelineEngine(
get_loader=get_loader,
asset_finder=cls.asset_finder,
default_domain=US_EQUITIES,
)
@classmethod
def make_adjustment_db_conn_str(cls):
cls.adjustments_db_path = os.path.join(
cls.findata_dir,
"adjustments",
cls.END_DATE.strftime("%Y-%m-%d-adjustments.db"),
)
ensure_directory(os.path.dirname(cls.adjustments_db_path))
return cls.adjustments_db_path
class WithSeededRandomPipelineEngine(WithTradingSessions, WithAssetFinder):
"""
ZiplineTestCase mixin providing class-level fixtures for running pipelines
against deterministically-generated random data.
Attributes
----------
SEEDED_RANDOM_PIPELINE_SEED : int
Fixture input. Random seed used to initialize the random state loader.
seeded_random_loader : SeededRandomLoader
Fixture output. Loader capable of providing columns for
zipline.pipeline.data.testing.TestingDataSet.
seeded_random_engine : SimplePipelineEngine
Fixture output. A pipeline engine that will use seeded_random_loader
as its only data provider.
Methods
-------
run_pipeline(start_date, end_date)
Run a pipeline with self.seeded_random_engine.
See Also
--------
zipline.pipeline.loaders.synthetic.SeededRandomLoader
zipline.pipeline.loaders.testing.make_seeded_random_loader
zipline.pipeline.engine.SimplePipelineEngine
"""
SEEDED_RANDOM_PIPELINE_SEED = 42
SEEDED_RANDOM_PIPELINE_DEFAULT_DOMAIN = GENERIC
@classmethod
def init_class_fixtures(cls):
super(WithSeededRandomPipelineEngine, cls).init_class_fixtures()
cls._sids = cls.asset_finder.sids
cls.seeded_random_loader = loader = make_seeded_random_loader(
cls.SEEDED_RANDOM_PIPELINE_SEED,
cls.trading_days,
cls._sids,
columns=cls.make_seeded_random_loader_columns(),
)
cls.seeded_random_engine = SimplePipelineEngine(
get_loader=lambda column: loader,
asset_finder=cls.asset_finder,
default_domain=cls.SEEDED_RANDOM_PIPELINE_DEFAULT_DOMAIN,
default_hooks=cls.make_seeded_random_pipeline_engine_hooks(),
populate_initial_workspace=(
cls.make_seeded_random_populate_initial_workspace()
),
)
@classmethod
def make_seeded_random_pipeline_engine_hooks(cls):
return []
@classmethod
def make_seeded_random_populate_initial_workspace(cls):
return None
@classmethod
def make_seeded_random_loader_columns(cls):
return TestingDataSet.columns
def raw_expected_values(self, column, start_date, end_date):
"""
Get an array containing the raw values we expect to be produced for the
given dates between start_date and end_date, inclusive.
"""
all_values = self.seeded_random_loader.values(
column.dtype,
self.trading_days,
self._sids,
)
row_slice = self.trading_days.slice_indexer(start_date, end_date)
return all_values[row_slice]
def run_pipeline(self, pipeline, start_date, end_date, hooks=None):
"""
Run a pipeline with self.seeded_random_engine.
"""
return self.seeded_random_engine.run_pipeline(
pipeline,
start_date,
end_date,
hooks=hooks,
)
def run_chunked_pipeline(
self, pipeline, start_date, end_date, chunksize, hooks=None
):
"""
Run a chunked pipeline with self.seeded_random_engine.
"""
return self.seeded_random_engine.run_chunked_pipeline(
pipeline,
start_date,
end_date,
chunksize=chunksize,
hooks=hooks,
)
class WithDataPortal(
WithAdjustmentReader,
# Ordered so that bcolz minute reader is used first.
WithBcolzEquityMinuteBarReader,
WithBcolzFutureMinuteBarReader,
):
"""
ZiplineTestCase mixin providing self.data_portal as an instance level
fixture.
After init_instance_fixtures has been called, `self.data_portal` will be
populated with a new data portal created by passing in the class's
trading env, `cls.bcolz_equity_minute_bar_reader`,
`cls.bcolz_equity_daily_bar_reader`, and `cls.adjustment_reader`.
Attributes
----------
DATA_PORTAL_USE_DAILY_DATA : bool
Should the daily bar reader be used? Defaults to True.
DATA_PORTAL_USE_MINUTE_DATA : bool
Should the minute bar reader be used? Defaults to True.
DATA_PORTAL_USE_ADJUSTMENTS : bool
Should the adjustment reader be used? Defaults to True.
Methods
-------
make_data_portal() -> DataPortal
Method which returns the data portal to be used for each test case.
If this is overridden, the ``DATA_PORTAL_USE_*`` attributes may not
be respected.
"""
DATA_PORTAL_USE_DAILY_DATA = True
DATA_PORTAL_USE_MINUTE_DATA = True
DATA_PORTAL_USE_ADJUSTMENTS = True
DATA_PORTAL_FIRST_TRADING_DAY = None
DATA_PORTAL_LAST_AVAILABLE_SESSION = None
DATA_PORTAL_LAST_AVAILABLE_MINUTE = None
DATA_PORTAL_MINUTE_HISTORY_PREFETCH = DEFAULT_MINUTE_HISTORY_PREFETCH
DATA_PORTAL_DAILY_HISTORY_PREFETCH = DEFAULT_DAILY_HISTORY_PREFETCH
def make_data_portal(self):
if self.DATA_PORTAL_FIRST_TRADING_DAY is None:
if self.DATA_PORTAL_USE_MINUTE_DATA:
self.DATA_PORTAL_FIRST_TRADING_DAY = (
self.bcolz_equity_minute_bar_reader.first_trading_day
)
elif self.DATA_PORTAL_USE_DAILY_DATA:
self.DATA_PORTAL_FIRST_TRADING_DAY = (
self.bcolz_equity_daily_bar_reader.first_trading_day
)
return DataPortal(
self.asset_finder,
self.trading_calendar,
first_trading_day=self.DATA_PORTAL_FIRST_TRADING_DAY,
equity_daily_reader=(
self.bcolz_equity_daily_bar_reader
if self.DATA_PORTAL_USE_DAILY_DATA
else None
),
equity_minute_reader=(
self.bcolz_equity_minute_bar_reader
if self.DATA_PORTAL_USE_MINUTE_DATA
else None
),
adjustment_reader=(
self.adjustment_reader if self.DATA_PORTAL_USE_ADJUSTMENTS else None
),
future_minute_reader=(
self.bcolz_future_minute_bar_reader
if self.DATA_PORTAL_USE_MINUTE_DATA
else None
),
future_daily_reader=(
MinuteResampleSessionBarReader(
self.bcolz_future_minute_bar_reader.trading_calendar,
self.bcolz_future_minute_bar_reader,
)
if self.DATA_PORTAL_USE_MINUTE_DATA
else None
),
last_available_session=self.DATA_PORTAL_LAST_AVAILABLE_SESSION,
last_available_minute=self.DATA_PORTAL_LAST_AVAILABLE_MINUTE,
minute_history_prefetch_length=self.DATA_PORTAL_MINUTE_HISTORY_PREFETCH,
daily_history_prefetch_length=self.DATA_PORTAL_DAILY_HISTORY_PREFETCH,
)
def init_instance_fixtures(self):
super(WithDataPortal, self).init_instance_fixtures()
self.data_portal = self.make_data_portal()
class WithResponses:
"""
ZiplineTestCase mixin that provides self.responses as an instance
fixture.
After init_instance_fixtures has been called, `self.responses` will be
a new `responses.RequestsMock` object. Users may add new endpoints to this
with the `self.responses.add` method.
"""
def init_instance_fixtures(self):
super(WithResponses, self).init_instance_fixtures()
self.responses = self.enter_instance_context(
responses.RequestsMock(),
)
class WithCreateBarData(WithDataPortal):
CREATE_BARDATA_DATA_FREQUENCY = "minute"
def create_bardata(self, simulation_dt_func, restrictions=None):
return BarData(
self.data_portal,
simulation_dt_func,
self.CREATE_BARDATA_DATA_FREQUENCY,
self.trading_calendar,
restrictions or NoRestrictions(),
)
class WithMakeAlgo(WithBenchmarkReturns, WithSimParams, WithLogger, WithDataPortal):
"""
ZiplineTestCase mixin that provides a ``make_algo`` method.
"""
START_DATE = pd.Timestamp("2014-12-29", tz="UTC")
END_DATE = pd.Timestamp("2015-1-05", tz="UTC")
SIM_PARAMS_DATA_FREQUENCY = "minute"
DEFAULT_ALGORITHM_CLASS = TradingAlgorithm
@classproperty
def BENCHMARK_SID(cls):
"""The sid to use as a benchmark.
Can be overridden to use an alternative benchmark.
"""
return cls.asset_finder.sids[0]
def merge_with_inherited_algo_kwargs(
self, overriding_type, suite_overrides, method_overrides
):
"""
Helper for subclasses overriding ``make_algo_kwargs``.
A common pattern for tests using `WithMakeAlgo` is that a
particular test suite has a set of default keywords it wants to use
everywhere, but also accepts test-specific overrides.
Test suites that fit that pattern can call this method and pass the
test class, suite-specific overrides, and method-specific overrides,
and this method takes care of fetching parent class overrides and
merging them with the suite- and instance-specific overrides.
Parameters
----------
overriding_type : type
The type from which we're being called. This is forwarded to
super().make_algo_kwargs()
suite_overrides : dict
Keywords which should take precedence over kwargs returned by
super(overriding_type, self).make_algo_kwargs(). These are
generally keyword arguments that are constant within a test suite.
method_overrides : dict
Keywords which should take precedence over `suite_overrides` and
superclass kwargs. These are generally keyword arguments that are
overridden on a per-test basis.
"""
# NOTE: This is a weird invocation of super().
# Our goal here is to provide the behavior that the caller would get if
# they called super() in the normal way, so that we dispatch to the
# make_algo_kwargs() for the parent of the type that's calling
# into us. We achieve that goal by requiring the caller to tell us
# what type they're calling us from.
return super(overriding_type, self).make_algo_kwargs(
**merge(suite_overrides, method_overrides)
)
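# Illustrative sketch (added note, not original source) of the pattern
# described above. The class name and ``suite_handle_data`` attribute are
# hypothetical.
#
#     class ExampleAlgoSuite(WithMakeAlgo, ZiplineTestCase):
#         def make_algo_kwargs(self, **overrides):
#             return self.merge_with_inherited_algo_kwargs(
#                 ExampleAlgoSuite,
#                 # Constant for every test in this (hypothetical) suite.
#                 suite_overrides={"handle_data": self.suite_handle_data},
#                 # Per-test keyword arguments win over everything else.
#                 method_overrides=overrides,
#             )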
def make_algo_kwargs(self, **overrides):
if self.BENCHMARK_SID is None:
overrides.setdefault("benchmark_returns", self.BENCHMARK_RETURNS)
return merge(
{
"sim_params": self.sim_params,
"data_portal": self.data_portal,
"benchmark_sid": self.BENCHMARK_SID,
},
overrides,
)
def make_algo(self, algo_class=None, **overrides):
if algo_class is None:
algo_class = self.DEFAULT_ALGORITHM_CLASS
return algo_class(**self.make_algo_kwargs(**overrides))
def run_algorithm(self, **overrides):
"""
Create and run a TradingAlgorithm in memory.
"""
return self.make_algo(**overrides).run()
register_calendar_alias("TEST", "NYSE")
class WithSeededRandomState:
RANDOM_SEED = np.array(list("lmao"), dtype="S1").view("i4").item()
def init_instance_fixtures(self):
super(WithSeededRandomState, self).init_instance_fixtures()
self.rand = np.random.RandomState(self.RANDOM_SEED)
class WithFXRates:
"""Fixture providing a factory for in-memory exchange rate data."""
# Start date for exchange rates data.
FX_RATES_START_DATE = alias("START_DATE")
# End date for exchange rates data.
FX_RATES_END_DATE = alias("END_DATE")
# Calendar to which exchange rates data is aligned.
FX_RATES_CALENDAR = "24/5"
# Currencies between which exchange rates can be calculated.
FX_RATES_CURRENCIES = ["USD", "CAD", "GBP", "EUR"]
# Kinds of rates for which exchange rate data is present.
FX_RATES_RATE_NAMES = ["mid"]
# Default chunk size used for fx artifact compression.
HDF5_FX_CHUNK_SIZE = 75
# Rate used by default for Pipeline API queries that don't specify a rate
# explicitly.
@classproperty
def FX_RATES_DEFAULT_RATE(cls):
return cls.FX_RATES_RATE_NAMES[0]
@classmethod
def init_class_fixtures(cls):
super(WithFXRates, cls).init_class_fixtures()
cal = get_calendar(cls.FX_RATES_CALENDAR)
cls.fx_rates_sessions = cal.sessions_in_range(
cls.FX_RATES_START_DATE,
cls.FX_RATES_END_DATE,
)
cls.fx_rates = cls.make_fx_rates(
cls.FX_RATES_RATE_NAMES,
cls.FX_RATES_CURRENCIES,
cls.fx_rates_sessions,
)
cls.in_memory_fx_rate_reader = InMemoryFXRateReader(
cls.fx_rates,
cls.FX_RATES_DEFAULT_RATE,
)
@classmethod
def make_fx_rates_from_reference(cls, reference):
"""
Helper method for implementing make_fx_rates.
Takes a (dates x currencies) DataFrame of "reference" values, which are
assumed to be the "true" value of each currency in some unknown
external currency. Computes the fx rate from A -> B by dividing the
reference value for A by the reference value for B.
Parameters
----------
reference : pd.DataFrame
DataFrame of "true" values for currencies.
Returns
-------
rates : dict[str, pd.DataFrame]
Map from quote currency to FX rates for that currency.
"""
out = {}
for quote in reference.columns:
out[quote] = reference.divide(reference[quote], axis=0)
return out
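# Worked illustration of the division above (added note): if on some session
# the reference values are USD=1.0 and EUR=2.0, then
# out["USD"]["EUR"] == 2.0 / 1.0 == 2.0 (one EUR is quoted at 2.0 USD), while
# out["EUR"]["USD"] == 1.0 / 2.0 == 0.5.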
@classmethod
def make_fx_rates(cls, rate_names, currencies, sessions):
rng = np.random.RandomState(42)
out = {}
for rate_name in rate_names:
cols = {}
for currency in currencies:
start, end = sorted(rng.uniform(0.5, 1.5, (2,)))
cols[currency] = np.linspace(start, end, len(sessions))
reference = pd.DataFrame(cols, index=sessions, columns=currencies)
out[rate_name] = cls.make_fx_rates_from_reference(reference)
return out
@classmethod
def write_h5_fx_rates(cls, path):
"""Write cls.fx_rates to disk with an HDF5FXRateWriter.
Returns an HDF5FXRateReader that reads from the written data.
"""
sessions = cls.fx_rates_sessions
# Write in-memory data to h5 file.
with h5py.File(path, "w") as h5_file:
writer = HDF5FXRateWriter(h5_file, cls.HDF5_FX_CHUNK_SIZE)
fx_data = (
(rate, quote, quote_frame.values)
for rate, rate_dict in cls.fx_rates.items()
for quote, quote_frame in rate_dict.items()
)
writer.write(
dts=sessions.values,
currencies=np.array(cls.FX_RATES_CURRENCIES, dtype=object),
data=fx_data,
)
h5_file = cls.enter_class_context(h5py.File(path, "r"))
return HDF5FXRateReader(
h5_file,
default_rate=cls.FX_RATES_DEFAULT_RATE,
)
@classmethod
def get_expected_fx_rate_scalar(cls, rate, quote, base, dt):
"""Get the expected FX rate for the given scalar coordinates."""
if base is None:
return np.nan
if rate == DEFAULT_FX_RATE:
rate = cls.FX_RATES_DEFAULT_RATE
col = cls.fx_rates[rate][quote][base]
if dt < col.index[0]:
return np.nan
# PERF: We call this function a lot in some suites, and get_loc is
# surprisingly expensive, so optimizing it has a meaningful impact on
# overall suite performance. See test_fast_get_loc_ffilled for
# assurance that this behaves the same as get_loc.
ix = fast_get_loc_ffilled(col.index.values, dt.asm8)
return col.values[ix]
@classmethod
def get_expected_fx_rates(cls, rate, quote, bases, dts):
"""Get an array of expected FX rates for the given indices."""
out = np.empty((len(dts), len(bases)), dtype="float64")
for i, dt in enumerate(dts):
for j, base in enumerate(bases):
out[i, j] = cls.get_expected_fx_rate_scalar(
rate,
quote,
base,
dt,
)
return out
@classmethod
def get_expected_fx_rates_columnar(cls, rate, quote, bases, dts):
assert len(bases) == len(dts)
rates = [
cls.get_expected_fx_rate_scalar(rate, quote, base, dt)
for base, dt in zip(bases, dts)
]
return np.array(rates, dtype="float64")
def fast_get_loc_ffilled(dts, dt):
"""
Equivalent to dts.get_loc(dt, method='ffill'), but with reasonable
microperformance.
"""
ix = dts.searchsorted(dt, side="right") - 1
if ix < 0:
raise KeyError(dt)
return ix | zipline-tej | /zipline_tej-0.0.48-cp38-cp38-win_amd64.whl/zipline/testing/fixtures.py | fixtures.py |
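# Worked illustration (added note): for dts holding datetime64[ns] values
# [2016-01-04, 2016-01-06], a query at 2016-01-05 falls between the entries;
# searchsorted(..., side="right") returns 1, so the function forward-fills to
# index 0 (the 2016-01-04 entry). A query earlier than the first entry yields
# ix == -1 and raises KeyError.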
from abc import ABCMeta, abstractmethod, abstractproperty
from contextlib import contextmanager
import gzip
from itertools import (
combinations,
count,
product,
)
import json
import operator
import os
from os.path import abspath, dirname, join, realpath
import shutil
import sys
import tempfile
from traceback import format_exception
from logbook import TestHandler
from mock import patch
from numpy.testing import assert_allclose, assert_array_equal
import pandas as pd
from sqlalchemy import create_engine
from testfixtures import TempDirectory
from toolz import concat, curry
from zipline.utils.calendar_utils import get_calendar
from zipline.assets import AssetFinder, AssetDBWriter
from zipline.assets.synthetic import make_simple_equity_info
from zipline.utils.compat import getargspec, wraps
from zipline.data.data_portal import DataPortal
from zipline.data.minute_bars import (
BcolzMinuteBarReader,
BcolzMinuteBarWriter,
US_EQUITIES_MINUTES_PER_DAY,
)
from zipline.data.bcolz_daily_bars import (
BcolzDailyBarReader,
BcolzDailyBarWriter,
)
from zipline.finance.blotter import SimulationBlotter
from zipline.finance.order import ORDER_STATUS
from zipline.lib.labelarray import LabelArray
from zipline.pipeline.data import EquityPricing
from zipline.pipeline.domain import EquitySessionDomain
from zipline.pipeline.engine import SimplePipelineEngine
from zipline.pipeline.factors import CustomFactor
from zipline.pipeline.loaders.testing import make_seeded_random_loader
from zipline.utils import security_list
from zipline.utils.input_validation import expect_dimensions
from zipline.utils.numpy_utils import as_column, isnat
from zipline.utils.pandas_utils import timedelta_to_integral_seconds
from zipline.utils.sentinel import sentinel
import numpy as np
from numpy import float64
EPOCH = pd.Timestamp(0, tz="UTC")
def seconds_to_timestamp(seconds):
return pd.Timestamp(seconds, unit="s", tz="UTC")
def to_utc(time_str):
"""Convert a string in US/Eastern time to UTC"""
return pd.Timestamp(time_str, tz="US/Eastern").tz_convert("UTC")
def str_to_seconds(s):
"""
Convert a pandas-intelligible string to (integer) seconds since UTC.
>>> from pandas import Timestamp
>>> (Timestamp('2014-01-01') - Timestamp(0)).total_seconds()
1388534400.0
>>> str_to_seconds('2014-01-01')
1388534400
"""
return timedelta_to_integral_seconds(pd.Timestamp(s, tz="UTC") - EPOCH)
def drain_zipline(test, zipline):
output = []
transaction_count = 0
msg_counter = 0
# start the simulation
for update in zipline:
msg_counter += 1
output.append(update)
if "daily_perf" in update:
transaction_count += len(update["daily_perf"]["transactions"])
return output, transaction_count
def check_algo_results(
test,
results,
expected_transactions_count=None,
expected_order_count=None,
expected_positions_count=None,
sid=None,
):
if expected_transactions_count is not None:
txns = flatten_list(results["transactions"])
test.assertEqual(expected_transactions_count, len(txns))
if expected_positions_count is not None:
raise NotImplementedError
if expected_order_count is not None:
# de-dup orders on id, because orders are put back into perf packets
# whenever a txn is filled.
orders = set([order["id"] for order in flatten_list(results["orders"])])
test.assertEqual(expected_order_count, len(orders))
def flatten_list(list):
return [item for sublist in list for item in sublist]
def assert_single_position(test, zipline):
output, transaction_count = drain_zipline(test, zipline)
if "expected_transactions" in test.zipline_test_config:
test.assertEqual(
test.zipline_test_config["expected_transactions"], transaction_count
)
else:
test.assertEqual(test.zipline_test_config["order_count"], transaction_count)
# the final message is the risk report, the second to
# last is the final day's results. Positions is a list of
# dicts.
closing_positions = output[-2]["daily_perf"]["positions"]
# confirm that all orders were filled.
# iterate over the output updates, overwriting
# orders when they are updated. Then check the status on all.
orders_by_id = {}
for update in output:
if "daily_perf" in update:
if "orders" in update["daily_perf"]:
for order in update["daily_perf"]["orders"]:
orders_by_id[order["id"]] = order
for order in orders_by_id.values():
test.assertEqual(order["status"], ORDER_STATUS.FILLED, "")
test.assertEqual(len(closing_positions), 1, "Portfolio should have one position.")
sid = test.zipline_test_config["sid"]
test.assertEqual(
closing_positions[0]["sid"],
sid,
"Portfolio should have one position in " + str(sid),
)
return output, transaction_count
@contextmanager
def security_list_copy():
old_dir = security_list.SECURITY_LISTS_DIR
new_dir = tempfile.mkdtemp()
try:
for subdir in os.listdir(old_dir):
shutil.copytree(
os.path.join(old_dir, subdir), os.path.join(new_dir, subdir)
)
with patch.object(
security_list, "SECURITY_LISTS_DIR", new_dir
), patch.object(security_list, "using_copy", True, create=True):
yield
finally:
shutil.rmtree(new_dir, True)
def add_security_data(adds, deletes):
if not hasattr(security_list, "using_copy"):
raise Exception(
"add_security_data must be used within " "security_list_copy context"
)
directory = os.path.join(
security_list.SECURITY_LISTS_DIR, "leveraged_etf_list/20150127/20150125"
)
if not os.path.exists(directory):
os.makedirs(directory)
del_path = os.path.join(directory, "delete")
with open(del_path, "w") as f:
for sym in deletes:
f.write(sym)
f.write("\n")
add_path = os.path.join(directory, "add")
with open(add_path, "w") as f:
for sym in adds:
f.write(sym)
f.write("\n")
def all_pairs_matching_predicate(values, pred):
"""
Return an iterator of all pairs, (v0, v1) from values such that
`pred(v0, v1) == True`
Parameters
----------
values : iterable
pred : function
Returns
-------
pairs_iterator : generator
Generator yielding pairs matching `pred`.
Examples
--------
>>> from zipline.testing import all_pairs_matching_predicate
>>> from operator import eq, lt
>>> list(all_pairs_matching_predicate(range(5), eq))
[(0, 0), (1, 1), (2, 2), (3, 3), (4, 4)]
>>> list(all_pairs_matching_predicate("abcd", lt))
[('a', 'b'), ('a', 'c'), ('a', 'd'), ('b', 'c'), ('b', 'd'), ('c', 'd')]
"""
return filter(lambda pair: pred(*pair), product(values, repeat=2))
def product_upper_triangle(values, include_diagonal=False):
"""
Return an iterator over pairs, (v0, v1), drawn from values.
If `include_diagonal` is True, returns all pairs such that v0 <= v1.
If `include_diagonal` is False, returns all pairs such that v0 < v1.
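Examples
--------
>>> list(product_upper_triangle(range(3)))
[(0, 1), (0, 2), (1, 2)]
>>> list(product_upper_triangle(range(3), include_diagonal=True))
[(0, 0), (0, 1), (0, 2), (1, 1), (1, 2), (2, 2)]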
"""
return all_pairs_matching_predicate(
values,
operator.le if include_diagonal else operator.lt,
)
def all_subindices(index):
"""
Return all valid sub-indices of a pandas Index.
"""
return (
index[start:stop]
for start, stop in product_upper_triangle(range(len(index) + 1))
)
def make_trade_data_for_asset_info(
dates,
asset_info,
price_start,
price_step_by_date,
price_step_by_sid,
volume_start,
volume_step_by_date,
volume_step_by_sid,
):
"""
Convert the asset info dataframe into a dataframe of trade data for each
sid. Prices and volumes are zeroed out for locations where assets did not
exist. Return a dict of the dataframes, keyed by sid.
"""
trade_data = {}
sids = asset_info.index
price_sid_deltas = np.arange(len(sids), dtype=float64) * price_step_by_sid
price_date_deltas = np.arange(len(dates), dtype=float64) * price_step_by_date
prices = (price_sid_deltas + as_column(price_date_deltas)) + price_start
volume_sid_deltas = np.arange(len(sids)) * volume_step_by_sid
volume_date_deltas = np.arange(len(dates)) * volume_step_by_date
volumes = volume_sid_deltas + as_column(volume_date_deltas) + volume_start
for j, sid in enumerate(sids):
start_date, end_date = asset_info.loc[sid, ["start_date", "end_date"]]
# Normalize here so that we still generate non-NaN values on the minutes
# for an asset's last trading day.
for i, date in enumerate(dates.normalize()):
if not (start_date <= date <= end_date):
prices[i, j] = 0
volumes[i, j] = 0
df = pd.DataFrame(
{
"open": prices[:, j],
"high": prices[:, j],
"low": prices[:, j],
"close": prices[:, j],
"volume": volumes[:, j],
},
index=dates,
)
trade_data[sid] = df
return trade_data
def check_allclose(actual, desired, rtol=1e-07, atol=0, err_msg="", verbose=True):
"""
Wrapper around np.testing.assert_allclose that also verifies that inputs
are ndarrays.
See Also
--------
np.testing.assert_allclose
"""
if type(actual) != type(desired):
raise AssertionError("%s != %s" % (type(actual), type(desired)))
return assert_allclose(
actual,
desired,
atol=atol,
rtol=rtol,
err_msg=err_msg,
verbose=verbose,
)
def check_arrays(x, y, err_msg="", verbose=True, check_dtypes=True):
"""
Wrapper around np.testing.assert_array_equal that also verifies that inputs
are ndarrays.
See Also
--------
np.testing.assert_array_equal
"""
assert type(x) == type(y), "{x} != {y}".format(x=type(x), y=type(y))
assert x.dtype == y.dtype, "{x.dtype} != {y.dtype}".format(x=x, y=y)
if isinstance(x, LabelArray):
# Check that both arrays have missing values in the same locations...
assert_array_equal(
x.is_missing(),
y.is_missing(),
err_msg=err_msg,
verbose=verbose,
)
# ...then check the actual values as well.
x = x.as_string_array()
y = y.as_string_array()
elif x.dtype.kind in "mM":
x_isnat = isnat(x)
y_isnat = isnat(y)
assert_array_equal(
x_isnat,
y_isnat,
err_msg="NaTs not equal",
verbose=verbose,
)
# Fill NaTs with zero for comparison.
x = np.where(x_isnat, np.zeros_like(x), x)
y = np.where(y_isnat, np.zeros_like(y), y)
return assert_array_equal(x, y, err_msg=err_msg, verbose=verbose)
class UnexpectedAttributeAccess(Exception):
pass
class ExplodingObject(object):
"""
Object that will raise an exception on any attribute access.
Useful for verifying that an object is never touched during a
function/method call.
"""
def __getattribute__(self, name):
raise UnexpectedAttributeAccess(name)
def write_minute_data(trading_calendar, tempdir, minutes, sids):
first_session = trading_calendar.minute_to_session_label(
minutes[0], direction="none"
)
last_session = trading_calendar.minute_to_session_label(
minutes[-1], direction="none"
)
sessions = trading_calendar.sessions_in_range(first_session, last_session)
write_bcolz_minute_data(
trading_calendar,
sessions,
tempdir.path,
create_minute_bar_data(minutes, sids),
)
return tempdir.path
def create_minute_bar_data(minutes, sids):
length = len(minutes)
for sid_idx, sid in enumerate(sids):
yield sid, pd.DataFrame(
{
"open": np.arange(length) + 10 + sid_idx,
"high": np.arange(length) + 15 + sid_idx,
"low": np.arange(length) + 8 + sid_idx,
"close": np.arange(length) + 10 + sid_idx,
"volume": 100 + sid_idx,
},
index=minutes,
)
def create_daily_bar_data(sessions, sids):
length = len(sessions)
for sid_idx, sid in enumerate(sids):
yield sid, pd.DataFrame(
{
"open": (np.array(range(10, 10 + length)) + sid_idx),
"high": (np.array(range(15, 15 + length)) + sid_idx),
"low": (np.array(range(8, 8 + length)) + sid_idx),
"close": (np.array(range(10, 10 + length)) + sid_idx),
"volume": np.array(range(100, 100 + length)) + sid_idx,
"day": [session.value for session in sessions],
},
index=sessions,
)
def write_daily_data(tempdir, sim_params, sids, trading_calendar):
path = os.path.join(tempdir.path, "testdaily.bcolz")
BcolzDailyBarWriter(
path, trading_calendar, sim_params.start_session, sim_params.end_session
).write(
create_daily_bar_data(sim_params.sessions, sids),
)
return path
def create_data_portal(
asset_finder,
tempdir,
sim_params,
sids,
trading_calendar,
adjustment_reader=None,
):
if sim_params.data_frequency == "daily":
daily_path = write_daily_data(tempdir, sim_params, sids, trading_calendar)
equity_daily_reader = BcolzDailyBarReader(daily_path)
return DataPortal(
asset_finder,
trading_calendar,
first_trading_day=equity_daily_reader.first_trading_day,
equity_daily_reader=equity_daily_reader,
adjustment_reader=adjustment_reader,
)
else:
minutes = trading_calendar.minutes_in_range(
sim_params.first_open, sim_params.last_close
)
minute_path = write_minute_data(trading_calendar, tempdir, minutes, sids)
equity_minute_reader = BcolzMinuteBarReader(minute_path)
return DataPortal(
asset_finder,
trading_calendar,
first_trading_day=equity_minute_reader.first_trading_day,
equity_minute_reader=equity_minute_reader,
adjustment_reader=adjustment_reader,
)
def write_bcolz_minute_data(trading_calendar, days, path, data):
BcolzMinuteBarWriter(
path, trading_calendar, days[0], days[-1], US_EQUITIES_MINUTES_PER_DAY
).write(data)
def create_minute_df_for_asset(
trading_calendar,
start_dt,
end_dt,
interval=1,
start_val=1,
minute_blacklist=None,
):
asset_minutes = trading_calendar.minutes_for_sessions_in_range(start_dt, end_dt)
minutes_count = len(asset_minutes)
if interval > 1:
minutes_arr = np.zeros(minutes_count)
minutes_arr[interval - 1 :: interval] = np.arange(
start_val + interval - 1, start_val + minutes_count, interval
)
else:
minutes_arr = np.arange(start_val, start_val + minutes_count)
open_ = minutes_arr.copy()
open_[interval - 1 :: interval] += 1
high = minutes_arr.copy()
high[interval - 1 :: interval] += 2
low = minutes_arr.copy()
low[interval - 1 :: interval] -= 1
df = pd.DataFrame(
{
"open": open_,
"high": high,
"low": low,
"close": minutes_arr,
"volume": 100 * minutes_arr,
},
index=asset_minutes,
)
if minute_blacklist is not None:
for minute in minute_blacklist:
df.loc[minute] = 0
return df
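# Worked illustration (added note): with interval=2, start_val=1 and four
# asset minutes, the generated "close" column is [0, 2, 0, 4] -- only every
# second minute carries a value -- and "volume" is 100x that, [0, 200, 0, 400].
# The open/high/low columns are offset from "close" by +1/+2/-1 on those same
# minutes.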
def create_daily_df_for_asset(trading_calendar, start_day, end_day, interval=1):
days = trading_calendar.sessions_in_range(start_day, end_day)
days_count = len(days)
days_arr = np.arange(days_count) + 2
df = pd.DataFrame(
{
"open": days_arr + 1,
"high": days_arr + 2,
"low": days_arr - 1,
"close": days_arr,
"volume": days_arr * 100,
},
index=days,
)
if interval > 1:
# only keep every 'interval' rows
for idx, _ in enumerate(days_arr):
if (idx + 1) % interval != 0:
df["open"].iloc[idx] = 0
df["high"].iloc[idx] = 0
df["low"].iloc[idx] = 0
df["close"].iloc[idx] = 0
df["volume"].iloc[idx] = 0
return df
def trades_by_sid_to_dfs(trades_by_sid, index):
for sidint, trades in trades_by_sid.items():
opens = []
highs = []
lows = []
closes = []
volumes = []
for trade in trades:
opens.append(trade.open_price)
highs.append(trade.high)
lows.append(trade.low)
closes.append(trade.close_price)
volumes.append(trade.volume)
yield sidint, pd.DataFrame(
{
"open": opens,
"high": highs,
"low": lows,
"close": closes,
"volume": volumes,
},
index=index,
)
def create_data_portal_from_trade_history(
asset_finder, trading_calendar, tempdir, sim_params, trades_by_sid
):
if sim_params.data_frequency == "daily":
path = os.path.join(tempdir.path, "testdaily.bcolz")
writer = BcolzDailyBarWriter(
path,
trading_calendar,
sim_params.start_session,
sim_params.end_session,
)
writer.write(
trades_by_sid_to_dfs(trades_by_sid, sim_params.sessions),
)
equity_daily_reader = BcolzDailyBarReader(path)
return DataPortal(
asset_finder,
trading_calendar,
first_trading_day=equity_daily_reader.first_trading_day,
equity_daily_reader=equity_daily_reader,
)
else:
minutes = trading_calendar.minutes_in_range(
sim_params.first_open, sim_params.last_close
)
length = len(minutes)
assets = {}
for sidint, trades in trades_by_sid.items():
opens = np.zeros(length)
highs = np.zeros(length)
lows = np.zeros(length)
closes = np.zeros(length)
volumes = np.zeros(length)
for trade in trades:
# put them in the right place
idx = minutes.searchsorted(trade.dt)
opens[idx] = trade.open_price * 1000
highs[idx] = trade.high * 1000
lows[idx] = trade.low * 1000
closes[idx] = trade.close_price * 1000
volumes[idx] = trade.volume
assets[sidint] = pd.DataFrame(
{
"open": opens,
"high": highs,
"low": lows,
"close": closes,
"volume": volumes,
"dt": minutes,
}
).set_index("dt")
write_bcolz_minute_data(
trading_calendar, sim_params.sessions, tempdir.path, assets
)
equity_minute_reader = BcolzMinuteBarReader(tempdir.path)
return DataPortal(
asset_finder,
trading_calendar,
first_trading_day=equity_minute_reader.first_trading_day,
equity_minute_reader=equity_minute_reader,
)
class FakeDataPortal(DataPortal):
def __init__(self, asset_finder, trading_calendar=None, first_trading_day=None):
if trading_calendar is None:
trading_calendar = get_calendar("NYSE")
super(FakeDataPortal, self).__init__(
asset_finder, trading_calendar, first_trading_day
)
def get_spot_value(self, asset, field, dt, data_frequency):
if field == "volume":
return 100
else:
return 1.0
def get_scalar_asset_spot_value(self, asset, field, dt, data_frequency):
if field == "volume":
return 100
else:
return 1.0
def get_history_window(
self,
assets,
end_dt,
bar_count,
frequency,
field,
data_frequency,
ffill=True,
):
end_idx = self.trading_calendar.all_sessions.searchsorted(end_dt)
days = self.trading_calendar.all_sessions[
(end_idx - bar_count + 1) : (end_idx + 1)
]
df = pd.DataFrame(
np.full((bar_count, len(assets)), 100.0), index=days, columns=assets
)
if frequency == "1m" and not df.empty:
df = df.reindex(
self.trading_calendar.minutes_for_sessions_in_range(
df.index[0],
df.index[-1],
),
method="ffill",
)
return df
class FetcherDataPortal(DataPortal):
"""
Mock dataportal that returns fake data for history and non-fetcher
spot value.
"""
def __init__(self, asset_finder, trading_calendar, first_trading_day=None):
super(FetcherDataPortal, self).__init__(
asset_finder, trading_calendar, first_trading_day
)
def get_spot_value(self, asset, field, dt, data_frequency):
# if this is a fetcher field, exercise the regular code path
if self._is_extra_source(asset, field, self._augmented_sources_map):
return super(FetcherDataPortal, self).get_spot_value(
asset, field, dt, data_frequency
)
# otherwise just return a fixed value
return int(asset)
# XXX: These aren't actually the methods that are used by the superclasses,
# so these don't do anything, and this class will likely produce unexpected
# results for history().
def _get_daily_window_for_sid(self, asset, field, days_in_window, extra_slot=True):
return np.arange(days_in_window, dtype=np.float64)
def _get_minute_window_for_asset(self, asset, field, minutes_for_window):
return np.arange(minutes_for_window, dtype=np.float64)
class tmp_assets_db(object):
"""Create a temporary assets sqlite database.
This is meant to be used as a context manager.
Parameters
----------
url : string
The URL for the database connection.
**frames
The frames to pass to the AssetDBWriter.
By default this maps equities:
('A', 'B', 'C') -> map(ord, 'ABC')
See Also
--------
empty_assets_db
tmp_asset_finder
"""
_default_equities = sentinel("_default_equities")
def __init__(self, url="sqlite:///:memory:", equities=_default_equities, **frames):
self._url = url
self._eng = None
if equities is self._default_equities:
equities = make_simple_equity_info(
list(map(ord, "ABC")),
pd.Timestamp(0),
pd.Timestamp("2015"),
)
frames["equities"] = equities
self._frames = frames
self._eng = None # set in enter and exit
def __enter__(self):
self._eng = eng = create_engine(self._url)
AssetDBWriter(eng).write(**self._frames)
return eng
def __exit__(self, *excinfo):
assert self._eng is not None, "_eng was not set in __enter__"
self._eng.dispose()
self._eng = None
def empty_assets_db():
"""Context manager for creating an empty assets db.
See Also
--------
tmp_assets_db
"""
return tmp_assets_db(equities=None)
class tmp_asset_finder(tmp_assets_db):
"""Create a temporary asset finder using an in memory sqlite db.
Parameters
----------
url : string
The URL for the database connection.
finder_cls : type, optional
The type of asset finder to create from the assets db.
**frames
Forwarded to ``tmp_assets_db``.
See Also
--------
tmp_assets_db
"""
def __init__(
self,
url="sqlite:///:memory:",
finder_cls=AssetFinder,
future_chain_predicates=None,
**frames,
):
self._finder_cls = finder_cls
self._future_chain_predicates = future_chain_predicates
super(tmp_asset_finder, self).__init__(url=url, **frames)
def __enter__(self):
return self._finder_cls(
super(tmp_asset_finder, self).__enter__(),
future_chain_predicates=self._future_chain_predicates,
)
def empty_asset_finder():
"""Context manager for creating an empty asset finder.
See Also
--------
empty_assets_db
tmp_assets_db
tmp_asset_finder
"""
return tmp_asset_finder(equities=None)
class SubTestFailures(AssertionError):
def __init__(self, *failures):
self.failures = failures
@staticmethod
def _format_exc(exc_info):
# we need to do this weird join-split-join to ensure that the full
# message is indented by 4 spaces
return "\n ".join("".join(format_exception(*exc_info)).splitlines())
def __str__(self):
return "failures:\n %s" % "\n ".join(
"\n ".join(
(
", ".join("%s=%r" % item for item in scope.items()),
self._format_exc(exc_info),
)
)
for scope, exc_info in self.failures
)
# @nottest
def subtest(iterator, *_names):
"""
Construct a subtest in a unittest.
Consider using ``zipline.testing.parameter_space`` when subtests
are constructed over a single input or over the cross-product of multiple
inputs.
``subtest`` works by decorating a function as a subtest. The decorated
function will be run by iterating over the ``iterator`` and *unpacking the
    values into the function. If any of the runs fail, the failure will be
    recorded and the rest of the tests will be run. Finally, if any failed,
all of the results will be dumped as one failure.
Parameters
----------
iterator : iterable[iterable]
The iterator of arguments to pass to the function.
    *_names : iterator[str]
The names to use for each element of ``iterator``. These will be used
to print the scope when a test fails. If not provided, it will use the
integer index of the value as the name.
Examples
--------
::
class MyTest(TestCase):
def test_thing(self):
# Example usage inside another test.
@subtest(([n] for n in range(100000)), 'n')
def subtest(n):
self.assertEqual(n % 2, 0, 'n was not even')
subtest()
@subtest(([n] for n in range(100000)), 'n')
def test_decorated_function(self, n):
# Example usage to parameterize an entire function.
self.assertEqual(n % 2, 1, 'n was not odd')
Notes
-----
We use this when we:
* Will never want to run each parameter individually.
* Have a large parameter space we are testing
(see tests/utils/test_events.py).
``nose_parameterized.expand`` will create a test for each parameter
combination which bloats the test output and makes the travis pages slow.
We cannot use ``unittest2.TestCase.subTest`` because nose, pytest, and
nose2 do not support ``addSubTest``.
See Also
--------
zipline.testing.parameter_space
"""
def dec(f):
@wraps(f)
def wrapped(*args, **kwargs):
names = _names
failures = []
for scope in iterator:
scope = tuple(scope)
try:
f(*args + scope, **kwargs)
except Exception:
info = sys.exc_info()
if not names:
names = count()
failures.append((dict(zip(names, scope)), info))
if failures:
raise SubTestFailures(*failures)
return wrapped
return dec
class MockDailyBarReader(object):
def __init__(self, dates):
self.sessions = pd.DatetimeIndex(dates)
def load_raw_arrays(self, columns, start, stop, sids):
dates = self.sessions
if start < dates[0]:
raise ValueError("start date is out of bounds for this reader")
if stop > dates[-1]:
raise ValueError("stop date is out of bounds for this reader")
output_dates = dates[(dates >= start) & (dates <= stop)]
return [np.full((len(output_dates), len(sids)), 100.0) for _ in columns]
def get_value(self, col, sid, dt):
return 100.0
def create_mock_adjustment_data(splits=None, dividends=None, mergers=None):
if splits is None:
splits = create_empty_splits_mergers_frame()
elif not isinstance(splits, pd.DataFrame):
splits = pd.DataFrame(splits)
if mergers is None:
mergers = create_empty_splits_mergers_frame()
elif not isinstance(mergers, pd.DataFrame):
mergers = pd.DataFrame(mergers)
if dividends is None:
dividends = create_empty_dividends_frame()
elif not isinstance(dividends, pd.DataFrame):
dividends = pd.DataFrame(dividends)
return splits, mergers, dividends
def assert_timestamp_equal(left, right, compare_nat_equal=True, msg=""):
"""
Assert that two pandas Timestamp objects are the same.
Parameters
----------
left, right : pd.Timestamp
The values to compare.
compare_nat_equal : bool, optional
Whether to consider `NaT` values equal. Defaults to True.
msg : str, optional
A message to forward to `pd.util.testing.assert_equal`.
"""
if compare_nat_equal and left is pd.NaT and right is pd.NaT:
return
return pd.util.testing.assert_equal(left, right, msg=msg)
def powerset(values):
"""
Return the power set (i.e., the set of all subsets) of entries in `values`.
"""
return concat(combinations(values, i) for i in range(len(values) + 1))
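# Illustrative example (editor's note): ``powerset`` yields every subset as a
# tuple, ordered from smallest to largest, e.g.
#
#     list(powerset([1, 2]))  # -> [(), (1,), (2,), (1, 2)]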
def to_series(knowledge_dates, earning_dates):
"""
    Helper for converting sequences of date strings to a Series of datetimes
    (earnings dates indexed by knowledge dates).
This is just for making the test cases more readable.
"""
return pd.Series(
index=pd.to_datetime(knowledge_dates),
data=pd.to_datetime(earning_dates),
)
def gen_calendars(start, stop, critical_dates):
"""
Generate calendars to use as inputs.
"""
all_dates = pd.date_range(start, stop, tz="utc")
for to_drop in map(list, powerset(critical_dates)):
# Have to yield tuples.
yield (all_dates.drop(to_drop),)
# Also test with the trading calendar.
trading_days = get_calendar("NYSE").all_days
yield (trading_days[trading_days.slice_indexer(start, stop)],)
@contextmanager
def temp_pipeline_engine(calendar, sids, random_seed, symbols=None):
"""
    A context manager that yields a SimplePipelineEngine holding a reference to
an AssetFinder generated via tmp_asset_finder.
Parameters
----------
calendar : pd.DatetimeIndex
Calendar to pass to the constructed PipelineEngine.
sids : iterable[int]
Sids to use for the temp asset finder.
random_seed : int
Integer used to seed instances of SeededRandomLoader.
symbols : iterable[str], optional
Symbols for constructed assets. Forwarded to make_simple_equity_info.
"""
equity_info = make_simple_equity_info(
sids=sids,
start_date=calendar[0],
end_date=calendar[-1],
symbols=symbols,
)
loader = make_seeded_random_loader(random_seed, calendar, sids)
def get_loader(column):
return loader
with tmp_asset_finder(equities=equity_info) as finder:
yield SimplePipelineEngine(get_loader, calendar, finder)
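# Illustrative usage (editor's sketch; ``calendar`` and ``pipeline`` are assumed
# to be defined by the caller):
#
#     with temp_pipeline_engine(calendar, sids=[1, 2, 3], random_seed=42) as engine:
#         result = engine.run_pipeline(pipeline, calendar[10], calendar[20])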
def bool_from_envvar(name, default=False, env=None):
"""
Get a boolean value from the environment, making a reasonable attempt to
convert "truthy" values to True and "falsey" values to False.
Strings are coerced to bools using ``json.loads(s.lower())``.
Parameters
----------
name : str
Name of the environment variable.
default : bool, optional
Value to use if the environment variable isn't set. Default is False
env : dict-like, optional
Mapping in which to look up ``name``. This is a parameter primarily for
testing purposes. Default is os.environ.
Returns
-------
value : bool
``env[name]`` coerced to a boolean, or ``default`` if ``name`` is not
in ``env``.
"""
if env is None:
env = os.environ
value = env.get(name)
if value is None:
return default
try:
# Try to parse as JSON. This makes strings like "0", "False", and
# "null" evaluate as falsey values.
value = json.loads(value.lower())
except ValueError:
# If the value can't be parsed as json, assume it should be treated as
# a string for the purpose of evaluation.
pass
return bool(value)
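# Illustrative behavior (editor's sketch; the ``env`` dicts are hypothetical):
#
#     bool_from_envvar("FLAG", env={"FLAG": "0"})      # -> False ("0" parses as 0)
#     bool_from_envvar("FLAG", env={"FLAG": "yes"})    # -> True (unparseable, non-empty string)
#     bool_from_envvar("FLAG", default=True, env={})   # -> True (missing, use default)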
_FAIL_FAST_DEFAULT = bool_from_envvar("PARAMETER_SPACE_FAIL_FAST")
def parameter_space(__fail_fast=_FAIL_FAST_DEFAULT, **params):
"""
Wrapper around subtest that allows passing keywords mapping names to
iterables of values.
The decorated test function will be called with the cross-product of all
possible inputs
Examples
--------
>>> from unittest import TestCase
>>> class SomeTestCase(TestCase):
... @parameter_space(x=[1, 2], y=[2, 3])
... def test_some_func(self, x, y):
... # Will be called with every possible combination of x and y.
... self.assertEqual(somefunc(x, y), expected_result(x, y))
See Also
--------
zipline.testing.subtest
"""
def decorator(f):
argspec = getargspec(f)
if argspec.varargs:
raise AssertionError("parameter_space() doesn't support *args")
if argspec.keywords:
raise AssertionError("parameter_space() doesn't support **kwargs")
if argspec.defaults:
raise AssertionError("parameter_space() doesn't support defaults.")
# Skip over implicit self.
argnames = argspec.args
if argnames[0] == "self":
argnames = argnames[1:]
extra = set(params) - set(argnames)
if extra:
raise AssertionError(
"Keywords %s supplied to parameter_space() are "
"not in function signature." % extra
)
unspecified = set(argnames) - set(params)
if unspecified:
raise AssertionError(
"Function arguments %s were not "
"supplied to parameter_space()." % unspecified
)
def make_param_sets():
return product(*(params[name] for name in argnames))
def clean_f(self, *args, **kwargs):
try:
f(self, *args, **kwargs)
finally:
self.tearDown()
self.setUp()
if __fail_fast:
@wraps(f)
def wrapped(self):
for args in make_param_sets():
clean_f(self, *args)
return wrapped
else:
@wraps(f)
def wrapped(*args, **kwargs):
subtest(make_param_sets(), *argnames)(clean_f)(*args, **kwargs)
return wrapped
return decorator
def create_empty_dividends_frame():
return pd.DataFrame(
np.array(
[],
dtype=[
("ex_date", "datetime64[ns]"),
("pay_date", "datetime64[ns]"),
("record_date", "datetime64[ns]"),
("declared_date", "datetime64[ns]"),
("amount", "float64"),
("sid", "int32"),
],
),
index=pd.DatetimeIndex([], tz="UTC"),
)
def create_empty_splits_mergers_frame():
return pd.DataFrame(
np.array(
[],
dtype=[
("effective_date", "int64"),
("ratio", "float64"),
("sid", "int64"),
],
),
index=pd.DatetimeIndex([]),
)
def make_alternating_boolean_array(shape, first_value=True):
"""
Create a 2D numpy array with the given shape containing alternating values
of False, True, False, True,... along each row and each column.
Examples
--------
>>> make_alternating_boolean_array((4,4))
array([[ True, False, True, False],
[False, True, False, True],
[ True, False, True, False],
[False, True, False, True]], dtype=bool)
>>> make_alternating_boolean_array((4,3), first_value=False)
array([[False, True, False],
[ True, False, True],
[False, True, False],
[ True, False, True]], dtype=bool)
"""
if len(shape) != 2:
raise ValueError(
"Shape must be 2-dimensional. Given shape was {}".format(shape)
)
alternating = np.empty(shape, dtype=bool)
for row in alternating:
row[::2] = first_value
row[1::2] = not (first_value)
first_value = not (first_value)
return alternating
def make_cascading_boolean_array(shape, first_value=True):
"""
Create a numpy array with the given shape containing cascading boolean
values, with `first_value` being the top-left value.
Examples
--------
>>> make_cascading_boolean_array((4,4))
array([[ True, True, True, False],
[ True, True, False, False],
[ True, False, False, False],
[False, False, False, False]], dtype=bool)
>>> make_cascading_boolean_array((4,2))
array([[ True, False],
[False, False],
[False, False],
[False, False]], dtype=bool)
>>> make_cascading_boolean_array((2,4))
array([[ True, True, True, False],
[ True, True, False, False]], dtype=bool)
"""
if len(shape) != 2:
raise ValueError(
"Shape must be 2-dimensional. Given shape was {}".format(shape)
)
cascading = np.full(shape, not (first_value), dtype=bool)
ending_col = shape[1] - 1
for row in cascading:
if ending_col > 0:
row[:ending_col] = first_value
ending_col -= 1
else:
break
return cascading
@expect_dimensions(array=2)
def permute_rows(seed, array):
"""
Shuffle each row in ``array`` based on permutations generated by ``seed``.
Parameters
----------
seed : int
Seed for numpy.RandomState
array : np.ndarray[ndim=2]
Array over which to apply permutations.
"""
rand = np.random.RandomState(seed)
return np.apply_along_axis(rand.permutation, 1, array)
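# Illustrative usage (editor's sketch): each row is shuffled independently with a
# RandomState seeded by ``seed``, so the result is reproducible, e.g.
#
#     shuffled = permute_rows(0, np.arange(6).reshape(2, 3))
#     # each row of ``shuffled`` is a permutation of the corresponding input row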
# @nottest
def make_test_handler(testcase, *args, **kwargs):
"""
Returns a TestHandler which will be used by the given testcase. This
handler can be used to test log messages.
Parameters
----------
testcase: unittest.TestCase
The test class in which the log handler will be used.
*args, **kwargs
Forwarded to the new TestHandler object.
Returns
-------
handler: logbook.TestHandler
The handler to use for the test case.
"""
handler = TestHandler(*args, **kwargs)
testcase.addCleanup(handler.close)
return handler
def write_compressed(path, content):
"""
Write a compressed (gzipped) file to `path`.
"""
with gzip.open(path, "wb") as f:
f.write(content)
def read_compressed(path):
"""
    Read a compressed (gzipped) file from `path`.
"""
with gzip.open(path, "rb") as f:
return f.read()
zipline_reloaded_git_root = abspath(
join(realpath(dirname(__file__)), "..", "..", ".."),
)
# @nottest
def test_resource_path(*path_parts):
return os.path.join(zipline_reloaded_git_root, "tests", "resources", *path_parts)
@contextmanager
def patch_os_environment(remove=None, **values):
"""
Context manager for patching the operating system environment.
"""
old_values = {}
remove = remove or []
for key in remove:
old_values[key] = os.environ.pop(key)
    for key, value in values.items():
old_values[key] = os.getenv(key)
os.environ[key] = value
try:
yield
finally:
        for old_key, old_value in old_values.items():
            if old_value is None:
                # Value was not present when we entered, so delete it if it's
                # still present.
                try:
                    del os.environ[old_key]
except KeyError:
pass
else:
# Restore the old value.
os.environ[old_key] = old_value
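# Illustrative usage (editor's sketch; the variable names are hypothetical and
# 'TO_DROP' is assumed to be currently set in os.environ):
#
#     with patch_os_environment(remove=['TO_DROP'], ZIPLINE_ROOT='/tmp/zl'):
#         ...  # os.environ['ZIPLINE_ROOT'] == '/tmp/zl' and 'TO_DROP' is unset here
#     # on exit, the previous values (or absence) of both keys are restored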
class tmp_dir(TempDirectory, object):
"""New style class that wrapper for TempDirectory in python 2."""
pass
class _TmpBarReader(tmp_dir, metaclass=ABCMeta):
"""A helper for tmp_bcolz_equity_minute_bar_reader and
tmp_bcolz_equity_daily_bar_reader.
Parameters
    ----------
    cal : TradingCalendar
        The trading calendar for which we're writing data.
    days : pd.DatetimeIndex
The days to write for.
data : dict[int -> pd.DataFrame]
The data to write.
path : str, optional
The path to the directory to write the data into. If not given, this
will be a unique name.
"""
@abstractproperty
def _reader_cls(self):
raise NotImplementedError("_reader")
@abstractmethod
def _write(self, cal, days, path, data):
raise NotImplementedError("_write")
def __init__(self, cal, days, data, path=None):
super(_TmpBarReader, self).__init__(path=path)
self._cal = cal
self._days = days
self._data = data
def __enter__(self):
tmpdir = super(_TmpBarReader, self).__enter__()
try:
self._write(
self._cal,
self._days,
tmpdir.path,
self._data,
)
return self._reader_cls(tmpdir.path)
except BaseException: # Clean up even on KeyboardInterrupt
self.__exit__(None, None, None)
raise
class tmp_bcolz_equity_minute_bar_reader(_TmpBarReader):
"""A temporary BcolzMinuteBarReader object.
Parameters
----------
cal : TradingCalendar
The trading calendar for which we're writing data.
days : pd.DatetimeIndex
The days to write for.
data : iterable[(int, pd.DataFrame)]
The data to write.
path : str, optional
The path to the directory to write the data into. If not given, this
will be a unique name.
See Also
--------
tmp_bcolz_equity_daily_bar_reader
"""
_reader_cls = BcolzMinuteBarReader
_write = staticmethod(write_bcolz_minute_data)
class tmp_bcolz_equity_daily_bar_reader(_TmpBarReader):
"""A temporary BcolzDailyBarReader object.
Parameters
----------
cal : TradingCalendar
The trading calendar for which we're writing data.
days : pd.DatetimeIndex
The days to write for.
data : dict[int -> pd.DataFrame]
The data to write.
path : str, optional
The path to the directory to write the data into. If not given, this
will be a unique name.
See Also
--------
    tmp_bcolz_equity_minute_bar_reader
"""
_reader_cls = BcolzDailyBarReader
@staticmethod
def _write(cal, days, path, data):
BcolzDailyBarWriter(path, days).write(data)
@contextmanager
def patch_read_csv(url_map, module=pd, strict=False):
"""Patch pandas.read_csv to map lookups from url to another.
Parameters
----------
url_map : mapping[str or file-like object -> str or file-like object]
The mapping to use to redirect read_csv calls.
module : module, optional
The module to patch ``read_csv`` on. By default this is ``pandas``.
This should be set to another module if ``read_csv`` is early-bound
like ``from pandas import read_csv`` instead of late-bound like:
``import pandas as pd; pd.read_csv``.
strict : bool, optional
If true, then this will assert that ``read_csv`` is only called with
elements in the ``url_map``.
"""
read_csv = pd.read_csv
def patched_read_csv(filepath_or_buffer, *args, **kwargs):
if filepath_or_buffer in url_map:
return read_csv(url_map[filepath_or_buffer], *args, **kwargs)
elif not strict:
return read_csv(filepath_or_buffer, *args, **kwargs)
else:
raise AssertionError(
"attempted to call read_csv on %r which not in the url map"
% filepath_or_buffer,
)
with patch.object(module, "read_csv", patched_read_csv):
yield
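# Illustrative usage (editor's sketch; the URL and local file name are hypothetical):
#
#     with patch_read_csv({'https://example.com/data.csv': test_resource_path('data.csv')}):
#         frame = pd.read_csv('https://example.com/data.csv')  # actually reads the local file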
@curry
def ensure_doctest(f, name=None):
"""Ensure that an object gets doctested. This is useful for instances
of objects like curry or partial which are not discovered by default.
Parameters
----------
f : any
The thing to doctest.
name : str, optional
The name to use in the doctest function mapping. If this is None,
Then ``f.__name__`` will be used.
Returns
-------
f : any
``f`` unchanged.
"""
sys._getframe(2).f_globals.setdefault("__test__", {})[
f.__name__ if name is None else name
] = f
return f
class RecordBatchBlotter(SimulationBlotter):
"""Blotter that tracks how its batch_order method was called."""
def __init__(self):
super(RecordBatchBlotter, self).__init__()
self.order_batch_called = []
def batch_order(self, *args, **kwargs):
self.order_batch_called.append((args, kwargs))
return super(RecordBatchBlotter, self).batch_order(*args, **kwargs)
class AssetID(CustomFactor):
"""
CustomFactor that returns the AssetID of each asset.
Useful for providing a Factor that produces a different value for each
asset.
"""
window_length = 1
inputs = ()
def compute(self, today, assets, out):
out[:] = assets
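# Illustrative usage (editor's sketch): attaching AssetID to a Pipeline gives every
# asset a distinct, predictable output value. ``Pipeline`` is assumed to be imported
# from zipline.pipeline by the caller.
#
#     pipe = Pipeline(columns={'asset_id': AssetID()})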
class AssetIDPlusDay(CustomFactor):
window_length = 1
inputs = ()
def compute(self, today, assets, out):
out[:] = assets + today.day
class OpenPrice(CustomFactor):
window_length = 1
inputs = [EquityPricing.open]
def compute(self, today, assets, out, open):
out[:] = open
def prices_generating_returns(returns, starting_price):
"""Construct the time series of prices that produce the given returns.
Parameters
----------
returns : np.ndarray[float]
The returns that these prices generate.
starting_price : float
        The initial price of the asset; this will be the first element of the
        returned series.
Returns
-------
    prices : np.ndarray[float]
The prices that generate the given returns. This array will be one
element longer than ``returns`` and ``prices[0] == starting_price``.
"""
raw_prices = starting_price * (1 + np.append([0], returns)).cumprod()
rounded_prices = raw_prices.round(3)
if not np.allclose(raw_prices, rounded_prices):
raise ValueError(
"Prices only have 3 decimal places of precision. There is no valid"
" price series that generate these returns.",
)
return rounded_prices
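# Illustrative example (editor's note):
#
#     prices_generating_returns(np.array([0.1, -0.05]), 10.0)
#     # -> array([10.0, 11.0, 10.45])  (10.0 * cumprod of [1, 1.1, 0.95])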
def random_tick_prices(
starting_price, count, tick_size=0.01, tick_range=(-5, 7), seed=42
):
"""
Construct a time series of prices that ticks by a random multiple of
``tick_size`` every period.
Parameters
----------
starting_price : float
The first price of the series.
count : int
Number of price observations to return.
tick_size : float
Unit of price movement between observations.
tick_range : (int, int)
        Pair of lower/upper bounds for the difference in the number of ticks
between price observations.
seed : int, optional
Seed to use for random number generation.
"""
out = np.full(count, starting_price, dtype=float)
rng = np.random.RandomState(seed)
diff = rng.randint(tick_range[0], tick_range[1], size=len(out) - 1)
ticks = starting_price + diff.cumsum() * tick_size
out[1:] = ticks
return out
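# Illustrative usage (editor's sketch): a deterministic 5-element price path
# starting at 10.0, where consecutive observations differ by -5 to +6 ticks of 0.01.
#
#     prices = random_tick_prices(10.0, 5)
#     # prices[0] == 10.0 and every consecutive difference is a multiple of 0.01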
def simulate_minutes_for_day(
open_, high, low, close, volume, trading_minutes=390, random_state=None
):
"""Generate a random walk of minute returns which meets the given OHLCV
profile for an asset. The volume will be evenly distributed through the
day.
Parameters
----------
open_ : float
The day's open.
high : float
The day's high.
low : float
The day's low.
close : float
The day's close.
volume : float
The day's volume.
trading_minutes : int, optional
The number of minutes to simulate.
random_state : numpy.random.RandomState, optional
The random state to use. If not provided, the global numpy state is
used.
"""
if random_state is None:
random_state = np.random
sub_periods = 5
values = (random_state.rand(trading_minutes * sub_periods) - 0.5).cumsum()
values *= (high - low) / (values.max() - values.min())
values += np.linspace(
open_ - values[0],
close - values[-1],
len(values),
)
assert np.allclose(open_, values[0])
assert np.allclose(close, values[-1])
max_ = max(close, open_)
where = values > max_
values[where] = (values[where] - max_) * (high - max_) / (
values.max() - max_
) + max_
min_ = min(close, open_)
where = values < min_
values[where] = (values[where] - min_) * (low - min_) / (values.min() - min_) + min_
if not (np.allclose(values.max(), high) and np.allclose(values.min(), low)):
return simulate_minutes_for_day(
open_,
high,
low,
close,
volume,
trading_minutes,
random_state=random_state,
)
prices = pd.Series(values.round(3)).groupby(
np.arange(trading_minutes).repeat(sub_periods),
)
base_volume, remainder = divmod(volume, trading_minutes)
volume = np.full(trading_minutes, base_volume, dtype="int64")
volume[:remainder] += 1
# TODO: add in volume
return pd.DataFrame(
{
"open": prices.first(),
"close": prices.last(),
"high": prices.max(),
"low": prices.min(),
"volume": volume,
}
)
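# Illustrative usage (editor's sketch; the OHLCV values are arbitrary):
#
#     rs = np.random.RandomState(0)
#     bars = simulate_minutes_for_day(100.0, 101.5, 99.0, 100.5, 39000, random_state=rs)
#     # ``bars`` has 390 rows; its per-minute OHLC values approximately reproduce the
#     # daily open/high/low/close, and its volume column sums to 39000.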
def create_simple_domain(start, end, country_code):
"""Create a new pipeline domain with a simple date_range index."""
return EquitySessionDomain(pd.date_range(start, end), country_code)
def write_hdf5_daily_bars(
writer, asset_finder, country_codes, generate_data, generate_currency_codes
):
"""Write an HDF5 file of pricing data using an HDF5DailyBarWriter."""
for country_code in country_codes:
sids = asset_finder.equities_sids_for_country_code(country_code)
# XXX: The contract for generate_data is that it should return an
    # iterator of (sid, df) pairs with an entry for each sid in `sids`, and
# the contract for `generate_currency_codes` is that it should return a
# series indexed by the sids it receives.
#
# Unfortunately, some of our tests that were written before the
# introduction of multiple markets (in particular, the ones that use
# EQUITY_DAILY_BAR_SOURCE_FROM_MINUTE), provide a function that always
# returns the same iterator, regardless of the provided `sids`, which
# means there are cases where the sids in `data` don't match the sids
# in `currency_codes`, which causes an assertion failure in
# `write_from_sid_df_pairs`.
#
# The correct fix for this is to update those old tests to respect
# `sids` (most likely by updating `make_equity_minute_bar_sids` to
# support multiple countries). But that requires updating a lot of
# tests, so for now, we call `generate_data` and use the sids it
    # produces to determine what to pass to `generate_currency_codes`.
data = list(generate_data(country_code=country_code, sids=sids))
data_sids = [p[0] for p in data]
currency_codes = generate_currency_codes(
country_code=country_code,
sids=data_sids,
)
writer.write_from_sid_df_pairs(
country_code,
iter(data),
currency_codes=currency_codes,
)
def exchange_info_for_domains(domains):
"""
Build an exchange_info suitable for passing to an AssetFinder from a list
of EquityCalendarDomain.
"""
return pd.DataFrame.from_records(
[
{
"exchange": domain.calendar.name,
"country_code": domain.country_code,
}
for domain in domains
]
) | zipline-tej | /zipline_tej-0.0.48-cp38-cp38-win_amd64.whl/zipline/testing/core.py | core.py |
import numpy as np
from zipline.pipeline.factors.factor import CustomFactor
from zipline.pipeline.classifiers.classifier import CustomClassifier
from zipline.utils.idbox import IDBox
from .predicates import assert_equal
class CheckWindowsMixin(object):
params = ("expected_windows",)
def compute(self, today, assets, out, input_, expected_windows):
for asset, expected_by_day in expected_windows:
expected_by_day = expected_by_day.ob
col_ix = np.searchsorted(assets, asset)
if assets[col_ix] != asset:
raise AssertionError("asset %s is not in the window" % asset)
try:
expected = expected_by_day[today]
except KeyError:
pass
else:
expected = np.asanyarray(expected)
actual = input_[:, col_ix]
assert_equal(
actual,
expected,
array_decimal=(6 if expected.dtype.kind == "f" else None),
)
# output is just latest
out[:] = input_[-1]
class CheckWindowsClassifier(CheckWindowsMixin, CustomClassifier):
"""A custom classifier that makes assertions about the lookback windows that
it gets passed.
Parameters
----------
input_ : Term
The input term to the classifier.
window_length : int
The length of the lookback window.
expected_windows : dict[int, dict[pd.Timestamp, np.ndarray]]
For each asset, for each day, what the expected lookback window is.
Notes
-----
The output of this classifier is the same as ``Latest``. Any assets or days
not in ``expected_windows`` are not checked.
"""
def __new__(cls, input_, window_length, expected_windows):
if input_.dtype.kind == "V":
dtype = np.dtype("O")
else:
dtype = input_.dtype
return super(CheckWindowsClassifier, cls).__new__(
cls,
inputs=[input_],
dtype=dtype,
window_length=window_length,
expected_windows=frozenset(
(k, IDBox(v)) for k, v in expected_windows.items()
),
)
class CheckWindowsFactor(CheckWindowsMixin, CustomFactor):
"""A custom factor that makes assertions about the lookback windows that
it gets passed.
Parameters
----------
input_ : Term
The input term to the factor.
window_length : int
The length of the lookback window.
expected_windows : dict[int, dict[pd.Timestamp, np.ndarray]]
For each asset, for each day, what the expected lookback window is.
Notes
-----
The output of this factor is the same as ``Latest``. Any assets or days
not in ``expected_windows`` are not checked.
"""
def __new__(cls, input_, window_length, expected_windows):
return super(CheckWindowsFactor, cls).__new__(
cls,
inputs=[input_],
dtype=input_.dtype,
window_length=window_length,
expected_windows=frozenset(
(k, IDBox(v)) for k, v in expected_windows.items()
),
) | zipline-tej | /zipline_tej-0.0.48-cp38-cp38-win_amd64.whl/zipline/testing/pipeline_terms.py | pipeline_terms.py |
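# Illustrative usage (editor's sketch; the sid, date, and input term are
# hypothetical, and ``pd``/``np`` are assumed to be available on the caller's side):
#
#     expected = {
#         1: {pd.Timestamp('2014-01-06', tz='UTC'): np.array([1.0, 2.0, 3.0])},
#     }
#     factor = CheckWindowsFactor(some_input_term, window_length=3,
#                                 expected_windows=expected)
#     # When run in a pipeline, the factor asserts that the 3-day lookback window
#     # for sid 1 on 2014-01-06 equals [1.0, 2.0, 3.0]; otherwise it just outputs
#     # the latest value of ``some_input_term``.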
import errno
import os
import pkgutil
from importlib import import_module
import click
import logbook
import pandas as pd
from six import text_type
import zipline
from zipline.data import bundles as bundles_module
from trading_calendars import get_calendar
from zipline.utils.compat import wraps
from zipline.utils.cli import Date, Timestamp
from zipline.utils.run_algo import _run, BenchmarkSpec, load_extensions
from zipline.extensions import create_args
from zipline.gens import brokers
try:
__IPYTHON__
except NameError:
__IPYTHON__ = False
@click.group()
@click.option(
'-e',
'--extension',
multiple=True,
help='File or module path to a zipline extension to load.',
)
@click.option(
'--strict-extensions/--non-strict-extensions',
is_flag=True,
help='If --strict-extensions is passed then zipline will not '
'run if it cannot load all of the specified extensions. '
'If this is not passed or --non-strict-extensions is passed '
'then the failure will be logged but execution will continue.',
)
@click.option(
'--default-extension/--no-default-extension',
is_flag=True,
default=True,
help="Don't load the default zipline extension.py file in $ZIPLINE_HOME.",
)
@click.option(
'-x',
multiple=True,
help='Any custom command line arguments to define, in key=value form.'
)
@click.pass_context
def main(ctx, extension, strict_extensions, default_extension, x):
"""Top level zipline entry point.
"""
# install a logbook handler before performing any other operations
logbook.StderrHandler().push_application()
create_args(x, zipline.extension_args)
load_extensions(
default_extension,
extension,
strict_extensions,
os.environ,
)
def extract_option_object(option):
"""Convert a click.option call into a click.Option object.
Parameters
----------
option : decorator
A click.option decorator.
Returns
-------
option_object : click.Option
The option object that this decorator will create.
"""
@option
def opt():
pass
return opt.__click_params__[0]
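# Illustrative example (editor's sketch; the option is hypothetical):
#
#     opt = extract_option_object(click.option('--foo', default=1))
#     # opt.name == 'foo'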
def ipython_only(option):
"""Mark that an option should only be exposed in IPython.
Parameters
----------
option : decorator
A click.option decorator.
Returns
-------
ipython_only_dec : decorator
A decorator that correctly applies the argument even when not
using IPython mode.
"""
if __IPYTHON__:
return option
argname = extract_option_object(option).name
def d(f):
@wraps(f)
def _(*args, **kwargs):
kwargs[argname] = None
return f(*args, **kwargs)
return _
return d
DEFAULT_BUNDLE = 'quantopian-quandl'
@main.command()
@click.option(
'-f',
'--algofile',
default=None,
type=click.File('r'),
help='The file that contains the algorithm to run.',
)
@click.option(
'-t',
'--algotext',
help='The algorithm script to run.',
)
@click.option(
'-D',
'--define',
multiple=True,
help="Define a name to be bound in the namespace before executing"
" the algotext. For example '-Dname=value'. The value may be any "
"python expression. These are evaluated in order so they may refer "
"to previously defined names.",
)
@click.option(
'--data-frequency',
type=click.Choice({'daily', 'minute'}),
default='daily',
show_default=True,
help='The data frequency of the simulation.',
)
@click.option(
'--capital-base',
type=float,
default=10e6,
show_default=True,
help='The starting capital for the simulation.',
)
@click.option(
'-b',
'--bundle',
default=DEFAULT_BUNDLE,
metavar='BUNDLE-NAME',
show_default=True,
help='The data bundle to use for the simulation.',
)
@click.option(
'--bundle-timestamp',
type=Timestamp(),
default=pd.Timestamp.utcnow(),
show_default=False,
help='The date to lookup data on or before.\n'
'[default: <current-time>]'
)
@click.option(
'-bf',
'--benchmark-file',
default=None,
type=click.Path(exists=True, dir_okay=False, readable=True, path_type=str),
help='The csv file that contains the benchmark returns',
)
@click.option(
'--benchmark-symbol',
default=None,
type=click.STRING,
help="The symbol of the instrument to be used as a benchmark "
"(should exist in the ingested bundle)",
)
@click.option(
'--benchmark-sid',
default=None,
type=int,
help="The sid of the instrument to be used as a benchmark "
"(should exist in the ingested bundle)",
)
@click.option(
'--no-benchmark',
is_flag=True,
default=False,
help="If passed, use a benchmark of zero returns.",
)
@click.option(
'-s',
'--start',
type=Date(tz='utc', as_timestamp=True),
help='The start date of the simulation.',
)
@click.option(
'-e',
'--end',
type=Date(tz='utc', as_timestamp=True),
help='The end date of the simulation.',
)
@click.option(
'-o',
'--output',
default='-',
metavar='FILENAME',
show_default=True,
help="The location to write the perf data. If this is '-' the perf will"
" be written to stdout.",
)
@click.option(
'--trading-calendar',
metavar='TRADING-CALENDAR',
default='XNYS',
help="The calendar you want to use e.g. XLON. XNYS is the default."
)
@click.option(
'--print-algo/--no-print-algo',
is_flag=True,
default=False,
help='Print the algorithm to stdout.',
)
@click.option(
'--metrics-set',
default='default',
help='The metrics set to use. New metrics sets may be registered in your'
' extension.py.',
)
@click.option(
'--blotter',
default='default',
help="The blotter to use.",
show_default=True,
)
@ipython_only(click.option(
'--local-namespace/--no-local-namespace',
is_flag=True,
default=None,
help='Should the algorithm methods be resolved in the local namespace.'
))
@click.option(
'--broker',
default=None,
help='Broker'
)
@click.option(
'--broker-uri',
default=None,
metavar='BROKER-URI',
show_default=True,
help='Connection to broker',
)
@click.option(
'--state-file',
default=None,
metavar='FILENAME',
help='Filename where the state will be stored'
)
@click.option(
'--realtime-bar-target',
default=None,
metavar='DIRNAME',
help='Directory where the realtime collected minutely bars are saved'
)
@click.option(
'--list-brokers',
is_flag=True,
help='Get list of available brokers'
)
@click.pass_context
def run(ctx,
algofile,
algotext,
define,
data_frequency,
capital_base,
bundle,
bundle_timestamp,
benchmark_file,
benchmark_symbol,
benchmark_sid,
no_benchmark,
start,
end,
output,
trading_calendar,
print_algo,
metrics_set,
local_namespace,
blotter,
broker,
broker_uri,
state_file,
realtime_bar_target,
list_brokers):
"""Run a backtest for the given algorithm.
"""
if list_brokers:
click.echo("Supported brokers:")
for _, name, _ in pkgutil.iter_modules(brokers.__path__):
if name != 'broker':
click.echo(name)
return
# check that the start and end dates are passed correctly
if not broker and start is None and end is None:
# check both at the same time to avoid the case where a user
# does not pass either of these and then passes the first only
# to be told they need to pass the second argument also
ctx.fail(
"must specify dates with '-s' / '--start' and '-e' / '--end'",
)
if not broker and start is None:
ctx.fail("must specify a start date with '-s' / '--start'")
if not broker and end is None:
ctx.fail("must specify an end date with '-e' / '--end'")
if broker and broker_uri is None:
ctx.fail("must specify broker-uri if broker is specified")
if broker and state_file is None:
ctx.fail("must specify state-file with live trading")
if broker and realtime_bar_target is None:
ctx.fail("must specify realtime-bar-target with live trading")
brokerobj = None
if broker:
mod_name = 'zipline.gens.brokers.%s_broker' % broker.lower()
try:
bmod = import_module(mod_name)
except ImportError:
ctx.fail("unsupported broker: can't import module %s" % mod_name)
cl_name = '%sBroker' % broker.upper()
try:
bclass = getattr(bmod, cl_name)
except AttributeError:
ctx.fail("unsupported broker: can't import class %s from %s" %
(cl_name, mod_name))
brokerobj = bclass(broker_uri)
if end is None:
        end = pd.Timestamp.utcnow() + pd.Timedelta(days=1, seconds=1)  # add 1 second to ensure that end is more than 1 day ahead
if (algotext is not None) == (algofile is not None):
ctx.fail(
"must specify exactly one of '-f' / '--algofile' or"
" '-t' / '--algotext'",
)
trading_calendar = get_calendar(trading_calendar)
benchmark_spec = BenchmarkSpec.from_cli_params(
no_benchmark=no_benchmark,
benchmark_sid=benchmark_sid,
benchmark_symbol=benchmark_symbol,
benchmark_file=benchmark_file,
)
return _run(
initialize=None,
handle_data=None,
before_trading_start=None,
analyze=None,
teardown=None,
algofile=algofile,
algotext=algotext,
defines=define,
data_frequency=data_frequency,
capital_base=capital_base,
bundle=bundle,
bundle_timestamp=bundle_timestamp,
start=start,
end=end,
output=output,
trading_calendar=trading_calendar,
print_algo=print_algo,
metrics_set=metrics_set,
local_namespace=local_namespace,
environ=os.environ,
blotter=blotter,
benchmark_spec=benchmark_spec,
broker=brokerobj,
state_filename=state_file,
realtime_bar_target=realtime_bar_target,
performance_callback=None,
stop_execution_callback=None,
execution_id=None
)
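# Illustrative CLI invocation (editor's sketch; assumes the ``zipline`` console
# script is installed and that the bundle below has already been ingested):
#
#     zipline run -f my_algo.py -b quantopian-quandl \
#         -s 2017-01-01 -e 2018-01-01 -o perf.pickle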
def zipline_magic(line, cell=None):
"""The zipline IPython cell magic.
"""
load_extensions(
default=True,
extensions=[],
strict=True,
environ=os.environ,
)
try:
return run.main(
# put our overrides at the start of the parameter list so that
# users may pass values with higher precedence
[
'--algotext', cell,
'--output', os.devnull, # don't write the results by default
] + ([
# these options are set when running in line magic mode
# set a non None algo text to use the ipython user_ns
'--algotext', '',
'--local-namespace',
] if cell is None else []) + line.split(),
'%s%%zipline' % ((cell or '') and '%'),
            # don't use system exit and propagate errors to the caller
standalone_mode=False,
)
except SystemExit as e:
# https://github.com/mitsuhiko/click/pull/533
# even in standalone_mode=False `--help` really wants to kill us ;_;
if e.code:
raise ValueError('main returned non-zero status code: %d' % e.code)
@main.command()
@click.option(
'-b',
'--bundle',
default=DEFAULT_BUNDLE,
metavar='BUNDLE-NAME',
show_default=True,
help='The data bundle to ingest.',
)
@click.option(
'--assets-version',
type=int,
multiple=True,
help='Version of the assets db to which to downgrade.',
)
@click.option(
'--show-progress/--no-show-progress',
default=True,
help='Print progress information to the terminal.'
)
def ingest(bundle, assets_version, show_progress):
"""Ingest the data for the given bundle.
"""
bundles_module.ingest(
bundle,
os.environ,
pd.Timestamp.utcnow(),
assets_version,
show_progress,
)
@main.command()
@click.option(
'-b',
'--bundle',
default=DEFAULT_BUNDLE,
metavar='BUNDLE-NAME',
show_default=True,
help='The data bundle to clean.',
)
@click.option(
'-e',
'--before',
type=Timestamp(),
help='Clear all data before TIMESTAMP.'
' This may not be passed with -k / --keep-last',
)
@click.option(
'-a',
'--after',
type=Timestamp(),
    help='Clear all data after TIMESTAMP.'
' This may not be passed with -k / --keep-last',
)
@click.option(
'-k',
'--keep-last',
type=int,
metavar='N',
help='Clear all but the last N downloads.'
' This may not be passed with -e / --before or -a / --after',
)
def clean(bundle, before, after, keep_last):
"""Clean up data downloaded with the ingest command.
"""
bundles_module.clean(
bundle,
before,
after,
keep_last,
)
@main.command()
def bundles():
"""List all of the available data bundles.
"""
for bundle in sorted(bundles_module.bundles.keys()):
if bundle.startswith('.'):
# hide the test data
continue
try:
ingestions = list(
map(text_type, bundles_module.ingestions_for_bundle(bundle))
)
except OSError as e:
if e.errno != errno.ENOENT:
raise
ingestions = []
# If we got no ingestions, either because the directory didn't exist or
# because there were no entries, print a single message indicating that
# no ingestions have yet been made.
for timestamp in ingestions or ["<no ingestions>"]:
click.echo("%s %s" % (bundle, timestamp))
if __name__ == '__main__':
main() | zipline-trader | /zipline_trader-1.6.1-cp36-cp36m-win32.whl/zipline/__main__.py | __main__.py |
import os.path
from datetime import datetime, timedelta
import logbook
import pandas as pd
import pytz
from dateutil.relativedelta import relativedelta
from zipline.finance.blotter.blotter_live import BlotterLive
from zipline.algorithm import TradingAlgorithm
from zipline.errors import ScheduleFunctionOutsideTradingStart
from zipline.gens.realtimeclock import RealtimeClock
from zipline.gens.tradesimulation import AlgorithmSimulator
from zipline.utils.api_support import ZiplineAPI, \
allowed_only_in_before_trading_start, api_method
from zipline.utils.pandas_utils import normalize_date
from zipline.utils.serialization_utils import load_context, store_context
from zipline.finance.metrics import MetricsTracker, load as load_metrics_set
log = logbook.Logger("Live Trading")
# How many minutes before trading starts the before_trading_start function
# needs to be launched.
_minutes_before_trading_starts = 60*4
class LiveAlgorithmExecutor(AlgorithmSimulator):
def __init__(self, *args, **kwargs):
super(self.__class__, self).__init__(*args, **kwargs)
def _cleanup_expired_assets(self, dt, position_assets):
        # In simulation this is used to close out assets on the simulation end date,
        # which makes a lot of sense there. In our case, "simulation end" is set to
        # one day from now (we might want to fix that in the future too), BUT we
        # don't really have a simulation end date, so we let the algorithm decide
        # when to close the assets.
pass
class LiveTradingAlgorithm(TradingAlgorithm):
def __init__(self, *args, **kwargs):
self.broker = kwargs.pop('broker', None)
self.orders = {}
self.algo_filename = kwargs.get('algo_filename', "<algorithm>")
self.state_filename = kwargs.pop('state_filename', None)
self.realtime_bar_target = kwargs.pop('realtime_bar_target', None)
        # The persistence blacklist/whitelist and excludes give a way to include or
        # exclude attributes from the serialization functions that save the context
        # variables to disk and restore them to their last state.
        # The trading client can never be serialized; the `initialized` flag and the
        # perf tracker remember the context variables and the past performance,
        # so they need to be whitelisted.
self._context_persistence_blacklist = ['trading_client']
self._context_persistence_whitelist = ['initialized', 'perf_tracker']
self._context_persistence_excludes = []
# blotter is always initialized to SimulationBlotter in run_algo.py.
# we override it here to use the LiveBlotter for live algos
blotter_live = BlotterLive(
data_frequency=kwargs['sim_params'].data_frequency,
broker=self.broker)
kwargs['blotter'] = blotter_live
super(self.__class__, self).__init__(*args, **kwargs)
log.info("initialization done")
def initialize(self, *args, **kwargs):
self._context_persistence_excludes = \
self._context_persistence_blacklist + \
[e for e in self.__dict__.keys()
if e not in self._context_persistence_whitelist]
if os.path.isfile(self.state_filename):
log.info("Loading state from {}".format(self.state_filename))
load_context(self.state_filename,
context=self,
checksum=self.algo_filename)
return
with ZiplineAPI(self):
super(self.__class__, self).initialize(*args, **kwargs)
store_context(self.state_filename,
context=self,
checksum=self.algo_filename,
exclude_list=self._context_persistence_excludes)
def handle_data(self, data):
super(self.__class__, self).handle_data(data)
store_context(self.state_filename,
context=self,
checksum=self.algo_filename,
exclude_list=self._context_persistence_excludes)
def teardown(self):
super(self.__class__, self).teardown()
store_context(self.state_filename,
context=self,
checksum=self.algo_filename,
exclude_list=self._context_persistence_excludes)
def _create_clock(self):
# This method is taken from TradingAlgorithm.
# The clock has been replaced to use RealtimeClock
trading_o_and_c = self.trading_calendar.schedule.loc[
self.sim_params.sessions]
assert self.sim_params.emission_rate == 'minute'
minutely_emission = True
market_opens = trading_o_and_c['market_open']
market_closes = trading_o_and_c['market_close']
# The calendar's execution times are the minutes over which we actually
# want to run the clock. Typically the execution times simply adhere to
# the market open and close times. In the case of the futures calendar,
# for example, we only want to simulate over a subset of the full 24
# hour calendar, so the execution times dictate a market open time of
# 6:31am US/Eastern and a close of 5:00pm US/Eastern.
execution_opens = \
self.trading_calendar.execution_time_from_open(market_opens)
execution_closes = \
self.trading_calendar.execution_time_from_close(market_closes)
before_trading_start_minutes = ((pd.to_datetime(execution_opens.values)
.tz_localize('UTC').tz_convert('US/Eastern') -
timedelta(minutes=_minutes_before_trading_starts))
.tz_convert('UTC'))
return RealtimeClock(
self.sim_params.sessions,
execution_opens,
execution_closes,
before_trading_start_minutes,
minute_emission=minutely_emission,
time_skew=self.broker.time_skew,
is_broker_alive=self.broker.is_alive,
execution_id=self.sim_params._execution_id if hasattr(self.sim_params, "_execution_id") else None,
stop_execution_callback=self._stop_execution_callback
)
def _create_generator(self, sim_params):
# Call the simulation trading algorithm for side-effects:
# it creates the perf tracker
TradingAlgorithm._create_generator(self, self.sim_params)
        # Capital base is the amount of money the algo can use.
        # It must be set with run_algorithm; in cli mode it is optional and defaults
        # to 10 million. Note that in python 10**7 or 10e6 is 10 million, i.e. 10000000.
        # note2: the default value is defined in zipline/__main__.py under the
        # `--capital-base` option.
        # We need to support these scenarios:
        # 1. cli mode with the default param - we need to replace 10e6 with the
        #    value from the broker.
        # 2. run_algorithm or cli with a specified value - if more than one algo is
        #    running and we want to allocate a specific value to each algo, we cannot
        #    override it with the value from the broker because that would set it to
        #    the maximum value.
        # So we check whether it's the default value. Assuming at this stage that the
        # capital used by one algo will be less than 10e6, we override it with the
        # value from the broker; if it's set to anything else we don't change it.
        if self.metrics_tracker._capital_base == 10e6:  # should be changed in the future to a centralized value
            # The capital base is held in the metrics_tracker, then the ledger, then
            # the Portfolio. Since it's used in many spots, the best way to handle this
            # is to create a new metrics_tracker with the new value and, of course,
            # initialize the relevant parts. This is copied from
            # TradingAlgorithm._create_generator.
self.metrics_tracker = metrics_tracker = self._create_live_metrics_tracker()
benchmark_source = self._create_benchmark_source()
metrics_tracker.handle_start_of_simulation(benchmark_source)
# attach metrics_tracker to broker
self.broker.set_metrics_tracker(self.metrics_tracker)
self.trading_client = LiveAlgorithmExecutor(
self,
sim_params,
self.data_portal,
self.trading_client.clock,
self._create_benchmark_source(),
self.restrictions,
universe_func=self._calculate_universe
)
return self.trading_client.transform()
def _create_live_metrics_tracker(self):
"""
        Create the metrics_tracker, but set values from the broker rather than
        from the simulation params.
:return:
"""
account = self.broker.get_account_from_broker()
capital_base = float(account['NetLiquidation'])
return MetricsTracker(
trading_calendar=self.trading_calendar,
first_session=self.sim_params.start_session,
last_session=self.sim_params.end_session,
capital_base=capital_base,
emission_rate=self.sim_params.emission_rate,
data_frequency=self.sim_params.data_frequency,
asset_finder=self.asset_finder,
metrics=self._metrics_set,
)
def updated_portfolio(self):
return self.broker.portfolio
def updated_account(self):
return self.broker.account
@api_method
@allowed_only_in_before_trading_start(
ScheduleFunctionOutsideTradingStart())
def schedule_function(self,
func,
date_rule=None,
time_rule=None,
half_days=True,
calendar=None):
        # If schedule_function() is called from initialize()
# then the state persistence would need to take care of storing and
# restoring the scheduled functions too (as initialize() only called
# once in the algorithm's life). Persisting scheduled functions are
# difficult as they are not serializable by default.
# We enforce scheduled functions to be called only from
# before_trading_start() in live trading with a decorator.
super(self.__class__, self).schedule_function(func,
date_rule,
time_rule,
half_days,
calendar)
@api_method
def symbol(self, symbol_str):
# This method works around the problem of not being able to trade
# assets which does not have ingested data for the day of trade.
# Normally historical data is loaded to bundle and the asset's
# end_date and auto_close_date is set based on the last entry from
# the bundle db. LiveTradingAlgorithm does not override order_value(),
# order_percent() & order_target(). Those higher level ordering
# functions provide a safety net to not to trade de-listed assets.
# If the asset is returned as it was ingested (end_date=yesterday)
# then CannotOrderDelistedAsset exception will be raised from the
# higher level order functions.
#
# Hence, we are increasing the asset's end_date by 10 years.
asset = super(self.__class__, self).symbol(symbol_str)
tradeable_asset = asset.to_dict()
end_date = pd.Timestamp((datetime.utcnow() + relativedelta(years=10)).date()).replace(tzinfo=pytz.UTC)
tradeable_asset['end_date'] = end_date
tradeable_asset['auto_close_date'] = end_date
log.debug('Extended lifetime of asset {} to {}'.format(symbol_str,
tradeable_asset['end_date']))
return asset.from_dict(tradeable_asset)
def run(self, *args, **kwargs):
daily_stats = super(self.__class__, self).run(*args, **kwargs)
self.on_exit()
return daily_stats
def on_exit(self):
self.teardown()
if not self.realtime_bar_target:
return
log.info("Storing realtime bars to: {}".format(
self.realtime_bar_target))
today = str(pd.to_datetime('today').date())
subscribed_assets = self.broker.subscribed_assets
realtime_history = self.broker.get_realtime_bars(subscribed_assets,
'1m')
if not os.path.exists(self.realtime_bar_target):
os.mkdir(self.realtime_bar_target)
for asset in subscribed_assets:
filename = "ZL-%s-%s.csv" % (asset.symbol, today)
path = os.path.join(self.realtime_bar_target, filename)
realtime_history[asset].to_csv(path, mode='a',
index_label='datetime',
header=not os.path.exists(path))
def _pipeline_output(self, pipeline, chunks, name):
# This method is taken from TradingAlgorithm.
"""
Internal implementation of `pipeline_output`.
        For live algos we have to use the previous session, as the Pipeline won't
        work without it: it would extrapolate and try to get data for get_datetime(),
        which is today.
"""
today = normalize_date(self.get_datetime())
prev_session = normalize_date(self.trading_calendar.previous_open(today))
log.info('today in _pipeline_output : {}'.format(prev_session))
try:
data = self._pipeline_cache.get(name, prev_session)
except KeyError:
# Calculate the next block.
data, valid_until = self.run_pipeline(
pipeline, prev_session, next(chunks),
)
self._pipeline_cache.set(name, data, valid_until)
# Now that we have a cached result, try to return the data for today.
try:
return data.loc[prev_session]
except KeyError:
# This happens if no assets passed the pipeline screen on a given
# day.
return pd.DataFrame(index=[], columns=data.columns)
def _sync_last_sale_prices(self, dt=None):
"""
        We get the updates from the broker, so we don't need the default behavior of
        this method, which tries to get prices from the ingested data.
:param dt:
:return:
"""
self.broker.positions | zipline-trader | /zipline_trader-1.6.1-cp36-cp36m-win32.whl/zipline/algorithm_live.py | algorithm_live.py |
import re
import six
from toolz import curry
def create_args(args, root):
"""
Encapsulates a set of custom command line arguments in key=value
or key.namespace=value form into a chain of Namespace objects,
where each next level is an attribute of the Namespace object on the
current level
Parameters
----------
args : list
A list of strings representing arguments in key=value form
root : Namespace
The top-level element of the argument tree
"""
extension_args = {}
for arg in args:
parse_extension_arg(arg, extension_args)
for name in sorted(extension_args, key=len):
path = name.split('.')
update_namespace(root, path, extension_args[name])
def parse_extension_arg(arg, arg_dict):
"""
Converts argument strings in key=value or key.namespace=value form
to dictionary entries
Parameters
----------
arg : str
The argument string to parse, which must be in key=value or
key.namespace=value form.
arg_dict : dict
The dictionary into which the key/value pair will be added
"""
match = re.match(r'^(([^\d\W]\w*)(\.[^\d\W]\w*)*)=(.*)$', arg)
if match is None:
raise ValueError(
"invalid extension argument '%s', must be in key=value form" % arg
)
name = match.group(1)
value = match.group(4)
arg_dict[name] = value
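# Illustrative example (editor's note):
#
#     arg_dict = {}
#     parse_extension_arg('broker.uri=localhost:4002', arg_dict)
#     # arg_dict == {'broker.uri': 'localhost:4002'}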
def update_namespace(namespace, path, name):
"""
A recursive function that takes a root element, list of namespaces,
and the value being stored, and assigns namespaces to the root object
via a chain of Namespace objects, connected through attributes
Parameters
----------
namespace : Namespace
The object onto which an attribute will be added
path : list
A list of strings representing namespaces
name : str
The value to be stored at the bottom level
"""
if len(path) == 1:
setattr(namespace, path[0], name)
else:
if hasattr(namespace, path[0]):
if isinstance(getattr(namespace, path[0]), six.string_types):
raise ValueError("Conflicting assignments at namespace"
" level '%s'" % path[0])
else:
a = Namespace()
setattr(namespace, path[0], a)
update_namespace(getattr(namespace, path[0]), path[1:], name)
class Namespace(object):
"""
A placeholder object representing a namespace level
"""
class Registry(object):
"""
Responsible for managing all instances of custom subclasses of a
given abstract base class - only one instance needs to be created
per abstract base class, and should be created through the
create_registry function/decorator. All management methods
for a given base class can be called through the global wrapper functions
rather than through the object instance itself.
Parameters
----------
interface : type
The abstract base class to manage.
"""
def __init__(self, interface):
self.interface = interface
self._factories = {}
def load(self, name):
"""Construct an object from a registered factory.
Parameters
----------
name : str
Name with which the factory was registered.
"""
try:
return self._factories[name]()
except KeyError:
raise ValueError(
"no %s factory registered under name %r, options are: %r" %
(self.interface.__name__, name, sorted(self._factories)),
)
def is_registered(self, name):
"""Check whether we have a factory registered under ``name``.
"""
return name in self._factories
@curry
def register(self, name, factory):
if self.is_registered(name):
raise ValueError(
"%s factory with name %r is already registered" %
(self.interface.__name__, name)
)
self._factories[name] = factory
return factory
def unregister(self, name):
try:
del self._factories[name]
except KeyError:
raise ValueError(
"%s factory %r was not already registered" %
(self.interface.__name__, name)
)
def clear(self):
self._factories.clear()
# Public wrapper methods for Registry:
def get_registry(interface):
"""
Getter method for retrieving the registry
instance for a given extendable type
Parameters
----------
interface : type
extendable type (base class)
Returns
-------
manager : Registry
The corresponding registry
"""
try:
return custom_types[interface]
except KeyError:
raise ValueError("class specified is not an extendable type")
def load(interface, name):
"""
Retrieves a custom class whose name is given.
Parameters
----------
interface : type
The base class for which to perform this operation
name : str
The name of the class to be retrieved.
Returns
-------
obj : object
An instance of the desired class.
"""
return get_registry(interface).load(name)
@curry
def register(interface, name, custom_class):
"""
Registers a class for retrieval by the load method
Parameters
----------
interface : type
The base class for which to perform this operation
name : str
The name of the subclass
custom_class : type
        The class to register, which must be a subclass of the abstract
        base class registered for ``interface``.
"""
return get_registry(interface).register(name, custom_class)
def unregister(interface, name):
"""
If a class is registered with the given name,
it is unregistered.
Parameters
----------
interface : type
The base class for which to perform this operation
name : str
The name of the class to be unregistered.
"""
get_registry(interface).unregister(name)
def clear(interface):
"""
Unregisters all current registered classes
Parameters
----------
interface : type
The base class for which to perform this operation
"""
get_registry(interface).clear()
def create_registry(interface):
"""
Create a new registry for an extensible interface.
Parameters
----------
interface : type
The abstract data type for which to create a registry,
which will manage registration of factories for this type.
Returns
-------
interface : type
The data type specified/decorated, unaltered.
"""
if interface in custom_types:
raise ValueError('there is already a Registry instance '
'for the specified type')
custom_types[interface] = Registry(interface)
return interface
extensible = create_registry
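# Illustrative sketch (hypothetical class names): the decorator-based workflow
# that ties create_registry/register/load together.
#
#     @extensible                       # same as create_registry(MyInterface)
#     class MyInterface(object):
#         pass
#
#     @register(MyInterface, 'custom')  # register a factory under a name
#     class MyImplementation(MyInterface):
#         pass
#
#     obj = load(MyInterface, 'custom')  # -> MyImplementation()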
# A global dictionary for storing instances of Registry:
custom_types = {}


# ===== end of zipline/extensions.py; zipline/errors.py follows =====
from textwrap import dedent
from zipline.utils.memoize import lazyval
class ZiplineError(Exception):
msg = None
def __init__(self, **kwargs):
self.kwargs = kwargs
@lazyval
def message(self):
return str(self)
def __str__(self):
msg = self.msg.format(**self.kwargs)
return msg
__unicode__ = __str__
__repr__ = __str__
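# Illustrative sketch (hypothetical subclass): concrete errors below only need
# to provide a ``msg`` template; keyword arguments passed to the constructor
# are interpolated with str.format when the exception is rendered.
#
#     class ExampleError(ZiplineError):
#         msg = "Something went wrong with {asset} on {dt}."
#
#     str(ExampleError(asset='AAPL', dt='2016-01-04'))
#     # -> 'Something went wrong with AAPL on 2016-01-04.'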
class ScheduleFunctionOutsideTradingStart(ZiplineError):
"""
Raised when an algorithm schedules functions outside of
before_trading_start()
"""
msg = "schedule_function() should only be called in before_trading_start()"
class NoTradeDataAvailable(ZiplineError):
pass
class NoTradeDataAvailableTooEarly(NoTradeDataAvailable):
msg = "{sid} does not exist on {dt}. It started trading on {start_dt}."
class NoTradeDataAvailableTooLate(NoTradeDataAvailable):
msg = "{sid} does not exist on {dt}. It stopped trading on {end_dt}."
class BenchmarkAssetNotAvailableTooEarly(NoTradeDataAvailableTooEarly):
pass
class BenchmarkAssetNotAvailableTooLate(NoTradeDataAvailableTooLate):
pass
class InvalidBenchmarkAsset(ZiplineError):
msg = """
{sid} cannot be used as the benchmark because it has a stock \
dividend on {dt}. Choose another asset to use as the benchmark.
""".strip()
class WrongDataForTransform(ZiplineError):
"""
Raised whenever a rolling transform is called on an event that
does not have the necessary properties.
"""
msg = "{transform} requires {fields}. Event cannot be processed."
class UnsupportedSlippageModel(ZiplineError):
"""
Raised if a user script calls the set_slippage magic
    with a slippage object that isn't a VolumeShareSlippage or
    FixedSlippage.
"""
msg = """
You attempted to set slippage with an unsupported class. \
Please use VolumeShareSlippage or FixedSlippage.
""".strip()
class IncompatibleSlippageModel(ZiplineError):
"""
Raised if a user tries to set a futures slippage model for equities or vice
versa.
"""
msg = """
You attempted to set an incompatible slippage model for {asset_type}. \
The slippage model '{given_model}' only supports {supported_asset_types}.
""".strip()
class SetSlippagePostInit(ZiplineError):
    # Raised if a user's script calls set_slippage magic
# after the initialize method has returned.
msg = """
You attempted to set slippage outside of `initialize`. \
You may only call 'set_slippage' in your initialize method.
""".strip()
class SetCancelPolicyPostInit(ZiplineError):
    # Raised if a user's script calls set_cancel_policy
# after the initialize method has returned.
msg = """
You attempted to set the cancel policy outside of `initialize`. \
You may only call 'set_cancel_policy' in your initialize method.
""".strip()
class RegisterTradingControlPostInit(ZiplineError):
    # Raised if a user's script registers a trading control after initialize
# has been run.
msg = """
You attempted to set a trading control outside of `initialize`. \
Trading controls may only be set in your initialize method.
""".strip()
class RegisterAccountControlPostInit(ZiplineError):
    # Raised if a user's script registers an account control after initialize
# has been run.
msg = """
You attempted to set an account control outside of `initialize`. \
Account controls may only be set in your initialize method.
""".strip()
class UnsupportedCommissionModel(ZiplineError):
"""
Raised if a user script calls the set_commission magic
with a commission object that isn't a PerShare, PerTrade or
PerDollar commission
"""
msg = """
You attempted to set commission with an unsupported class. \
Please use PerShare or PerTrade.
""".strip()
class IncompatibleCommissionModel(ZiplineError):
"""
Raised if a user tries to set a futures commission model for equities or
vice versa.
"""
msg = """
You attempted to set an incompatible commission model for {asset_type}. \
The commission model '{given_model}' only supports {supported_asset_types}.
""".strip()
class UnsupportedCancelPolicy(ZiplineError):
"""
Raised if a user script calls set_cancel_policy with an object that isn't
a CancelPolicy.
"""
msg = """
You attempted to set the cancel policy with an unsupported class. Please use
an instance of CancelPolicy.
""".strip()
class SetCommissionPostInit(ZiplineError):
"""
    Raised if a user's script calls set_commission magic
after the initialize method has returned.
"""
msg = """
You attempted to override commission outside of `initialize`. \
You may only call 'set_commission' in your initialize method.
""".strip()
class TransactionWithNoVolume(ZiplineError):
"""
Raised if a transact call returns a transaction with zero volume.
"""
msg = """
Transaction {txn} has a volume of zero.
""".strip()
class TransactionWithWrongDirection(ZiplineError):
"""
Raised if a transact call returns a transaction with a direction that
does not match the order.
"""
msg = """
Transaction {txn} not in same direction as corresponding order {order}.
""".strip()
class TransactionWithNoAmount(ZiplineError):
"""
Raised if a transact call returns a transaction with zero amount.
"""
msg = """
Transaction {txn} has an amount of zero.
""".strip()
class TransactionVolumeExceedsOrder(ZiplineError):
"""
Raised if a transact call returns a transaction with a volume greater than
the corresponding order.
"""
msg = """
Transaction volume of {txn} exceeds the order volume of {order}.
""".strip()
class UnsupportedOrderParameters(ZiplineError):
"""
Raised if a set of mutually exclusive parameters are passed to an order
call.
"""
msg = "{msg}"
class CannotOrderDelistedAsset(ZiplineError):
"""
Raised if an order is for a delisted asset.
"""
msg = "{msg}"
class BadOrderParameters(ZiplineError):
"""
Raised if any impossible parameters (nan, negative limit/stop)
are passed to an order call.
"""
msg = "{msg}"
class OrderDuringInitialize(ZiplineError):
"""
Raised if order is called during initialize()
"""
msg = "{msg}"
class SetBenchmarkOutsideInitialize(ZiplineError):
"""
Raised if set_benchmark is called outside initialize()
"""
msg = "'set_benchmark' can only be called within initialize function."
class ZeroCapitalError(ZiplineError):
"""
Raised if initial capital is set at or below zero
"""
msg = "initial capital base must be greater than zero"
class AccountControlViolation(ZiplineError):
"""
Raised if the account violates a constraint set by a AccountControl.
"""
msg = """
Account violates account constraint {constraint}.
""".strip()
class TradingControlViolation(ZiplineError):
"""
Raised if an order would violate a constraint set by a TradingControl.
"""
msg = """
Order for {amount} shares of {asset} at {datetime} violates trading constraint
{constraint}.
""".strip()
class IncompatibleHistoryFrequency(ZiplineError):
"""
Raised when a frequency is given to history which is not supported.
At least, not yet.
"""
msg = """
Requested history at frequency '{frequency}' cannot be created with data
at frequency '{data_frequency}'.
""".strip()
class HistoryInInitialize(ZiplineError):
"""
Raised when an algorithm calls history() in initialize.
"""
msg = "history() should only be called in handle_data()"
class OrderInBeforeTradingStart(ZiplineError):
"""
Raised when an algorithm calls an order method in before_trading_start.
"""
msg = "Cannot place orders inside before_trading_start."
class MultipleSymbolsFound(ZiplineError):
"""
Raised when a symbol() call contains a symbol that changed over
time and is thus not resolvable without additional information
provided via as_of_date.
"""
msg = """
Multiple symbols with the name '{symbol}' found. Use the
'as_of_date' argument to specify when the symbol lookup
should be valid.
Possible options: {options}
""".strip()
class MultipleSymbolsFoundForFuzzySymbol(MultipleSymbolsFound):
"""
Raised when a fuzzy symbol lookup is not resolvable without additional
information.
"""
msg = dedent("""\
Multiple symbols were found fuzzy matching the name '{symbol}'. Use
the as_of_date and/or country_code arguments to specify the date
and country for the symbol-lookup.
Possible options: {options}
""")
class SameSymbolUsedAcrossCountries(MultipleSymbolsFound):
"""
Raised when a symbol() call contains a symbol that is used in more than
one country and is thus not resolvable without a country_code.
"""
msg = dedent("""\
The symbol '{symbol}' is used in more than one country. Use the
country_code argument to specify the country.
Possible options by country: {options}
""")
class SymbolNotFound(ZiplineError):
"""
    Raised when a symbol() call contains a non-existent symbol.
"""
msg = """
Symbol '{symbol}' was not found.
""".strip()
class RootSymbolNotFound(ZiplineError):
"""
    Raised when a lookup_future_chain() call contains a non-existent symbol.
"""
msg = """
Root symbol '{root_symbol}' was not found.
""".strip()
class ValueNotFoundForField(ZiplineError):
"""
    Raised when a lookup_by_supplementary_mapping() call contains a
    value that does not exist for the specified mapping type.
"""
msg = """
Value '{value}' was not found for field '{field}'.
""".strip()
class MultipleValuesFoundForField(ZiplineError):
"""
Raised when a lookup_by_supplementary_mapping() call contains a
value that changed over time for the specified field and is
thus not resolvable without additional information provided via
as_of_date.
"""
msg = """
Multiple occurrences of the value '{value}' found for field '{field}'.
Use the 'as_of_date' or 'country_code' argument to specify when or where the
lookup should be valid.
Possible options: {options}
""".strip()
class NoValueForSid(ZiplineError):
"""
Raised when a get_supplementary_field() call contains a sid that
does not have a value for the specified mapping type.
"""
msg = """
No '{field}' value found for sid '{sid}'.
""".strip()
class MultipleValuesFoundForSid(ZiplineError):
"""
Raised when a get_supplementary_field() call contains a value that
changed over time for the specified field and is thus not resolvable
without additional information provided via as_of_date.
"""
msg = """
Multiple '{field}' values found for sid '{sid}'. Use the 'as_of_date' argument
to specify when the lookup should be valid.
Possible options: {options}
""".strip()
class SidsNotFound(ZiplineError):
"""
Raised when a retrieve_asset() or retrieve_all() call contains a
non-existent sid.
"""
@lazyval
def plural(self):
return len(self.sids) > 1
@lazyval
def sids(self):
return self.kwargs['sids']
@lazyval
def msg(self):
if self.plural:
return "No assets found for sids: {sids}."
return "No asset found for sid: {sids[0]}."
class EquitiesNotFound(SidsNotFound):
"""
Raised when a call to `retrieve_equities` fails to find an asset.
"""
@lazyval
def msg(self):
if self.plural:
return "No equities found for sids: {sids}."
return "No equity found for sid: {sids[0]}."
class FutureContractsNotFound(SidsNotFound):
"""
Raised when a call to `retrieve_futures_contracts` fails to find an asset.
"""
@lazyval
def msg(self):
if self.plural:
return "No future contracts found for sids: {sids}."
return "No future contract found for sid: {sids[0]}."
class ConsumeAssetMetaDataError(ZiplineError):
"""
Raised when AssetFinder.consume() is called on an invalid object.
"""
msg = """
AssetFinder cannot consume metadata of type {obj}. Metadata must be a dict, a
DataFrame, or a tables.Table. If the provided metadata is a Table, the rows
must contain at least one of 'sid' or 'symbol'.
""".strip()
class SidAssignmentError(ZiplineError):
"""
Raised when an AssetFinder tries to build an Asset that does not have a sid
and that AssetFinder is not permitted to assign sids.
"""
msg = """
AssetFinder metadata is missing a SID for identifier '{identifier}'.
""".strip()
class NoSourceError(ZiplineError):
"""
Raised when no source is given to the pipeline
"""
msg = """
No data source given.
""".strip()
class PipelineDateError(ZiplineError):
"""
Raised when only one date is passed to the pipeline
"""
msg = """
Only one simulation date given. Please specify both the 'start' and 'end' for
the simulation, or neither. If neither is given, the start and end of the
DataSource will be used. Given start = '{start}', end = '{end}'
""".strip()
class WindowLengthTooLong(ZiplineError):
"""
Raised when a trailing window is instantiated with a lookback greater than
the length of the underlying array.
"""
msg = (
"Can't construct a rolling window of length "
"{window_length} on an array of length {nrows}."
).strip()
class WindowLengthNotPositive(ZiplineError):
"""
Raised when a trailing window would be instantiated with a length less than
1.
"""
msg = (
"Expected a window_length greater than 0, got {window_length}."
).strip()
class NonWindowSafeInput(ZiplineError):
"""
Raised when a Pipeline API term that is not deemed window safe is specified
as an input to another windowed term.
This is an error because it's generally not safe to compose windowed
functions on split/dividend adjusted data.
"""
msg = (
"Can't compute windowed expression {parent} with "
"windowed input {child}."
)
class TermInputsNotSpecified(ZiplineError):
"""
Raised if a user attempts to construct a term without specifying inputs and
that term does not have class-level default inputs.
"""
msg = "{termname} requires inputs, but no inputs list was passed."
class NonPipelineInputs(ZiplineError):
"""
Raised when a non-pipeline object is passed as input to a ComputableTerm
"""
def __init__(self, term, inputs):
self.term = term
self.inputs = inputs
def __str__(self):
return (
"Unexpected input types in {}. "
"Inputs to Pipeline expressions must be Filters, Factors, "
"Classifiers, or BoundColumns.\n"
"Got the following type(s) instead: {}".format(
type(self.term).__name__,
sorted(set(map(type, self.inputs)), key=lambda t: t.__name__),
)
)
class TermOutputsEmpty(ZiplineError):
"""
Raised if a user attempts to construct a term with an empty outputs list.
"""
msg = (
"{termname} requires at least one output when passed an outputs "
"argument."
)
class InvalidOutputName(ZiplineError):
"""
Raised if a term's output names conflict with any of its attributes.
"""
msg = (
"{output_name!r} cannot be used as an output name for {termname}. "
"Output names cannot start with an underscore or be contained in the "
"following list: {disallowed_names}."
)
class WindowLengthNotSpecified(ZiplineError):
"""
Raised if a user attempts to construct a term without specifying window
length and that term does not have a class-level default window length.
"""
msg = (
"{termname} requires a window_length, but no window_length was passed."
)
class InvalidTermParams(ZiplineError):
"""
Raised if a user attempts to construct a Term using ParameterizedTermMixin
without specifying a `params` list in the class body.
"""
msg = (
"Expected a list of strings as a class-level attribute for "
"{termname}.params, but got {value} instead."
)
class DTypeNotSpecified(ZiplineError):
"""
Raised if a user attempts to construct a term without specifying dtype and
that term does not have class-level default dtype.
"""
msg = (
"{termname} requires a dtype, but no dtype was passed."
)
class NotDType(ZiplineError):
"""
Raised when a pipeline Term is constructed with a dtype that isn't a numpy
dtype object.
"""
msg = (
"{termname} expected a numpy dtype "
"object for a dtype, but got {dtype} instead."
)
class UnsupportedDType(ZiplineError):
"""
Raised when a pipeline Term is constructed with a dtype that's not
supported.
"""
msg = (
"Failed to construct {termname}.\n"
"Pipeline terms of dtype {dtype} are not yet supported."
)
class BadPercentileBounds(ZiplineError):
"""
Raised by API functions accepting percentile bounds when the passed bounds
are invalid.
"""
msg = (
"Percentile bounds must fall between 0.0 and {upper_bound}, and min "
"must be less than max."
"\nInputs were min={min_percentile}, max={max_percentile}."
)
class UnknownRankMethod(ZiplineError):
"""
Raised during construction of a Rank factor when supplied a bad Rank
method.
"""
msg = (
"Unknown ranking method: '{method}'. "
"`method` must be one of {choices}"
)
class AttachPipelineAfterInitialize(ZiplineError):
"""
Raised when a user tries to call add_pipeline outside of initialize.
"""
msg = (
"Attempted to attach a pipeline after initialize(). "
"attach_pipeline() can only be called during initialize."
)
class PipelineOutputDuringInitialize(ZiplineError):
"""
Raised when a user tries to call `pipeline_output` during initialize.
"""
msg = (
"Attempted to call pipeline_output() during initialize. "
"pipeline_output() can only be called once initialize has completed."
)
class NoSuchPipeline(ZiplineError, KeyError):
"""
Raised when a user tries to access a non-existent pipeline by name.
"""
msg = (
"No pipeline named '{name}' exists. Valid pipeline names are {valid}. "
"Did you forget to call attach_pipeline()?"
)
class DuplicatePipelineName(ZiplineError):
"""
Raised when a user tries to attach a pipeline with a name that already
exists for another attached pipeline.
"""
msg = (
"Attempted to attach pipeline named {name!r}, but the name already "
"exists for another pipeline. Please use a different name for this "
"pipeline."
)
class UnsupportedDataType(ZiplineError):
"""
Raised by CustomFactors with unsupported dtypes.
"""
def __init__(self, hint='', **kwargs):
if hint:
hint = ' ' + hint
kwargs['hint'] = hint
super(UnsupportedDataType, self).__init__(**kwargs)
msg = "{typename} instances with dtype {dtype} are not supported.{hint}"
class NoFurtherDataError(ZiplineError):
"""
Raised by calendar operations that would ask for dates beyond the extent of
our known data.
"""
# This accepts an arbitrary message string because it's used in more places
    # than can be usefully templated.
msg = '{msg}'
@classmethod
def from_lookback_window(cls,
initial_message,
first_date,
lookback_start,
lookback_length):
return cls(
msg=dedent(
"""
{initial_message}
lookback window started at {lookback_start}
earliest known date was {first_date}
{lookback_length} extra rows of data were required
"""
).format(
initial_message=initial_message,
first_date=first_date,
lookback_start=lookback_start,
lookback_length=lookback_length,
)
)
class UnsupportedDatetimeFormat(ZiplineError):
"""
Raised when an unsupported datetime is passed to an API method.
"""
msg = ("The input '{input}' passed to '{method}' is not "
"coercible to a pandas.Timestamp object.")
class AssetDBVersionError(ZiplineError):
"""
Raised by an AssetDBWriter or AssetFinder if the version number in the
versions table does not match the ASSET_DB_VERSION in asset_writer.py.
"""
msg = (
"The existing Asset database has an incorrect version: {db_version}. "
"Expected version: {expected_version}. Try rebuilding your asset "
"database or updating your version of Zipline."
)
class AssetDBImpossibleDowngrade(ZiplineError):
msg = (
"The existing Asset database is version: {db_version} which is lower "
"than the desired downgrade version: {desired_version}."
)
class HistoryWindowStartsBeforeData(ZiplineError):
msg = (
"History window extends before {first_trading_day}. To use this "
"history window, start the backtest on or after {suggested_start_day}."
)
class NonExistentAssetInTimeFrame(ZiplineError):
msg = (
"The target asset '{asset}' does not exist for the entire timeframe "
"between {start_date} and {end_date}."
)
class InvalidCalendarName(ZiplineError):
"""
Raised when a calendar with an invalid name is requested.
"""
msg = (
"The requested TradingCalendar, {calendar_name}, does not exist."
)
class CalendarNameCollision(ZiplineError):
"""
Raised when the static calendar registry already has a calendar with a
given name.
"""
msg = (
"A calendar with the name {calendar_name} is already registered."
)
class CyclicCalendarAlias(ZiplineError):
"""
Raised when calendar aliases form a cycle.
"""
msg = "Cycle in calendar aliases: [{cycle}]"
class ScheduleFunctionWithoutCalendar(ZiplineError):
"""
Raised when schedule_function is called but there is not a calendar to be
used in the construction of an event rule.
"""
# TODO update message when new TradingSchedules are built
msg = (
"To use schedule_function, the TradingAlgorithm must be running on an "
"ExchangeTradingSchedule, rather than {schedule}."
)
class ScheduleFunctionInvalidCalendar(ZiplineError):
"""
Raised when schedule_function is called with an invalid calendar argument.
"""
msg = (
"Invalid calendar '{given_calendar}' passed to schedule_function. "
"Allowed options are {allowed_calendars}."
)
class UnsupportedPipelineOutput(ZiplineError):
"""
Raised when a 1D term is added as a column to a pipeline.
"""
msg = (
"Cannot add column {column_name!r} with term {term}. Adding slices or "
"single-column-output terms as pipeline columns is not currently "
"supported."
)
class NonSliceableTerm(ZiplineError):
"""
Raised when attempting to index into a non-sliceable term, e.g. instances
of `zipline.pipeline.term.LoadableTerm`.
"""
msg = "Taking slices of {term} is not currently supported."
class IncompatibleTerms(ZiplineError):
"""
Raised when trying to compute correlations/regressions between two 2D
factors with different masks.
"""
msg = (
"{term_1} and {term_2} must have the same mask in order to compute "
"correlations and regressions asset-wise."
    )


# ===== end of zipline/errors.py; zipline/protocol.py follows =====
from warnings import warn
import pandas as pd
from .assets import Asset
from .utils.enum import enum
from ._protocol import BarData, InnerPosition # noqa
class MutableView(object):
"""A mutable view over an "immutable" object.
Parameters
----------
ob : any
The object to take a view over.
"""
# add slots so we don't accidentally add attributes to the view instead of
# ``ob``
__slots__ = ('_mutable_view_ob',)
def __init__(self, ob):
object.__setattr__(self, '_mutable_view_ob', ob)
def __getattr__(self, attr):
return getattr(self._mutable_view_ob, attr)
def __setattr__(self, attr, value):
vars(self._mutable_view_ob)[attr] = value
def __repr__(self):
return '%s(%r)' % (type(self).__name__, self._mutable_view_ob)
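# Illustrative sketch: MutableView is how this module mutates otherwise
# "immutable" objects such as Portfolio and Account, whose __setattr__ raises.
# (Hypothetical snippet mirroring the pattern used in Portfolio.__init__.)
#
#     portfolio = Portfolio(capital_base=100000.0)
#     # portfolio.cash = 0.0             -> AttributeError
#     MutableView(portfolio).cash = 0.0  # writes through to the same object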
# Datasource type should completely determine the other fields of a
# message with its type.
DATASOURCE_TYPE = enum(
'AS_TRADED_EQUITY',
'MERGER',
'SPLIT',
'DIVIDEND',
'TRADE',
'TRANSACTION',
'ORDER',
'EMPTY',
'DONE',
'CUSTOM',
'BENCHMARK',
'COMMISSION',
'CLOSE_POSITION'
)
# Expected fields/index values for a dividend Series.
DIVIDEND_FIELDS = [
'declared_date',
'ex_date',
'gross_amount',
'net_amount',
'pay_date',
'payment_sid',
'ratio',
'sid',
]
# Expected fields/index values for a dividend payment Series.
DIVIDEND_PAYMENT_FIELDS = [
'id',
'payment_sid',
'cash_amount',
'share_count',
]
class Event(object):
def __init__(self, initial_values=None):
if initial_values:
self.__dict__.update(initial_values)
def keys(self):
return self.__dict__.keys()
def __eq__(self, other):
return hasattr(other, '__dict__') and self.__dict__ == other.__dict__
def __contains__(self, name):
return name in self.__dict__
def __repr__(self):
return "Event({0})".format(self.__dict__)
def to_series(self, index=None):
return pd.Series(self.__dict__, index=index)
def _deprecated_getitem_method(name, attrs):
"""Create a deprecated ``__getitem__`` method that tells users to use
getattr instead.
Parameters
----------
name : str
The name of the object in the warning message.
attrs : iterable[str]
The set of allowed attributes.
Returns
-------
__getitem__ : callable[any, str]
The ``__getitem__`` method to put in the class dict.
"""
attrs = frozenset(attrs)
msg = (
"'{name}[{attr!r}]' is deprecated, please use"
" '{name}.{attr}' instead"
)
def __getitem__(self, key):
"""``__getitem__`` is deprecated, please use attribute access instead.
"""
warn(msg.format(name=name, attr=key), DeprecationWarning, stacklevel=2)
if key in attrs:
return getattr(self, key)
raise KeyError(key)
return __getitem__
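# Illustrative sketch (hypothetical class): what the generated __getitem__
# looks like from the caller's side.
#
#     class Thing(Event):
#         __getitem__ = _deprecated_getitem_method('thing', {'amount'})
#
#     t = Thing({'amount': 10})
#     t['amount']   # 10, but emits a DeprecationWarning
#     t.amount      # 10, the supported spelling
#     t['bogus']    # warns, then raises KeyError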
class Order(Event):
# If you are adding new attributes, don't update this set. This method
# is deprecated to normal attribute access so we don't want to encourage
# new usages.
__getitem__ = _deprecated_getitem_method(
'order', {
'dt',
'sid',
'amount',
'stop',
'limit',
'id',
'filled',
'commission',
'stop_reached',
'limit_reached',
'created',
},
)
class Portfolio(object):
"""Object providing read-only access to current portfolio state.
Parameters
----------
start_date : pd.Timestamp
The start date for the period being recorded.
capital_base : float
The starting value for the portfolio. This will be used as the starting
cash, current cash, and portfolio value.
Attributes
----------
positions : zipline.protocol.Positions
Dict-like object containing information about currently-held positions.
cash : float
Amount of cash currently held in portfolio.
portfolio_value : float
Current liquidation value of the portfolio's holdings.
This is equal to ``cash + sum(shares * price)``
starting_cash : float
Amount of cash in the portfolio at the start of the backtest.
"""
def __init__(self, start_date=None, capital_base=0.0):
self_ = MutableView(self)
self_.cash_flow = 0.0
self_.starting_cash = capital_base
self_.portfolio_value = capital_base
self_.pnl = 0.0
self_.returns = 0.0
self_.cash = capital_base
self_.positions = Positions()
self_.start_date = start_date
self_.positions_value = 0.0
self_.positions_exposure = 0.0
@property
def capital_used(self):
return self.cash_flow
def __setattr__(self, attr, value):
raise AttributeError('cannot mutate Portfolio objects')
def __repr__(self):
return "Portfolio({0})".format(self.__dict__)
# If you are adding new attributes, don't update this set. This method
# is deprecated to normal attribute access so we don't want to encourage
# new usages.
__getitem__ = _deprecated_getitem_method(
'portfolio', {
'capital_used',
'starting_cash',
'portfolio_value',
'pnl',
'returns',
'cash',
'positions',
'start_date',
'positions_value',
},
)
@property
def current_portfolio_weights(self):
"""
Compute each asset's weight in the portfolio by calculating its held
value divided by the total value of all positions.
Each equity's value is its price times the number of shares held. Each
        futures contract's value is its unit price times the number of
        contracts held times the multiplier.
"""
position_values = pd.Series({
asset: (
position.last_sale_price *
position.amount *
asset.price_multiplier
)
for asset, position in self.positions.items()
})
return position_values / self.portfolio_value
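    # Illustrative sketch of the calculation above (hypothetical numbers):
    # with two equity positions valued at 6,000 and 4,000 and a
    # portfolio_value of 10,000 (no cash), the resulting Series is
    # {asset_a: 0.6, asset_b: 0.4}. For futures, price_multiplier scales the
    # per-contract value first.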
class Account(object):
"""
The account object tracks information about the trading account. The
values are updated as the algorithm runs and its keys remain unchanged.
If connected to a broker, one can update these values with the trading
account values as reported by the broker.
"""
def __init__(self):
self_ = MutableView(self)
self_.settled_cash = 0.0
self_.accrued_interest = 0.0
self_.buying_power = float('inf')
self_.equity_with_loan = 0.0
self_.total_positions_value = 0.0
self_.total_positions_exposure = 0.0
self_.regt_equity = 0.0
self_.regt_margin = float('inf')
self_.initial_margin_requirement = 0.0
self_.maintenance_margin_requirement = 0.0
self_.available_funds = 0.0
self_.excess_liquidity = 0.0
self_.cushion = 0.0
self_.day_trades_remaining = float('inf')
self_.leverage = 0.0
self_.net_leverage = 0.0
self_.net_liquidation = 0.0
def __setattr__(self, attr, value):
raise AttributeError('cannot mutate Account objects')
def __repr__(self):
return "Account({0})".format(self.__dict__)
# If you are adding new attributes, don't update this set. This method
# is deprecated to normal attribute access so we don't want to encourage
# new usages.
__getitem__ = _deprecated_getitem_method(
'account', {
'settled_cash',
'accrued_interest',
'buying_power',
'equity_with_loan',
'total_positions_value',
'total_positions_exposure',
'regt_equity',
'regt_margin',
'initial_margin_requirement',
'maintenance_margin_requirement',
'available_funds',
'excess_liquidity',
'cushion',
'day_trades_remaining',
'leverage',
'net_leverage',
'net_liquidation',
},
)
class Position(object):
"""
A position held by an algorithm.
Attributes
----------
asset : zipline.assets.Asset
The held asset.
amount : int
Number of shares held. Short positions are represented with negative
values.
cost_basis : float
Average price at which currently-held shares were acquired.
last_sale_price : float
Most recent price for the position.
last_sale_date : pd.Timestamp
Datetime at which ``last_sale_price`` was last updated.
"""
__slots__ = ('_underlying_position',)
def __init__(self, underlying_position):
object.__setattr__(self, '_underlying_position', underlying_position)
def __getattr__(self, attr):
return getattr(self._underlying_position, attr)
def __setattr__(self, attr, value):
raise AttributeError('cannot mutate Position objects')
@property
def sid(self):
# for backwards compatibility
return self.asset
def __repr__(self):
return 'Position(%r)' % {
k: getattr(self, k)
for k in (
'asset',
'amount',
'cost_basis',
'last_sale_price',
'last_sale_date',
)
}
# If you are adding new attributes, don't update this set. This method
# is deprecated to normal attribute access so we don't want to encourage
# new usages.
__getitem__ = _deprecated_getitem_method(
'position', {
'sid',
'amount',
'cost_basis',
'last_sale_price',
'last_sale_date',
},
)
# Copied from Position and renamed. This is used to handle cases where a user
# does something like `context.portfolio.positions[100]` instead of
# `context.portfolio.positions[sid(100)]`.
class _DeprecatedSidLookupPosition(object):
def __init__(self, sid):
self.sid = sid
self.amount = 0
self.cost_basis = 0.0 # per share
self.last_sale_price = 0.0
self.last_sale_date = None
def __repr__(self):
return "_DeprecatedSidLookupPosition({0})".format(self.__dict__)
# If you are adding new attributes, don't update this set. This method
# is deprecated to normal attribute access so we don't want to encourage
# new usages.
__getitem__ = _deprecated_getitem_method(
'position', {
'sid',
'amount',
'cost_basis',
'last_sale_price',
'last_sale_date',
},
)
class Positions(dict):
"""A dict-like object containing the algorithm's current positions.
"""
def __missing__(self, key):
if isinstance(key, Asset):
return Position(InnerPosition(key))
elif isinstance(key, int):
warn("Referencing positions by integer is deprecated."
" Use an asset instead.")
else:
warn("Position lookup expected a value of type Asset but got {0}"
" instead.".format(type(key).__name__))
        return _DeprecatedSidLookupPosition(key)


# ===== end of zipline/protocol.py; zipline/__init__.py follows =====
from distutils.version import StrictVersion
import os
import numpy as np
# This is *not* a place to dump arbitrary classes/modules for convenience,
# it is a place to expose the public interfaces.
from trading_calendars import get_calendar
from . import data
from . import finance
from . import gens
from . import utils
from .utils.numpy_utils import numpy_version
from .utils.pandas_utils import new_pandas
from .utils.run_algo import run_algorithm
from ._version import get_versions
# These need to happen after the other imports.
from . algorithm import TradingAlgorithm
from . import api
from zipline import extensions as ext
from zipline.finance.blotter import Blotter
# PERF: Fire a warning if calendars were instantiated during zipline import.
# Having calendars doesn't break anything per se, but it makes zipline imports
# noticeably slower, which is particularly painful in the Zipline CLI.
from trading_calendars.calendar_utils import global_calendar_dispatcher
if global_calendar_dispatcher._calendars:
import warnings
warnings.warn(
"Found TradingCalendar instances after zipline import.\n"
"Zipline startup will be much slower until this is fixed!",
)
del warnings
del global_calendar_dispatcher
__version__ = get_versions()['version']
del get_versions
extension_args = ext.Namespace()
def load_ipython_extension(ipython):
from .__main__ import zipline_magic
ipython.register_magic_function(zipline_magic, 'line_cell', 'zipline')
if os.name == 'nt':
    # we need to be able to write to our temp directory on windows so we
# create a subdir in %TMP% that has write access and use that as %TMP%
def _():
import atexit
import tempfile
tempfile.tempdir = tempdir = tempfile.mkdtemp()
@atexit.register
def cleanup_tempdir():
import shutil
shutil.rmtree(tempdir)
_()
del _
__all__ = [
'Blotter',
'TradingAlgorithm',
'api',
'data',
'finance',
'get_calendar',
'gens',
'run_algorithm',
'utils',
'extension_args'
]
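# Illustrative usage sketch (not part of the original module). The dates and
# bundle name are placeholders; the bundle must already have been ingested.
#
#     import pandas as pd
#     from zipline import run_algorithm
#
#     def initialize(context):
#         pass
#
#     perf = run_algorithm(
#         start=pd.Timestamp('2016-01-04', tz='utc'),
#         end=pd.Timestamp('2016-12-30', tz='utc'),
#         initialize=initialize,
#         capital_base=100000,
#         bundle='quantopian-quandl',
#     )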
def setup(self,
np=np,
numpy_version=numpy_version,
StrictVersion=StrictVersion,
new_pandas=new_pandas):
"""Lives in zipline.__init__ for doctests."""
if numpy_version >= StrictVersion('1.14'):
self.old_opts = np.get_printoptions()
np.set_printoptions(legacy='1.13')
else:
self.old_opts = None
if new_pandas:
self.old_err = np.geterr()
# old pandas has numpy compat that sets this
np.seterr(all='ignore')
else:
self.old_err = None
def teardown(self, np=np):
"""Lives in zipline.__init__ for doctests."""
if self.old_err is not None:
np.seterr(**self.old_err)
if self.old_opts is not None:
np.set_printoptions(**self.old_opts)
del os
del np
del numpy_version
del StrictVersion
del new_pandas


# ===== end of zipline/__init__.py; zipline/algorithm.py follows =====
from collections import Iterable, namedtuple
from copy import copy
import warnings
from datetime import tzinfo, time, timedelta
import logbook
import pytz
import pandas as pd
import numpy as np
from itertools import chain, repeat
from six import (
exec_,
iteritems,
itervalues,
string_types,
)
from trading_calendars.utils.pandas_utils import days_at_time
from trading_calendars import get_calendar
from zipline._protocol import handle_non_market_minutes
from zipline.errors import (
AttachPipelineAfterInitialize,
CannotOrderDelistedAsset,
DuplicatePipelineName,
HistoryInInitialize,
IncompatibleCommissionModel,
IncompatibleSlippageModel,
NoSuchPipeline,
OrderDuringInitialize,
OrderInBeforeTradingStart,
PipelineOutputDuringInitialize,
RegisterAccountControlPostInit,
RegisterTradingControlPostInit,
ScheduleFunctionInvalidCalendar,
SetBenchmarkOutsideInitialize,
SetCancelPolicyPostInit,
SetCommissionPostInit,
SetSlippagePostInit,
UnsupportedCancelPolicy,
UnsupportedDatetimeFormat,
UnsupportedOrderParameters,
ZeroCapitalError
)
from zipline.finance.blotter import SimulationBlotter
from zipline.finance.controls import (
LongOnly,
MaxOrderCount,
MaxOrderSize,
MaxPositionSize,
MaxLeverage,
MinLeverage,
RestrictedListOrder
)
from zipline.finance.execution import (
LimitOrder,
MarketOrder,
StopLimitOrder,
StopOrder,
)
from zipline.finance.asset_restrictions import Restrictions
from zipline.finance.cancel_policy import NeverCancel, CancelPolicy
from zipline.finance.asset_restrictions import (
NoRestrictions,
StaticRestrictions,
SecurityListRestrictions,
)
from zipline.assets import Asset, Equity, Future
from zipline.gens.tradesimulation import AlgorithmSimulator
from zipline.finance.metrics import MetricsTracker, load as load_metrics_set
from zipline.pipeline import Pipeline
import zipline.pipeline.domain as domain
from zipline.pipeline.engine import (
ExplodingPipelineEngine,
SimplePipelineEngine,
)
from zipline.utils.api_support import (
api_method,
require_initialized,
require_not_initialized,
ZiplineAPI,
disallowed_in_before_trading_start)
from zipline.utils.compat import ExitStack
from zipline.utils.input_validation import (
coerce_string,
ensure_upper_case,
error_keywords,
expect_dtypes,
expect_types,
optional,
optionally,
)
from zipline.utils.numpy_utils import int64_dtype
from zipline.utils.pandas_utils import normalize_date
from zipline.utils.cache import ExpiringCache
from zipline.utils.pandas_utils import clear_dataframe_indexer_caches
import zipline.utils.events
from zipline.utils.events import (
EventManager,
make_eventrule,
date_rules,
time_rules,
calendars,
AfterOpen,
BeforeClose
)
from zipline.utils.math_utils import (
tolerant_equals,
round_if_near_integer,
)
from zipline.utils.preprocess import preprocess
from zipline.utils.security_list import SecurityList
import zipline.protocol
from zipline.sources.requests_csv import PandasRequestsCSV
from zipline.gens.sim_engine import MinuteSimulationClock
from zipline.sources.benchmark_source import BenchmarkSource
from zipline.zipline_warnings import ZiplineDeprecationWarning
log = logbook.Logger("ZiplineLog")
# For creating and storing pipeline instances
AttachedPipeline = namedtuple('AttachedPipeline', 'pipe chunks eager')
class NoBenchmark(ValueError):
def __init__(self):
super(NoBenchmark, self).__init__(
'Must specify either benchmark_sid or benchmark_returns.',
)
class TradingAlgorithm(object):
"""A class that represents a trading strategy and parameters to execute
the strategy.
Parameters
----------
*args, **kwargs
Forwarded to ``initialize`` unless listed below.
initialize : callable[context -> None], optional
Function that is called at the start of the simulation to
setup the initial context.
handle_data : callable[(context, data) -> None], optional
Function called on every bar. This is where most logic should be
implemented.
before_trading_start : callable[(context, data) -> None], optional
Function that is called before any bars have been processed each
day.
analyze : callable[(context, DataFrame) -> None], optional
Function that is called at the end of the backtest. This is passed
the context and the performance results for the backtest.
    teardown : callable[context -> None], optional
        Function that is called when algorithm execution stops, giving the
        developer a chance to shut the algorithm down cleanly.
script : str, optional
Algoscript that contains the definitions for the four algorithm
lifecycle functions and any supporting code.
namespace : dict, optional
The namespace to execute the algoscript in. By default this is an
empty namespace that will include only python built ins.
algo_filename : str, optional
The filename for the algoscript. This will be used in exception
tracebacks. default: '<string>'.
data_frequency : {'daily', 'minute'}, optional
The duration of the bars.
    performance_callback : callable[(perf) -> None], optional
        A callback that receives the performance results every day, not only
        at the end of the backtest. This makes it possible to run live and
        monitor the algorithm's performance daily.
    stop_execution_callback : callable[() -> bool], optional
        A callback used to check whether execution should be stopped; it can
        be used to stop live trading (or a simulation). If the callback
        returns True, algorithm execution is aborted.
equities_metadata : dict or DataFrame or file-like object, optional
If dict is provided, it must have the following structure:
* keys are the identifiers
* values are dicts containing the metadata, with the metadata
field name as the key
If pandas.DataFrame is provided, it must have the
following structure:
* column names must be the metadata fields
* index must be the different asset identifiers
* array contents should be the metadata value
If an object with a ``read`` method is provided, ``read`` must
return rows containing at least one of 'sid' or 'symbol' along
with the other metadata fields.
futures_metadata : dict or DataFrame or file-like object, optional
The same layout as ``equities_metadata`` except that it is used
for futures information.
identifiers : list, optional
Any asset identifiers that are not provided in the
equities_metadata, but will be traded by this TradingAlgorithm.
get_pipeline_loader : callable[BoundColumn -> PipelineLoader], optional
The function that maps pipeline columns to their loaders.
create_event_context : callable[BarData -> context manager], optional
        A function used to create a context manager that wraps the
execution of all events that are scheduled for a bar.
This function will be passed the data for the bar and should
return the actual context manager that will be entered.
history_container_class : type, optional
The type of history container to use. default: HistoryContainer
platform : str, optional
The platform the simulation is running on. This can be queried for
in the simulation with ``get_environment``. This allows algorithms
to conditionally execute code based on platform it is running on.
default: 'zipline'
adjustment_reader : AdjustmentReader
The interface to the adjustments.
"""
def __init__(self,
sim_params,
data_portal=None,
asset_finder=None,
# Algorithm API
namespace=None,
script=None,
algo_filename=None,
initialize=None,
handle_data=None,
before_trading_start=None,
analyze=None,
teardown=None,
#
trading_calendar=None,
metrics_set=None,
blotter=None,
blotter_class=None,
cancel_policy=None,
benchmark_sid=None,
benchmark_returns=None,
platform='zipline',
capital_changes=None,
get_pipeline_loader=None,
create_event_context=None,
performance_callback=None,
stop_execution_callback=None,
**initialize_kwargs):
# List of trading controls to be used to validate orders.
self.trading_controls = []
# List of account controls to be checked on each bar.
self.account_controls = []
self._recorded_vars = {}
self.namespace = namespace or {}
self._platform = platform
self.logger = None
# XXX: This is kind of a mess.
# We support passing a data_portal in `run`, but we need an asset
# finder earlier than that to look up assets for things like
# set_benchmark.
self.data_portal = data_portal
if self.data_portal is None:
if asset_finder is None:
raise ValueError(
"Must pass either data_portal or asset_finder "
"to TradingAlgorithm()"
)
self.asset_finder = asset_finder
else:
# Raise an error if we were passed two different asset finders.
# There's no world where that's a good idea.
if asset_finder is not None \
and asset_finder is not data_portal.asset_finder:
raise ValueError(
"Inconsistent asset_finders in TradingAlgorithm()"
)
self.asset_finder = data_portal.asset_finder
self.benchmark_returns = benchmark_returns
# XXX: This is also a mess. We should remove all of this and only allow
# one way to pass a calendar.
#
# We have a required sim_params argument as well as an optional
# trading_calendar argument, but sim_params has a trading_calendar
# attribute. If the user passed trading_calendar explicitly, make sure
# it matches their sim_params. Otherwise, just use what's in their
# sim_params.
self.sim_params = sim_params
if trading_calendar is None:
self.trading_calendar = sim_params.trading_calendar
elif trading_calendar.name == sim_params.trading_calendar.name:
self.trading_calendar = sim_params.trading_calendar
else:
raise ValueError(
"Conflicting calendars: trading_calendar={}, but "
"sim_params.trading_calendar={}".format(
trading_calendar.name,
self.sim_params.trading_calendar.name,
)
)
self.metrics_tracker = None
self._last_sync_time = pd.NaT
self._metrics_set = metrics_set
if self._metrics_set is None:
self._metrics_set = load_metrics_set('default')
# Initialize Pipeline API data.
self.init_engine(get_pipeline_loader)
self._pipelines = {}
# Create an already-expired cache so that we compute the first time
# data is requested.
self._pipeline_cache = ExpiringCache(
cleanup=clear_dataframe_indexer_caches
)
if blotter is not None:
self.blotter = blotter
else:
cancel_policy = cancel_policy or NeverCancel()
blotter_class = blotter_class or SimulationBlotter
self.blotter = blotter_class(cancel_policy=cancel_policy)
# The symbol lookup date specifies the date to use when resolving
# symbols to sids, and can be set using set_symbol_lookup_date()
self._symbol_lookup_date = None
# If string is passed in, execute and get reference to
# functions.
self.algoscript = script
self._initialize = None
self._before_trading_start = None
self._analyze = None
self._performance_callback = None
self._stop_execution_callback = None
self._in_before_trading_start = False
self.event_manager = EventManager(create_event_context)
self._handle_data = None
def noop(*args, **kwargs):
pass
if self.algoscript is not None:
unexpected_api_methods = set()
if initialize is not None:
unexpected_api_methods.add('initialize')
if handle_data is not None:
unexpected_api_methods.add('handle_data')
if before_trading_start is not None:
unexpected_api_methods.add('before_trading_start')
if analyze is not None:
unexpected_api_methods.add('analyze')
if teardown is not None:
unexpected_api_methods.add('teardown')
if unexpected_api_methods:
raise ValueError(
"TradingAlgorithm received a script and the following API"
" methods as functions:\n{funcs}".format(
funcs=unexpected_api_methods,
)
)
if algo_filename is None:
algo_filename = '<string>'
code = compile(self.algoscript, algo_filename, 'exec')
exec_(code, self.namespace)
self._initialize = self.namespace.get('initialize', noop)
self._handle_data = self.namespace.get('handle_data', noop)
self._before_trading_start = self.namespace.get(
'before_trading_start',
)
# Optional analyze function, gets called after run
self._analyze = self.namespace.get('analyze')
self._teardown = self.namespace.get('teardown')
else:
self._initialize = initialize or (lambda self: None)
self._handle_data = handle_data
self._before_trading_start = before_trading_start
self._analyze = analyze
self._teardown = teardown
self._performance_callback = performance_callback
self._stop_execution_callback = stop_execution_callback
self.event_manager.add_event(
zipline.utils.events.Event(
zipline.utils.events.Always(),
# We pass handle_data.__func__ to get the unbound method.
# We will explicitly pass the algorithm to bind it again.
self.handle_data.__func__,
),
prepend=True,
)
if self.sim_params.capital_base <= 0:
raise ZeroCapitalError()
# Prepare the algo for initialization
self.initialized = False
self.initialize_kwargs = initialize_kwargs or {}
self.benchmark_sid = benchmark_sid
# A dictionary of capital changes, keyed by timestamp, indicating the
# target/delta of the capital changes, along with values
self.capital_changes = capital_changes or {}
# A dictionary of the actual capital change deltas, keyed by timestamp
self.capital_change_deltas = {}
self.restrictions = NoRestrictions()
self._backwards_compat_universe = None
def init_engine(self, get_loader):
"""
Construct and store a PipelineEngine from loader.
If get_loader is None, constructs an ExplodingPipelineEngine
"""
if get_loader is not None:
self.engine = SimplePipelineEngine(
get_loader,
self.asset_finder,
self.default_pipeline_domain(self.trading_calendar),
)
else:
self.engine = ExplodingPipelineEngine()
def initialize(self, *args, **kwargs):
"""
Call self._initialize with `self` made available to Zipline API
functions.
"""
with ZiplineAPI(self):
self._initialize(self, *args, **kwargs)
def before_trading_start(self, data):
self.compute_eager_pipelines()
if hasattr(self, "broker"):
            # we are live; we need to update our portfolio from the broker before we start
self.broker.positions
if self._before_trading_start is None:
return
self._in_before_trading_start = True
with handle_non_market_minutes(data) if \
self.data_frequency == "minute" else ExitStack():
self._before_trading_start(self, data)
self._in_before_trading_start = False
def handle_data(self, data):
if self._handle_data:
self._handle_data(self, data)
def teardown(self):
if self._teardown:
self._teardown(self)
def analyze(self, perf):
if self._analyze is None:
return
with ZiplineAPI(self):
self._analyze(self, perf)
def __repr__(self):
"""
N.B. this does not yet represent a string that can be used
to instantiate an exact copy of an algorithm.
However, it is getting close, and provides some value as something
that can be inspected interactively.
"""
return """
{class_name}(
capital_base={capital_base}
sim_params={sim_params},
initialized={initialized},
slippage_models={slippage_models},
commission_models={commission_models},
blotter={blotter},
recorded_vars={recorded_vars})
""".strip().format(class_name=self.__class__.__name__,
capital_base=self.sim_params.capital_base,
sim_params=repr(self.sim_params),
initialized=self.initialized,
slippage_models=repr(self.blotter.slippage_models),
commission_models=repr(self.blotter.commission_models),
blotter=repr(self.blotter),
recorded_vars=repr(self.recorded_vars))
def _create_clock(self):
"""
If the clock property is not set, then create one based on frequency.
"""
trading_o_and_c = self.trading_calendar.schedule.loc[
self.sim_params.sessions]
market_closes = trading_o_and_c['market_close']
minutely_emission = False
if self.sim_params.data_frequency == 'minute':
market_opens = trading_o_and_c['market_open']
minutely_emission = self.sim_params.emission_rate == "minute"
# The calendar's execution times are the minutes over which we
# actually want to run the clock. Typically the execution times
# simply adhere to the market open and close times. In the case of
# the futures calendar, for example, we only want to simulate over
# a subset of the full 24 hour calendar, so the execution times
# dictate a market open time of 6:31am US/Eastern and a close of
# 5:00pm US/Eastern.
execution_opens = \
self.trading_calendar.execution_time_from_open(market_opens)
execution_closes = \
self.trading_calendar.execution_time_from_close(market_closes)
else:
# in daily mode, we want to have one bar per session, timestamped
# as the last minute of the session.
execution_closes = \
self.trading_calendar.execution_time_from_close(market_closes)
execution_opens = execution_closes
# FIXME generalize these values
before_trading_start_minutes = days_at_time(
self.sim_params.sessions,
time(8, 45),
"US/Eastern"
)
return MinuteSimulationClock(
self.sim_params.sessions,
execution_opens,
execution_closes,
before_trading_start_minutes,
minute_emission=minutely_emission,
)
def _create_benchmark_source(self):
if self.benchmark_sid is not None:
benchmark_asset = self.asset_finder.retrieve_asset(
self.benchmark_sid
)
benchmark_returns = None
else:
if self.benchmark_returns is None:
raise NoBenchmark()
benchmark_asset = None
benchmark_returns = self.benchmark_returns
return BenchmarkSource(
benchmark_asset=benchmark_asset,
benchmark_returns=benchmark_returns,
trading_calendar=self.trading_calendar,
sessions=self.sim_params.sessions,
data_portal=self.data_portal,
emission_rate=self.sim_params.emission_rate,
)
def _create_metrics_tracker(self):
return MetricsTracker(
trading_calendar=self.trading_calendar,
first_session=self.sim_params.start_session,
last_session=self.sim_params.end_session,
capital_base=self.sim_params.capital_base,
emission_rate=self.sim_params.emission_rate,
data_frequency=self.sim_params.data_frequency,
asset_finder=self.asset_finder,
metrics=self._metrics_set,
)
def _create_generator(self, sim_params):
if sim_params is not None:
self.sim_params = sim_params
self.metrics_tracker = metrics_tracker = self._create_metrics_tracker()
# Set the dt initially to the period start by forcing it to change.
self.on_dt_changed(self.sim_params.start_session)
if not self.initialized:
self.initialize(**self.initialize_kwargs)
self.initialized = True
benchmark_source = self._create_benchmark_source()
self.trading_client = AlgorithmSimulator(
self,
sim_params,
self.data_portal,
self._create_clock(),
benchmark_source,
self.restrictions,
universe_func=self._calculate_universe
)
metrics_tracker.handle_start_of_simulation(benchmark_source)
return self.trading_client.transform()
def _calculate_universe(self):
# this exists to provide backwards compatibility for older,
# deprecated APIs, particularly around the iterability of
# BarData (ie, 'for sid in data`).
if self._backwards_compat_universe is None:
self._backwards_compat_universe = (
self.asset_finder.retrieve_all(self.asset_finder.sids)
)
return self._backwards_compat_universe
def compute_eager_pipelines(self):
"""
Compute any pipelines attached with eager=True.
"""
for name, pipe in self._pipelines.items():
if pipe.eager:
self.pipeline_output(name)
def get_generator(self):
"""
Override this method to add new logic to the construction
of the generator. Overrides can use the _create_generator
method to get a standard construction generator.
"""
return self._create_generator(self.sim_params)
def run(self, data_portal=None):
"""Run the algorithm.
"""
# HACK: I don't think we really want to support passing a data portal
# this late in the long term, but this is needed for now for backwards
# compat downstream.
if data_portal is not None:
self.data_portal = data_portal
self.asset_finder = data_portal.asset_finder
elif self.data_portal is None:
raise RuntimeError(
"No data portal in TradingAlgorithm.run().\n"
"Either pass a DataPortal to TradingAlgorithm() or to run()."
)
else:
assert self.asset_finder is not None, \
"Have data portal without asset_finder."
# Create zipline and loop through simulated_trading.
# Each iteration returns a perf dictionary
try:
perfs = []
for perf in self.get_generator():
perfs.append(perf)
if self._performance_callback:
# this is called daily
self._performance_callback(perf)
# convert perf dict to pandas dataframe
daily_stats = self._create_daily_stats(perfs)
self.analyze(daily_stats)
finally:
self.data_portal = None
self.metrics_tracker = None
return daily_stats
def _create_daily_stats(self, perfs):
# create daily and cumulative stats dataframe
daily_perfs = []
# TODO: the loop here could overwrite expected properties
# of daily_perf. Could potentially raise or log a
# warning.
for perf in perfs:
if perf and 'daily_perf' in perf:
perf['daily_perf'].update(
perf['daily_perf'].pop('recorded_vars')
)
perf['daily_perf'].update(perf['cumulative_risk_metrics'])
daily_perfs.append(perf['daily_perf'])
else:
self.risk_report = perf
daily_dts = pd.DatetimeIndex(
[p['period_close'] for p in daily_perfs]
)
daily_stats = pd.DataFrame(daily_perfs, index=daily_dts)
return daily_stats
def calculate_capital_changes(self, dt, emission_rate, is_interday,
portfolio_value_adjustment=0.0):
"""
        If there is a capital change for a given dt, this means the change
occurs before `handle_data` on the given dt. In the case of the
change being a target value, the change will be computed on the
portfolio value according to prices at the given dt
`portfolio_value_adjustment`, if specified, will be removed from the
portfolio_value of the cumulative performance when calculating deltas
from target capital changes.
"""
try:
capital_change = self.capital_changes[dt]
except KeyError:
return
self._sync_last_sale_prices()
if capital_change['type'] == 'target':
target = capital_change['value']
capital_change_amount = (
target -
(
self.portfolio.portfolio_value -
portfolio_value_adjustment
)
)
log.info('Processing capital change to target %s at %s. Capital '
'change delta is %s' % (target, dt,
capital_change_amount))
elif capital_change['type'] == 'delta':
target = None
capital_change_amount = capital_change['value']
log.info('Processing capital change of delta %s at %s'
% (capital_change_amount, dt))
else:
log.error("Capital change %s does not indicate a valid type "
"('target' or 'delta')" % capital_change)
return
self.capital_change_deltas.update({dt: capital_change_amount})
self.metrics_tracker.capital_change(capital_change_amount)
yield {
'capital_change':
{'date': dt,
'type': 'cash',
'target': target,
'delta': capital_change_amount}
}
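    # Illustrative sketch of the ``capital_changes`` mapping consumed above
    # (hypothetical timestamp and amount):
    #
    #     {
    #         pd.Timestamp('2016-06-01', tz='utc'): {
    #             'type': 'target',   # or 'delta'
    #             'value': 1500000.0,
    #         },
    #     }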
@api_method
def get_environment(self, field='platform'):
"""Query the execution environment.
Parameters
----------
field : {'platform', 'arena', 'data_frequency',
'start', 'end', 'capital_base', 'platform', '*'}
The field to query. The options have the following meanings:
arena : str
The arena from the simulation parameters. This will normally
be ``'backtest'`` but some systems may use this to distinguish
live trading from backtesting.
data_frequency : {'daily', 'minute'}
data_frequency tells the algorithm if it is running with
daily data or minute data.
start : datetime
The start date for the simulation.
end : datetime
The end date for the simulation.
capital_base : float
The starting capital for the simulation.
platform : str
The platform that the code is running on. By default this
will be the string 'zipline'. This can allow algorithms to
know if they are running on the Quantopian platform instead.
* : dict[str -> any]
Returns all of the fields in a dictionary.
Returns
-------
val : any
The value for the field queried. See above for more information.
Raises
------
ValueError
Raised when ``field`` is not a valid option.
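Examples
--------
A minimal sketch (illustrative, not from the original docstring) of
querying the environment inside ``initialize``:
.. code-block:: python
from zipline.api import get_environment
def initialize(context):
# A single field returns just that value...
context.is_minute = get_environment('data_frequency') == 'minute'
# ...while '*' returns every field as a dict.
context.env = get_environment('*')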
"""
env = {
'arena': self.sim_params.arena,
'data_frequency': self.sim_params.data_frequency,
'start': self.sim_params.first_open,
'end': self.sim_params.last_close,
'capital_base': self.sim_params.capital_base,
'platform': self._platform
}
if field == '*':
return env
else:
try:
return env[field]
except KeyError:
raise ValueError(
'%r is not a valid field for get_environment' % field,
)
@api_method
def fetch_csv(self,
url,
pre_func=None,
post_func=None,
date_column='date',
date_format=None,
timezone=pytz.utc.zone,
symbol=None,
mask=True,
symbol_column=None,
special_params_checker=None,
country_code=None,
**kwargs):
"""Fetch a csv from a remote url and register the data so that it is
queryable from the ``data`` object.
Parameters
----------
url : str
The url of the csv file to load.
pre_func : callable[pd.DataFrame -> pd.DataFrame], optional
A callback to allow preprocessing the raw data returned from
fetch_csv before dates are parsed or symbols are mapped.
post_func : callable[pd.DataFrame -> pd.DataFrame], optional
A callback to allow postprocessing of the data after dates and
symbols have been mapped.
date_column : str, optional
The name of the column in the preprocessed dataframe containing
datetime information to map the data.
date_format : str, optional
The format of the dates in the ``date_column``. If not provided
``fetch_csv`` will attempt to infer the format. For information
about the format of this string, see :func:`pandas.read_csv`.
timezone : tzinfo or str, optional
The timezone for the datetime in the ``date_column``.
symbol : str, optional
If the data is about a new asset or index then this string will
be the name used to identify the values in ``data``. For example,
one may use ``fetch_csv`` to load data for VIX, then this field
could be the string ``'VIX'``.
mask : bool, optional
Drop any rows which cannot be symbol mapped.
symbol_column : str
If the data is attaching some new attribute to each asset then this
argument is the name of the column in the preprocessed dataframe
containing the symbols. This will be used along with the date
information to map the sids in the asset finder.
country_code : str, optional
Country code to use to disambiguate symbol lookups.
**kwargs
Forwarded to :func:`pandas.read_csv`.
Returns
-------
csv_data_source : zipline.sources.requests_csv.PandasRequestsCSV
A requests source that will pull data from the url specified.
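Examples
--------
A hedged sketch of loading a hypothetical csv during ``initialize``;
the URL and column names are placeholders, not a real data source:
.. code-block:: python
from zipline.api import fetch_csv
def initialize(context):
# Columns from the csv become queryable through the ``data`` object.
fetch_csv(
'https://example.com/vix.csv',  # hypothetical URL
date_column='date',
date_format='%Y-%m-%d',
symbol='VIX',
)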
"""
if country_code is None:
country_code = self.default_fetch_csv_country_code(
self.trading_calendar,
)
# Show all the logs every time fetcher is used.
csv_data_source = PandasRequestsCSV(
url,
pre_func,
post_func,
self.asset_finder,
self.trading_calendar.day,
self.sim_params.start_session,
self.sim_params.end_session,
date_column,
date_format,
timezone,
symbol,
mask,
symbol_column,
data_frequency=self.data_frequency,
country_code=country_code,
special_params_checker=special_params_checker,
**kwargs
)
# ingest this into dataportal
self.data_portal.handle_extra_source(csv_data_source.df,
self.sim_params)
return csv_data_source
def add_event(self, rule, callback):
"""Adds an event to the algorithm's EventManager.
Parameters
----------
rule : EventRule
The rule for when the callback should be triggered.
callback : callable[(context, data) -> None]
The function to execute when the rule is triggered.
"""
self.event_manager.add_event(
zipline.utils.events.Event(rule, callback),
)
@api_method
def schedule_function(self,
func,
date_rule=None,
time_rule=None,
half_days=True,
calendar=None):
"""
Schedule a function to be called repeatedly in the future.
Parameters
----------
func : callable
The function to execute when the rule is triggered. ``func`` should
have the same signature as ``handle_data``.
date_rule : zipline.utils.events.EventRule, optional
Rule for the dates on which to execute ``func``. If not
passed, the function will run every trading day.
time_rule : zipline.utils.events.EventRule, optional
Rule for the time at which to execute ``func``. If not passed, the
function will execute at the end of the first market minute of the
day.
half_days : bool, optional
Should this rule fire on half days? Default is True.
calendar : Sentinel, optional
Calendar used to compute rules that depend on the trading calendar.
See Also
--------
:class:`zipline.api.date_rules`
:class:`zipline.api.time_rules`
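Examples
--------
A short sketch (function names are illustrative) of scheduling a
weekly rebalance shortly after the open:
.. code-block:: python
from zipline.api import schedule_function, date_rules, time_rules
def initialize(context):
schedule_function(
rebalance,
date_rule=date_rules.week_start(),
time_rule=time_rules.market_open(minutes=30),
)
def rebalance(context, data):
pass  # ordering logic would go here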
"""
# When the user calls schedule_function(func, <time_rule>), assume that
# the user meant to specify a time rule but no date rule, instead of
# a date rule and no time rule as the signature suggests
if isinstance(date_rule, (AfterOpen, BeforeClose)) and not time_rule:
warnings.warn('Got a time rule for the second positional argument '
'date_rule. You should use keyword argument '
'time_rule= when calling schedule_function without '
'specifying a date_rule', stacklevel=3)
date_rule = date_rule or date_rules.every_day()
time_rule = ((time_rule or time_rules.every_minute())
if self.sim_params.data_frequency == 'minute' else
# If we are in daily mode the time_rule is ignored.
time_rules.every_minute())
# Check the type of the algorithm's schedule before pulling calendar
# Note that the ExchangeTradingSchedule is currently the only
# TradingSchedule class, so this is unlikely to be hit
if calendar is None:
cal = self.trading_calendar
elif calendar is calendars.US_EQUITIES:
cal = get_calendar('XNYS')
elif calendar is calendars.US_FUTURES:
cal = get_calendar('us_futures')
else:
raise ScheduleFunctionInvalidCalendar(
given_calendar=calendar,
allowed_calendars=(
'[calendars.US_EQUITIES, calendars.US_FUTURES]'
),
)
self.add_event(
make_eventrule(date_rule, time_rule, cal, half_days),
func,
)
@api_method
def record(self, *args, **kwargs):
"""Track and record values each day.
Parameters
----------
*args
Alternating names and values to record, e.g. ``record('price', p)``.
**kwargs
The names and values to record.
Notes
-----
These values will appear in the performance packets and the performance
dataframe passed to ``analyze`` and returned from
:func:`~zipline.run_algorithm`.
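Examples
--------
A minimal sketch; ``context.asset`` is assumed to have been set in
``initialize``:
.. code-block:: python
from zipline.api import record
def handle_data(context, data):
price = data.current(context.asset, 'price')
# 'price' becomes a column in the performance dataframe.
record(price=price)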
"""
# Make 2 objects both referencing the same iterator
args = [iter(args)] * 2
# Zip generates list entries by calling `next` on each iterator it
# receives. In this case the two iterators are the same object, so the
# call to next on args[0] will also advance args[1], resulting in zip
# returning (a,b) (c,d) (e,f) rather than (a,a) (b,b) (c,c) etc.
positionals = zip(*args)
for name, value in chain(positionals, iteritems(kwargs)):
self._recorded_vars[name] = value
@api_method
def set_benchmark(self, benchmark):
"""Set the benchmark asset.
Parameters
----------
benchmark : zipline.assets.Asset
The asset to set as the new benchmark.
Notes
-----
Any dividends payed out for that new benchmark asset will be
automatically reinvested.
"""
if self.initialized:
raise SetBenchmarkOutsideInitialize()
self.benchmark_sid = benchmark
@api_method
@preprocess(root_symbol_str=ensure_upper_case)
def continuous_future(self,
root_symbol_str,
offset=0,
roll='volume',
adjustment='mul'):
"""Create a specifier for a continuous contract.
Parameters
----------
root_symbol_str : str
The root symbol for the future chain.
offset : int, optional
The distance from the primary contract. Default is 0.
roll : str, optional
How rolls are determined. Default is 'volume'.
adjustment : str, optional
Method for adjusting lookback prices between rolls. Options are
'mul', 'add', and None. Default is 'mul'.
Returns
-------
continuous_future : zipline.assets.ContinuousFuture
The continuous future specifier.
"""
return self.asset_finder.create_continuous_future(
root_symbol_str,
offset,
roll,
adjustment,
)
@api_method
@preprocess(
symbol_str=ensure_upper_case,
country_code=optionally(ensure_upper_case),
)
def symbol(self, symbol_str, country_code=None):
"""Lookup an Equity by its ticker symbol.
Parameters
----------
symbol_str : str
The ticker symbol for the equity to lookup.
country_code : str or None, optional
A country to limit symbol searches to.
Returns
-------
equity : zipline.assets.Equity
The equity that held the ticker symbol on the current
symbol lookup date.
Raises
------
SymbolNotFound
Raised when the symbol was not held on the current lookup date.
See Also
--------
:func:`zipline.api.set_symbol_lookup_date`
"""
# If the user has not set the symbol lookup date,
# use the end_session as the date for symbol->sid resolution.
_lookup_date = self._symbol_lookup_date \
if self._symbol_lookup_date is not None \
else self.sim_params.end_session
return self.asset_finder.lookup_symbol(
symbol_str,
as_of_date=_lookup_date,
country_code=country_code,
)
@api_method
def symbols(self, *args, **kwargs):
"""Lookup multuple Equities as a list.
Parameters
----------
*args : iterable[str]
The ticker symbols to lookup.
country_code : str or None, optional
A country to limit symbol searches to.
Returns
-------
equities : list[zipline.assets.Equity]
The equities that held the given ticker symbols on the current
symbol lookup date.
Raises
------
SymbolNotFound
Raised when one of the symbols was not held on the current
lookup date.
See Also
--------
:func:`zipline.api.set_symbol_lookup_date`
"""
return [self.symbol(identifier, **kwargs) for identifier in args]
@api_method
def sid(self, sid):
"""Lookup an Asset by its unique asset identifier.
Parameters
----------
sid : int
The unique integer that identifies an asset.
Returns
-------
asset : zipline.assets.Asset
The asset with the given ``sid``.
Raises
------
SidsNotFound
When a requested ``sid`` does not map to any asset.
"""
return self.asset_finder.retrieve_asset(sid)
@api_method
@preprocess(symbol=ensure_upper_case)
def future_symbol(self, symbol):
"""Lookup a futures contract with a given symbol.
Parameters
----------
symbol : str
The symbol of the desired contract.
Returns
-------
future : zipline.assets.Future
The future that trades with the name ``symbol``.
Raises
------
SymbolNotFound
Raised when no contract named 'symbol' is found.
"""
return self.asset_finder.lookup_future_symbol(symbol)
def _calculate_order_value_amount(self, asset, value):
"""
Calculates how many shares/contracts to order based on the type of
asset being ordered.
"""
# Make sure the asset exists, and that there is a last price for it.
# FIXME: we should use BarData's can_trade logic here, but I haven't
# yet found a good way to do that.
normalized_date = normalize_date(self.datetime)
if normalized_date < asset.start_date:
raise CannotOrderDelistedAsset(
msg="Cannot order {0}, as it started trading on"
" {1}.".format(asset.symbol, asset.start_date)
)
elif normalized_date > asset.end_date:
raise CannotOrderDelistedAsset(
msg="Cannot order {0}, as it stopped trading on"
" {1}.".format(asset.symbol, asset.end_date)
)
else:
last_price = \
self.trading_client.current_data.current(asset, "price")
if np.isnan(last_price):
raise CannotOrderDelistedAsset(
msg="Cannot order {0} on {1} as there is no last "
"price for the security.".format(asset.symbol,
self.datetime)
)
if tolerant_equals(last_price, 0):
zero_message = "Price of 0 for {psid}; can't infer value".format(
psid=asset
)
if self.logger:
self.logger.debug(zero_message)
# Don't place any order
return 0
value_multiplier = asset.price_multiplier
return value / (last_price * value_multiplier)
def _can_order_asset(self, asset):
if not isinstance(asset, Asset):
raise UnsupportedOrderParameters(
msg="Passing non-Asset argument to 'order()' is not supported."
" Use 'sid()' or 'symbol()' methods to look up an Asset."
)
if asset.auto_close_date:
day = normalize_date(self.get_datetime())
end_date = min(asset.end_date, asset.auto_close_date)
if isinstance(end_date, str):
from dateutil import parser
end_date = parser.parse(end_date).replace(tzinfo=pytz.UTC)
# When running with pipeline live, the asset's end date is always
# yesterday, so we add a 5-day buffer to keep this check useful while
# still allowing live use. 5 days comfortably covers weekends/holidays.
if day > end_date + timedelta(days=5):
# If we are after the asset's end date or auto close date, warn
# the user that they can't place an order for this asset, and
# return None.
log.warn("Cannot place order for {0}, as it has de-listed. "
"Any existing positions for this asset will be "
"liquidated on "
"{1}.".format(asset.symbol, asset.auto_close_date))
return False
return True
@api_method
@disallowed_in_before_trading_start(OrderInBeforeTradingStart())
def order(self,
asset,
amount,
limit_price=None,
stop_price=None,
style=None):
"""Place an order for a fixed number of shares.
Parameters
----------
asset : Asset
The asset to be ordered.
amount : int
The amount of shares to order. If ``amount`` is positive, this is
the number of shares to buy or cover. If ``amount`` is negative,
this is the number of shares to sell or short.
limit_price : float, optional
The limit price for the order.
stop_price : float, optional
The stop price for the order.
style : ExecutionStyle, optional
The execution style for the order.
Returns
-------
order_id : str or None
The unique identifier for this order, or None if no order was
placed.
Notes
-----
The ``limit_price`` and ``stop_price`` arguments provide shorthands for
passing common execution styles. Passing ``limit_price=N`` is
equivalent to ``style=LimitOrder(N)``. Similarly, passing
``stop_price=M`` is equivalent to ``style=StopOrder(M)``, and passing
``limit_price=N`` and ``stop_price=M`` is equivalent to
``style=StopLimitOrder(N, M)``. It is an error to pass both a ``style``
and ``limit_price`` or ``stop_price``.
See Also
--------
:class:`zipline.finance.execution.ExecutionStyle`
:func:`zipline.api.order_value`
:func:`zipline.api.order_percent`
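Examples
--------
A hedged sketch of the two equivalent ways to place a limit order;
``context.asset`` is assumed to come from ``initialize``:
.. code-block:: python
from zipline.api import order
from zipline.finance.execution import LimitOrder
def handle_data(context, data):
# Shorthand keyword form...
order(context.asset, 100, limit_price=42.0)
# ...or the explicit execution-style form.
order(context.asset, 100, style=LimitOrder(42.0))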
"""
if not self._can_order_asset(asset):
return None
amount, style = self._calculate_order(asset, amount,
limit_price, stop_price, style)
return self.blotter.order(asset, amount, style)
def _calculate_order(self, asset, amount,
limit_price=None, stop_price=None, style=None):
amount = self.round_order(amount)
# Raises a ZiplineError if invalid parameters are detected.
self.validate_order_params(asset,
amount,
limit_price,
stop_price,
style)
# Convert deprecated limit_price and stop_price parameters to use
# ExecutionStyle objects.
style = self.__convert_order_params_for_blotter(asset,
limit_price,
stop_price,
style)
return amount, style
@staticmethod
def round_order(amount):
"""
Convert number of shares to an integer.
By default, truncates to the integer share count that's either within
.0001 of amount or closer to zero.
E.g. 3.9999 -> 4.0; 5.5 -> 5.0; -5.5 -> -5.0
"""
return int(round_if_near_integer(amount))
def validate_order_params(self,
asset,
amount,
limit_price,
stop_price,
style):
"""
Helper method for validating parameters to the order API function.
Raises an UnsupportedOrderParameters if invalid arguments are found.
"""
if not self.initialized:
raise OrderDuringInitialize(
msg="order() can only be called from within handle_data()"
)
if style:
if limit_price:
raise UnsupportedOrderParameters(
msg="Passing both limit_price and style is not supported."
)
if stop_price:
raise UnsupportedOrderParameters(
msg="Passing both stop_price and style is not supported."
)
for control in self.trading_controls:
control.validate(asset,
amount,
self.portfolio,
self.get_datetime(),
self.trading_client.current_data)
@staticmethod
def __convert_order_params_for_blotter(asset,
limit_price,
stop_price,
style):
"""
Helper method for converting deprecated limit_price and stop_price
arguments into ExecutionStyle instances.
This function assumes that either style == None or (limit_price,
stop_price) == (None, None).
"""
if style:
assert (limit_price, stop_price) == (None, None)
return style
if limit_price and stop_price:
return StopLimitOrder(limit_price, stop_price, asset=asset)
if limit_price:
return LimitOrder(limit_price, asset=asset)
if stop_price:
return StopOrder(stop_price, asset=asset)
else:
return MarketOrder()
@api_method
@disallowed_in_before_trading_start(OrderInBeforeTradingStart())
def order_value(self,
asset,
value,
limit_price=None,
stop_price=None,
style=None):
"""
Place an order for a fixed amount of money.
Equivalent to ``order(asset, value / data.current(asset, 'price'))``.
Parameters
----------
asset : Asset
The asset to be ordered.
value : float
Amount of value of ``asset`` to be transacted. The number of shares
bought or sold will be equal to ``value / current_price``.
limit_price : float, optional
Limit price for the order.
stop_price : float, optional
Stop price for the order.
style : ExecutionStyle
The execution style for the order.
Returns
-------
order_id : str
The unique identifier for this order.
Notes
-----
See :func:`zipline.api.order` for more information about
``limit_price``, ``stop_price``, and ``style``
See Also
--------
:class:`zipline.finance.execution.ExecutionStyle`
:func:`zipline.api.order`
:func:`zipline.api.order_percent`
"""
if not self._can_order_asset(asset):
return None
amount = self._calculate_order_value_amount(asset, value)
return self.order(asset, amount,
limit_price=limit_price,
stop_price=stop_price,
style=style)
@property
def recorded_vars(self):
return copy(self._recorded_vars)
def _sync_last_sale_prices(self, dt=None):
"""Sync the last sale prices on the metrics tracker to a given
datetime.
Parameters
----------
dt : datetime
The time to sync the prices to.
Notes
-----
This call is cached by the datetime. Repeated calls in the same bar
are cheap.
"""
if dt is None:
dt = self.datetime
if dt != self._last_sync_time:
self.metrics_tracker.sync_last_sale_prices(
dt,
self.data_portal,
)
self._last_sync_time = dt
@property
def portfolio(self):
self._sync_last_sale_prices()
return self.metrics_tracker.portfolio
@property
def account(self):
self._sync_last_sale_prices()
return self.metrics_tracker.account
def set_logger(self, logger):
self.logger = logger
def on_dt_changed(self, dt):
"""
Callback triggered by the simulation loop whenever the current dt
changes.
Any logic that should happen exactly once at the start of each datetime
group should happen here.
"""
self.datetime = dt
self.blotter.set_date(dt)
@api_method
@preprocess(tz=coerce_string(pytz.timezone))
@expect_types(tz=optional(tzinfo))
def get_datetime(self, tz=None):
"""
Returns the current simulation datetime.
Parameters
----------
tz : tzinfo or str, optional
The timezone to return the datetime in. This defaults to utc.
Returns
-------
dt : datetime
The current simulation datetime converted to ``tz``.
"""
dt = self.datetime
assert dt.tzinfo == pytz.utc, "Algorithm should have a utc datetime"
if tz is not None:
dt = dt.astimezone(tz)
return dt
@api_method
def set_slippage(self, us_equities=None, us_futures=None):
"""
Set the slippage models for the simulation.
Parameters
----------
us_equities : EquitySlippageModel
The slippage model to use for trading US equities.
us_futures : FutureSlippageModel
The slippage model to use for trading US futures.
Notes
-----
This function can only be called during
:func:`~zipline.api.initialize`.
See Also
--------
:class:`zipline.finance.slippage.SlippageModel`
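Examples
--------
A sketch of installing a fixed-spread model for equities during
``initialize`` (the spread value is arbitrary):
.. code-block:: python
from zipline.api import set_slippage
from zipline.finance import slippage
def initialize(context):
set_slippage(us_equities=slippage.FixedSlippage(spread=0.02))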
"""
if self.initialized:
raise SetSlippagePostInit()
if us_equities is not None:
if Equity not in us_equities.allowed_asset_types:
raise IncompatibleSlippageModel(
asset_type='equities',
given_model=us_equities,
supported_asset_types=us_equities.allowed_asset_types,
)
self.blotter.slippage_models[Equity] = us_equities
if us_futures is not None:
if Future not in us_futures.allowed_asset_types:
raise IncompatibleSlippageModel(
asset_type='futures',
given_model=us_futures,
supported_asset_types=us_futures.allowed_asset_types,
)
self.blotter.slippage_models[Future] = us_futures
@api_method
def set_commission(self, us_equities=None, us_futures=None):
"""Sets the commission models for the simulation.
Parameters
----------
us_equities : EquityCommissionModel
The commission model to use for trading US equities.
us_futures : FutureCommissionModel
The commission model to use for trading US futures.
Notes
-----
This function can only be called during
:func:`~zipline.api.initialize`.
See Also
--------
:class:`zipline.finance.commission.PerShare`
:class:`zipline.finance.commission.PerTrade`
:class:`zipline.finance.commission.PerDollar`
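Examples
--------
A sketch of setting a per-share commission during ``initialize`` (the
costs shown are arbitrary):
.. code-block:: python
from zipline.api import set_commission
from zipline.finance import commission
def initialize(context):
set_commission(
us_equities=commission.PerShare(cost=0.001, min_trade_cost=1.0),
)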
"""
if self.initialized:
raise SetCommissionPostInit()
if us_equities is not None:
if Equity not in us_equities.allowed_asset_types:
raise IncompatibleCommissionModel(
asset_type='equities',
given_model=us_equities,
supported_asset_types=us_equities.allowed_asset_types,
)
self.blotter.commission_models[Equity] = us_equities
if us_futures is not None:
if Future not in us_futures.allowed_asset_types:
raise IncompatibleCommissionModel(
asset_type='futures',
given_model=us_futures,
supported_asset_types=us_futures.allowed_asset_types,
)
self.blotter.commission_models[Future] = us_futures
@api_method
def set_cancel_policy(self, cancel_policy):
"""Sets the order cancellation policy for the simulation.
Parameters
----------
cancel_policy : CancelPolicy
The cancellation policy to use.
See Also
--------
:class:`zipline.api.EODCancel`
:class:`zipline.api.NeverCancel`
"""
if not isinstance(cancel_policy, CancelPolicy):
raise UnsupportedCancelPolicy()
if self.initialized:
raise SetCancelPolicyPostInit()
self.blotter.cancel_policy = cancel_policy
@api_method
def set_symbol_lookup_date(self, dt):
"""Set the date for which symbols will be resolved to their assets
(symbols may map to different firms or underlying assets at
different times)
Parameters
----------
dt : datetime
The new symbol lookup date.
"""
try:
self._symbol_lookup_date = pd.Timestamp(dt)
except ValueError:
raise UnsupportedDatetimeFormat(input=dt,
method='set_symbol_lookup_date')
# Retained for backwards compatibility.
@property
def data_frequency(self):
return self.sim_params.data_frequency
@data_frequency.setter
def data_frequency(self, value):
assert value in ('daily', 'minute')
self.sim_params.data_frequency = value
@api_method
@disallowed_in_before_trading_start(OrderInBeforeTradingStart())
def order_percent(self,
asset,
percent,
limit_price=None,
stop_price=None,
style=None):
"""Place an order in the specified asset corresponding to the given
percent of the current portfolio value.
Parameters
----------
asset : Asset
The asset that this order is for.
percent : float
The percentage of the portfolio value to allocate to ``asset``.
This is specified as a decimal, for example: 0.50 means 50%.
limit_price : float, optional
The limit price for the order.
stop_price : float, optional
The stop price for the order.
style : ExecutionStyle
The execution style for the order.
Returns
-------
order_id : str
The unique identifier for this order.
Notes
-----
See :func:`zipline.api.order` for more information about
``limit_price``, ``stop_price``, and ``style``
See Also
--------
:class:`zipline.finance.execution.ExecutionStyle`
:func:`zipline.api.order`
:func:`zipline.api.order_value`
"""
if not self._can_order_asset(asset):
return None
amount = self._calculate_order_percent_amount(asset, percent)
return self.order(asset, amount,
limit_price=limit_price,
stop_price=stop_price,
style=style)
def _calculate_order_percent_amount(self, asset, percent):
value = self.portfolio.portfolio_value * percent
return self._calculate_order_value_amount(asset, value)
@api_method
@disallowed_in_before_trading_start(OrderInBeforeTradingStart())
def order_target(self,
asset,
target,
limit_price=None,
stop_price=None,
style=None):
"""Place an order to adjust a position to a target number of shares. If
the position doesn't already exist, this is equivalent to placing a new
order. If the position does exist, this is equivalent to placing an
order for the difference between the target number of shares and the
current number of shares.
Parameters
----------
asset : Asset
The asset that this order is for.
target : int
The desired number of shares of ``asset``.
limit_price : float, optional
The limit price for the order.
stop_price : float, optional
The stop price for the order.
style : ExecutionStyle
The execution style for the order.
Returns
-------
order_id : str
The unique identifier for this order.
Notes
-----
``order_target`` does not take into account any open orders. For
example:
.. code-block:: python
order_target(sid(0), 10)
order_target(sid(0), 10)
This code will result in 20 shares of ``sid(0)`` because the first
call to ``order_target`` will not have been filled when the second
``order_target`` call is made.
See :func:`zipline.api.order` for more information about
``limit_price``, ``stop_price``, and ``style``
See Also
--------
:class:`zipline.finance.execution.ExecutionStyle`
:func:`zipline.api.order`
:func:`zipline.api.order_target_percent`
:func:`zipline.api.order_target_value`
"""
if not self._can_order_asset(asset):
return None
amount = self._calculate_order_target_amount(asset, target)
return self.order(asset, amount,
limit_price=limit_price,
stop_price=stop_price,
style=style)
def _calculate_order_target_amount(self, asset, target):
if asset in self.portfolio.positions:
current_position = self.portfolio.positions[asset].amount
target -= current_position
return target
@api_method
@disallowed_in_before_trading_start(OrderInBeforeTradingStart())
def order_target_value(self,
asset,
target,
limit_price=None,
stop_price=None,
style=None):
"""Place an order to adjust a position to a target value. If
the position doesn't already exist, this is equivalent to placing a new
order. If the position does exist, this is equivalent to placing an
order for the difference between the target value and the
current value.
If the Asset being ordered is a Future, the 'target value' calculated
is actually the target exposure, as Futures have no 'value'.
Parameters
----------
asset : Asset
The asset that this order is for.
target : float
The desired total value of ``asset``.
limit_price : float, optional
The limit price for the order.
stop_price : float, optional
The stop price for the order.
style : ExecutionStyle
The execution style for the order.
Returns
-------
order_id : str
The unique identifier for this order.
Notes
-----
``order_target_value`` does not take into account any open orders. For
example:
.. code-block:: python
order_target_value(sid(0), 10)
order_target_value(sid(0), 10)
This code will result in 20 dollars of ``sid(0)`` because the first
call to ``order_target_value`` will not have been filled when the
second ``order_target_value`` call is made.
See :func:`zipline.api.order` for more information about
``limit_price``, ``stop_price``, and ``style``
See Also
--------
:class:`zipline.finance.execution.ExecutionStyle`
:func:`zipline.api.order`
:func:`zipline.api.order_target`
:func:`zipline.api.order_target_percent`
"""
if not self._can_order_asset(asset):
return None
target_amount = self._calculate_order_value_amount(asset, target)
amount = self._calculate_order_target_amount(asset, target_amount)
return self.order(asset, amount,
limit_price=limit_price,
stop_price=stop_price,
style=style)
@api_method
@disallowed_in_before_trading_start(OrderInBeforeTradingStart())
def order_target_percent(self, asset, target,
limit_price=None, stop_price=None, style=None):
"""Place an order to adjust a position to a target percent of the
current portfolio value. If the position doesn't already exist, this is
equivalent to placing a new order. If the position does exist, this is
equivalent to placing an order for the difference between the target
percent and the current percent.
Parameters
----------
asset : Asset
The asset that this order is for.
target : float
The desired percentage of the portfolio value to allocate to
``asset``. This is specified as a decimal, for example:
0.50 means 50%.
limit_price : float, optional
The limit price for the order.
stop_price : float, optional
The stop price for the order.
style : ExecutionStyle
The execution style for the order.
Returns
-------
order_id : str
The unique identifier for this order.
Notes
-----
``order_target_percent`` does not take into account any open orders. For
example:
.. code-block:: python
order_target_percent(sid(0), 10)
order_target_percent(sid(0), 10)
This code will result in 20% of the portfolio being allocated to sid(0)
because the first call to ``order_target_percent`` will not have been
filled when the second ``order_target_percent`` call is made.
See :func:`zipline.api.order` for more information about
``limit_price``, ``stop_price``, and ``style``
See Also
--------
:class:`zipline.finance.execution.ExecutionStyle`
:func:`zipline.api.order`
:func:`zipline.api.order_target`
:func:`zipline.api.order_target_value`
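Examples
--------
A sketch of rebalancing toward a fixed weight; ``context.asset`` is
assumed to come from ``initialize``:
.. code-block:: python
from zipline.api import order_target_percent
def handle_data(context, data):
# Adjust the position so it represents roughly 5% of portfolio value.
order_target_percent(context.asset, 0.05)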
"""
if not self._can_order_asset(asset):
return None
amount = self._calculate_order_target_percent_amount(asset, target)
return self.order(asset, amount,
limit_price=limit_price,
stop_price=stop_price,
style=style)
def _calculate_order_target_percent_amount(self, asset, target):
target_amount = self._calculate_order_percent_amount(asset, target)
return self._calculate_order_target_amount(asset, target_amount)
@api_method
@expect_types(share_counts=pd.Series)
@expect_dtypes(share_counts=int64_dtype)
def batch_market_order(self, share_counts):
"""Place a batch market order for multiple assets.
Parameters
----------
share_counts : pd.Series[Asset -> int]
Map from asset to number of shares to order for that asset.
Returns
-------
order_ids : pd.Index[str]
Index of ids for newly-created orders.
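Examples
--------
A hedged sketch; the assets are assumed to have been looked up in
``initialize`` and the share counts must form an int64 Series:
.. code-block:: python
import pandas as pd
from zipline.api import batch_market_order
def handle_data(context, data):
share_counts = pd.Series(
{context.aapl: 10, context.msft: -5}, dtype='int64',
)
order_ids = batch_market_order(share_counts)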
"""
style = MarketOrder()
order_args = [
(asset, amount, style)
for (asset, amount) in iteritems(share_counts)
if amount
]
return self.blotter.batch_order(order_args)
@error_keywords(sid='Keyword argument `sid` is no longer supported for '
'get_open_orders. Use `asset` instead.')
@api_method
def get_open_orders(self, asset=None):
"""Retrieve all of the current open orders.
Parameters
----------
asset : Asset
If passed and not None, return only the open orders for the given
asset instead of all open orders.
Returns
-------
open_orders : dict[list[Order]] or list[Order]
If no asset is passed this will return a dict mapping Assets
to a list containing all the open orders for the asset.
If an asset is passed then this will return a list of the open
orders for this asset.
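Examples
--------
A sketch of sweeping and cancelling every open order, e.g. from a
scheduled end-of-day function:
.. code-block:: python
from zipline.api import cancel_order, get_open_orders
def cancel_everything(context, data):
for asset, orders in get_open_orders().items():
for order in orders:
cancel_order(order)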
"""
if asset is None:
return {
key: [order.to_api_obj() for order in orders]
for key, orders in iteritems(self.blotter.open_orders)
if orders
}
if asset in self.blotter.open_orders:
orders = self.blotter.open_orders[asset]
return [order.to_api_obj() for order in orders]
return []
@api_method
def get_order(self, order_id):
"""Lookup an order based on the order id returned from one of the
order functions.
Parameters
----------
order_id : str
The unique identifier for the order.
Returns
-------
order : Order
The order object.
"""
if order_id in self.blotter.orders:
return self.blotter.orders[order_id].to_api_obj()
@api_method
def cancel_order(self, order_param):
"""Cancel an open order.
Parameters
----------
order_param : str or Order
The order_id or order object to cancel.
"""
order_id = order_param
if isinstance(order_param, zipline.protocol.Order):
order_id = order_param.id
self.blotter.cancel(order_id)
@api_method
@require_initialized(HistoryInInitialize())
def history(self, bar_count, frequency, field, ffill=True):
"""DEPRECATED: use ``data.history`` instead.
"""
warnings.warn(
"The `history` method is deprecated. Use `data.history` instead.",
category=ZiplineDeprecationWarning,
stacklevel=4
)
return self.get_history_window(
bar_count,
frequency,
self._calculate_universe(),
field,
ffill
)
def get_history_window(self, bar_count, frequency, assets, field, ffill):
if not self._in_before_trading_start:
return self.data_portal.get_history_window(
assets,
self.datetime,
bar_count,
frequency,
field,
self.data_frequency,
ffill,
)
else:
# If we are in before_trading_start, we need to get the window
# as of the previous market minute
adjusted_dt = \
self.trading_calendar.previous_minute(
self.datetime
)
window = self.data_portal.get_history_window(
assets,
adjusted_dt,
bar_count,
frequency,
field,
self.data_frequency,
ffill,
)
# Get the adjustments between the last market minute and the
# current before_trading_start dt and apply to the window
adjs = self.data_portal.get_adjustments(
assets,
field,
adjusted_dt,
self.datetime
)
window = window * adjs
return window
####################
# Account Controls #
####################
def register_account_control(self, control):
"""
Register a new AccountControl to be checked on each bar.
"""
if self.initialized:
raise RegisterAccountControlPostInit()
self.account_controls.append(control)
def validate_account_controls(self):
for control in self.account_controls:
control.validate(self.portfolio,
self.account,
self.get_datetime(),
self.trading_client.current_data)
@api_method
def set_max_leverage(self, max_leverage):
"""Set a limit on the maximum leverage of the algorithm.
Parameters
----------
max_leverage : float
The maximum leverage for the algorithm. If not provided there will
be no maximum.
"""
control = MaxLeverage(max_leverage)
self.register_account_control(control)
@api_method
def set_min_leverage(self, min_leverage, grace_period):
"""Set a limit on the minimum leverage of the algorithm.
Parameters
----------
min_leverage : float
The minimum leverage for the algorithm.
grace_period : pd.Timedelta
The offset from the start date used to enforce a minimum leverage.
"""
deadline = self.sim_params.start_session + grace_period
control = MinLeverage(min_leverage, deadline)
self.register_account_control(control)
####################
# Trading Controls #
####################
def register_trading_control(self, control):
"""
Register a new TradingControl to be checked prior to order calls.
"""
if self.initialized:
raise RegisterTradingControlPostInit()
self.trading_controls.append(control)
@api_method
def set_max_position_size(self,
asset=None,
max_shares=None,
max_notional=None,
on_error='fail'):
"""Set a limit on the number of shares and/or dollar value held for the
given sid. Limits are treated as absolute values and are enforced at
the time that the algo attempts to place an order for sid. This means
that it's possible to end up with more than the max number of shares
due to splits/dividends, and more than the max notional due to price
improvement.
If an algorithm attempts to place an order that would result in
increasing the absolute value of shares/dollar value exceeding one of
these limits, raise a TradingControlException.
Parameters
----------
asset : Asset, optional
If provided, this sets the guard only on positions in the given
asset.
max_shares : int, optional
The maximum number of shares to hold for an asset.
max_notional : float, optional
The maximum value to hold for an asset.
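Examples
--------
A sketch of guarding a single asset during ``initialize``; the limits
shown are arbitrary:
.. code-block:: python
from zipline.api import set_max_position_size, symbol
def initialize(context):
context.asset = symbol('AAPL')
set_max_position_size(
asset=context.asset,
max_shares=1000,
max_notional=100000.0,
)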
"""
control = MaxPositionSize(asset=asset,
max_shares=max_shares,
max_notional=max_notional,
on_error=on_error)
self.register_trading_control(control)
@api_method
def set_max_order_size(self,
asset=None,
max_shares=None,
max_notional=None,
on_error='fail'):
"""Set a limit on the number of shares and/or dollar value of any single
order placed for sid. Limits are treated as absolute values and are
enforced at the time that the algo attempts to place an order for sid.
If an algorithm attempts to place an order that would result in
exceeding one of these limits, raise a TradingControlException.
Parameters
----------
asset : Asset, optional
If provided, this sets the guard only on positions in the given
asset.
max_shares : int, optional
The maximum number of shares that can be ordered at one time.
max_notional : float, optional
The maximum value that can be ordered at one time.
"""
control = MaxOrderSize(asset=asset,
max_shares=max_shares,
max_notional=max_notional,
on_error=on_error)
self.register_trading_control(control)
@api_method
def set_max_order_count(self, max_count, on_error='fail'):
"""Set a limit on the number of orders that can be placed in a single
day.
Parameters
----------
max_count : int
The maximum number of orders that can be placed on any single day.
"""
control = MaxOrderCount(on_error, max_count)
self.register_trading_control(control)
@api_method
def set_do_not_order_list(self, restricted_list, on_error='fail'):
"""Set a restriction on which assets can be ordered.
Parameters
----------
restricted_list : container[Asset], SecurityList
The assets that cannot be ordered.
"""
if isinstance(restricted_list, SecurityList):
warnings.warn(
"`set_do_not_order_list(security_lists.leveraged_etf_list)` "
"is deprecated. Use `set_asset_restrictions("
"security_lists.restrict_leveraged_etfs)` instead.",
category=ZiplineDeprecationWarning,
stacklevel=2
)
restrictions = SecurityListRestrictions(restricted_list)
else:
warnings.warn(
"`set_do_not_order_list(container_of_assets)` is deprecated. "
"Create a zipline.finance.asset_restrictions."
"StaticRestrictions object with a container of assets and use "
"`set_asset_restrictions(StaticRestrictions("
"container_of_assets))` instead.",
category=ZiplineDeprecationWarning,
stacklevel=2
)
restrictions = StaticRestrictions(restricted_list)
self.set_asset_restrictions(restrictions, on_error)
@api_method
@expect_types(
restrictions=Restrictions,
on_error=str,
)
def set_asset_restrictions(self, restrictions, on_error='fail'):
"""Set a restriction on which assets can be ordered.
Parameters
----------
restrictions : Restrictions
An object providing information about restricted assets.
See Also
--------
zipline.finance.asset_restrictions.Restrictions
"""
control = RestrictedListOrder(on_error, restrictions)
self.register_trading_control(control)
self.restrictions |= restrictions
@api_method
def set_long_only(self, on_error='fail'):
"""Set a rule specifying that this algorithm cannot take short
positions.
"""
self.register_trading_control(LongOnly(on_error))
##############
# Pipeline API
##############
@api_method
@expect_types(
pipeline=Pipeline,
name=string_types,
chunks=(int, Iterable, type(None)),
)
def attach_pipeline(self, pipeline, name, chunks=None, eager=True):
"""Register a pipeline to be computed at the start of each day.
Parameters
----------
pipeline : Pipeline
The pipeline to have computed.
name : str
The name of the pipeline.
chunks : int or iterator, optional
The number of days to compute pipeline results for. Increasing
this number will make it take longer to get the first results, but
may improve the total runtime of the simulation. If an iterator
is passed, we will run in chunks based on values of the iterator.
Default is a small first chunk followed by half-year chunks.
eager : bool, optional
Whether or not to compute this pipeline prior to
before_trading_start. Default is True.
Returns
-------
pipeline : Pipeline
Returns the pipeline that was attached unchanged.
See Also
--------
:func:`zipline.api.pipeline_output`
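Examples
--------
A hedged sketch of attaching a simple pipeline in ``initialize`` and
reading its output in ``before_trading_start``; the factor and column
names are illustrative:
.. code-block:: python
from zipline.api import attach_pipeline, pipeline_output
from zipline.pipeline import Pipeline
from zipline.pipeline.data import USEquityPricing
from zipline.pipeline.factors import SimpleMovingAverage
def initialize(context):
sma10 = SimpleMovingAverage(
inputs=[USEquityPricing.close], window_length=10,
)
attach_pipeline(Pipeline(columns={'sma10': sma10}), 'my_pipeline')
def before_trading_start(context, data):
context.pipeline_data = pipeline_output('my_pipeline')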
"""
if chunks is None:
# Make the first chunk smaller to get more immediate results:
# (one week, then every half year)
chunks = chain([5], repeat(126))
elif isinstance(chunks, int):
chunks = repeat(chunks)
if name in self._pipelines:
raise DuplicatePipelineName(name=name)
self._pipelines[name] = AttachedPipeline(pipeline, iter(chunks), eager)
log.info('Pipeline {} attached'.format(name))
# Return the pipeline to allow expressions like
# p = attach_pipeline(Pipeline(), 'name')
return pipeline
@api_method
@require_initialized(PipelineOutputDuringInitialize())
def pipeline_output(self, name):
"""
Get results of the pipeline attached with the name ``name``.
Parameters
----------
name : str
Name of the pipeline from which to fetch results.
Returns
-------
results : pd.DataFrame
DataFrame containing the results of the requested pipeline for
the current simulation date.
Raises
------
NoSuchPipeline
Raised when no pipeline with the name `name` has been registered.
See Also
--------
:func:`zipline.api.attach_pipeline`
:meth:`zipline.pipeline.engine.PipelineEngine.run_pipeline`
"""
try:
pipe, chunks, _ = self._pipelines[name]
except KeyError:
raise NoSuchPipeline(
name=name,
valid=list(self._pipelines.keys()),
)
return self._pipeline_output(pipe, chunks, name)
def _pipeline_output(self, pipeline, chunks, name):
"""
Internal implementation of `pipeline_output`.
"""
today = normalize_date(self.get_datetime())
try:
data = self._pipeline_cache.get(name, today)
except KeyError:
# Calculate the next block.
data, valid_until = self.run_pipeline(
pipeline, today, next(chunks),
)
self._pipeline_cache.set(name, data, valid_until)
# Now that we have a cached result, try to return the data for today.
try:
return data.loc[today]
except KeyError:
# This happens if no assets passed the pipeline screen on a given
# day.
return pd.DataFrame(index=[], columns=data.columns)
def run_pipeline(self, pipeline, start_session, chunksize):
"""
Compute `pipeline`, providing values for at least `start_session`.
Produces a DataFrame containing data for sessions between `start_session`
and `end_session`, where `end_session` is defined by:
`end_session = min(start_session + chunksize trading days,
simulation_end)`
Returns
-------
(data, valid_until) : tuple (pd.DataFrame, pd.Timestamp)
See Also
--------
PipelineEngine.run_pipeline
"""
sessions = self.trading_calendar.all_sessions
# Load data starting from the requested start session...
start_date_loc = sessions.get_loc(start_session)
# ...continuing until either the simulation end, or until chunksize
# sessions of data have been loaded.
sim_end_session = self.sim_params.end_session
end_loc = min(
start_date_loc + chunksize,
sessions.get_loc(sim_end_session)
)
end_session = sessions[end_loc]
return \
self.engine.run_pipeline(pipeline, start_session, end_session), \
end_session
@staticmethod
def default_pipeline_domain(calendar):
"""
Get a default pipeline domain for algorithms running on ``calendar``.
This will be used to infer a domain for pipelines that only use generic
datasets when running in the context of a TradingAlgorithm.
"""
return _DEFAULT_DOMAINS.get(calendar.name, domain.GENERIC)
@staticmethod
def default_fetch_csv_country_code(calendar):
"""
Get a default country_code to use for fetch_csv symbol lookups.
This will be used to disambiguate symbol lookups for fetch_csv calls if
our asset db contains entries with the same ticker spread across
multiple countries.
"""
return _DEFAULT_FETCH_CSV_COUNTRY_CODES.get(calendar.name)
##################
# End Pipeline API
##################
@classmethod
def all_api_methods(cls):
"""
Return a list of all the TradingAlgorithm API methods.
"""
return [
fn for fn in itervalues(vars(cls))
if getattr(fn, 'is_api_method', False)
]
# Map from calendar name to default domain for that calendar.
_DEFAULT_DOMAINS = {d.calendar_name: d for d in domain.BUILT_IN_DOMAINS}
# Map from calendar name to default country code for that calendar.
_DEFAULT_FETCH_CSV_COUNTRY_CODES = {
d.calendar_name: d.country_code for d in domain.BUILT_IN_DOMAINS
}
# Include us_futures, which doesn't have a pipeline domain.
_DEFAULT_FETCH_CSV_COUNTRY_CODES['us_futures'] = 'US' | zipline-trader | /zipline_trader-1.6.1-cp36-cp36m-win32.whl/zipline/algorithm.py | algorithm.py |
from copy import copy
from logbook import Logger, Processor
from zipline.finance.order import ORDER_STATUS
from zipline.protocol import BarData
from zipline.utils.api_support import ZiplineAPI
from zipline.utils.compat import ExitStack
from six import viewkeys
from zipline.gens.sim_engine import (
BAR,
SESSION_START,
SESSION_END,
MINUTE_END,
BEFORE_TRADING_START_BAR
)
log = Logger('Trade Simulation')
class AlgorithmSimulator(object):
EMISSION_TO_PERF_KEY_MAP = {
'minute': 'minute_perf',
'daily': 'daily_perf'
}
def __init__(self, algo, sim_params, data_portal, clock, benchmark_source,
restrictions, universe_func):
# ==============
# Simulation
# Param Setup
# ==============
self.sim_params = sim_params
self.data_portal = data_portal
self.restrictions = restrictions
# ==============
# Algo Setup
# ==============
self.algo = algo
# ==============
# Snapshot Setup
# ==============
# This object is the way that user algorithms interact with OHLCV data,
# fetcher data, and some API methods like `data.can_trade`.
self.current_data = self._create_bar_data(universe_func)
# We don't have a datetime for the current snapshot until we
# receive a message.
self.simulation_dt = None
self.clock = clock
self.benchmark_source = benchmark_source
# =============
# Logging Setup
# =============
# Processor function for injecting the algo_dt into
# user prints/logs.
def inject_algo_dt(record):
if 'algo_dt' not in record.extra:
record.extra['algo_dt'] = self.simulation_dt
self.processor = Processor(inject_algo_dt)
def get_simulation_dt(self):
return self.simulation_dt
def _create_bar_data(self, universe_func):
return BarData(
data_portal=self.data_portal,
simulation_dt_func=self.get_simulation_dt,
data_frequency=self.sim_params.data_frequency,
trading_calendar=self.algo.trading_calendar,
restrictions=self.restrictions,
universe_func=universe_func
)
def transform(self):
"""
Main generator work loop.
"""
algo = self.algo
metrics_tracker = algo.metrics_tracker
emission_rate = metrics_tracker.emission_rate
def every_bar(dt_to_use, current_data=self.current_data,
handle_data=algo.event_manager.handle_data):
for capital_change in calculate_minute_capital_changes(dt_to_use):
yield capital_change
self.simulation_dt = dt_to_use
# called every tick (minute or day).
algo.on_dt_changed(dt_to_use)
blotter = algo.blotter
# handle any transactions and commissions coming out of new orders
# placed in the last bar
new_transactions, new_commissions, closed_orders = \
blotter.get_transactions(current_data)
blotter.prune_orders(closed_orders)
for transaction in new_transactions:
metrics_tracker.process_transaction(transaction)
# since this order was modified, record it
order = blotter.orders[transaction.order_id]
metrics_tracker.process_order(order)
for commission in new_commissions:
metrics_tracker.process_commission(commission)
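# In live trading, touching broker.positions below syncs the broker's
# current positions into the metrics tracker before user code runs.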
if hasattr(algo, 'broker'):
algo.broker.positions
handle_data(algo, current_data, dt_to_use)
# grab any new orders from the blotter, then clear the list.
# this includes cancelled orders.
new_orders = blotter.new_orders
blotter.new_orders = []
# if we have any new orders, record them so that we know
# in what perf period they were placed.
for new_order in new_orders:
metrics_tracker.process_order(new_order)
def once_a_day(midnight_dt, current_data=self.current_data,
data_portal=self.data_portal):
# process any capital changes that came overnight
for capital_change in algo.calculate_capital_changes(
midnight_dt, emission_rate=emission_rate,
is_interday=True):
yield capital_change
# set all the timestamps
self.simulation_dt = midnight_dt
algo.on_dt_changed(midnight_dt)
metrics_tracker.handle_market_open(
midnight_dt,
algo.data_portal,
)
# handle any splits that impact any positions or any open orders.
assets_we_care_about = (
viewkeys(metrics_tracker.positions) |
viewkeys(algo.blotter.open_orders)
)
if assets_we_care_about:
splits = data_portal.get_splits(assets_we_care_about,
midnight_dt)
if splits:
algo.blotter.process_splits(splits)
metrics_tracker.handle_splits(splits)
def on_exit():
# Remove references to algo, data portal, et al to break cycles
# and ensure deterministic cleanup of these objects when the
# simulation finishes.
self.algo = None
self.benchmark_source = self.current_data = self.data_portal = None
with ExitStack() as stack:
stack.callback(on_exit)
stack.enter_context(self.processor)
stack.enter_context(ZiplineAPI(self.algo))
if algo.data_frequency == 'minute':
def execute_order_cancellation_policy():
algo.blotter.execute_cancel_policy(SESSION_END)
def calculate_minute_capital_changes(dt):
# process any capital changes that came between the last
# and current minutes
return algo.calculate_capital_changes(
dt, emission_rate=emission_rate, is_interday=False)
else:
def execute_order_cancellation_policy():
pass
def calculate_minute_capital_changes(dt):
return []
for dt, action in self.clock:
if action == BAR:
for capital_change_packet in every_bar(dt):
yield capital_change_packet
elif action == SESSION_START:
for capital_change_packet in once_a_day(dt):
yield capital_change_packet
elif action == SESSION_END:
# End of the session.
positions = metrics_tracker.positions
position_assets = algo.asset_finder.retrieve_all(positions)
self._cleanup_expired_assets(dt, position_assets)
execute_order_cancellation_policy()
algo.validate_account_controls()
yield self._get_daily_message(dt, algo, metrics_tracker)
elif action == BEFORE_TRADING_START_BAR:
self.simulation_dt = dt
algo.on_dt_changed(dt)
algo.before_trading_start(self.current_data)
elif action == MINUTE_END:
minute_msg = self._get_minute_message(
dt,
algo,
metrics_tracker,
)
yield minute_msg
risk_message = metrics_tracker.handle_simulation_end(
self.data_portal,
)
yield risk_message
def _cleanup_expired_assets(self, dt, position_assets):
"""
Clear out any assets that have expired before starting a new sim day.
Performs two functions:
1. Finds all assets for which we have open orders and clears any
orders whose assets are on or after their auto_close_date.
2. Finds all assets for which we have positions and generates
close_position events for any assets that have reached their
auto_close_date.
"""
algo = self.algo
def past_auto_close_date(asset):
acd = asset.auto_close_date
return acd is not None and acd <= dt
# Remove positions in any sids that have reached their auto_close date.
assets_to_clear = \
[asset for asset in position_assets if past_auto_close_date(asset)]
metrics_tracker = algo.metrics_tracker
data_portal = self.data_portal
for asset in assets_to_clear:
metrics_tracker.process_close_position(asset, dt, data_portal)
# Remove open orders for any sids that have reached their auto close
# date. These orders get processed immediately because otherwise they
# would not be processed until the first bar of the next day.
blotter = algo.blotter
assets_to_cancel = [
asset for asset in blotter.open_orders
if past_auto_close_date(asset)
]
for asset in assets_to_cancel:
blotter.cancel_all_orders_for_asset(asset)
# Make a copy here so that we are not modifying the list that is being
# iterated over.
for order in copy(blotter.new_orders):
if order.status == ORDER_STATUS.CANCELLED:
metrics_tracker.process_order(order)
blotter.new_orders.remove(order)
def _get_daily_message(self, dt, algo, metrics_tracker):
"""
Get a perf message for the given datetime.
"""
perf_message = metrics_tracker.handle_market_close(
dt,
self.data_portal,
)
perf_message['daily_perf']['recorded_vars'] = algo.recorded_vars
return perf_message
def _get_minute_message(self, dt, algo, metrics_tracker):
"""
Get a perf message for the given datetime.
"""
rvars = algo.recorded_vars
minute_message = metrics_tracker.handle_minute_close(
dt,
self.data_portal,
)
minute_message['minute_perf']['recorded_vars'] = rvars
return minute_message | zipline-trader | /zipline_trader-1.6.1-cp36-cp36m-win32.whl/zipline/gens/tradesimulation.py | tradesimulation.py |
from time import sleep
from logbook import Logger
import pandas as pd
from zipline.gens.sim_engine import (
BAR,
SESSION_START,
SESSION_END,
MINUTE_END,
BEFORE_TRADING_START_BAR
)
log = Logger('Realtime Clock')
class RealtimeClock(object):
"""
Realtime clock for live trading.
This class is a drop-in replacement for
:class:`zipline.gens.sim_engine.MinuteSimulationClock`.
The key difference between the two is that the RealtimeClock's event
emission is synchronized to the (broker's) wall time clock, while
MinuteSimulationClock yields a new event on every iteration (regardless of
wall clock).
The :param:`time_skew` parameter represents the time difference between
the Broker and the live trading machine's clock.
"""
def __init__(self,
sessions,
execution_opens,
execution_closes,
before_trading_start_minutes,
minute_emission,
time_skew=pd.Timedelta("0s"),
is_broker_alive=None,
execution_id=None,
stop_execution_callback=None):
today = pd.to_datetime('now', utc=True).date()
beginning_of_today = pd.to_datetime(today, utc=True)
self.sessions = sessions[(beginning_of_today <= sessions)]
self.execution_opens = execution_opens[(beginning_of_today.tz_localize(None) <= execution_opens)]
self.execution_closes = execution_closes[(beginning_of_today.tz_localize(None) <= execution_closes)]
self.before_trading_start_minutes = before_trading_start_minutes[
(beginning_of_today <= before_trading_start_minutes)]
self.minute_emission = minute_emission
self.time_skew = time_skew
self.is_broker_alive = is_broker_alive or (lambda: True)
self._last_emit = None
self._before_trading_start_bar_yielded = False
self._execution_id = execution_id
self._stop_execution_callback = stop_execution_callback
def __iter__(self):
# yield from self.work_when_out_of_trading_hours()
# return
if not len(self.sessions):
return
for index, session in enumerate(self.sessions):
self._before_trading_start_bar_yielded = False
yield session, SESSION_START
if self._stop_execution_callback:
if self._stop_execution_callback(self._execution_id):
break
while self.is_broker_alive():
if self._stop_execution_callback: # put it here too, to break inner loop as well
if self._stop_execution_callback(self._execution_id):
break
current_time = pd.to_datetime('now', utc=True)
server_time = (current_time + self.time_skew).floor('1 min')
if (server_time >= self.before_trading_start_minutes[index] and
not self._before_trading_start_bar_yielded):
self._last_emit = server_time
self._before_trading_start_bar_yielded = True
yield server_time, BEFORE_TRADING_START_BAR
elif (server_time < self.execution_opens[index].tz_localize('UTC') and index == 0) or \
(self.execution_closes[index - 1].tz_localize('UTC') <= server_time <
self.execution_opens[index].tz_localize('UTC')):
# sleep anywhere between yesterday's close and today's open
sleep(1)
elif (self.execution_opens[index].tz_localize('UTC') <= server_time <
self.execution_closes[index].tz_localize('UTC')):
if (self._last_emit is None or
server_time - self._last_emit >=
pd.Timedelta('1 minute')):
self._last_emit = server_time
yield server_time, BAR
if self.minute_emission:
yield server_time, MINUTE_END
else:
sleep(1)
elif server_time == self.execution_closes[index].tz_localize('UTC'):
self._last_emit = server_time
yield server_time, BAR
if self.minute_emission:
yield server_time, MINUTE_END
yield server_time, SESSION_END
break
elif server_time > self.execution_closes[index].tz_localize('UTC'):
break
else:
# We should never end up in this branch
raise RuntimeError("Invalid state in RealtimeClock")
def work_when_out_of_trading_hours(self):
"""
A debugging helper that emits simulated clock events outside trading
hours, so the engine can still be exercised while the market is closed.
"""
from datetime import timedelta
num_days = 5
from trading_calendars import get_calendar
self.sessions = get_calendar("NYSE").sessions_in_range(
str(pd.to_datetime('now', utc=True).date() - timedelta(days=num_days * 2)),
str(pd.to_datetime('now', utc=True).date() + timedelta(days=num_days * 2))
)
# for day in range(num_days, 0, -1):
for day in range(0, 1):
# current_time = pd.to_datetime('now', utc=True)
current_time = pd.to_datetime('2018/08/25', utc=True)
# server_time = (current_time + self.time_skew).floor('1 min') - timedelta(days=day)
server_time = (current_time + self.time_skew).floor('1 min') + timedelta(days=day)
# yield self.sessions[-1 - day], SESSION_START
yield self.sessions[day], SESSION_START
yield server_time, BEFORE_TRADING_START_BAR
should_end_day = True
counter = 0
num_minutes = 6 * 60
minute_list = []
for i in range(num_minutes + 1):
minute_list.append(pd.to_datetime("13:31", utc=True) + timedelta(minutes=i))
while self.is_broker_alive():
# current_time = pd.to_datetime('now', utc=True)
# server_time = (current_time + self.time_skew).floor('1 min')
# server_time = minute_list[counter] - timedelta(days=day)
server_time = minute_list[counter] + timedelta(days=day)
if counter >= num_minutes and should_end_day:
if self.minute_emission:
yield server_time, MINUTE_END
yield server_time, SESSION_END
break
if self._stop_execution_callback:
if self._stop_execution_callback(self._execution_id):
break
if (self._last_emit is None or
server_time - self._last_emit >=
pd.Timedelta('1 minute')):
self._last_emit = server_time
yield server_time, BAR
counter += 1
if self.minute_emission:
yield server_time, MINUTE_END
sleep(0.5) | zipline-trader | /zipline_trader-1.6.1-cp36-cp36m-win32.whl/zipline/gens/realtimeclock.py | realtimeclock.py |
import alpaca_trade_api as tradeapi
from zipline.gens.brokers.broker import Broker
import zipline.protocol as zp
from zipline.finance.order import (Order as ZPOrder,
ORDER_STATUS as ZP_ORDER_STATUS)
from zipline.finance.execution import (MarketOrder,
LimitOrder,
StopOrder,
StopLimitOrder)
from zipline.finance.transaction import Transaction
from zipline.api import symbol as symbol_lookup
from zipline.errors import SymbolNotFound
import pandas as pd
import numpy as np
import uuid
from logbook import Logger
import sys
if sys.version_info > (3,):
long = int
log = Logger('Alpaca Broker')
NY = 'America/New_York'
class ALPACABroker(Broker):
'''
Broker class for Alpaca.
The uri parameter is not used. Instead, the API key must be
set via environment variables (APCA_API_KEY_ID and APCA_API_SECRET_KEY).
Orders are identified by the UUID (v4) generated here and
    associated on the broker side using the client_order_id attribute.
    Currently this class uses the REST API only, but websocket
    streaming could be used as well.
'''
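    # Illustrative setup sketch (assumed environment, not part of the original
    # code): tradeapi.REST() below is constructed without arguments, so the
    # alpaca SDK is expected to read its credentials from the environment, e.g.
    #
    #     export APCA_API_KEY_ID="<your key id>"
    #     export APCA_API_SECRET_KEY="<your secret>"
    #     export APCA_API_BASE_URL="https://paper-api.alpaca.markets"
    #
    # after which the broker can simply be instantiated as ALPACABroker().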
def __init__(self):
self._api = tradeapi.REST()
def subscribe_to_market_data(self, asset):
        '''Do nothing; present only to satisfy the Broker interface.'''
pass
def subscribed_assets(self):
        '''Return an empty list; present only to satisfy the Broker interface.'''
return []
def set_metrics_tracker(self, metrics_tracker):
self.metrics_tracker = metrics_tracker
@property
def positions(self):
self._get_positions_from_broker()
return self.metrics_tracker.positions
@property
def portfolio(self):
account = self._api.get_account()
z_portfolio = zp.Portfolio()
z_portfolio.cash = float(account.cash)
z_portfolio.positions = self.positions
z_portfolio.positions_value = float(
account.portfolio_value) - float(account.cash)
z_portfolio.portfolio_value = float(account.portfolio_value)
return z_portfolio
@property
def account(self):
account = self._api.get_account()
z_account = zp.Account()
z_account.buying_power = float(account.cash)
z_account.total_position_value = float(
account.portfolio_value) - float(account.cash)
z_account.net_liquidation = account.portfolio_value
return z_account
@property
def time_skew(self):
return pd.Timedelta('0 sec') # TODO: use clock API
def is_alive(self):
try:
self._api.get_account()
return True
except BaseException:
return False
def _order2zp(self, order):
zp_order = ZPOrder(
id=order.client_order_id,
asset=symbol_lookup(order.symbol),
amount=int(order.qty) if order.side == 'buy' else -int(order.qty),
stop=float(order.stop_price) if order.stop_price else None,
limit=float(order.limit_price) if order.limit_price else None,
dt=order.submitted_at,
commission=0,
)
zp_order.status = ZP_ORDER_STATUS.OPEN
if order.canceled_at:
zp_order.status = ZP_ORDER_STATUS.CANCELLED
if order.failed_at:
zp_order.status = ZP_ORDER_STATUS.REJECTED
if order.filled_at:
zp_order.status = ZP_ORDER_STATUS.FILLED
zp_order.filled = int(order.filled_qty)
return zp_order
def _new_order_id(self):
return uuid.uuid4().hex
def order(self, asset, amount, style):
symbol = asset.symbol
qty = amount if amount > 0 else -amount
side = 'buy' if amount > 0 else 'sell'
order_type = 'market'
if isinstance(style, MarketOrder):
order_type = 'market'
elif isinstance(style, LimitOrder):
order_type = 'limit'
elif isinstance(style, StopOrder):
order_type = 'stop'
elif isinstance(style, StopLimitOrder):
order_type = 'stop_limit'
limit_price = style.get_limit_price(side == 'buy') or None
stop_price = style.get_stop_price(side == 'buy') or None
zp_order_id = self._new_order_id()
dt = pd.to_datetime('now', utc=True)
zp_order = ZPOrder(
dt=dt,
asset=asset,
amount=amount,
stop=stop_price,
limit=limit_price,
id=zp_order_id,
)
order = self._api.submit_order(
symbol=symbol,
qty=qty,
side=side,
type=order_type,
time_in_force='day',
limit_price=limit_price,
stop_price=stop_price,
client_order_id=zp_order.id,
)
zp_order = self._order2zp(order)
return zp_order
@property
def orders(self):
orders = {}
for o in self._api.list_orders('all'):
            try:
                orders[o.client_order_id] = self._order2zp(o)
            except Exception:
                # The asset may not exist in the bundle (e.g. the order was
                # placed outside of zipline), so skip it.
                continue
return orders
@property
def transactions(self):
orders = self._api.list_orders(status='closed')
results = {}
for order in orders:
if order.filled_at is None:
continue
tx = Transaction(
asset=symbol_lookup(order.symbol),
amount=int(order.filled_qty),
dt=order.filled_at,
price=float(order.filled_avg_price),
order_id=order.client_order_id)
results[order.client_order_id] = tx
return results
def cancel_order(self, zp_order_id):
try:
order = self._api.get_order_by_client_order_id(zp_order_id)
self._api.cancel_order(order.id)
except Exception as e:
log.error(e)
return
def get_last_traded_dt(self, asset):
quote = self._api.get_quote(asset.symbol)
return pd.Timestamp(quote.last_timestamp)
def get_spot_value(self, assets, field, dt, data_frequency):
assert(field in (
'open', 'high', 'low', 'close', 'volume', 'price', 'last_traded'))
assets_is_scalar = not isinstance(assets, (list, set, tuple))
if assets_is_scalar:
symbols = [assets.symbol]
else:
symbols = [asset.symbol for asset in assets]
if field in ('price', 'last_traded'):
try:
last_trade = self._api.get_last_trade(symbols[0])
return last_trade.price
            except Exception:
return np.nan
bars = self._api.get_barset(symbols, '1Min', limit=1).df
if bars.empty:
return np.nan
        if not np.isnan(bars[assets.symbol][field]).all():
            return float(bars[assets.symbol][field])
        return np.nan
# if assets_is_scalar:
# if len(bars_list) == 0:
# return np.nan
# return bars_list[0].bars[-1]._raw[field]
# bars_map = {a.symbol: a for a in bars_list}
# return [
# bars_map[symbol].bars[-1]._raw[field]
# for symbol in symbols
# ]
def _get_positions_from_broker(self):
"""
        Get the positions from the broker and update the zipline objects (the ledger).
        Should be used once at startup and whenever we want to refresh the positions array.
"""
cur_pos_in_tracker = self.metrics_tracker.positions
positions = self._api.list_positions()
for ap_position in positions:
# ap_position = positions[symbol]
try:
z_position = zp.Position(zp.InnerPosition(symbol_lookup(ap_position.symbol)))
editable_position = zp.MutableView(z_position)
except SymbolNotFound:
# The symbol might not have been ingested to the db therefore
# it needs to be skipped.
log.warning('Wanted to subscribe to %s, but this asset is probably not ingested' % ap_position.symbol)
continue
if int(ap_position.qty) == 0:
continue
editable_position._underlying_position.amount = int(ap_position.qty)
editable_position._underlying_position.cost_basis = float(ap_position.avg_entry_price)
editable_position._underlying_position.last_sale_price = float(ap_position.current_price)
editable_position._underlying_position.last_sale_date = self._api.get_last_trade(ap_position.symbol).timestamp
self.metrics_tracker.update_position(z_position.asset,
amount=z_position.amount,
last_sale_price=z_position.last_sale_price,
last_sale_date=z_position.last_sale_date,
cost_basis=z_position.cost_basis)
# now let's sync the positions in the internal zipline objects
position_names = [p.symbol for p in positions]
assets_to_update = [] # separate list to not change list while iterating
for asset in cur_pos_in_tracker:
if asset.symbol not in position_names:
assets_to_update.append(asset)
for asset in assets_to_update:
            # deleting the object from the metrics_tracker as it's not in the portfolio
self.metrics_tracker.update_position(asset,
amount=0)
# for some reason, the metrics tracker has self.positions AND self.portfolio.positions. let's make sure
# these objects are consistent
self.metrics_tracker._ledger._portfolio.positions = self.metrics_tracker.positions
def get_realtime_bars(self, assets, data_frequency):
# TODO: cache the result. The caller
# (DataPortalLive#get_history_window) makes use of only one
# column at a time.
assets_is_scalar = not isinstance(assets, (list, set, tuple, pd.Index))
is_daily = 'd' in data_frequency # 'daily' or '1d'
if assets_is_scalar:
symbols = [assets.symbol]
else:
symbols = [asset.symbol for asset in assets]
timeframe = '1D' if is_daily else '1Min'
df = self._api.get_barset(symbols, timeframe, limit=500).df
if not is_daily:
df = df.between_time("09:30", "16:00")
return df | zipline-trader | /zipline_trader-1.6.1-cp36-cp36m-win32.whl/zipline/gens/brokers/alpaca_broker.py | alpaca_broker.py |
import sys
from collections import namedtuple, defaultdict, OrderedDict
from time import sleep
from math import fabs
from six import iteritems, itervalues
import polling
import pandas as pd
import numpy as np
from zipline.gens.brokers.broker import Broker
from zipline.finance.order import (Order as ZPOrder,
ORDER_STATUS as ZP_ORDER_STATUS)
from zipline.finance.execution import (MarketOrder,
LimitOrder,
StopOrder,
StopLimitOrder)
from zipline.finance.transaction import Transaction
import zipline.protocol as zp
from zipline.protocol import MutableView
from zipline.api import symbol as symbol_lookup
from zipline.errors import SymbolNotFound
from ib.ext.EClientSocket import EClientSocket
from ib.ext.EWrapper import EWrapper
from ib.ext.Contract import Contract
from ib.ext.Order import Order
from ib.ext.ExecutionFilter import ExecutionFilter
from ib.ext.EClientErrors import EClientErrors
from logbook import Logger
if sys.version_info > (3,):
long = int
log = Logger('IB Broker')
Position = namedtuple('Position', ['contract', 'position', 'market_price',
'market_value', 'average_cost',
'unrealized_pnl', 'realized_pnl',
'account_name'])
_max_wait_subscribe = 10  # seconds to wait for the first bar after subscribing
_connection_timeout = 15 # Seconds
_poll_frequency = 0.1
symbol_to_exchange = defaultdict(lambda: 'SMART')
symbol_to_exchange['VIX'] = 'CBOE'
symbol_to_exchange['SPX'] = 'CBOE'
symbol_to_exchange['VIX3M'] = 'CBOE'
symbol_to_exchange['VXST'] = 'CBOE'
symbol_to_exchange['VXMT'] = 'CBOE'
symbol_to_exchange['GVZ'] = 'CBOE'
symbol_to_exchange['GLD'] = 'ARCA'
symbol_to_exchange['GDX'] = 'ARCA'
symbol_to_exchange['GPRO'] = 'SMART/NASDAQ'
symbol_to_exchange['MSFT'] = 'SMART/NASDAQ'
symbol_to_exchange['CSCO'] = 'SMART/NASDAQ'
symbol_to_sec_type = defaultdict(lambda: 'STK')
symbol_to_sec_type['VIX'] = 'IND'
symbol_to_sec_type['VIX3M'] = 'IND'
symbol_to_sec_type['VXST'] = 'IND'
symbol_to_sec_type['VXMT'] = 'IND'
symbol_to_sec_type['GVZ'] = 'IND'
symbol_to_sec_type['SPX'] = 'IND'
def log_message(message, mapping):
try:
del (mapping['self'])
except (KeyError,):
pass
items = list(mapping.items())
items.sort()
log.debug(('### %s' % (message,)))
for k, v in items:
log.debug((' %s:%s' % (k, v)))
def _method_params_to_dict(args):
return {k: v
for k, v in iteritems(args)
if k != 'self'}
class TWSConnection(EClientSocket, EWrapper):
def __init__(self, tws_uri):
"""
:param tws_uri: host:listening_port:client_id
- host ip of running tws or ibgw
- port, default for tws 7496 and for ibgw 4002
- your client id, could be any number as long as it's not already used
"""
EWrapper.__init__(self)
EClientSocket.__init__(self, anyWrapper=self)
self.tws_uri = tws_uri
host, port, client_id = self.tws_uri.split(':')
self._host = host
self._port = int(port)
self.client_id = int(client_id)
self._next_ticker_id = 0
self._next_request_id = 0
self._next_order_id = None
self.managed_accounts = None
self.symbol_to_ticker_id = {}
self.ticker_id_to_symbol = {}
self.last_tick = defaultdict(dict)
self.bars = {}
# accounts structure: accounts[account_id][currency][value]
self.accounts = defaultdict(
lambda: defaultdict(lambda: defaultdict(lambda: np.NaN)))
self.accounts_download_complete = False
self.positions = {}
self.portfolio = {}
self.open_orders = {}
self.order_statuses = {}
self.executions = defaultdict(OrderedDict)
self.commissions = defaultdict(OrderedDict)
self._execution_to_order_id = {}
self.time_skew = None
self.unrecoverable_error = False
self.connect()
def connect(self):
log.info("Connecting: {}:{}:{}".format(self._host, self._port,
self.client_id))
self.eConnect(self._host, self._port, self.client_id)
timeout = _connection_timeout
while timeout > 0 and not self.isConnected():
sleep(_poll_frequency)
timeout -= _poll_frequency
else:
if not self.isConnected():
raise SystemError("Connection timeout during TWS connection!")
self._download_account_details()
log.info("Managed accounts: {}".format(self.managed_accounts))
self.reqCurrentTime()
self.reqIds(1)
while self.time_skew is None or self._next_order_id is None:
sleep(_poll_frequency)
log.info("Local-Broker Time Skew: {}".format(self.time_skew))
def _download_account_details(self):
exec_filter = ExecutionFilter()
exec_filter.m_clientId = self.client_id
self.reqExecutions(self.next_request_id, exec_filter)
self.reqManagedAccts()
while self.managed_accounts is None:
sleep(_poll_frequency)
for account in self.managed_accounts:
self.reqAccountUpdates(subscribe=True, acctCode=account)
while self.accounts_download_complete is False:
sleep(_poll_frequency)
@property
def next_ticker_id(self):
ticker_id = self._next_ticker_id
self._next_ticker_id += 1
return ticker_id
@property
def next_request_id(self):
request_id = self._next_request_id
self._next_request_id += 1
return request_id
@property
def next_order_id(self):
order_id = self._next_order_id
self._next_order_id += 1
return order_id
def subscribe_to_market_data(self,
symbol,
sec_type='STK',
exchange='SMART',
currency='USD'):
if symbol in self.symbol_to_ticker_id:
# Already subscribed to market data
return
contract = Contract()
contract.m_symbol = symbol
contract.m_secType = symbol_to_sec_type[symbol]
contract.m_exchange = symbol_to_exchange[symbol]
contract.m_currency = currency
ticker_id = self.next_ticker_id
self.symbol_to_ticker_id[symbol] = ticker_id
self.ticker_id_to_symbol[ticker_id] = symbol
# INDEX tickers cannot be requested with market data. The data can,
# however, be requested with realtimeBars. This change will make
# sure we can request data from INDEX tickers like SPX, VIX, etc.
if contract.m_secType == 'IND':
self.reqRealTimeBars(ticker_id, contract, 60, 'TRADES', True)
else:
tick_list = "233" # RTVolume, return tick_type == 48
self.reqMktData(ticker_id, contract, tick_list, False)
        # give TWS a moment to start streaming ticks for the new subscription
        # before returning
        sleep(11)
def _process_tick(self, ticker_id, tick_type, value):
try:
symbol = self.ticker_id_to_symbol[ticker_id]
except KeyError:
log.error("Tick {} for id={} is not registered".format(tick_type,
ticker_id))
return
if tick_type == 48:
# RT Volume Bar. Format:
# Last trade price; Last trade size;Last trade time;Total volume;\
# VWAP;Single trade flag
# e.g.: 701.28;1;1348075471534;67854;701.46918464;true
(last_trade_price, last_trade_size, last_trade_time, total_volume,
vwap, single_trade_flag) = value.split(';')
# Ignore this update if last_trade_price is empty:
# tickString: tickerId=0 tickType=48/RTVolume ;0;1469805548873;\
# 240304;216.648653;true
if len(last_trade_price) == 0:
return
last_trade_dt = pd.to_datetime(float(last_trade_time), unit='ms',
utc=True)
self._add_bar(symbol, float(last_trade_price),
int(last_trade_size), last_trade_dt,
int(total_volume), float(vwap),
single_trade_flag)
def _add_bar(self, symbol, last_trade_price, last_trade_size,
last_trade_time, total_volume, vwap, single_trade_flag):
bar = pd.DataFrame(index=pd.DatetimeIndex([last_trade_time]),
data={'last_trade_price': last_trade_price,
'last_trade_size': last_trade_size,
'total_volume': total_volume,
'vwap': vwap,
'single_trade_flag': single_trade_flag})
if symbol not in self.bars:
self.bars[symbol] = bar
else:
self.bars[symbol] = self.bars[symbol].append(bar)
def tickPrice(self, ticker_id, field, price, can_auto_execute):
self._process_tick(ticker_id, tick_type=field, value=price)
def tickSize(self, ticker_id, field, size):
self._process_tick(ticker_id, tick_type=field, value=size)
def tickOptionComputation(self,
ticker_id, field, implied_vol, delta, opt_price,
pv_dividend, gamma, vega, theta, und_price):
log_message('tickOptionComputation', vars())
def tickGeneric(self, ticker_id, tick_type, value):
self._process_tick(ticker_id, tick_type=tick_type, value=value)
def tickString(self, ticker_id, tick_type, value):
self._process_tick(ticker_id, tick_type=tick_type, value=value)
def tickEFP(self, ticker_id, tick_type, basis_points,
formatted_basis_points, implied_future, hold_days,
future_expiry, dividend_impact, dividends_to_expiry):
log_message('tickEFP', vars())
def updateAccountValue(self, key, value, currency, account_name):
self.accounts[account_name][currency][key] = value
def updatePortfolio(self,
contract,
position,
market_price,
market_value,
average_cost,
unrealized_pnl,
realized_pnl,
account_name):
symbol = contract.m_symbol
position = Position(contract=contract,
position=position,
market_price=market_price,
market_value=market_value,
average_cost=average_cost,
unrealized_pnl=unrealized_pnl,
realized_pnl=realized_pnl,
account_name=account_name)
self.positions[symbol] = position
def updateAccountTime(self, time_stamp):
pass
def accountDownloadEnd(self, account_name):
self.accounts_download_complete = True
def nextValidId(self, order_id):
self._next_order_id = order_id
def contractDetails(self, req_id, contract_details):
log_message('contractDetails', vars())
def contractDetailsEnd(self, req_id):
log_message('contractDetailsEnd', vars())
def bondContractDetails(self, req_id, contract_details):
log_message('bondContractDetails', vars())
def orderStatus(self, order_id, status, filled, remaining, avg_fill_price,
perm_id, parent_id, last_fill_price, client_id, why_held):
self.order_statuses[order_id] = _method_params_to_dict(vars())
log.debug(
"Order-{order_id} {status}: "
"filled={filled} remaining={remaining} "
"avg_fill_price={avg_fill_price} "
"last_fill_price={last_fill_price} ".format(
order_id=order_id,
status=self.order_statuses[order_id]['status'],
filled=self.order_statuses[order_id]['filled'],
remaining=self.order_statuses[order_id]['remaining'],
avg_fill_price=self.order_statuses[order_id]['avg_fill_price'],
last_fill_price=self.order_statuses[order_id]['last_fill_price']))
def openOrder(self, order_id, contract, order, state):
self.open_orders[order_id] = _method_params_to_dict(vars())
log.debug(
"Order-{order_id} {status}: "
"{order_action} {order_count} {symbol} with {order_type} order. "
"limit_price={limit_price} stop_price={stop_price}".format(
order_id=order_id,
status=state.m_status,
order_action=order.m_action,
order_count=order.m_totalQuantity,
symbol=contract.m_symbol,
order_type=order.m_orderType,
limit_price=order.m_lmtPrice,
stop_price=order.m_auxPrice))
def openOrderEnd(self):
pass
def execDetails(self, req_id, contract, exec_detail):
order_id, exec_id = exec_detail.m_orderId, exec_detail.m_execId
self.executions[order_id][exec_id] = _method_params_to_dict(vars())
self._execution_to_order_id[exec_id] = order_id
log.info(
"Order-{order_id} executed @ {exec_time}: "
"{symbol} current: {shares} @ ${price} "
"total: {cum_qty} @ ${avg_price} "
"exec_id: {exec_id} by client-{client_id}".format(
order_id=order_id, exec_id=exec_id,
exec_time=pd.to_datetime(exec_detail.m_time),
symbol=contract.m_symbol,
shares=exec_detail.m_shares,
price=exec_detail.m_price,
cum_qty=exec_detail.m_cumQty,
avg_price=exec_detail.m_avgPrice,
client_id=exec_detail.m_clientId))
def execDetailsEnd(self, req_id):
log.debug(
"Execution details completed for request {req_id}".format(
req_id=req_id))
def commissionReport(self, commission_report):
exec_id = commission_report.m_execId
order_id = self._execution_to_order_id[commission_report.m_execId]
self.commissions[order_id][exec_id] = commission_report
log.debug(
"Order-{order_id} report: "
"realized_pnl: ${realized_pnl} "
"commission: ${commission} yield: {yield_} "
"exec_id: {exec_id}".format(
order_id=order_id,
exec_id=commission_report.m_execId,
realized_pnl=commission_report.m_realizedPNL
if commission_report.m_realizedPNL != sys.float_info.max
else 0,
commission=commission_report.m_commission,
yield_=commission_report.m_yield
if commission_report.m_yield != sys.float_info.max
else 0)
)
def connectionClosed(self):
self.unrecoverable_error = True
log.error("IB Connection closed")
def error(self, id_=None, error_code=None, error_msg=None):
if isinstance(id_, Exception):
# XXX: for an unknown reason 'log' is None in this branch,
# therefore it needs to be instantiated before use
global log
if not log:
log = Logger('IB Broker')
log.exception(id_)
if isinstance(error_code, EClientErrors.CodeMsgPair):
error_msg = error_code.msg()
error_code = error_code.code()
if isinstance(error_code, int):
if error_code in (502, 503, 326):
# 502: Couldn't connect to TWS.
# 503: The TWS is out of date and must be upgraded.
# 326: Unable connect as the client id is already in use.
self.unrecoverable_error = True
if error_code < 1000:
log.error("[{}] {} ({})".format(error_code, error_msg, id_))
else:
log.info("[{}] {} ({})".format(error_code, error_msg, id_))
else:
log.error("[{}] {} ({})".format(error_code, error_msg, id_))
def updateMktDepth(self, ticker_id, position, operation, side, price,
size):
log_message('updateMktDepth', vars())
def updateMktDepthL2(self, ticker_id, position, market_maker, operation,
side, price, size):
log_message('updateMktDepthL2', vars())
def updateNewsBulletin(self, msg_id, msg_type, message, orig_exchange):
log_message('updateNewsBulletin', vars())
def managedAccounts(self, accounts_list):
self.managed_accounts = accounts_list.split(',')
def receiveFA(self, fa_data_type, xml):
log_message('receiveFA', vars())
def historicalData(self, req_id, date, open_, high, low, close, volume,
count, wap, has_gaps):
log_message('historicalData', vars())
def scannerParameters(self, xml):
log_message('scannerParameters', vars())
def scannerData(self, req_id, rank, contract_details, distance, benchmark,
projection, legs_str):
log_message('scannerData', vars())
def currentTime(self, time):
self.time_skew = (pd.to_datetime('now', utc=True) -
pd.to_datetime(long(time), unit='s', utc=True))
def deltaNeutralValidation(self, req_id, under_comp):
log_message('deltaNeutralValidation', vars())
def fundamentalData(self, req_id, data):
log_message('fundamentalData', vars())
def marketDataType(self, req_id, market_data_type):
log_message('marketDataType', vars())
def realtimeBar(self, req_id, time, open_, high, low, close, volume, wap,
count):
value = (";".join([str(close), str(count), str(time), str(volume),
str(wap), "true"]))
self._process_tick(req_id, tick_type=48, value=value)
def scannerDataEnd(self, req_id):
log_message('scannerDataEnd', vars())
def tickSnapshotEnd(self, req_id):
log_message('tickSnapshotEnd', vars())
def position(self, account, contract, pos, avg_cost):
log_message('position', vars())
def positionEnd(self):
log_message('positionEnd', vars())
def accountSummary(self, req_id, account, tag, value, currency):
log_message('accountSummary', vars())
def accountSummaryEnd(self, req_id):
log_message('accountSummaryEnd', vars())
class IBBroker(Broker):
def __init__(self, tws_uri, account_id=None):
"""
:param tws_uri: host:listening_port:client_id
- host ip of running tws or ibgw
- port, default for tws 7496 and for ibgw 4002
- your client id, could be any number as long as it's not already used
"""
self._tws_uri = tws_uri
self._orders = {}
self._transactions = {}
self._tws = TWSConnection(tws_uri)
self.account_id = (self._tws.managed_accounts[0] if account_id is None
else account_id)
self.currency = 'USD'
self._subscribed_assets = []
super(self.__class__, self).__init__()
@property
def subscribed_assets(self):
return self._subscribed_assets
def subscribe_to_market_data(self, asset):
if asset not in self.subscribed_assets:
log.info("Subscribing to market data for {}".format(
asset))
# remove str() cast to have a fun debugging journey
self._tws.subscribe_to_market_data(str(asset.symbol))
self._subscribed_assets.append(asset)
try:
polling.poll(
lambda: asset.symbol in self._tws.bars,
timeout=_max_wait_subscribe,
step=_poll_frequency)
            except polling.TimeoutException:
                log.warning('Failed to subscribe to market data for %s '
                            'within the timeout' % str(asset.symbol))
else:
log.debug("Subscription completed")
@property
def positions(self):
self._get_positions_from_broker()
return self.metrics_tracker.positions
def _get_positions_from_broker(self):
"""
        Get the positions from the broker and update the zipline objects (the ledger).
        Should be used once at startup and whenever we want to refresh the positions array.
"""
cur_pos_in_tracker = self.metrics_tracker.positions
for symbol in self._tws.positions:
ib_position = self._tws.positions[symbol]
try:
z_position = zp.Position(zp.InnerPosition(symbol_lookup(symbol)))
editable_position = MutableView(z_position)
except SymbolNotFound:
# The symbol might not have been ingested to the db therefore
# it needs to be skipped.
log.warning('Wanted to subscribe to %s, but this asset is probably not ingested' % symbol)
continue
editable_position._underlying_position.amount = int(ib_position.position)
editable_position._underlying_position.cost_basis = float(ib_position.average_cost)
# Check if symbol exists in bars df
if symbol in self._tws.bars:
editable_position._underlying_position.last_sale_price = \
float(self._tws.bars[symbol].last_trade_price.iloc[-1])
editable_position._underlying_position.last_sale_date = \
self._tws.bars[symbol].index.values[-1]
else:
# editable_position._underlying_position.last_sale_price = None # this cannot be set to None. only numbers.
editable_position._underlying_position.last_sale_date = None
self.metrics_tracker.update_position(z_position.asset,
amount=z_position.amount,
last_sale_price=z_position.last_sale_price,
last_sale_date=z_position.last_sale_date,
cost_basis=z_position.cost_basis)
for asset in cur_pos_in_tracker:
if asset.symbol not in self._tws.positions:
                # deleting the object from the metrics_tracker as it's not in the portfolio
self.metrics_tracker.update_position(asset,
amount=0)
# for some reason, the metrics tracker has self.positions AND self.portfolio.positions. let's make sure
# these objects are consistent
# (self.portfolio.positions is self.metrics_tracker._ledger._portfolio.positions)
# (self.metrics_tracker.positions is self.metrics_tracker._ledger.position_tracker.positions)
self.metrics_tracker._ledger._portfolio.positions = self.metrics_tracker.positions
@property
def portfolio(self):
        # accessing self.positions triggers a sync of broker positions into
        # the metrics tracker before we return the portfolio object
        positions = self.positions  # noqa
return self.metrics_tracker.portfolio
def get_account_from_broker(self):
ib_account = self._tws.accounts[self.account_id][self.currency]
return ib_account
def set_metrics_tracker(self, metrics_tracker):
self.metrics_tracker = metrics_tracker
@property
def account(self):
ib_account = self._tws.accounts[self.account_id][self.currency]
self.metrics_tracker.override_account_fields(
settled_cash=float(ib_account['CashBalance']),
accrued_interest=float(ib_account['AccruedCash']),
buying_power=float(ib_account['BuyingPower']),
equity_with_loan=float(ib_account['EquityWithLoanValue']),
total_positions_value=float(ib_account['StockMarketValue']),
total_positions_exposure=float(
(float(ib_account['StockMarketValue']) /
(float(ib_account['StockMarketValue']) +
float(ib_account['TotalCashValue'])))),
regt_equity=float(ib_account['RegTEquity']),
regt_margin=float(ib_account['RegTMargin']),
initial_margin_requirement=float(
ib_account['FullInitMarginReq']),
maintenance_margin_requirement=float(
ib_account['FullMaintMarginReq']),
available_funds=float(ib_account['AvailableFunds']),
excess_liquidity=float(ib_account['ExcessLiquidity']),
cushion=float(
self._tws.accounts[self.account_id]['']['Cushion']),
day_trades_remaining=float(
self._tws.accounts[self.account_id]['']['DayTradesRemaining']),
leverage=float(
self._tws.accounts[self.account_id]['']['Leverage-S']),
net_leverage=(
float(ib_account['StockMarketValue']) /
(float(ib_account['TotalCashValue']) +
float(ib_account['StockMarketValue']))),
net_liquidation=float(ib_account['NetLiquidation'])
)
return self.metrics_tracker.account
@property
def time_skew(self):
return self._tws.time_skew
def is_alive(self):
return not self._tws.unrecoverable_error
@staticmethod
def _safe_symbol_lookup(symbol):
try:
return symbol_lookup(symbol)
except SymbolNotFound:
return None
_zl_order_ref_magic = '!ZL'
@classmethod
    def _create_order_ref(cls, ib_order, dt=None):
        # The timestamp must be taken at call time; a default argument would
        # only be evaluated once, when the class is defined.
        if dt is None:
            dt = pd.to_datetime('now', utc=True)
        order_type = ib_order.m_orderType.replace(' ', '_')
return \
"A:{action} Q:{qty} T:{order_type} " \
"L:{limit_price} S:{stop_price} D:{date} {magic}".format(
action=ib_order.m_action,
qty=ib_order.m_totalQuantity,
order_type=order_type,
limit_price=ib_order.m_lmtPrice,
stop_price=ib_order.m_auxPrice,
date=int(dt.value / 1e9),
magic=cls._zl_order_ref_magic)
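    # Example of the encoded order ref produced above (illustrative values):
    #
    #     "A:BUY Q:100 T:LMT L:135.5 S:0.0 D:1607000000 !ZL"
    #
    # _parse_order_ref() below reverses this encoding into a dict with
    # 'action', 'qty', 'order_type', 'limit_price', 'stop_price' and 'dt';
    # refs that do not end with the magic suffix are rejected (None).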
@classmethod
def _parse_order_ref(cls, ib_order_ref):
if not ib_order_ref or \
not ib_order_ref.endswith(cls._zl_order_ref_magic):
return None
try:
action, qty, order_type, limit_price, stop_price, dt, _ = \
ib_order_ref.split(' ')
if not all(
[action.startswith('A:'),
qty.startswith('Q:'),
order_type.startswith('T:'),
limit_price.startswith('L:'),
stop_price.startswith('S:'),
dt.startswith('D:')]):
return None
return {
'action': action[2:],
'qty': int(qty[2:]),
'order_type': order_type[2:].replace('_', ' '),
'limit_price': float(limit_price[2:]),
'stop_price': float(stop_price[2:]),
'dt': pd.to_datetime(dt[2:], unit='s', utc=True)}
except ValueError:
log.warning("Error parsing order metadata: {}".format(
ib_order_ref))
return None
def order(self, asset, amount, style):
contract = Contract()
contract.m_symbol = str(asset.symbol)
contract.m_currency = self.currency
contract.m_exchange = symbol_to_exchange[str(asset.symbol)]
contract.m_secType = symbol_to_sec_type[str(asset.symbol)]
order = Order()
order.m_totalQuantity = int(fabs(amount))
order.m_action = "BUY" if amount > 0 else "SELL"
is_buy = (amount > 0)
order.m_lmtPrice = style.get_limit_price(is_buy) or 0
order.m_auxPrice = style.get_stop_price(is_buy) or 0
if isinstance(style, MarketOrder):
order.m_orderType = "MKT"
elif isinstance(style, LimitOrder):
order.m_orderType = "LMT"
elif isinstance(style, StopOrder):
order.m_orderType = "STP"
elif isinstance(style, StopLimitOrder):
order.m_orderType = "STP LMT"
# TODO: Support GTC orders both here and at blotter_live
order.m_tif = "DAY"
order.m_orderRef = self._create_order_ref(order)
ib_order_id = self._tws.next_order_id
zp_order = self._get_or_create_zp_order(ib_order_id, order, contract)
log.info(
"Placing order-{order_id}: "
"{action} {qty} {symbol} with {order_type} order. "
"limit_price={limit_price} stop_price={stop_price} {tif}".format(
order_id=ib_order_id,
action=order.m_action,
qty=order.m_totalQuantity,
symbol=contract.m_symbol,
order_type=order.m_orderType,
limit_price=order.m_lmtPrice,
stop_price=order.m_auxPrice,
tif=order.m_tif
))
self._tws.placeOrder(ib_order_id, contract, order)
return zp_order
@property
def orders(self):
self._update_orders()
return self._orders
def _ib_to_zp_order_id(self, ib_order_id):
return "IB-{date}-{account_id}-{client_id}-{order_id}".format(
date=str(pd.to_datetime('today').date()),
account_id=self.account_id,
client_id=self._tws.client_id,
order_id=ib_order_id)
@staticmethod
def _action_qty_to_amount(action, qty):
return qty if action == 'BUY' else -1 * qty
def _get_or_create_zp_order(self, ib_order_id,
ib_order=None, ib_contract=None):
zp_order_id = self._ib_to_zp_order_id(ib_order_id)
if zp_order_id in self._orders:
return self._orders[zp_order_id]
# Try to reconstruct the order from the given information:
# open order state and execution state
symbol, order_details = None, None
if ib_order and ib_contract:
symbol = ib_contract.m_symbol
order_details = self._parse_order_ref(ib_order.m_orderRef)
if not order_details and ib_order_id in self._tws.open_orders:
open_order = self._tws.open_orders[ib_order_id]
symbol = open_order['contract'].m_symbol
order_details = self._parse_order_ref(
open_order['order'].m_orderRef)
if not order_details and ib_order_id in self._tws.executions:
executions = self._tws.executions[ib_order_id]
last_exec_detail = list(executions.values())[-1]['exec_detail']
last_exec_contract = list(executions.values())[-1]['contract']
symbol = last_exec_contract.m_symbol
order_details = self._parse_order_ref(last_exec_detail.m_orderRef)
asset = self._safe_symbol_lookup(symbol)
if not asset:
log.warning(
"Ignoring symbol {symbol} which has associated "
"order but it is not registered in bundle".format(
symbol=symbol))
return None
if order_details:
amount = self._action_qty_to_amount(order_details['action'],
order_details['qty'])
stop_price = order_details['stop_price']
limit_price = order_details['limit_price']
dt = order_details['dt']
else:
dt = pd.to_datetime('now', utc=True)
amount, stop_price, limit_price = 0, None, None
if ib_order_id in self._tws.open_orders:
open_order = self._tws.open_orders[ib_order_id]['order']
amount = self._action_qty_to_amount(
open_order.m_action, open_order.m_totalQuantity)
stop_price = open_order.m_auxPrice
limit_price = open_order.m_lmtPrice
stop_price = None if stop_price == 0 else stop_price
limit_price = None if limit_price == 0 else limit_price
self._orders[zp_order_id] = ZPOrder(
dt=dt,
asset=asset,
amount=amount,
stop=stop_price,
limit=limit_price,
id=zp_order_id)
self._orders[zp_order_id].broker_order_id = ib_order_id
return self._orders[zp_order_id]
@staticmethod
def _ib_to_zp_status(ib_status):
ib_status = ib_status.lower()
if ib_status == 'submitted':
return ZP_ORDER_STATUS.OPEN
elif ib_status in ('pendingsubmit',
'pendingcancel',
'presubmitted'):
return ZP_ORDER_STATUS.HELD
elif ib_status == 'cancelled':
return ZP_ORDER_STATUS.CANCELLED
elif ib_status == 'filled':
return ZP_ORDER_STATUS.FILLED
elif ib_status == 'inactive':
return ZP_ORDER_STATUS.REJECTED
else:
return None
def _update_orders(self):
def _update_from_order_status(zp_order, ib_order_id):
if ib_order_id in self._tws.open_orders:
open_order_state = self._tws.open_orders[ib_order_id]['state']
zp_status = self._ib_to_zp_status(open_order_state.m_status)
if zp_status is None:
log.warning(
"Order-{order_id}: "
"unknown order status: {order_status}.".format(
order_id=ib_order_id,
order_status=open_order_state.m_status))
else:
zp_order.status = zp_status
if ib_order_id in self._tws.order_statuses:
order_status = self._tws.order_statuses[ib_order_id]
zp_order.filled = order_status['filled']
zp_status = self._ib_to_zp_status(order_status['status'])
if zp_status:
zp_order.status = zp_status
else:
log.warning("Order-{order_id}: "
"unknown order status: {order_status}."
.format(order_id=ib_order_id,
order_status=order_status['status']))
def _update_from_execution(zp_order, ib_order_id):
if ib_order_id in self._tws.executions and \
ib_order_id not in self._tws.open_orders:
zp_order.status = ZP_ORDER_STATUS.FILLED
executions = self._tws.executions[ib_order_id]
last_exec_detail = \
list(executions.values())[-1]['exec_detail']
zp_order.filled = last_exec_detail.m_cumQty
all_ib_order_ids = (set([e.broker_order_id
for e in self._orders.values()]) |
set(self._tws.open_orders.keys()) |
set(self._tws.order_statuses.keys()) |
set(self._tws.executions.keys()) |
set(self._tws.commissions.keys()))
for ib_order_id in all_ib_order_ids:
zp_order = self._get_or_create_zp_order(ib_order_id)
if zp_order:
_update_from_execution(zp_order, ib_order_id)
_update_from_order_status(zp_order, ib_order_id)
@property
def transactions(self):
self._update_transactions()
return self._transactions
def _update_transactions(self):
all_orders = list(self.orders.values())
for ib_order_id, executions in iteritems(self._tws.executions):
orders = [order
for order in all_orders
if order.broker_order_id == ib_order_id]
if not orders:
log.warning("No order found for executions: {}".format(
executions))
continue
assert len(orders) == 1
order = orders[0]
for exec_id, execution in iteritems(executions):
if exec_id in self._transactions:
continue
try:
commission = self._tws.commissions[ib_order_id][exec_id] \
.m_commission
except KeyError:
log.warning(
"Commission not found for execution: {}".format(
exec_id))
commission = 0
exec_detail = execution['exec_detail']
is_buy = order.amount > 0
amount = (exec_detail.m_shares if is_buy
else -1 * exec_detail.m_shares)
tx = Transaction(
asset=order.asset,
amount=amount,
dt=pd.to_datetime(exec_detail.m_time, utc=True),
price=exec_detail.m_price,
order_id=order.id
)
self._transactions[exec_id] = tx
def cancel_order(self, zp_order_id):
ib_order_id = self.orders[zp_order_id].broker_order_id
self._tws.cancelOrder(ib_order_id)
def get_spot_value(self, assets, field, dt, data_frequency):
symbol = str(assets.symbol)
self.subscribe_to_market_data(assets)
        bars = self._tws.bars[symbol]
        if bars.empty:
            # No bars received yet; check this before touching bars.index.
            return pd.NaT if field == 'last_traded' else np.NaN
        else:
            last_event_time = bars.index[-1]
            minute_start = (last_event_time - pd.Timedelta('1 min')) \
                .time()
            minute_end = last_event_time.time()
if field == 'price':
return bars.last_trade_price.iloc[-1]
elif field == 'last_traded':
return last_event_time or pd.NaT
minute_df = bars.between_time(minute_start, minute_end,
include_start=True, include_end=True)
if minute_df.empty:
return np.NaN
else:
if field == 'open':
return minute_df.last_trade_price.iloc[0]
elif field == 'close':
return minute_df.last_trade_price.iloc[-1]
elif field == 'high':
return minute_df.last_trade_price.max()
elif field == 'low':
return minute_df.last_trade_price.min()
elif field == 'volume':
return minute_df.last_trade_size.sum()
def get_last_traded_dt(self, asset):
self.subscribe_to_market_data(asset)
return self._tws.bars[asset.symbol].index[-1]
def get_realtime_bars(self, assets, frequency):
if frequency == '1m':
resample_freq = '1 Min'
elif frequency == '1d':
resample_freq = '24 H'
else:
raise ValueError("Invalid frequency specified: %s" % frequency)
df = pd.DataFrame()
for asset in assets:
symbol = str(asset.symbol)
self.subscribe_to_market_data(asset)
trade_prices = self._tws.bars[symbol]['last_trade_price']
trade_sizes = self._tws.bars[symbol]['last_trade_size']
ohlcv = trade_prices.resample(resample_freq).ohlc()
ohlcv['volume'] = trade_sizes.resample(resample_freq).sum()
# Add asset as level 0 column; ohlcv will be used as level 1 cols
ohlcv.columns = pd.MultiIndex.from_product([[symbol, ],
ohlcv.columns])
df = pd.concat([df, ohlcv], axis=1)
return df | zipline-trader | /zipline_trader-1.6.1-cp36-cp36m-win32.whl/zipline/gens/brokers/ib_broker.py | ib_broker.py |
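    # Illustrative layout of the frame returned above for assets AAPL and MSFT
    # at frequency '1m' (values are made up):
    #
    #                                AAPL                         MSFT
    #                                open   high    low  close    open  ...
    #     2020-12-01 14:31:00+00:00  121.1  121.3  121.0  121.2   214.0 ...
    #
    # i.e. a DataFrame whose columns are a two-level index of
    # (symbol, ohlcv field), resampled to the requested frequency.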
import os
import yaml
CONFIG_PATH = os.environ.get("ZIPLINE_TRADER_CONFIG")
if CONFIG_PATH:
with open(CONFIG_PATH, mode='r') as f:
ZIPLINE_CONFIG = yaml.safe_load(f)
def db_backend_configured():
if CONFIG_PATH and ZIPLINE_CONFIG.get("backend"):
return ZIPLINE_CONFIG["backend"].get("type", False)
else:
return os.environ.get('ZIPLINE_DATA_BACKEND')
class PostgresDB:
if CONFIG_PATH:
if ZIPLINE_CONFIG.get("backend"):
pg = ZIPLINE_CONFIG["backend"]["postgres"]
else:
pg = None
@property
def host(self):
"""
you could define it in the zipline-trader config file or
override it with this env variable: ZIPLINE_DATA_BACKEND_HOST
:return:
"""
val = None
if os.environ.get('ZIPLINE_DATA_BACKEND_HOST'):
val = os.environ.get('ZIPLINE_DATA_BACKEND_HOST')
elif CONFIG_PATH and self.pg.get('host'):
val = self.pg.get('host')
if not val:
raise Exception("Postgres host not defined by user")
return val
@property
def port(self):
"""
you could define it in the zipline-trader config file or
override it with this env variable: ZIPLINE_DATA_BACKEND_PORT
:return:
"""
val = None
if os.environ.get('ZIPLINE_DATA_BACKEND_PORT'):
val = os.environ.get('ZIPLINE_DATA_BACKEND_PORT')
elif CONFIG_PATH and self.pg.get('port'):
val = self.pg.get('port')
if not val:
raise Exception("Postgres port not defined by user")
return int(val)
@property
def user(self):
"""
you could define it in the zipline-trader config file or
override it with this env variable: ZIPLINE_DATA_BACKEND_USER
:return:
"""
val = None
if os.environ.get('ZIPLINE_DATA_BACKEND_USER'):
val = os.environ.get('ZIPLINE_DATA_BACKEND_USER')
elif CONFIG_PATH and self.pg.get('user'):
val = self.pg.get('user')
if not val:
raise Exception("Postgres user not defined by user")
return val
@property
def password(self):
"""
you could define it in the zipline-trader config file or
override it with this env variable: ZIPLINE_DATA_BACKEND_PASSWORD
:return:
"""
val = None
if os.environ.get('ZIPLINE_DATA_BACKEND_PASSWORD'):
val = os.environ.get('ZIPLINE_DATA_BACKEND_PASSWORD')
elif CONFIG_PATH and self.pg.get('password'):
val = self.pg.get('password')
if not val:
raise Exception("Postgres password not defined by user")
return val
if __name__ == '__main__':
print(ZIPLINE_CONFIG)
db = PostgresDB()
print(db.host)
os.environ["ZIPLINE_DATA_BACKEND_HOST"] = "localhost"
print(db.host)
del db.pg["host"]
del os.environ["ZIPLINE_DATA_BACKEND_HOST"]
try:
print(db.host)
except Exception as e:
print(e)
print(db.port)
os.environ["ZIPLINE_DATA_BACKEND_PORT"] = "5433"
assert 5433 == db.port
print(db.user)
os.environ["ZIPLINE_DATA_BACKEND_USER"] = "userrr"
assert "userrr" == db.user
print(db.password)
os.environ["ZIPLINE_DATA_BACKEND_PASSWORD"] = "passdd"
assert "passdd" == db.password | zipline-trader | /zipline_trader-1.6.1-cp36-cp36m-win32.whl/zipline/config/data_backend.py | data_backend.py |
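# Illustrative config sketch (assumed file layout, derived from the keys read
# above): ZIPLINE_TRADER_CONFIG should point at a YAML file containing a
# "backend" section, e.g.
#
#     backend:
#       type: postgres
#       postgres:
#         host: 127.0.0.1
#         port: 5432
#         user: postgres
#         password: postgres
#
# Each value can also be overridden with the ZIPLINE_DATA_BACKEND_* environment
# variables handled by the properties above.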
import os
import yaml
CONFIG_PATH = os.environ.get("ZIPLINE_TRADER_CONFIG")
if CONFIG_PATH:
with open(CONFIG_PATH, mode='r') as f:
ZIPLINE_CONFIG = yaml.safe_load(f)
class AlpacaConfig:
if CONFIG_PATH and ZIPLINE_CONFIG.get("alpaca"):
al = ZIPLINE_CONFIG["alpaca"]
else:
al = {}
@property
def key(self):
if CONFIG_PATH and self.al:
return self.al["key_id"]
else:
return os.environ.get('APCA_API_KEY_ID')
@property
def secret(self):
if CONFIG_PATH and self.al:
return self.al["secret"]
else:
return os.environ.get('APCA_API_SECRET_KEY')
@property
def base_url(self):
if CONFIG_PATH and self.al:
return self.al["base_url"]
else:
return os.environ.get('APCA_API_BASE_URL')
@property
def universe(self):
if CONFIG_PATH and self.al:
return self.al["universe"]
else:
return os.environ.get('ZT_UNIVERSE')
@property
def custom_asset_list(self):
if CONFIG_PATH and self.al:
return self.al.get("custom_asset_list")
else:
return os.environ.get('ZT_CUSTOM_ASSET_LIST')
class AlphaVantage:
if CONFIG_PATH and ZIPLINE_CONFIG.get("alpha-vantage"):
av = ZIPLINE_CONFIG["alpha-vantage"]
else:
av = {}
@property
def sample_frequency(self):
"""
        How long to wait between samples, in seconds. The default for free
        accounts is one minute, so we can make 5 samples per minute.
        You can define it in the config file or override it with the
        AV_FREQ_SEC env variable.
:return:
"""
val = 60
if os.environ.get('AV_FREQ_SEC'):
val = int(os.environ.get('AV_FREQ_SEC'))
elif CONFIG_PATH and self.av.get('AV_FREQ_SEC'):
val = int(self.av.get('AV_FREQ_SEC'))
return val
@property
def max_calls_per_freq(self):
"""
        Maximum number of API calls allowed per frequency period.
        A free account can make 5 calls per minute.
        You can define it in the config file or override it with the
        AV_CALLS_PER_FREQ env variable.
:return:
"""
val = 5
if os.environ.get('AV_CALLS_PER_FREQ'):
val = int(os.environ.get('AV_CALLS_PER_FREQ'))
elif CONFIG_PATH and self.av.get('AV_CALLS_PER_FREQ'):
val = int(self.av.get('AV_CALLS_PER_FREQ'))
return val
@property
def breathing_space(self):
"""
        Extra slack, in seconds, so that sampling error does not push us over
        the API rate limit.
        You can define it in the config file or override it with the
        AV_TOLERANCE_SEC env variable.
:return:
"""
val = 1
if os.environ.get('AV_TOLERANCE_SEC'):
val = int(os.environ.get('AV_TOLERANCE_SEC'))
elif CONFIG_PATH and self.av.get('AV_TOLERANCE_SEC'):
val = int(self.av.get('AV_TOLERANCE_SEC'))
return val
@property
def api_key(self):
"""
        API key for Alpha Vantage.
        You can define it in the config file or override it with the
        ALPHAVANTAGE_API_KEY env variable.
:return:
"""
val = ''
if os.environ.get('ALPHAVANTAGE_API_KEY'):
val = os.environ.get('ALPHAVANTAGE_API_KEY')
elif CONFIG_PATH and self.av.get('ALPHAVANTAGE_API_KEY'):
val = self.av.get('ALPHAVANTAGE_API_KEY')
return val
def get_binance_config():
if CONFIG_PATH and ZIPLINE_CONFIG.get("binance"):
return ZIPLINE_CONFIG["binance"]
else:
return {}
if __name__ == '__main__':
print(ZIPLINE_CONFIG)
print(AlpacaConfig().key)
av_conf = AlphaVantage()
print(av_conf.sample_frequency)
print(av_conf.max_calls_per_freq)
print(get_binance_config()) | zipline-trader | /zipline_trader-1.6.1-cp36-cp36m-win32.whl/zipline/config/bundle.py | bundle.py |
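# Illustrative config sketch (assumed file layout, derived from the keys read
# above) for the same ZIPLINE_TRADER_CONFIG YAML file:
#
#     alpaca:
#       key_id: "<your key id>"
#       secret: "<your secret>"
#       base_url: https://paper-api.alpaca.markets
#       universe: SP500                        # example value
#       custom_asset_list: AAPL, MSFT, TSLA    # optional
#
#     alpha-vantage:
#       ALPHAVANTAGE_API_KEY: "<your key>"
#       AV_FREQ_SEC: 60
#       AV_CALLS_PER_FREQ: 5
#       AV_TOLERANCE_SEC: 1
#
# The "binance" section is returned verbatim by get_binance_config(), so its
# keys are whatever the binance bundle expects (not shown here).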
from abc import abstractmethod
from numpy import (
array,
full,
recarray,
searchsorted,
vstack,
where,
)
from pandas import NaT as pd_NaT
from zipline.errors import (
WindowLengthNotPositive,
UnsupportedDataType,
NonExistentAssetInTimeFrame,
NoFurtherDataError,
)
from zipline.lib.labelarray import LabelArray, labelarray_where
from zipline.utils.context_tricks import nop_context
from zipline.utils.input_validation import expect_dtypes, expect_types
from zipline.utils.numpy_utils import bool_dtype
from zipline.utils.pandas_utils import nearest_unequal_elements
from .downsample_helpers import (
select_sampling_indices,
expect_downsample_frequency,
)
from .sentinels import NotSpecified
from .term import Term
class PositiveWindowLengthMixin(Term):
"""
Validation mixin enforcing that a Term gets a positive WindowLength
"""
def _validate(self):
super(PositiveWindowLengthMixin, self)._validate()
if not self.windowed:
raise WindowLengthNotPositive(window_length=self.window_length)
class SingleInputMixin(Term):
"""
Validation mixin enforcing that a Term gets a length-1 inputs list.
"""
def _validate(self):
super(SingleInputMixin, self)._validate()
num_inputs = len(self.inputs)
if num_inputs != 1:
raise ValueError(
"{typename} expects only one input, "
"but received {num_inputs} instead.".format(
typename=type(self).__name__,
num_inputs=num_inputs
)
)
class StandardOutputs(Term):
"""
Validation mixin enforcing that a Term cannot produce non-standard outputs.
"""
def _validate(self):
super(StandardOutputs, self)._validate()
if self.outputs is not NotSpecified:
raise ValueError(
"{typename} does not support custom outputs,"
" but received custom outputs={outputs}.".format(
typename=type(self).__name__,
outputs=self.outputs,
)
)
class RestrictedDTypeMixin(Term):
"""
Validation mixin enforcing that a term has a specific dtype.
"""
ALLOWED_DTYPES = NotSpecified
def _validate(self):
super(RestrictedDTypeMixin, self)._validate()
assert self.ALLOWED_DTYPES is not NotSpecified, (
"ALLOWED_DTYPES not supplied on subclass "
"of RestrictedDTypeMixin: %s." % type(self).__name__
)
if self.dtype not in self.ALLOWED_DTYPES:
raise UnsupportedDataType(
typename=type(self).__name__,
dtype=self.dtype,
)
class CustomTermMixin(Term):
"""
Mixin for user-defined rolling-window Terms.
Implements `_compute` in terms of a user-defined `compute` function, which
is mapped over the input windows.
Used by CustomFactor, CustomFilter, CustomClassifier, etc.
"""
ctx = nop_context
def __new__(cls,
inputs=NotSpecified,
outputs=NotSpecified,
window_length=NotSpecified,
mask=NotSpecified,
dtype=NotSpecified,
missing_value=NotSpecified,
ndim=NotSpecified,
**kwargs):
unexpected_keys = set(kwargs) - set(cls.params)
if unexpected_keys:
raise TypeError(
"{termname} received unexpected keyword "
"arguments {unexpected}".format(
termname=cls.__name__,
unexpected={k: kwargs[k] for k in unexpected_keys},
)
)
return super(CustomTermMixin, cls).__new__(
cls,
inputs=inputs,
outputs=outputs,
window_length=window_length,
mask=mask,
dtype=dtype,
missing_value=missing_value,
ndim=ndim,
**kwargs
)
def compute(self, today, assets, out, *arrays):
"""
Override this method with a function that writes a value into `out`.
"""
raise NotImplementedError(
"{name} must define a compute method".format(
name=type(self).__name__
)
)
def _allocate_output(self, windows, shape):
"""
Allocate an output array whose rows should be passed to `self.compute`.
The resulting array must have a shape of ``shape``.
If we have standard outputs (i.e. self.outputs is NotSpecified), the
default is an empty ndarray whose dtype is ``self.dtype``.
If we have an outputs tuple, the default is an empty recarray with
``self.outputs`` as field names. Each field will have dtype
``self.dtype``.
This can be overridden to control the kind of array constructed
(e.g. to produce a LabelArray instead of an ndarray).
"""
missing_value = self.missing_value
outputs = self.outputs
if outputs is not NotSpecified:
out = recarray(
shape,
formats=[self.dtype.str] * len(outputs),
names=outputs,
)
out[:] = missing_value
else:
out = full(shape, missing_value, dtype=self.dtype)
return out
def _format_inputs(self, windows, column_mask):
inputs = []
for input_ in windows:
window = next(input_)
if window.shape[1] == 1:
# Do not mask single-column inputs.
inputs.append(window)
else:
inputs.append(window[:, column_mask])
return inputs
def _compute(self, windows, dates, assets, mask):
"""
Call the user's `compute` function on each window with a pre-built
output array.
"""
format_inputs = self._format_inputs
compute = self.compute
params = self.params
ndim = self.ndim
shape = (len(mask), 1) if ndim == 1 else mask.shape
out = self._allocate_output(windows, shape)
with self.ctx:
for idx, date in enumerate(dates):
# Never apply a mask to 1D outputs.
out_mask = array([True]) if ndim == 1 else mask[idx]
# Mask our inputs as usual.
inputs_mask = mask[idx]
masked_assets = assets[inputs_mask]
out_row = out[idx][out_mask]
inputs = format_inputs(windows, inputs_mask)
compute(date, masked_assets, out_row, *inputs, **params)
out[idx][out_mask] = out_row
return out
def graph_repr(self):
"""Short repr to use when rendering Pipeline graphs."""
# Graphviz interprets `\l` as "divide label into lines, left-justified"
return type(self).__name__ + ':\\l window_length: %d\\l' % \
self.window_length
class LatestMixin(SingleInputMixin):
"""
Common behavior for :attr:`zipline.pipeline.data.BoundColumn.latest`.
Given a :class:`~zipline.pipeline.data.DataSet` named ``MyData`` with a
column ``col`` of numeric dtype, the following expression:
.. code-block:: python
factor = MyData.col.latest
is equivalent to:
.. code-block:: python
class Latest(CustomFactor):
inputs = [MyData.col]
window_length = 1
def compute(self, today, assets, out, data):
out[:] = data[-1]
factor = Latest()
The behavior is the same for columns of boolean or string dtype, except the
resulting expression will be a :class:`~zipline.pipeline.CustomFilter` for
boolean columns, and the resulting object will be a
:class:`~zipline.pipeline.CustomClassifier` for string or integer columns.
"""
window_length = 1
def compute(self, today, assets, out, data):
out[:] = data[-1]
def _validate(self):
super(LatestMixin, self)._validate()
if self.inputs[0].dtype != self.dtype:
raise TypeError(
"{name} expected an input of dtype {expected}, "
"but got {actual} instead.".format(
name=type(self).__name__,
expected=self.dtype,
actual=self.inputs[0].dtype,
)
)
def graph_repr(self):
return "Latest"
class UniversalMixin(Term):
"""
Base class for "universal" mixins.
Universal mixins are used to implement expressions that need separate
subclasses for each of the ComputableTerm subclasses (Factor, Filter, and
Classifier). Such expressions are usually return types of methods of
ComputableTerm, such as `downsample()`, `alias()`, or `fillna()`.
A type may only inherit from one UniversalMixin.
"""
# Memo dict mapping pairs of (mixin_type, principal_type) to subtypes.
_UNIVERSAL_MIXIN_SUBTYPES = {}
@staticmethod
@abstractmethod
def _universal_mixin_type():
raise NotImplementedError('_universal_mixin_type')
@staticmethod
@abstractmethod
def _universal_mixin_specialization_name(principal_type):
raise NotImplementedError('_universal_mixin_specialization_name')
@classmethod
def universal_mixin_specialization(cls, principal_type):
"""
Create a new subtype of `principal_type` that adds this mixin to
``principal_type``. ``principal_type`` will be one of Factor, Filter,
or Classifier.
"""
mixin = cls._universal_mixin_type()
memo_key = (mixin, principal_type)
try:
return cls._UNIVERSAL_MIXIN_SUBTYPES[memo_key]
except KeyError:
new_type = type(
mixin._universal_mixin_specialization_name(principal_type),
(mixin, principal_type),
{'__module__': principal_type.__module__},
)
cls._UNIVERSAL_MIXIN_SUBTYPES[memo_key] = new_type
return new_type
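    # Illustrative example (not executed here): specializing a universal mixin
    # against a principal term type yields a memoized subclass whose name is
    # derived from the mixin, e.g.
    #
    #     AliasedFactor = AliasedMixin.universal_mixin_specialization(Factor)
    #     AliasedFactor.__name__   # -> 'AliasedFactor'
    #
    # where Factor stands for the pipeline Factor base class; the same pattern
    # applies to Filter and Classifier.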
class AliasedMixin(SingleInputMixin, UniversalMixin):
"""
Mixin for aliased terms.
"""
def __new__(cls, term, name):
return super(AliasedMixin, cls).__new__(
cls,
inputs=(term,),
outputs=term.outputs,
window_length=0,
name=name,
dtype=term.dtype,
missing_value=term.missing_value,
ndim=term.ndim,
window_safe=term.window_safe,
)
def _init(self, name, *args, **kwargs):
self.name = name
return super(AliasedMixin, self)._init(*args, **kwargs)
@classmethod
def _static_identity(cls, name, *args, **kwargs):
return (
super(AliasedMixin, cls)._static_identity(*args, **kwargs),
name,
)
def _compute(self, inputs, dates, assets, mask):
return inputs[0]
def __repr__(self):
return '{type}({inner}, name={name!r})'.format(
type=type(self).__name__,
inner=self.inputs[0].recursive_repr(),
name=self.name,
)
def graph_repr(self):
"""Short repr to use when rendering Pipeline graphs."""
return self.name
@staticmethod
def _universal_mixin_type():
return AliasedMixin
@staticmethod
def _universal_mixin_specialization_name(principal_type):
return 'Aliased' + principal_type.__name__
class DownsampledMixin(StandardOutputs, UniversalMixin):
"""Universal mixin for downsampled terms.
A downsampled term is a wrapper around the "real" term that performs actual
computation. The downsampler is responsible for calling the real term's
`compute` method at selected intervals and forward-filling the computed
values.
Downsampling is not currently supported for terms with multiple outputs.
"""
# There's no reason to take a window of a downsampled term. The whole
# point is that you're re-using the same result multiple times.
window_safe = False
@expect_types(term=Term)
@expect_downsample_frequency
def __new__(cls, term, frequency):
return super(DownsampledMixin, cls).__new__(
cls,
inputs=term.inputs,
outputs=term.outputs,
window_length=term.window_length,
mask=term.mask,
frequency=frequency,
wrapped_term=term,
dtype=term.dtype,
missing_value=term.missing_value,
ndim=term.ndim,
)
def _init(self, frequency, wrapped_term, *args, **kwargs):
self._frequency = frequency
self._wrapped_term = wrapped_term
return super(DownsampledMixin, self)._init(*args, **kwargs)
@classmethod
def _static_identity(cls, frequency, wrapped_term, *args, **kwargs):
return (
super(DownsampledMixin, cls)._static_identity(*args, **kwargs),
frequency,
wrapped_term,
)
def compute_extra_rows(self,
all_dates,
start_date,
end_date,
min_extra_rows):
"""
Ensure that min_extra_rows pushes us back to a computation date.
Parameters
----------
all_dates : pd.DatetimeIndex
The trading sessions against which ``self`` will be computed.
start_date : pd.Timestamp
The first date for which final output is requested.
end_date : pd.Timestamp
The last date for which final output is requested.
min_extra_rows : int
The minimum number of extra rows required of ``self``, as
determined by other terms that depend on ``self``.
Returns
-------
extra_rows : int
The number of extra rows to compute. This will be the minimum
number of rows required to make our computed start_date fall on a
recomputation date.
"""
try:
current_start_pos = all_dates.get_loc(start_date) - min_extra_rows
if current_start_pos < 0:
raise NoFurtherDataError.from_lookback_window(
initial_message="Insufficient data to compute Pipeline:",
first_date=all_dates[0],
lookback_start=start_date,
lookback_length=min_extra_rows,
)
except KeyError:
before, after = nearest_unequal_elements(all_dates, start_date)
raise ValueError(
"Pipeline start_date {start_date} is not in calendar.\n"
"Latest date before start_date is {before}.\n"
"Earliest date after start_date is {after}.".format(
start_date=start_date,
before=before,
after=after,
)
)
# Our possible target dates are all the dates on or before the current
# starting position.
# TODO: Consider bounding this below by self.window_length
candidates = all_dates[:current_start_pos + 1]
# Choose the latest date in the candidates that is the start of a new
# period at our frequency.
choices = select_sampling_indices(candidates, self._frequency)
        # If we have choices, the last choice is the first date of the
        # period containing current_start_date. Choose it.
new_start_date = candidates[choices[-1]]
# Add the difference between the new and old start dates to get the
# number of rows for the new start_date.
new_start_pos = all_dates.get_loc(new_start_date)
assert new_start_pos <= current_start_pos, \
"Computed negative extra rows!"
return min_extra_rows + (current_start_pos - new_start_pos)
def _compute(self, inputs, dates, assets, mask):
"""
Compute by delegating to self._wrapped_term._compute on sample dates.
On non-sample dates, forward-fill from previously-computed samples.
"""
to_sample = dates[select_sampling_indices(dates, self._frequency)]
assert to_sample[0] == dates[0], \
"Misaligned sampling dates in %s." % type(self).__name__
real_compute = self._wrapped_term._compute
# Inputs will contain different kinds of values depending on whether or
# not we're a windowed computation.
# If we're windowed, then `inputs` is a list of iterators of ndarrays.
# If we're not windowed, then `inputs` is just a list of ndarrays.
# There are two things we care about doing with the input:
# 1. Preparing an input to be passed to our wrapped term.
# 2. Skipping an input if we're going to use an already-computed row.
# We perform these actions differently based on the expected kind of
# input, and we encapsulate these actions with closures so that we
# don't clutter the code below with lots of branching.
if self.windowed:
# If we're windowed, inputs are stateful AdjustedArrays. We don't
# need to do any preparation before forwarding to real_compute, but
# we need to call `next` on them if we want to skip an iteration.
def prepare_inputs():
return inputs
def skip_this_input():
for w in inputs:
next(w)
else:
# If we're not windowed, inputs are just ndarrays. We need to
# slice out a single row when forwarding to real_compute, but we
# don't need to do anything to skip an input.
def prepare_inputs():
# i is the loop iteration variable below.
return [a[[i]] for a in inputs]
def skip_this_input():
pass
results = []
samples = iter(to_sample)
next_sample = next(samples)
for i, compute_date in enumerate(dates):
if next_sample == compute_date:
results.append(
real_compute(
prepare_inputs(),
dates[i:i + 1],
assets,
mask[i:i + 1],
)
)
try:
next_sample = next(samples)
except StopIteration:
# No more samples to take. Set next_sample to NaT, which
# compares False with any other datetime.
next_sample = pd_NaT
else:
skip_this_input()
# Copy results from previous sample period.
results.append(results[-1])
# We should have exhausted our sample dates.
try:
next_sample = next(samples)
except StopIteration:
pass
else:
raise AssertionError("Unconsumed sample date: %s" % next_sample)
# Concatenate stored results.
return vstack(results)
@staticmethod
def _universal_mixin_type():
return DownsampledMixin
@staticmethod
def _universal_mixin_specialization_name(principal_type):
return 'Downsampled' + principal_type.__name__
class SliceMixin(UniversalMixin):
"""Universal mixin for taking columnar slices of terms.
Parameters
----------
term : zipline.pipeline.Term
The term from which to extract a column of data.
asset : zipline.assets.Asset
The asset corresponding to the column of `term` to be extracted.
Notes
-----
Users should rarely construct instances of `Slice` directly. Instead, they
should construct instances via indexing, e.g. `MyFactor()[Asset(24)]`.
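Example (illustrative only; the factor and asset below are hypothetical):
>>> returns = Returns(window_length=2)  # doctest: +SKIP
>>> aapl_returns = returns[aapl_asset]  # yields a Slice  # doctest: +SKIP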
"""
def __new__(cls, term, asset):
return super(SliceMixin, cls).__new__(
cls,
asset=asset,
inputs=[term],
window_length=0,
mask=term.mask,
dtype=term.dtype,
missing_value=term.missing_value,
window_safe=term.window_safe,
ndim=1,
)
def __repr__(self):
return "{parent_term}[{asset}]".format(
parent_term=self.inputs[0].recursive_repr(),
asset=self._asset,
)
def _init(self, asset, *args, **kwargs):
self._asset = asset
return super(SliceMixin, self)._init(*args, **kwargs)
@classmethod
def _static_identity(cls, asset, *args, **kwargs):
return (
super(SliceMixin, cls)._static_identity(*args, **kwargs),
asset,
)
def _compute(self, windows, dates, assets, mask):
asset = self._asset
asset_column = searchsorted(assets.values, asset.sid)
if assets[asset_column] != asset.sid:
raise NonExistentAssetInTimeFrame(
asset=asset, start_date=dates[0], end_date=dates[-1],
)
# Return a 2D array with one column rather than a 1D array of the
# column.
return windows[0][:, [asset_column]]
@property
def asset(self):
"""Get the asset whose data is selected by this slice.
"""
return self._asset
@staticmethod
def _universal_mixin_type():
return SliceMixin
@staticmethod
def _universal_mixin_specialization_name(principal_type):
return principal_type.__name__ + 'Slice'
class IfElseMixin(UniversalMixin):
"""Universal mixin for types returned by Filter.if_else.
"""
window_length = 0
@expect_dtypes(condition=bool_dtype)
def __new__(cls, condition, if_true, if_false):
return super(IfElseMixin, cls).__new__(
cls,
inputs=[condition, if_true, if_false],
dtype=if_true.dtype,
ndim=if_true.ndim,
missing_value=if_true.missing_value,
window_safe=all((
condition.window_safe,
if_true.window_safe,
if_false.window_safe,
)),
outputs=if_true.outputs,
)
def _compute(self, inputs, assets, dates, mask):
if self.dtype == object:
return labelarray_where(inputs[0], inputs[1], inputs[2])
return where(inputs[0], inputs[1], inputs[2])
@staticmethod
def _universal_mixin_type():
return IfElseMixin
@staticmethod
def _universal_mixin_specialization_name(principal_type):
return 'IfElse' + principal_type.__name__
class ConstantMixin(StandardOutputs, UniversalMixin):
"""Universal mixin for terms that produce a known constant value.
"""
window_length = 0
inputs = ()
params = ('const',)
def _compute(self, inputs, assets, dates, mask):
constant = self.params['const']
out = full(mask.shape, constant, dtype=self.dtype)
if self.dtype == object:
return LabelArray(
out,
categories=[constant],
missing_value=self.missing_value,
)
return out
@staticmethod
def _universal_mixin_type():
return ConstantMixin
@staticmethod
def _universal_mixin_specialization_name(principal_type):
return 'Constant' + principal_type.__name__ | zipline-trader | /zipline_trader-1.6.1-cp36-cp36m-win32.whl/zipline/pipeline/mixins.py | mixins.py |
from __future__ import unicode_literals
from contextlib import contextmanager
import errno
from functools import partial
from io import BytesIO
from subprocess import Popen, PIPE
from networkx import topological_sort
from six import iteritems
from zipline.pipeline.data import BoundColumn
from zipline.pipeline import Filter, Factor, Classifier, Term
from zipline.pipeline.term import AssetExists
class NoIPython(Exception):
pass
def delimit(delimiters, content):
"""
Surround `content` with the first and last characters of `delimiters`.
>>> delimit('[]', "foo") # doctest: +SKIP
'[foo]'
>>> delimit('""', "foo") # doctest: +SKIP
'"foo"'
"""
if len(delimiters) != 2:
raise ValueError(
"`delimiters` must be of length 2. Got %r" % delimiters
)
return ''.join([delimiters[0], content, delimiters[1]])
quote = partial(delimit, '""')
bracket = partial(delimit, '[]')
def begin_graph(f, name, **attrs):
writeln(f, "strict digraph %s {" % name)
writeln(f, "graph {}".format(format_attrs(attrs)))
def begin_cluster(f, name, **attrs):
attrs.setdefault("label", quote(name))
writeln(f, "subgraph cluster_%s {" % name)
writeln(f, "graph {}".format(format_attrs(attrs)))
def end_graph(f):
writeln(f, '}')
@contextmanager
def graph(f, name, **attrs):
begin_graph(f, name, **attrs)
yield
end_graph(f)
@contextmanager
def cluster(f, name, **attrs):
begin_cluster(f, name, **attrs)
yield
end_graph(f)
def roots(g):
"Get nodes from graph G with indegree 0"
return set(n for n, d in iteritems(g.in_degree()) if d == 0)
def filter_nodes(include_asset_exists, nodes):
if include_asset_exists:
return nodes
return filter(lambda n: n is not AssetExists(), nodes)
def _render(g, out, format_, include_asset_exists=False):
"""
Draw `g` as a graph to `out`, in format `format`.
Parameters
----------
g : zipline.pipeline.graph.TermGraph
Graph to render.
out : file-like object
format_ : str {'png', 'svg'}
Output format.
include_asset_exists : bool
Whether to include `AssetExists()` nodes in the rendered graph.
"""
graph_attrs = {'rankdir': 'TB', 'splines': 'ortho'}
cluster_attrs = {'style': 'filled', 'color': 'lightgoldenrod1'}
in_nodes = g.loadable_terms
out_nodes = list(g.outputs.values())
f = BytesIO()
with graph(f, "G", **graph_attrs):
# Write outputs cluster.
with cluster(f, 'Output', labelloc='b', **cluster_attrs):
for term in filter_nodes(include_asset_exists, out_nodes):
add_term_node(f, term)
# Write inputs cluster.
with cluster(f, 'Input', **cluster_attrs):
for term in filter_nodes(include_asset_exists, in_nodes):
add_term_node(f, term)
# Write intermediate results.
for term in filter_nodes(include_asset_exists,
topological_sort(g.graph)):
if term in in_nodes or term in out_nodes:
continue
add_term_node(f, term)
# Write edges
for source, dest in g.graph.edges():
if source is AssetExists() and not include_asset_exists:
continue
add_edge(f, id(source), id(dest))
cmd = ['dot', '-T', format_]
try:
proc = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
except OSError as e:
if e.errno == errno.ENOENT:
raise RuntimeError(
"Couldn't find `dot` graph layout program. "
"Make sure Graphviz is installed and `dot` is on your path."
)
else:
raise
f.seek(0)
proc_stdout, proc_stderr = proc.communicate(f.read())
if proc_stderr:
raise RuntimeError(
"Error(s) while rendering graph: %s" % proc_stderr.decode('utf-8')
)
out.write(proc_stdout)
def display_graph(g, format='svg', include_asset_exists=False):
"""
Display a TermGraph interactively from within IPython.
"""
try:
import IPython.display as display
except ImportError:
raise NoIPython("IPython is not installed. Can't display graph.")
if format == 'svg':
display_cls = display.SVG
elif format in ("jpeg", "png"):
display_cls = partial(display.Image, format=format, embed=True)
out = BytesIO()
_render(g, out, format, include_asset_exists=include_asset_exists)
return display_cls(data=out.getvalue())
def writeln(f, s):
f.write((s + '\n').encode('utf-8'))
def fmt(obj):
if isinstance(obj, Term):
r = obj.graph_repr()
else:
r = obj
return '"%s"' % r
def add_term_node(f, term):
declare_node(f, id(term), attrs_for_node(term))
def declare_node(f, name, attributes):
writeln(f, "{0} {1};".format(name, format_attrs(attributes)))
def add_edge(f, source, dest):
writeln(f, "{0} -> {1};".format(source, dest))
def attrs_for_node(term, **overrides):
attrs = {
'shape': 'box',
'colorscheme': 'pastel19',
'style': 'filled',
'label': fmt(term),
}
if isinstance(term, BoundColumn):
attrs['fillcolor'] = '1'
if isinstance(term, Factor):
attrs['fillcolor'] = '2'
elif isinstance(term, Filter):
attrs['fillcolor'] = '3'
elif isinstance(term, Classifier):
attrs['fillcolor'] = '4'
attrs.update(**overrides or {})
return attrs
def format_attrs(attrs):
"""
Format key, value pairs from attrs into graphviz attrs format
Examples
--------
>>> format_attrs({'key1': 'value1', 'key2': 'value2'}) # doctest: +SKIP
'[key1=value1, key2=value2]'
"""
if not attrs:
return ''
entries = ['='.join((key, value)) for key, value in iteritems(attrs)]
return '[' + ', '.join(entries) + ']' | zipline-trader | /zipline_trader-1.6.1-cp36-cp36m-win32.whl/zipline/pipeline/visualize.py | visualize.py |
import re
from itertools import chain
from numbers import Number
import numexpr
from numexpr.necompiler import getExprNames
from numpy import (
full,
inf,
)
from zipline.pipeline.term import Term, ComputableTerm
from zipline.utils.numpy_utils import bool_dtype
_VARIABLE_NAME_RE = re.compile("^(x_)([0-9]+)$")
# Map from op symbol to equivalent Python magic method name.
ops_to_methods = {
'+': '__add__',
'-': '__sub__',
'*': '__mul__',
'/': '__div__',
'%': '__mod__',
'**': '__pow__',
'&': '__and__',
'|': '__or__',
'^': '__xor__',
'<': '__lt__',
'<=': '__le__',
'==': '__eq__',
'!=': '__ne__',
'>=': '__ge__',
'>': '__gt__',
}
# Map from method name to op symbol.
methods_to_ops = {v: k for k, v in ops_to_methods.items()}
# Map from op symbol to equivalent Python magic method name after flipping
# arguments.
ops_to_commuted_methods = {
'+': '__radd__',
'-': '__rsub__',
'*': '__rmul__',
'/': '__rdiv__',
'%': '__rmod__',
'**': '__rpow__',
'&': '__rand__',
'|': '__ror__',
'^': '__rxor__',
'<': '__gt__',
'<=': '__ge__',
'==': '__eq__',
'!=': '__ne__',
'>=': '__le__',
'>': '__lt__',
}
unary_ops_to_methods = {
'-': '__neg__',
'~': '__invert__',
}
UNARY_OPS = {'-'}
MATH_BINOPS = {'+', '-', '*', '/', '**', '%'}
FILTER_BINOPS = {'&', '|'} # NumExpr doesn't support xor.
COMPARISONS = {'<', '<=', '!=', '>=', '>', '=='}
NUMEXPR_MATH_FUNCS = {
'sin',
'cos',
'tan',
'arcsin',
'arccos',
'arctan',
'sinh',
'cosh',
'tanh',
'arcsinh',
'arccosh',
'arctanh',
'log',
'log10',
'log1p',
'exp',
'expm1',
'sqrt',
'abs',
}
NPY_MAXARGS = 32
def _ensure_element(tup, elem):
"""
Create a tuple containing all elements of tup, plus elem.
Returns the new tuple and the index of elem in the new tuple.
"""
try:
return tup, tup.index(elem)
except ValueError:
return tuple(chain(tup, (elem,))), len(tup)
class BadBinaryOperator(TypeError):
"""
Called when a bad binary operation is encountered.
Parameters
----------
op : str
The attempted operation
left : zipline.computable.Term
The left hand side of the operation.
right : zipline.computable.Term
The right hand side of the operation.
"""
def __init__(self, op, left, right):
super(BadBinaryOperator, self).__init__(
"Can't compute {left} {op} {right}".format(
op=op,
left=type(left).__name__,
right=type(right).__name__,
)
)
def method_name_for_op(op, commute=False):
"""
Get the name of the Python magic method corresponding to `op`.
Parameters
----------
op : str {'+','-','*', '/','**','&','|','^','<','<=','==','!=','>=','>'}
The requested operation.
commute : bool
Whether to return the name of an equivalent method after flipping args.
Returns
-------
method_name : str
The name of the Python magic method corresponding to `op`.
If `commute` is True, returns the name of a method equivalent to `op`
with inputs flipped.
Examples
--------
>>> method_name_for_op('+')
'__add__'
>>> method_name_for_op('+', commute=True)
'__radd__'
>>> method_name_for_op('>')
'__gt__'
>>> method_name_for_op('>', commute=True)
'__lt__'
"""
if commute:
return ops_to_commuted_methods[op]
return ops_to_methods[op]
def unary_op_name(op):
return unary_ops_to_methods[op]
def is_comparison(op):
return op in COMPARISONS
class NumericalExpression(ComputableTerm):
"""
Term binding to a numexpr expression.
Parameters
----------
expr : string
A string suitable for passing to numexpr. All variables in 'expr'
should be of the form "x_i", where i is the index of the corresponding
factor input in 'binds'.
binds : tuple
A tuple of factors to use as inputs.
dtype : np.dtype
The dtype for the expression.
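Example (illustrative; instances are normally created by operator
overloads on existing terms rather than constructed directly):
>>> f = SomeFactor()  # hypothetical factor  # doctest: +SKIP
>>> expr = (f + 1) / 2  # a NumericalExpression subclass binding f to "x_0"  # doctest: +SKIP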
"""
window_length = 0
def __new__(cls, expr, binds, dtype):
# We always allow filters to be used in windowed computations.
# Otherwise, an expression is window_safe if all its constituents are
# window_safe.
window_safe = (
(dtype == bool_dtype) or all(t.window_safe for t in binds)
)
return super(NumericalExpression, cls).__new__(
cls,
inputs=binds,
expr=expr,
dtype=dtype,
window_safe=window_safe,
)
def _init(self, expr, *args, **kwargs):
self._expr = expr
return super(NumericalExpression, self)._init(*args, **kwargs)
@classmethod
def _static_identity(cls, expr, *args, **kwargs):
return (
super(NumericalExpression, cls)._static_identity(*args, **kwargs),
expr,
)
def _validate(self):
"""
Ensure that our expression string has variables of the form x_0, x_1,
... x_(N - 1), where N is the length of our inputs.
"""
variable_names, _unused = getExprNames(self._expr, {})
expr_indices = []
for name in variable_names:
if name == 'inf':
continue
match = _VARIABLE_NAME_RE.match(name)
if not match:
raise ValueError("%r is not a valid variable name" % name)
expr_indices.append(int(match.group(2)))
expr_indices.sort()
expected_indices = list(range(len(self.inputs)))
if expr_indices != expected_indices:
raise ValueError(
"Expected %s for variable indices, but got %s" % (
expected_indices, expr_indices,
)
)
super(NumericalExpression, self)._validate()
def _compute(self, arrays, dates, assets, mask):
"""
Compute our stored expression string with numexpr.
"""
out = full(mask.shape, self.missing_value, dtype=self.dtype)
# This writes directly into our output buffer.
numexpr.evaluate(
self._expr,
local_dict={
"x_%d" % idx: array
for idx, array in enumerate(arrays)
},
global_dict={'inf': inf},
out=out,
)
return out
def _rebind_variables(self, new_inputs):
"""
Return self._expr with all variables rebound to the indices implied by
new_inputs.
"""
expr = self._expr
# If we have 11+ variables, some of our variable names may be
# substrings of other variable names. For example, we might have x_1,
# x_10, and x_100. By enumerating in reverse order, we ensure that
# every variable name which is a substring of another variable name is
# processed after the variable of which it is a substring. This
# guarantees that the substitution of any given variable index only
# ever affects exactly its own index. For example, if we have variables
# with indices going up to 100, we will process all of the x_1xx names
# before x_1x, which will be before x_1, so the substitution of x_1
# will not affect x_1x, which will not affect x_1xx.
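# Illustrative sketch (hypothetical inputs): if self.inputs == (a, b)
# and new_inputs == (b, a), then "x_0 + x_1" becomes
# "x_temp_1 + x_temp_0" after this loop and "x_1 + x_0" after the
# final replace below.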
for idx, input_ in reversed(list(enumerate(self.inputs))):
old_varname = "x_%d" % idx
# Temporarily rebind to x_temp_N so that we don't overwrite the
# same value multiple times.
temp_new_varname = "x_temp_%d" % new_inputs.index(input_)
expr = expr.replace(old_varname, temp_new_varname)
# Clear out the temp variables now that we've finished iteration.
return expr.replace("_temp_", "_")
def _merge_expressions(self, other):
"""
Merge the inputs of two NumericalExpressions into a single input tuple,
rewriting their respective string expressions to make input names
resolve correctly.
Returns a tuple of (new_self_expr, new_other_expr, new_inputs)
"""
new_inputs = tuple(set(self.inputs).union(other.inputs))
new_self_expr = self._rebind_variables(new_inputs)
new_other_expr = other._rebind_variables(new_inputs)
return new_self_expr, new_other_expr, new_inputs
def build_binary_op(self, op, other):
"""
Compute new expression strings and a new inputs tuple for combining
self and other with a binary operator.
"""
if isinstance(other, NumericalExpression):
self_expr, other_expr, new_inputs = self._merge_expressions(other)
elif isinstance(other, Term):
self_expr = self._expr
new_inputs, other_idx = _ensure_element(self.inputs, other)
other_expr = "x_%d" % other_idx
elif isinstance(other, Number):
self_expr = self._expr
other_expr = str(other)
new_inputs = self.inputs
else:
raise BadBinaryOperator(op, self, other)
# If the merged inputs would be too many for numexpr, then don't merge
# them:
if len(new_inputs) >= NPY_MAXARGS:
self_expr = "x_0"
other_expr = "x_1"
new_inputs = self, other
return self_expr, other_expr, new_inputs
@property
def bindings(self):
return {
"x_%d" % i: input_
for i, input_ in enumerate(self.inputs)
}
def __repr__(self):
return "{typename}(expr='{expr}', bindings={bindings})".format(
typename=type(self).__name__,
expr=self._expr,
bindings=self.bindings,
)
def graph_repr(self):
"""Short repr to use when rendering Pipeline graphs."""
# Replace any floating point numbers in the expression
# with their scientific notation
final = re.sub(r"[-+]?\d*\.\d+",
lambda x: format(float(x.group(0)), '.2E'),
self._expr)
# Graphviz interprets `\l` as "divide label into lines, left-justified"
return "Expression:\\l {}\\l".format(
final,
) | zipline-trader | /zipline_trader-1.6.1-cp36-cp36m-win32.whl/zipline/pipeline/expression.py | expression.py |
import six
from zipline.errors import UnsupportedPipelineOutput
from zipline.utils.input_validation import (
expect_element,
expect_types,
optional,
)
from .domain import Domain, GENERIC, infer_domain
from .graph import ExecutionPlan, TermGraph, SCREEN_NAME
from .filters import Filter
from .term import AssetExists, ComputableTerm, Term
class Pipeline(object):
"""
A Pipeline object represents a collection of named expressions to be
compiled and executed by a PipelineEngine.
A Pipeline has two important attributes: 'columns', a dictionary of named
:class:`~zipline.pipeline.Term` instances, and 'screen', a
:class:`~zipline.pipeline.Filter` representing criteria for
including an asset in the results of a Pipeline.
To compute a pipeline in the context of a TradingAlgorithm, users must call
``attach_pipeline`` in their ``initialize`` function to register that the
pipeline should be computed each trading day. The most recent outputs of an
attached pipeline can be retrieved by calling ``pipeline_output`` from
``handle_data``, ``before_trading_start``, or a scheduled function.
Parameters
----------
columns : dict, optional
Initial columns.
screen : zipline.pipeline.Filter, optional
Initial screen.
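Examples
--------
A minimal sketch of constructing a pipeline (uses the standard
``USEquityPricing`` dataset and ``SimpleMovingAverage`` factor for
illustration):
>>> from zipline.pipeline import Pipeline  # doctest: +SKIP
>>> from zipline.pipeline.data import USEquityPricing  # doctest: +SKIP
>>> from zipline.pipeline.factors import SimpleMovingAverage  # doctest: +SKIP
>>> sma = SimpleMovingAverage(inputs=[USEquityPricing.close], window_length=10)  # doctest: +SKIP
>>> pipe = Pipeline(columns={'sma_10': sma}, screen=sma > 5)  # doctest: +SKIP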
"""
__slots__ = ('_columns', '_screen', '_domain', '__weakref__')
@expect_types(
columns=optional(dict),
screen=optional(Filter),
domain=Domain
)
def __init__(self, columns=None, screen=None, domain=GENERIC):
if columns is None:
columns = {}
validate_column = self.validate_column
for column_name, term in columns.items():
validate_column(column_name, term)
if not isinstance(term, ComputableTerm):
raise TypeError(
"Column {column_name!r} contains an invalid pipeline term "
"({term}). Did you mean to append '.latest'?".format(
column_name=column_name, term=term,
)
)
self._columns = columns
self._screen = screen
self._domain = domain
@property
def columns(self):
"""The output columns of this pipeline.
Returns
-------
columns : dict[str, zipline.pipeline.ComputableTerm]
Map from column name to expression computing that column's output.
"""
return self._columns
@property
def screen(self):
"""
The screen of this pipeline.
Returns
-------
screen : zipline.pipeline.Filter or None
Term defining the screen for this pipeline. If ``screen`` is a
filter, rows that do not pass the filter (i.e., rows for which the
filter computed ``False``) will be dropped from the output of this
pipeline before returning results.
Notes
-----
Setting a screen on a Pipeline does not change the values produced for
any rows: it only affects whether a given row is returned. Computing a
pipeline with a screen is logically equivalent to computing the
pipeline without the screen and then, as a post-processing-step,
filtering out any rows for which the screen computed ``False``.
"""
return self._screen
@expect_types(term=Term, name=str)
def add(self, term, name, overwrite=False):
"""Add a column.
The results of computing ``term`` will show up as a column in the
DataFrame produced by running this pipeline.
Parameters
----------
term : zipline.pipeline.Term
A Filter, Factor, or Classifier to add to the pipeline.
name : str
Name of the column to add.
overwrite : bool
Whether to overwrite the existing entry if we already have a column
named `name`.
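Example (illustrative):
>>> pipe = Pipeline()  # doctest: +SKIP
>>> pipe.add(USEquityPricing.close.latest, 'close')  # doctest: +SKIP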
"""
self.validate_column(name, term)
columns = self.columns
if name in columns:
if overwrite:
self.remove(name)
else:
raise KeyError("Column '{}' already exists.".format(name))
if not isinstance(term, ComputableTerm):
raise TypeError(
"{term} is not a valid pipeline column. Did you mean to "
"append '.latest'?".format(term=term)
)
self._columns[name] = term
@expect_types(name=str)
def remove(self, name):
"""Remove a column.
Parameters
----------
name : str
The name of the column to remove.
Raises
------
KeyError
If `name` is not in self.columns.
Returns
-------
removed : zipline.pipeline.Term
The removed term.
"""
return self.columns.pop(name)
@expect_types(screen=Filter, overwrite=(bool, int))
def set_screen(self, screen, overwrite=False):
"""Set a screen on this Pipeline.
Parameters
----------
screen : zipline.pipeline.Filter
The filter to apply as a screen.
overwrite : bool
Whether to overwrite any existing screen. If overwrite is False
and self.screen is not None, we raise an error.
"""
if self._screen is not None and not overwrite:
raise ValueError(
"set_screen() called with overwrite=False and screen already "
"set.\n"
"If you want to apply multiple filters as a screen use "
"set_screen(filter1 & filter2 & ...).\n"
"If you want to replace the previous screen with a new one, "
"use set_screen(new_filter, overwrite=True)."
)
self._screen = screen
def to_execution_plan(self,
domain,
default_screen,
start_date,
end_date):
"""
Compile into an ExecutionPlan.
Parameters
----------
domain : zipline.pipeline.domain.Domain
Domain on which the pipeline will be executed.
default_screen : zipline.pipeline.Term
Term to use as a screen if self.screen is None.
start_date : pd.Timestamp
The first date of requested output.
end_date : pd.Timestamp
The last date of requested output.
Returns
-------
graph : zipline.pipeline.graph.ExecutionPlan
Graph encoding term dependencies, including metadata about extra
row requirements.
"""
if self._domain is not GENERIC and self._domain is not domain:
raise AssertionError(
"Attempted to compile Pipeline with domain {} to execution "
"plan with different domain {}.".format(self._domain, domain)
)
return ExecutionPlan(
domain=domain,
terms=self._prepare_graph_terms(default_screen),
start_date=start_date,
end_date=end_date,
)
def to_simple_graph(self, default_screen):
"""
Compile into a simple TermGraph with no extra row metadata.
Parameters
----------
default_screen : zipline.pipeline.Term
Term to use as a screen if self.screen is None.
Returns
-------
graph : zipline.pipeline.graph.TermGraph
Graph encoding term dependencies.
"""
return TermGraph(self._prepare_graph_terms(default_screen))
def _prepare_graph_terms(self, default_screen):
"""Helper for to_graph and to_execution_plan."""
columns = self.columns.copy()
screen = self.screen
if screen is None:
screen = default_screen
columns[SCREEN_NAME] = screen
return columns
@expect_element(format=('svg', 'png', 'jpeg'))
def show_graph(self, format='svg'):
"""
Render this Pipeline as a DAG.
Parameters
----------
format : {'svg', 'png', 'jpeg'}
Image format to render with. Default is 'svg'.
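Example (illustrative; rendering requires Graphviz and IPython):
>>> Pipeline(columns={'close': USEquityPricing.close.latest}).show_graph()  # doctest: +SKIP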
"""
g = self.to_simple_graph(AssetExists())
if format == 'svg':
return g.svg
elif format == 'png':
return g.png
elif format == 'jpeg':
return g.jpeg
else:
# We should never get here because of the expect_element decorator
# above.
raise AssertionError("Unknown graph format %r." % format)
@staticmethod
@expect_types(term=Term, column_name=six.string_types)
def validate_column(column_name, term):
if term.ndim == 1:
raise UnsupportedPipelineOutput(column_name=column_name, term=term)
@property
def _output_terms(self):
"""
A list of terms that are outputs of this pipeline.
Includes all terms registered as data outputs of the pipeline, plus the
screen, if present.
"""
terms = list(six.itervalues(self._columns))
screen = self.screen
if screen is not None:
terms.append(screen)
return terms
@expect_types(default=Domain)
def domain(self, default):
"""
Get the domain for this pipeline.
- If an explicit domain was provided at construction time, use it.
- Otherwise, infer a domain from the registered columns.
- If no domain can be inferred, return ``default``.
Parameters
----------
default : zipline.pipeline.domain.Domain
Domain to use if no domain can be inferred from this pipeline by
itself.
Returns
-------
domain : zipline.pipeline.domain.Domain
The domain for the pipeline.
Raises
------
AmbiguousDomain
Raised if more than one non-generic domain is inferred from the
pipeline's output terms.
ValueError
Raised if the inferred domain conflicts with the domain passed at
construction.
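Example (illustrative):
>>> from zipline.pipeline.domain import US_EQUITIES  # doctest: +SKIP
>>> Pipeline().domain(default=US_EQUITIES)  # doctest: +SKIP
EquityCalendarDomain('US', 'XNYS')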
"""
# Always compute our inferred domain to ensure that it's compatible
# with our explicit domain.
inferred = infer_domain(self._output_terms)
if inferred is GENERIC and self._domain is GENERIC:
# Both generic. Fall back to default.
return default
elif inferred is GENERIC and self._domain is not GENERIC:
# Use the non-generic domain.
return self._domain
elif inferred is not GENERIC and self._domain is GENERIC:
# Use the non-generic domain.
return inferred
else:
# Both non-generic. They have to match.
if inferred is not self._domain:
raise ValueError(
"Conflicting domains in Pipeline. Inferred {}, but {} was "
"passed at construction.".format(inferred, self._domain)
)
return inferred | zipline-trader | /zipline_trader-1.6.1-cp36-cp36m-win32.whl/zipline/pipeline/pipeline.py | pipeline.py |
import datetime
from textwrap import dedent
from interface import default, implements, Interface
import numpy as np
import pandas as pd
import pytz
from trading_calendars import get_calendar
from zipline.country import CountryCode
from zipline.utils.formatting import bulleted_list
from zipline.utils.input_validation import expect_types, optional
from zipline.utils.memoize import lazyval
from zipline.utils.pandas_utils import days_at_time
class IDomain(Interface):
"""Domain interface.
"""
def all_sessions(self):
"""
Get all trading sessions for the calendar of this domain.
This determines the row labels of Pipeline outputs for pipelines run on
this domain.
Returns
-------
sessions : pd.DatetimeIndex
An array of all session labels for this domain.
"""
@property
def country_code(self):
"""The country code for this domain.
Returns
-------
code : str
The two-character ISO 3166 country code for this domain.
"""
def data_query_cutoff_for_sessions(self, sessions):
"""Compute the data query cutoff time for the given sessions.
Parameters
----------
sessions : pd.DatetimeIndex
The sessions to get the data query cutoff times for. The entries of
this index are midnight UTC session labels.
Returns
-------
data_query_cutoff : pd.DatetimeIndex
Timestamp of the last minute for which data should be considered
"available" on each session.
"""
@default
def roll_forward(self, dt):
"""
Given a date, align it to the calendar of the pipeline's domain.
Parameters
----------
dt : pd.Timestamp
Returns
-------
pd.Timestamp
"""
dt = pd.Timestamp(dt)
if not dt.tzinfo:
dt = dt.tz_localize('utc')
trading_days = self.all_sessions()
try:
return trading_days[trading_days.searchsorted(dt)]
except IndexError:
raise ValueError(
"Date {} was past the last session for domain {}. "
"The last session for this domain is {}.".format(
dt.date(),
self,
trading_days[-1].date()
)
)
Domain = implements(IDomain)
Domain.__doc__ = """
A domain represents a set of labels for the arrays computed by a Pipeline.
A domain defines two things:
1. A calendar defining the dates to which the pipeline's inputs and outputs
should be aligned. The calendar is represented concretely by a pandas
DatetimeIndex.
2. The set of assets that the pipeline should compute over. Right now, the only
supported way of representing this set is with a two-character country code
describing the country of assets over which the pipeline should compute. In
the future, we expect to expand this functionality to include more general
concepts.
"""
Domain.__name__ = "Domain"
Domain.__qualname__ = "zipline.pipeline.domain.Domain"
class GenericDomain(Domain):
"""Special singleton class used to represent generic DataSets and Columns.
"""
def all_sessions(self):
raise NotImplementedError("Can't get sessions for generic domain.")
@property
def country_code(self):
raise NotImplementedError("Can't get country code for generic domain.")
def data_query_cutoff_for_sessions(self, sessions):
raise NotImplementedError(
"Can't compute data query cutoff times for generic domain.",
)
def __repr__(self):
return "GENERIC"
GENERIC = GenericDomain()
class EquityCalendarDomain(Domain):
"""
An equity domain whose sessions are defined by a named TradingCalendar.
Parameters
----------
country_code : str
ISO-3166 two-letter country code of the domain
calendar_name : str
Name of the calendar, to be looked up by ``trading_calendars.get_calendar``.
data_query_offset : np.timedelta64
The offset from market open when data should no longer be considered
available for a session. For example, a ``data_query_offset`` of
``-np.timedelta64(45, 'm')`` means that the data must have
been available at least 45 minutes prior to market open for it to
appear in the pipeline input for the given session.
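Example (mirrors the built-in ``US_EQUITIES`` domain defined below):
>>> US_EQUITIES = EquityCalendarDomain(CountryCode.UNITED_STATES, 'XNYS')  # doctest: +SKIP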
"""
@expect_types(
country_code=str,
calendar_name=str,
__funcname='EquityCalendarDomain',
)
def __init__(self,
country_code,
calendar_name,
data_query_offset=-np.timedelta64(45, 'm')):
self._country_code = country_code
self.calendar_name = calendar_name
self._data_query_offset = (
# add one minute because `open_time` is actually the open minute
# label which is one minute _after_ market open...
data_query_offset - np.timedelta64(1, 'm')
)
if data_query_offset >= datetime.timedelta(0):
raise ValueError(
'data must be ready before market open (offset must be < 0)',
)
@property
def country_code(self):
return self._country_code
@lazyval
def calendar(self):
return get_calendar(self.calendar_name)
def all_sessions(self):
return self.calendar.all_sessions
def data_query_cutoff_for_sessions(self, sessions):
opens = self.calendar.opens.reindex(sessions).values
missing_mask = pd.isnull(opens)
if missing_mask.any():
missing_days = sessions[missing_mask]
raise ValueError(
'cannot resolve data query time for sessions that are not on'
' the %s calendar:\n%s' % (
self.calendar.name,
missing_days,
),
)
return pd.DatetimeIndex(opens + self._data_query_offset, tz='UTC')
def __repr__(self):
return "EquityCalendarDomain({!r}, {!r})".format(
self.country_code, self.calendar_name,
)
AR_EQUITIES = EquityCalendarDomain(CountryCode.ARGENTINA, 'XBUE')
AT_EQUITIES = EquityCalendarDomain(CountryCode.AUSTRIA, 'XWBO')
AU_EQUITIES = EquityCalendarDomain(CountryCode.AUSTRALIA, 'XASX')
BE_EQUITIES = EquityCalendarDomain(CountryCode.BELGIUM, 'XBRU')
BR_EQUITIES = EquityCalendarDomain(CountryCode.BRAZIL, 'BVMF')
CA_EQUITIES = EquityCalendarDomain(CountryCode.CANADA, 'XTSE')
CH_EQUITIES = EquityCalendarDomain(CountryCode.SWITZERLAND, 'XSWX')
CL_EQUITIES = EquityCalendarDomain(CountryCode.CHILE, 'XSGO')
CN_EQUITIES = EquityCalendarDomain(CountryCode.CHINA, 'XSHG')
CO_EQUITIES = EquityCalendarDomain(CountryCode.COLOMBIA, 'XBOG')
CZ_EQUITIES = EquityCalendarDomain(CountryCode.CZECH_REPUBLIC, 'XPRA')
DE_EQUITIES = EquityCalendarDomain(CountryCode.GERMANY, 'XFRA')
DK_EQUITIES = EquityCalendarDomain(CountryCode.DENMARK, 'XCSE')
ES_EQUITIES = EquityCalendarDomain(CountryCode.SPAIN, 'XMAD')
FI_EQUITIES = EquityCalendarDomain(CountryCode.FINLAND, 'XHEL')
FR_EQUITIES = EquityCalendarDomain(CountryCode.FRANCE, 'XPAR')
GB_EQUITIES = EquityCalendarDomain(CountryCode.UNITED_KINGDOM, 'XLON')
GR_EQUITIES = EquityCalendarDomain(CountryCode.GREECE, 'ASEX')
HK_EQUITIES = EquityCalendarDomain(CountryCode.HONG_KONG, 'XHKG')
HU_EQUITIES = EquityCalendarDomain(CountryCode.HUNGARY, 'XBUD')
ID_EQUITIES = EquityCalendarDomain(CountryCode.INDONESIA, 'XIDX')
IE_EQUITIES = EquityCalendarDomain(CountryCode.IRELAND, 'XDUB')
IN_EQUITIES = EquityCalendarDomain(CountryCode.INDIA, "XBOM")
IT_EQUITIES = EquityCalendarDomain(CountryCode.ITALY, 'XMIL')
JP_EQUITIES = EquityCalendarDomain(CountryCode.JAPAN, 'XTKS')
KR_EQUITIES = EquityCalendarDomain(CountryCode.SOUTH_KOREA, 'XKRX')
MX_EQUITIES = EquityCalendarDomain(CountryCode.MEXICO, 'XMEX')
MY_EQUITIES = EquityCalendarDomain(CountryCode.MALAYSIA, 'XKLS')
NL_EQUITIES = EquityCalendarDomain(CountryCode.NETHERLANDS, 'XAMS')
NO_EQUITIES = EquityCalendarDomain(CountryCode.NORWAY, 'XOSL')
NZ_EQUITIES = EquityCalendarDomain(CountryCode.NEW_ZEALAND, 'XNZE')
PE_EQUITIES = EquityCalendarDomain(CountryCode.PERU, 'XLIM')
PH_EQUITIES = EquityCalendarDomain(CountryCode.PHILIPPINES, 'XPHS')
PK_EQUITIES = EquityCalendarDomain(CountryCode.PAKISTAN, 'XKAR')
PL_EQUITIES = EquityCalendarDomain(CountryCode.POLAND, 'XWAR')
PT_EQUITIES = EquityCalendarDomain(CountryCode.PORTUGAL, 'XLIS')
RU_EQUITIES = EquityCalendarDomain(CountryCode.RUSSIA, 'XMOS')
SE_EQUITIES = EquityCalendarDomain(CountryCode.SWEDEN, 'XSTO')
SG_EQUITIES = EquityCalendarDomain(CountryCode.SINGAPORE, 'XSES')
TH_EQUITIES = EquityCalendarDomain(CountryCode.THAILAND, 'XBKK')
TR_EQUITIES = EquityCalendarDomain(CountryCode.TURKEY, 'XIST')
TW_EQUITIES = EquityCalendarDomain(CountryCode.TAIWAN, 'XTAI')
US_EQUITIES = EquityCalendarDomain(CountryCode.UNITED_STATES, 'XNYS')
ZA_EQUITIES = EquityCalendarDomain(CountryCode.SOUTH_AFRICA, 'XJSE')
BUILT_IN_DOMAINS = [
AR_EQUITIES,
AT_EQUITIES,
AU_EQUITIES,
BE_EQUITIES,
BR_EQUITIES,
CA_EQUITIES,
CH_EQUITIES,
CL_EQUITIES,
CN_EQUITIES,
CO_EQUITIES,
CZ_EQUITIES,
DE_EQUITIES,
DK_EQUITIES,
ES_EQUITIES,
FI_EQUITIES,
FR_EQUITIES,
GB_EQUITIES,
GR_EQUITIES,
HK_EQUITIES,
HU_EQUITIES,
ID_EQUITIES,
IE_EQUITIES,
IN_EQUITIES,
IT_EQUITIES,
JP_EQUITIES,
KR_EQUITIES,
MX_EQUITIES,
MY_EQUITIES,
NL_EQUITIES,
NO_EQUITIES,
NZ_EQUITIES,
PE_EQUITIES,
PH_EQUITIES,
PK_EQUITIES,
PL_EQUITIES,
PT_EQUITIES,
RU_EQUITIES,
SE_EQUITIES,
SG_EQUITIES,
TH_EQUITIES,
TR_EQUITIES,
TW_EQUITIES,
US_EQUITIES,
ZA_EQUITIES,
]
def infer_domain(terms):
"""
Infer the domain from a collection of terms.
The algorithm for inferring domains is as follows:
- If all input terms have a domain of GENERIC, the result is GENERIC.
- If there is exactly one non-generic domain in the input terms, the result
is that domain.
- Otherwise, an AmbiguousDomain error is raised.
Parameters
----------
terms : iterable[zipline.pipeline.Term]
Returns
-------
inferred : Domain or NotSpecified
Raises
------
AmbiguousDomain
Raised if more than one concrete domain is present in the input terms.
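Examples
--------
Illustrative sketch (assumes the standard ``USEquityPricing`` dataset):
>>> infer_domain([USEquityPricing.close.latest])  # doctest: +SKIP
EquityCalendarDomain('US', 'XNYS')
>>> infer_domain([])  # doctest: +SKIP
GENERIC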
"""
domains = {t.domain for t in terms}
num_domains = len(domains)
if num_domains == 0:
return GENERIC
elif num_domains == 1:
return domains.pop()
elif num_domains == 2 and GENERIC in domains:
domains.remove(GENERIC)
return domains.pop()
else:
# Remove GENERIC if it's present before raising. Showing it to the user
# is confusing because it doesn't contribute to the error.
domains.discard(GENERIC)
raise AmbiguousDomain(sorted(domains, key=repr))
# This would be better if we provided more context for which domains came from
# which terms.
class AmbiguousDomain(Exception):
"""
Raised when we attempt to infer a domain from a collection of mixed terms.
"""
_TEMPLATE = dedent(
"""\
Found terms with conflicting domains:
{domains}"""
)
def __init__(self, domains):
self.domains = domains
def __str__(self):
return self._TEMPLATE.format(
domains=bulleted_list(self.domains, indent=2),
)
class EquitySessionDomain(Domain):
"""A domain built directly from an index of sessions.
Mostly useful for testing.
Parameters
----------
sessions : pd.DatetimeIndex
Sessions to use as output labels for pipelines run on this domain.
country_code : str
ISO 3166 country code of equities to be used with this domain.
data_query_time : datetime.time, optional
The time of day when data should no longer be considered available for
a session.
data_query_date_offset : int, optional
The number of days to add to the session label before applying the
``data_query_time``. This can be used to express that the cutoff time
for a session falls on a different calendar day from the session label.
"""
@expect_types(
sessions=pd.DatetimeIndex,
country_code=str,
data_query_time=optional(datetime.time),
data_query_date_offset=int,
__funcname='EquitySessionDomain',
)
def __init__(self,
sessions,
country_code,
data_query_time=None,
data_query_date_offset=0):
self._country_code = country_code
self._sessions = sessions
if data_query_time is None:
data_query_time = datetime.time(0, 0, tzinfo=pytz.timezone('UTC'))
if data_query_time.tzinfo is None:
raise ValueError("data_query_time cannot be tz-naive")
self._data_query_time = data_query_time
self._data_query_date_offset = data_query_date_offset
@property
def country_code(self):
return self._country_code
def all_sessions(self):
return self._sessions
def data_query_cutoff_for_sessions(self, sessions):
return days_at_time(
sessions,
self._data_query_time,
self._data_query_time.tzinfo,
self._data_query_date_offset,
) | zipline-trader | /zipline_trader-1.6.1-cp36-cp36m-win32.whl/zipline/pipeline/domain.py | domain.py |
from abc import ABCMeta, abstractmethod
from functools import partial
from six import iteritems, with_metaclass, viewkeys
from numpy import array, arange
from pandas import DataFrame, MultiIndex
from toolz import groupby
from zipline.lib.adjusted_array import ensure_adjusted_array, ensure_ndarray
from zipline.errors import NoFurtherDataError
from zipline.utils.input_validation import expect_types
from zipline.utils.numpy_utils import (
as_column,
repeat_first_axis,
repeat_last_axis,
)
from zipline.utils.pandas_utils import explode
from zipline.utils.string_formatting import bulleted_list
from .domain import Domain, GENERIC
from .graph import maybe_specialize
from .hooks import DelegatingHooks
from .term import AssetExists, InputDates, LoadableTerm
from zipline.utils.date_utils import compute_date_range_chunks
from zipline.utils.pandas_utils import categorical_df_concat
class PipelineEngine(with_metaclass(ABCMeta)):
@abstractmethod
def run_pipeline(self, pipeline, start_date, end_date, hooks=None):
"""
Compute values for ``pipeline`` from ``start_date`` to ``end_date``.
Parameters
----------
pipeline : zipline.pipeline.Pipeline
The pipeline to run.
start_date : pd.Timestamp
Start date of the computed matrix.
end_date : pd.Timestamp
End date of the computed matrix.
hooks : list[implements(PipelineHooks)], optional
Hooks for instrumenting Pipeline execution.
Returns
-------
result : pd.DataFrame
A frame of computed results.
The ``result`` columns correspond to the entries of
`pipeline.columns`, which should be a dictionary mapping strings to
instances of :class:`zipline.pipeline.Term`.
For each date between ``start_date`` and ``end_date``, ``result``
will contain a row for each asset that passed `pipeline.screen`.
A screen of ``None`` indicates that a row should be returned for
each asset that existed each day.
"""
raise NotImplementedError("run_pipeline")
@abstractmethod
def run_chunked_pipeline(self,
pipeline,
start_date,
end_date,
chunksize,
hooks=None):
"""
Compute values for ``pipeline`` from ``start_date`` to ``end_date``, in
date chunks of size ``chunksize``.
Chunked execution reduces memory consumption, and may reduce
computation time depending on the contents of your pipeline.
Parameters
----------
pipeline : Pipeline
The pipeline to run.
start_date : pd.Timestamp
The start date to run the pipeline for.
end_date : pd.Timestamp
The end date to run the pipeline for.
chunksize : int
The number of days to execute at a time.
hooks : list[implements(PipelineHooks)], optional
Hooks for instrumenting Pipeline execution.
Returns
-------
result : pd.DataFrame
A frame of computed results.
The ``result`` columns correspond to the entries of
`pipeline.columns`, which should be a dictionary mapping strings to
instances of :class:`zipline.pipeline.Term`.
For each date between ``start_date`` and ``end_date``, ``result``
will contain a row for each asset that passed `pipeline.screen`.
A screen of ``None`` indicates that a row should be returned for
each asset that existed each day.
See Also
--------
:meth:`zipline.pipeline.engine.PipelineEngine.run_pipeline`
"""
raise NotImplementedError("run_chunked_pipeline")
class NoEngineRegistered(Exception):
"""
Raised if a user tries to call pipeline_output in an algorithm that hasn't
set up a pipeline engine.
"""
class ExplodingPipelineEngine(PipelineEngine):
"""
A PipelineEngine that doesn't do anything.
"""
def run_pipeline(self, pipeline, start_date, end_date, hooks=None):
raise NoEngineRegistered(
"Attempted to run a pipeline but no pipeline "
"resources were registered."
)
def run_chunked_pipeline(self,
pipeline,
start_date,
end_date,
chunksize,
hooks=None):
raise NoEngineRegistered(
"Attempted to run a chunked pipeline but no pipeline "
"resources were registered."
)
def default_populate_initial_workspace(initial_workspace,
root_mask_term,
execution_plan,
dates,
assets):
"""The default implementation for ``populate_initial_workspace``. This
function returns the ``initial_workspace`` argument without making any
modifications.
Parameters
----------
initial_workspace : dict[array-like]
The initial workspace before we have populated it with any cached
terms.
root_mask_term : Term
The root mask term, normally ``AssetExists()``. This is needed to
compute the dates for individual terms.
execution_plan : ExecutionPlan
The execution plan for the pipeline being run.
dates : pd.DatetimeIndex
All of the dates being requested in this pipeline run including
the extra dates for look back windows.
assets : pd.Int64Index
All of the assets that exist for the window being computed.
Returns
-------
populated_initial_workspace : dict[term, array-like]
The workspace to begin computations with.
"""
return initial_workspace
class SimplePipelineEngine(PipelineEngine):
"""
PipelineEngine class that computes each term independently.
Parameters
----------
get_loader : callable
A function that is given a loadable term and returns a PipelineLoader
to use to retrieve raw data for that term.
asset_finder : zipline.assets.AssetFinder
An AssetFinder instance. We depend on the AssetFinder to determine
which assets are in the top-level universe at any point in time.
populate_initial_workspace : callable, optional
A function which will be used to populate the initial workspace when
computing a pipeline. See
:func:`zipline.pipeline.engine.default_populate_initial_workspace`
for more info.
default_hooks : list, optional
List of hooks that should be used to instrument all pipelines executed
by this engine.
See Also
--------
:func:`zipline.pipeline.engine.default_populate_initial_workspace`
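Examples
--------
Hypothetical wiring; the loader and asset finder below stand in for
whatever your data bundle provides:
>>> engine = SimplePipelineEngine(
...     get_loader=lambda column: my_loader,
...     asset_finder=bundle_data.asset_finder,
...     default_domain=US_EQUITIES,
... )  # doctest: +SKIP
>>> result = engine.run_pipeline(pipe, start_date, end_date)  # doctest: +SKIP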
"""
__slots__ = (
'_get_loader',
'_finder',
'_root_mask_term',
'_root_mask_dates_term',
'_populate_initial_workspace',
)
@expect_types(
default_domain=Domain,
__funcname='SimplePipelineEngine',
)
def __init__(self,
get_loader,
asset_finder,
default_domain=GENERIC,
populate_initial_workspace=None,
default_hooks=None):
self._get_loader = get_loader
self._finder = asset_finder
self._root_mask_term = AssetExists()
self._root_mask_dates_term = InputDates()
self._populate_initial_workspace = (
populate_initial_workspace or default_populate_initial_workspace
)
self._default_domain = default_domain
if default_hooks is None:
self._default_hooks = []
else:
self._default_hooks = list(default_hooks)
def run_chunked_pipeline(self,
pipeline,
start_date,
end_date,
chunksize,
hooks=None):
"""
Compute values for ``pipeline`` from ``start_date`` to ``end_date``, in
date chunks of size ``chunksize``.
Chunked execution reduces memory consumption, and may reduce
computation time depending on the contents of your pipeline.
Parameters
----------
pipeline : Pipeline
The pipeline to run.
start_date : pd.Timestamp
The start date to run the pipeline for.
end_date : pd.Timestamp
The end date to run the pipeline for.
chunksize : int
The number of days to execute at a time.
hooks : list[implements(PipelineHooks)], optional
Hooks for instrumenting Pipeline execution.
Returns
-------
result : pd.DataFrame
A frame of computed results.
The ``result`` columns correspond to the entries of
`pipeline.columns`, which should be a dictionary mapping strings to
instances of :class:`zipline.pipeline.Term`.
For each date between ``start_date`` and ``end_date``, ``result``
will contain a row for each asset that passed `pipeline.screen`.
A screen of ``None`` indicates that a row should be returned for
each asset that existed each day.
See Also
--------
:meth:`zipline.pipeline.engine.PipelineEngine.run_pipeline`
"""
domain = self.resolve_domain(pipeline)
ranges = compute_date_range_chunks(
domain.all_sessions(),
start_date,
end_date,
chunksize,
)
hooks = self._resolve_hooks(hooks)
run_pipeline = partial(self._run_pipeline_impl, pipeline, hooks=hooks)
with hooks.running_pipeline(pipeline, start_date, end_date):
chunks = [run_pipeline(s, e) for s, e in ranges]
if len(chunks) == 1:
# OPTIMIZATION: Don't make an extra copy in `categorical_df_concat`
# if we don't have to.
return chunks[0]
# Filter out empty chunks. Empty dataframes lose dtype information,
# which makes concatenation fail.
nonempty_chunks = [c for c in chunks if len(c)]
return categorical_df_concat(nonempty_chunks, inplace=True)
def run_pipeline(self, pipeline, start_date, end_date, hooks=None):
"""
Compute values for ``pipeline`` from ``start_date`` to ``end_date``.
Parameters
----------
pipeline : zipline.pipeline.Pipeline
The pipeline to run.
start_date : pd.Timestamp
Start date of the computed matrix.
end_date : pd.Timestamp
End date of the computed matrix.
hooks : list[implements(PipelineHooks)], optional
Hooks for instrumenting Pipeline execution.
Returns
-------
result : pd.DataFrame
A frame of computed results.
The ``result`` columns correspond to the entries of
`pipeline.columns`, which should be a dictionary mapping strings to
instances of :class:`zipline.pipeline.Term`.
For each date between ``start_date`` and ``end_date``, ``result``
will contain a row for each asset that passed `pipeline.screen`.
A screen of ``None`` indicates that a row should be returned for
each asset that existed each day.
"""
hooks = self._resolve_hooks(hooks)
with hooks.running_pipeline(pipeline, start_date, end_date):
return self._run_pipeline_impl(
pipeline,
start_date,
end_date,
hooks,
)
def _run_pipeline_impl(self, pipeline, start_date, end_date, hooks):
"""Shared core for ``run_pipeline`` and ``run_chunked_pipeline``.
"""
# See notes at the top of this module for a description of the
# algorithm implemented here.
if end_date < start_date:
raise ValueError(
"start_date must be before or equal to end_date \n"
"start_date=%s, end_date=%s" % (start_date, end_date)
)
domain = self.resolve_domain(pipeline)
plan = pipeline.to_execution_plan(
domain, self._root_mask_term, start_date, end_date,
)
extra_rows = plan.extra_rows[self._root_mask_term]
root_mask = self._compute_root_mask(
domain, start_date, end_date, extra_rows,
)
dates, sids, root_mask_values = explode(root_mask)
workspace = self._populate_initial_workspace(
{
self._root_mask_term: root_mask_values,
self._root_mask_dates_term: as_column(dates.values)
},
self._root_mask_term,
plan,
dates,
sids,
)
refcounts = plan.initial_refcounts(workspace)
execution_order = plan.execution_order(workspace, refcounts)
with hooks.computing_chunk(execution_order,
start_date,
end_date):
results = self.compute_chunk(
graph=plan,
dates=dates,
sids=sids,
workspace=workspace,
refcounts=refcounts,
execution_order=execution_order,
hooks=hooks,
)
return self._to_narrow(
plan.outputs,
results,
results.pop(plan.screen_name),
dates[extra_rows:],
sids,
)
def _compute_root_mask(self, domain, start_date, end_date, extra_rows):
"""
Compute a lifetimes matrix from our AssetFinder, then drop columns that
didn't exist at all during the query dates.
Parameters
----------
domain : zipline.pipeline.domain.Domain
Domain for which we're computing a pipeline.
start_date : pd.Timestamp
Base start date for the matrix.
end_date : pd.Timestamp
End date for the matrix.
extra_rows : int
Number of extra rows to compute before `start_date`.
Extra rows are needed by terms like moving averages that require a
trailing window of data.
Returns
-------
lifetimes : pd.DataFrame
Frame of dtype `bool` containing dates from `extra_rows` days
before `start_date`, continuing through to `end_date`. The
returned frame contains as columns all assets in our AssetFinder
that existed for at least one day between `start_date` and
`end_date`.
"""
sessions = domain.all_sessions()
if start_date not in sessions:
raise ValueError(
"Pipeline start date ({}) is not a trading session for "
"domain {}.".format(start_date, domain)
)
elif end_date not in sessions:
raise ValueError(
"Pipeline end date {} is not a trading session for "
"domain {}.".format(end_date, domain)
)
start_idx, end_idx = sessions.slice_locs(start_date, end_date)
if start_idx < extra_rows:
raise NoFurtherDataError.from_lookback_window(
initial_message="Insufficient data to compute Pipeline:",
first_date=sessions[0],
lookback_start=start_date,
lookback_length=extra_rows,
)
# NOTE: This logic should probably be delegated to the domain once we
# start adding more complex domains.
#
# Build lifetimes matrix reaching back to `extra_rows` days before
# `start_date.`
finder = self._finder
lifetimes = finder.lifetimes(
sessions[start_idx - extra_rows:end_idx],
include_start_date=False,
country_codes=(domain.country_code,),
)
if not lifetimes.columns.is_unique:
columns = lifetimes.columns
duplicated = columns[columns.duplicated()].unique()
raise AssertionError("Duplicated sids: %d" % duplicated)
# Filter out columns that didn't exist from the farthest look back
# window through the end of the requested dates.
existed = lifetimes.any()
ret = lifetimes.loc[:, existed]
num_assets = ret.shape[1]
if num_assets == 0:
raise ValueError(
"Failed to find any assets with country_code {!r} that traded "
"between {} and {}.\n"
"This probably means that your asset db is old or that it has "
"incorrect country/exchange metadata.".format(
domain.country_code, start_date, end_date,
)
)
return ret
@staticmethod
def _inputs_for_term(term, workspace, graph, domain, refcounts):
"""
Compute inputs for the given term.
This is mostly complicated by the fact that for each input we store as
many rows as will be necessary to serve **any** computation requiring
that input.
"""
offsets = graph.offset
out = []
# We need to specialize here because we don't change ComputableTerm
# after resolving domains, so they can still contain generic terms as
# inputs.
specialized = [maybe_specialize(t, domain) for t in term.inputs]
if term.windowed:
# If term is windowed, then all input data should be instances of
# AdjustedArray.
for input_ in specialized:
adjusted_array = ensure_adjusted_array(
workspace[input_], input_.missing_value,
)
out.append(
adjusted_array.traverse(
window_length=term.window_length,
offset=offsets[term, input_],
# If the refcount for the input is > 1, we will need
# to traverse this array again so we must copy.
# If the refcount for the input == 0, this is the last
# traversal that will happen so we can invalidate
# the AdjustedArray and mutate the data in place.
copy=refcounts[input_] > 1,
)
)
else:
# If term is not windowed, input_data may be an AdjustedArray or
# np.ndarray. Coerce the former to the latter.
for input_ in specialized:
input_data = ensure_ndarray(workspace[input_])
offset = offsets[term, input_]
input_data = input_data[offset:]
if refcounts[input_] > 1:
input_data = input_data.copy()
out.append(input_data)
return out
def compute_chunk(self,
graph,
dates,
sids,
workspace,
refcounts,
execution_order,
hooks):
"""
Compute the Pipeline terms in the graph for the requested start and end
dates.
This is where we do the actual work of running a pipeline.
Parameters
----------
graph : zipline.pipeline.graph.ExecutionPlan
Dependency graph of the terms to be executed.
dates : pd.DatetimeIndex
Row labels for our root mask.
sids : pd.Int64Index
Column labels for our root mask.
workspace : dict
Map from term -> output.
Must contain at least entry for `self._root_mask_term` whose shape
is `(len(dates), len(assets))`, but may contain additional
pre-computed terms for testing or optimization purposes.
refcounts : dict[Term, int]
Dictionary mapping terms to number of dependent terms. When a
term's refcount hits 0, it can be safely discarded from
``workspace``. See TermGraph.decref_dependencies for more info.
execution_order : list[Term]
Order in which to execute terms.
hooks : implements(PipelineHooks)
Hooks to instrument pipeline execution.
Returns
-------
results : dict
Dictionary mapping requested results to outputs.
"""
self._validate_compute_chunk_params(graph, dates, sids, workspace)
get_loader = self._get_loader
# Copy the supplied initial workspace so we don't mutate it in place.
workspace = workspace.copy()
domain = graph.domain
# Many loaders can fetch data more efficiently if we ask them to
# retrieve all their inputs at once. For example, a loader backed by a
# SQL database can fetch multiple columns from the database in a single
# query.
#
# To enable these loaders to fetch their data efficiently, we group
# together requests for LoadableTerms if they are provided by the same
# loader and they require the same number of extra rows.
#
# The extra rows condition is a simplification: we don't currently have
# a mechanism for asking a loader to fetch different windows of data
# for different terms, so we only batch requests together when they're
# going to produce data for the same set of dates. That may change in
# the future if we find a loader that can still benefit significantly
# from batching unequal-length requests.
def loader_group_key(term):
loader = get_loader(term)
extra_rows = graph.extra_rows[term]
return loader, extra_rows
# Only produce loader groups for the terms we expect to load. This
# ensures that we can run pipelines for graphs where we don't have a
# loader registered for an atomic term if all the dependencies of that
# term were supplied in the initial workspace.
will_be_loaded = graph.loadable_terms - viewkeys(workspace)
loader_groups = groupby(
loader_group_key,
(t for t in execution_order if t in will_be_loaded),
)
for term in execution_order:
# `term` may have been supplied in `initial_workspace`, or we may
# have loaded `term` as part of a batch with another term coming
# from the same loader (see note on loader_group_key above). In
# either case, we already have the term computed, so don't
# recompute.
if term in workspace:
continue
# Asset labels are always the same, but date labels vary by how
# many extra rows are needed.
mask, mask_dates = graph.mask_and_dates_for_term(
term,
self._root_mask_term,
workspace,
dates,
)
if isinstance(term, LoadableTerm):
loader = get_loader(term)
to_load = sorted(
loader_groups[loader_group_key(term)],
key=lambda t: t.dataset
)
self._ensure_can_load(loader, to_load)
with hooks.loading_terms(to_load):
loaded = loader.load_adjusted_array(
domain, to_load, mask_dates, sids, mask,
)
assert set(loaded) == set(to_load), (
'loader did not return an AdjustedArray for each column\n'
'expected: %r\n'
'got: %r' % (
sorted(to_load, key=repr),
sorted(loaded, key=repr),
)
)
workspace.update(loaded)
else:
with hooks.computing_term(term):
workspace[term] = term._compute(
self._inputs_for_term(
term,
workspace,
graph,
domain,
refcounts,
),
mask_dates,
sids,
mask,
)
if term.ndim == 2:
assert workspace[term].shape == mask.shape
else:
assert workspace[term].shape == (mask.shape[0], 1)
# Decref dependencies of ``term``, and clear any terms
# whose refcounts hit 0.
for garbage in graph.decref_dependencies(term, refcounts):
del workspace[garbage]
# At this point, all the output terms are in the workspace.
out = {}
graph_extra_rows = graph.extra_rows
for name, term in iteritems(graph.outputs):
# Truncate off extra rows from outputs.
out[name] = workspace[term][graph_extra_rows[term]:]
return out
def _to_narrow(self, terms, data, mask, dates, assets):
"""
Convert raw computed pipeline results into a DataFrame for public APIs.
Parameters
----------
terms : dict[str -> Term]
Dict mapping column names to terms.
data : dict[str -> ndarray[ndim=2]]
Dict mapping column names to computed results for those names.
mask : ndarray[bool, ndim=2]
Mask array of values to keep.
dates : ndarray[datetime64, ndim=1]
Row index for arrays `data` and `mask`
assets : ndarray[int64, ndim=2]
Column index for arrays `data` and `mask`
Returns
-------
results : pd.DataFrame
The indices of `results` are as follows:
index : two-tiered MultiIndex of (date, asset).
Contains an entry for each (date, asset) pair corresponding to
a `True` value in `mask`.
columns : Index of str
One column per entry in `data`.
If mask[date, asset] is True, then result.loc[(date, asset), colname]
will contain the value of data[colname][date, asset].
"""
if not mask.any():
# Manually handle the empty DataFrame case. This is a workaround
# to pandas failing to tz_localize an empty dataframe with a
# MultiIndex. It also saves us the work of applying a known-empty
# mask to each array.
#
# Slicing `dates` here to preserve pandas metadata.
empty_dates = dates[:0]
empty_assets = array([], dtype=object)
return DataFrame(
data={
name: array([], dtype=arr.dtype)
for name, arr in iteritems(data)
},
index=MultiIndex.from_arrays([empty_dates, empty_assets]),
)
final_columns = {}
for name in data:
# Each term that computed an output has its postprocess method
# called on the filtered result.
#
# As of Mon May 2 15:38:47 2016, we only use this to convert
# LabelArrays into categoricals.
final_columns[name] = terms[name].postprocess(data[name][mask])
resolved_assets = array(self._finder.retrieve_all(assets))
index = _pipeline_output_index(dates, resolved_assets, mask)
return DataFrame(data=final_columns, index=index)
def _validate_compute_chunk_params(self,
graph,
dates,
sids,
initial_workspace):
"""
Verify that the values passed to compute_chunk are well-formed.
"""
root = self._root_mask_term
clsname = type(self).__name__
# Writing this out explicitly so this errors in testing if we change
# the name without updating this line.
compute_chunk_name = self.compute_chunk.__name__
if root not in initial_workspace:
raise AssertionError(
"root_mask values not supplied to {cls}.{method}".format(
cls=clsname,
method=compute_chunk_name,
)
)
shape = initial_workspace[root].shape
implied_shape = len(dates), len(sids)
if shape != implied_shape:
raise AssertionError(
"root_mask shape is {shape}, but received dates/assets "
"imply that shape should be {implied}".format(
shape=shape,
implied=implied_shape,
)
)
for term in initial_workspace:
if self._is_special_root_term(term):
continue
if term.domain is GENERIC:
# XXX: We really shouldn't allow **any** generic terms to be
# populated in the initial workspace. A generic term, by
# definition, can't correspond to concrete data until it's
# paired with a domain, and populate_initial_workspace isn't
# given the domain of execution, so it can't possibly know what
# data to use when populating a generic term.
#
# In our current implementation, however, we don't have a good
# way to represent specializations of ComputableTerms that take
# only generic inputs, so there's no good way for the initial
# workspace to provide data for such terms except by populating
# the generic ComputableTerm.
#
# The right fix for the above is to implement "full
# specialization", i.e., implementing ``specialize`` uniformly
# across all terms, not just LoadableTerms. Having full
# specialization will also remove the need for all of the
# remaining ``maybe_specialize`` calls floating around in this
# file.
#
# In the meantime, disallowing ComputableTerms in the initial
# workspace would break almost every test in
# `test_filter`/`test_factor`/`test_classifier`, and fixing
# them would require updating all those tests to compute with
# more specialized terms. Once we have full specialization, we
# can fix all the tests without a large volume of edits by
# simply specializing their workspaces, so for now I'm leaving
# this in place as a somewhat sharp edge.
if isinstance(term, LoadableTerm):
raise ValueError(
"Loadable workspace terms must be specialized to a "
"domain, but got generic term {}".format(term)
)
elif term.domain != graph.domain:
raise ValueError(
"Initial workspace term {} has domain {}. "
"Does not match pipeline domain {}".format(
term, term.domain, graph.domain,
)
)
def resolve_domain(self, pipeline):
"""Resolve a concrete domain for ``pipeline``.
"""
domain = pipeline.domain(default=self._default_domain)
if domain is GENERIC:
raise ValueError(
"Unable to determine domain for Pipeline.\n"
"Pass domain=<desired domain> to your Pipeline to set a "
"domain."
)
return domain
def _is_special_root_term(self, term):
return (
term is self._root_mask_term
or term is self._root_mask_dates_term
)
def _resolve_hooks(self, hooks):
if hooks is None:
hooks = []
return DelegatingHooks(self._default_hooks + hooks)
def _ensure_can_load(self, loader, terms):
"""Ensure that ``loader`` can load ``terms``.
"""
if not loader.currency_aware:
bad = [t for t in terms if t.currency_conversion is not None]
if bad:
raise ValueError(
"Requested currency conversion is not supported for the "
"following terms:\n{}".format(bulleted_list(bad))
)
def _pipeline_output_index(dates, assets, mask):
"""
Create a MultiIndex for a pipeline output.
Parameters
----------
dates : pd.DatetimeIndex
Row labels for ``mask``.
assets : pd.Index
Column labels for ``mask``.
mask : np.ndarray[bool]
Mask array indicating date/asset pairs that should be included in
output index.
Returns
-------
index : pd.MultiIndex
MultiIndex containing (date, asset) pairs corresponding to ``True``
values in ``mask``.
"""
date_labels = repeat_last_axis(arange(len(dates)), len(assets))[mask]
asset_labels = repeat_first_axis(arange(len(assets)), len(dates))[mask]
return MultiIndex(
levels=[dates, assets],
codes=[date_labels, asset_labels],
# TODO: We should probably add names for these.
names=[None, None],
verify_integrity=False,
) | zipline-trader | /zipline_trader-1.6.1-cp36-cp36m-win32.whl/zipline/pipeline/engine.py | engine.py |
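# Editor's illustration (not part of the original module): for a 2 x 2 grid of
# dates x assets with mask [[True, False], [True, True]], the code above picks
# out date_labels == [0, 1, 1] and asset_labels == [0, 0, 1], so the resulting
# MultiIndex holds exactly the three masked-in pairs:
#
#     (dates[0], assets[0]), (dates[1], assets[0]), (dates[1], assets[1])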
from abc import ABCMeta, abstractproperty, abstractmethod
from bisect import insort
from collections import Mapping
from weakref import WeakValueDictionary
from numpy import (
array,
dtype as dtype_class,
ndarray,
)
from six import with_metaclass
from zipline.assets import Asset
from zipline.errors import (
DTypeNotSpecified,
InvalidOutputName,
NonSliceableTerm,
NonWindowSafeInput,
NotDType,
NonPipelineInputs,
TermInputsNotSpecified,
TermOutputsEmpty,
UnsupportedDType,
WindowLengthNotSpecified,
)
from zipline.lib.adjusted_array import can_represent_dtype
from zipline.lib.labelarray import LabelArray
from zipline.utils.input_validation import expect_types
from zipline.utils.memoize import classlazyval, lazyval
from zipline.utils.numpy_utils import (
bool_dtype,
categorical_dtype,
datetime64ns_dtype,
default_missing_value_for_dtype,
float64_dtype,
)
from zipline.utils.sharedoc import (
templated_docstring,
PIPELINE_ALIAS_NAME_DOC,
PIPELINE_DOWNSAMPLING_FREQUENCY_DOC,
)
from .domain import Domain, GENERIC, infer_domain
from .downsample_helpers import expect_downsample_frequency
from .sentinels import NotSpecified
class Term(with_metaclass(ABCMeta, object)):
"""
Base class for objects that can appear in the compute graph of a
:class:`zipline.pipeline.Pipeline`.
Notes
-----
Most Pipeline API users only interact with :class:`Term` via subclasses:
- :class:`~zipline.pipeline.data.BoundColumn`
- :class:`~zipline.pipeline.Factor`
- :class:`~zipline.pipeline.Filter`
- :class:`~zipline.pipeline.Classifier`
Instances of :class:`Term` are **memoized**. If you call a Term's
constructor with the same arguments twice, the same object will be returned
from both calls:
**Example:**
>>> from zipline.pipeline.data import EquityPricing
>>> from zipline.pipeline.factors import SimpleMovingAverage
>>> x = SimpleMovingAverage(inputs=[EquityPricing.close], window_length=5)
>>> y = SimpleMovingAverage(inputs=[EquityPricing.close], window_length=5)
>>> x is y
True
.. warning::
Memoization of terms means that it's generally unsafe to modify
attributes of a term after construction.
"""
# These are NotSpecified because a subclass is required to provide them.
dtype = NotSpecified
missing_value = NotSpecified
# Subclasses aren't required to provide `params`. The default behavior is
# no params.
params = ()
# All terms are generic by default.
domain = GENERIC
# Determines if a term is safe to be used as a windowed input.
window_safe = False
# The dimensions of the term's output (1D or 2D).
ndim = 2
_term_cache = WeakValueDictionary()
def __new__(cls,
domain=NotSpecified,
dtype=NotSpecified,
missing_value=NotSpecified,
window_safe=NotSpecified,
ndim=NotSpecified,
# params is explicitly not allowed to be passed to an instance.
*args,
**kwargs):
"""
Memoized constructor for Terms.
Caching previously-constructed Terms is useful because it allows us to
only compute equivalent sub-expressions once when traversing a Pipeline
dependency graph.
Caching previously-constructed Terms is **sane** because terms and
their inputs are both conceptually immutable.
"""
# Subclasses can override these class-level attributes to provide
# different default values for instances.
if domain is NotSpecified:
domain = cls.domain
if dtype is NotSpecified:
dtype = cls.dtype
if missing_value is NotSpecified:
missing_value = cls.missing_value
if ndim is NotSpecified:
ndim = cls.ndim
if window_safe is NotSpecified:
window_safe = cls.window_safe
dtype, missing_value = validate_dtype(
cls.__name__,
dtype,
missing_value,
)
params = cls._pop_params(kwargs)
identity = cls._static_identity(
domain=domain,
dtype=dtype,
missing_value=missing_value,
window_safe=window_safe,
ndim=ndim,
params=params,
*args, **kwargs
)
try:
return cls._term_cache[identity]
except KeyError:
new_instance = cls._term_cache[identity] = \
super(Term, cls).__new__(cls)._init(
domain=domain,
dtype=dtype,
missing_value=missing_value,
window_safe=window_safe,
ndim=ndim,
params=params,
*args, **kwargs
)
return new_instance
@classmethod
def _pop_params(cls, kwargs):
"""
Pop entries from the `kwargs` passed to cls.__new__ based on the values
in `cls.params`.
Parameters
----------
kwargs : dict
The kwargs passed to cls.__new__.
Returns
-------
params : list[(str, object)]
A list of string, value pairs containing the entries in cls.params.
Raises
------
TypeError
Raised if any parameter values are not passed or not hashable.
"""
params = cls.params
if not isinstance(params, Mapping):
params = {k: NotSpecified for k in params}
param_values = []
for key, default_value in params.items():
try:
value = kwargs.pop(key, default_value)
if value is NotSpecified:
raise KeyError(key)
# Check here that the value is hashable so that we fail here
# instead of trying to hash the param values tuple later.
hash(value)
except KeyError:
raise TypeError(
"{typename} expected a keyword parameter {name!r}.".format(
typename=cls.__name__,
name=key
)
)
except TypeError:
# Value wasn't hashable.
raise TypeError(
"{typename} expected a hashable value for parameter "
"{name!r}, but got {value!r} instead.".format(
typename=cls.__name__,
name=key,
value=value,
)
)
param_values.append((key, value))
return tuple(param_values)
def __init__(self, *args, **kwargs):
"""
Noop constructor to play nicely with our caching __new__. Subclasses
should implement _init instead of this method.
When a class' __new__ returns an instance of that class, Python will
automatically call __init__ on the object, even if a new object wasn't
actually constructed. Because we memoize instances, we often return an
object that was already initialized from __new__, in which case we
don't want to call __init__ again.
Subclasses that need to initialize new instances should override _init,
which is guaranteed to be called only once.
"""
pass
@expect_types(key=Asset)
def __getitem__(self, key):
if isinstance(self, LoadableTerm):
raise NonSliceableTerm(term=self)
from .mixins import SliceMixin
slice_type = type(self)._with_mixin(SliceMixin)
return slice_type(self, key)
@classmethod
def _static_identity(cls,
domain,
dtype,
missing_value,
window_safe,
ndim,
params):
"""
Return the identity of the Term that would be constructed from the
given arguments.
Identities that compare equal will cause us to return a cached instance
rather than constructing a new one. We do this primarily because it
makes dependency resolution easier.
This is a classmethod so that it can be called from Term.__new__ to
determine whether to produce a new instance.
"""
return (cls, domain, dtype, missing_value, window_safe, ndim, params)
def _init(self, domain, dtype, missing_value, window_safe, ndim, params):
"""
Parameters
----------
domain : zipline.pipeline.domain.Domain
The domain of this term.
dtype : np.dtype
Dtype of this term's output.
missing_value : object
Missing value for this term.
ndim : 1 or 2
The dimensionality of this term.
params : tuple[(str, hashable)]
Tuple of key/value pairs of additional parameters.
"""
self.domain = domain
self.dtype = dtype
self.missing_value = missing_value
self.window_safe = window_safe
self.ndim = ndim
for name, value in params:
if hasattr(self, name):
raise TypeError(
"Parameter {name!r} conflicts with already-present"
" attribute with value {value!r}.".format(
name=name,
value=getattr(self, name),
)
)
# TODO: Consider setting these values as attributes and replacing
# the boilerplate in NumericalExpression, Rank, and
# PercentileFilter.
self.params = dict(params)
# Make sure that subclasses call super() in their _validate() methods
# by setting this flag. The base class implementation of _validate
# should set this flag to True.
self._subclass_called_super_validate = False
self._validate()
assert self._subclass_called_super_validate, (
"Term._validate() was not called.\n"
"This probably means that you overrode _validate"
" without calling super()."
)
del self._subclass_called_super_validate
return self
def _validate(self):
"""
Assert that this term is well-formed. This should be called exactly
once, at the end of Term._init().
"""
# mark that we got here to enforce that subclasses overriding _validate
# call super().
self._subclass_called_super_validate = True
def compute_extra_rows(self,
all_dates,
start_date,
end_date,
min_extra_rows):
"""
Calculate the number of extra rows needed to compute ``self``.
Must return at least ``min_extra_rows``, and the default implementation
is to just return ``min_extra_rows``. This is overridden by
downsampled terms to ensure that the first date computed is a
recomputation date.
Parameters
----------
all_dates : pd.DatetimeIndex
The trading sessions against which ``self`` will be computed.
start_date : pd.Timestamp
The first date for which final output is requested.
end_date : pd.Timestamp
The last date for which final output is requested.
min_extra_rows : int
The minimum number of extra rows required of ``self``, as
determined by other terms that depend on ``self``.
Returns
-------
extra_rows : int
The number of extra rows to compute. Must be at least
``min_extra_rows``.
"""
return min_extra_rows
@abstractproperty
def inputs(self):
"""
A tuple of other Terms needed as inputs for ``self``.
"""
raise NotImplementedError('inputs')
@abstractproperty
def windowed(self):
"""
Boolean indicating whether this term is a trailing-window computation.
"""
raise NotImplementedError('windowed')
@abstractproperty
def mask(self):
"""
A :class:`~zipline.pipeline.Filter` representing asset/date pairs to
        include while computing this Term. True means include; False means
        exclude.
"""
raise NotImplementedError('mask')
@abstractproperty
def dependencies(self):
"""
A dictionary mapping terms that must be computed before `self` to the
number of extra rows needed for those terms.
"""
raise NotImplementedError('dependencies')
def graph_repr(self):
"""A short repr to use when rendering GraphViz graphs.
"""
# Default graph_repr is just the name of the type.
return type(self).__name__
def recursive_repr(self):
"""A short repr to use when recursively rendering terms with inputs.
"""
# Default recursive_repr is just the name of the type.
return type(self).__name__
class AssetExists(Term):
"""
Pseudo-filter describing whether or not an asset existed on a given day.
This is the default mask for all terms that haven't been passed a mask
explicitly.
This is morally a Filter, in the sense that it produces a boolean value for
every asset on every date. We don't subclass Filter, however, because
`AssetExists` is computed directly by the PipelineEngine.
This term is guaranteed to be available as an input for any term computed
by SimplePipelineEngine.run_pipeline().
See Also
--------
zipline.assets.AssetFinder.lifetimes
"""
dtype = bool_dtype
dataset = None
inputs = ()
dependencies = {}
mask = None
windowed = False
def __repr__(self):
return "AssetExists()"
graph_repr = __repr__
def _compute(self, today, assets, out):
raise NotImplementedError(
"AssetExists cannot be computed directly."
" Check your PipelineEngine configuration."
)
class InputDates(Term):
"""
1-Dimensional term providing date labels for other term inputs.
This term is guaranteed to be available as an input for any term computed
by SimplePipelineEngine.run_pipeline().
"""
ndim = 1
dataset = None
dtype = datetime64ns_dtype
inputs = ()
dependencies = {}
mask = None
windowed = False
window_safe = True
def __repr__(self):
return "InputDates()"
graph_repr = __repr__
def _compute(self, today, assets, out):
raise NotImplementedError(
"InputDates cannot be computed directly."
" Check your PipelineEngine configuration."
)
class LoadableTerm(Term):
"""
A Term that should be loaded from an external resource by a PipelineLoader.
This is the base class for :class:`zipline.pipeline.data.BoundColumn`.
"""
windowed = False
inputs = ()
@lazyval
def dependencies(self):
return {self.mask: 0}
class ComputableTerm(Term):
"""
A Term that should be computed from a tuple of inputs.
This is the base class for :class:`zipline.pipeline.Factor`,
:class:`zipline.pipeline.Filter`, and :class:`zipline.pipeline.Classifier`.
"""
inputs = NotSpecified
outputs = NotSpecified
window_length = NotSpecified
mask = NotSpecified
domain = NotSpecified
def __new__(cls,
inputs=inputs,
outputs=outputs,
window_length=window_length,
mask=mask,
domain=domain,
*args, **kwargs):
if inputs is NotSpecified:
inputs = cls.inputs
# Having inputs = NotSpecified is an error, but we handle it later
# in self._validate rather than here.
if inputs is not NotSpecified:
# Allow users to specify lists as class-level defaults, but
# normalize to a tuple so that inputs is hashable.
inputs = tuple(inputs)
# Make sure all our inputs are valid pipeline objects before trying
# to infer a domain.
non_terms = [t for t in inputs if not isinstance(t, Term)]
if non_terms:
raise NonPipelineInputs(cls.__name__, non_terms)
if domain is NotSpecified:
domain = infer_domain(inputs)
if outputs is NotSpecified:
outputs = cls.outputs
if outputs is not NotSpecified:
outputs = tuple(outputs)
if mask is NotSpecified:
mask = cls.mask
if mask is NotSpecified:
mask = AssetExists()
if window_length is NotSpecified:
window_length = cls.window_length
return super(ComputableTerm, cls).__new__(
cls,
inputs=inputs,
outputs=outputs,
mask=mask,
window_length=window_length,
domain=domain,
*args, **kwargs
)
def _init(self, inputs, outputs, window_length, mask, *args, **kwargs):
self.inputs = inputs
self.outputs = outputs
self.window_length = window_length
self.mask = mask
return super(ComputableTerm, self)._init(*args, **kwargs)
@classmethod
def _static_identity(cls,
inputs,
outputs,
window_length,
mask,
*args,
**kwargs):
return (
super(ComputableTerm, cls)._static_identity(*args, **kwargs),
inputs,
outputs,
window_length,
mask,
)
def _validate(self):
super(ComputableTerm, self)._validate()
# Check inputs.
if self.inputs is NotSpecified:
raise TermInputsNotSpecified(termname=type(self).__name__)
if not isinstance(self.domain, Domain):
raise TypeError(
"Expected {}.domain to be an instance of Domain, "
"but got {}.".format(type(self).__name__, type(self.domain))
)
# Check outputs.
if self.outputs is NotSpecified:
pass
elif not self.outputs:
raise TermOutputsEmpty(termname=type(self).__name__)
else:
# Raise an exception if there are any naming conflicts between the
# term's output names and certain attributes.
disallowed_names = [
attr for attr in dir(ComputableTerm)
if not attr.startswith('_')
]
# The name 'compute' is an added special case that is disallowed.
# Use insort to add it to the list in alphabetical order.
insort(disallowed_names, 'compute')
for output in self.outputs:
if output.startswith('_') or output in disallowed_names:
raise InvalidOutputName(
output_name=output,
termname=type(self).__name__,
disallowed_names=disallowed_names,
)
if self.window_length is NotSpecified:
raise WindowLengthNotSpecified(termname=type(self).__name__)
if self.mask is NotSpecified:
# This isn't user error, this is a bug in our code.
raise AssertionError("{term} has no mask".format(term=self))
if self.window_length > 1:
for child in self.inputs:
if not child.window_safe:
raise NonWindowSafeInput(parent=self, child=child)
def _compute(self, inputs, dates, assets, mask):
"""
Subclasses should implement this to perform actual computation.
This is named ``_compute`` rather than just ``compute`` because
``compute`` is reserved for user-supplied functions in
CustomFilter/CustomFactor/CustomClassifier.
"""
raise NotImplementedError('_compute')
# NOTE: This is a method rather than a property because ABCMeta tries to
# access all abstract attributes of its child classes to see if
# they've been implemented. These accesses happen during subclass
# creation, before the new subclass has been bound to a name in its
# defining scope. Filter, Factor, and Classifier each implement this
# method to return themselves, but if the method is invoked before
# class definition is finished (which happens if this is a property),
# they fail with a NameError.
@classmethod
@abstractmethod
def _principal_computable_term_type(cls):
"""
Return the "principal" type for a ComputableTerm.
This returns either Filter, Factor, or Classifier, depending on the
type of ``cls``. It is used to implement behaviors like ``downsample``
and ``if_then_else`` that are implemented on all ComputableTerms, but
that need to produce different output types depending on the type of
the receiver.
"""
raise NotImplementedError('_principal_computable_term_type')
@lazyval
def windowed(self):
"""
Whether or not this term represents a trailing window computation.
If term.windowed is truthy, its compute_from_windows method will be
called with instances of AdjustedArray as inputs.
If term.windowed is falsey, its compute_from_baseline will be called
with instances of np.ndarray as inputs.
"""
return (
self.window_length is not NotSpecified
and self.window_length > 0
)
@lazyval
def dependencies(self):
"""
The number of extra rows needed for each of our inputs to compute this
term.
"""
extra_input_rows = max(0, self.window_length - 1)
out = {}
for term in self.inputs:
out[term] = extra_input_rows
out[self.mask] = 0
return out
@expect_types(data=ndarray)
def postprocess(self, data):
"""
        Called with a result of ``self``, unravelled (i.e. 1-dimensional)
after any user-defined screens have been applied.
This is mostly useful for transforming the dtype of an output, e.g., to
convert a LabelArray into a pandas Categorical.
The default implementation is to just return data unchanged.
"""
return data
def to_workspace_value(self, result, assets):
"""
Called with a column of the result of a pipeline. This needs to put
the data into a format that can be used in a workspace to continue
doing computations.
Parameters
----------
result : pd.Series
A multiindexed series with (dates, assets) whose values are the
results of running this pipeline term over the dates.
assets : pd.Index
All of the assets being requested. This allows us to correctly
shape the workspace value.
Returns
-------
workspace_value : array-like
An array like value that the engine can consume.
"""
return result.unstack().fillna(self.missing_value).reindex(
columns=assets,
fill_value=self.missing_value,
).values
@expect_downsample_frequency
@templated_docstring(frequency=PIPELINE_DOWNSAMPLING_FREQUENCY_DOC)
def downsample(self, frequency):
"""
Make a term that computes from ``self`` at lower-than-daily frequency.
Parameters
----------
{frequency}
"""
from .mixins import DownsampledMixin
downsampled_type = type(self)._with_mixin(DownsampledMixin)
return downsampled_type(term=self, frequency=frequency)
@templated_docstring(name=PIPELINE_ALIAS_NAME_DOC)
def alias(self, name):
"""
Make a term from ``self`` that names the expression.
Parameters
----------
{name}
Returns
-------
aliased : Aliased
``self`` with a name.
Notes
-----
This is useful for giving a name to a numerical or boolean expression.
"""
from .mixins import AliasedMixin
aliased_type = type(self)._with_mixin(AliasedMixin)
return aliased_type(term=self, name=name)
def isnull(self):
"""
A Filter producing True for values where this Factor has missing data.
Equivalent to self.isnan() when ``self.dtype`` is float64.
Otherwise equivalent to ``self.eq(self.missing_value)``.
Returns
-------
filter : zipline.pipeline.Filter
"""
if self.dtype == bool_dtype:
raise TypeError("isnull() is not supported for Filters")
from .filters import NullFilter
if self.dtype == float64_dtype:
# Using isnan is more efficient when possible because we can fold
# the isnan computation with other NumExpr expressions.
return self.isnan()
else:
return NullFilter(self)
def notnull(self):
"""
A Filter producing True for values where this Factor has complete data.
        Equivalent to ``~self.isnan()`` when ``self.dtype`` is float64.
Otherwise equivalent to ``(self != self.missing_value)``.
Returns
-------
filter : zipline.pipeline.Filter
"""
if self.dtype == bool_dtype:
raise TypeError("notnull() is not supported for Filters")
from .filters import NotNullFilter
return NotNullFilter(self)
def fillna(self, fill_value):
"""
Create a new term that fills missing values of this term's output with
``fill_value``.
Parameters
----------
fill_value : zipline.pipeline.ComputableTerm, or object.
Object to use as replacement for missing values.
If a ComputableTerm (e.g. a Factor) is passed, that term's results
will be used as fill values.
If a scalar (e.g. a number) is passed, the scalar will be used as a
fill value.
Examples
--------
**Filling with a Scalar:**
Let ``f`` be a Factor which would produce the following output::
AAPL MSFT MCD BK
2017-03-13 1.0 NaN 3.0 4.0
2017-03-14 1.5 2.5 NaN NaN
Then ``f.fillna(0)`` produces the following output::
AAPL MSFT MCD BK
2017-03-13 1.0 0.0 3.0 4.0
2017-03-14 1.5 2.5 0.0 0.0
**Filling with a Term:**
Let ``f`` be as above, and let ``g`` be another Factor which would
produce the following output::
AAPL MSFT MCD BK
2017-03-13 10.0 20.0 30.0 40.0
2017-03-14 15.0 25.0 35.0 45.0
Then, ``f.fillna(g)`` produces the following output::
AAPL MSFT MCD BK
2017-03-13 1.0 20.0 3.0 4.0
2017-03-14 1.5 2.5 35.0 45.0
Returns
-------
filled : zipline.pipeline.ComputableTerm
A term computing the same results as ``self``, but with missing
values filled in using values from ``fill_value``.
"""
if self.dtype == bool_dtype:
raise TypeError("fillna() is not supported for Filters")
if isinstance(fill_value, LoadableTerm):
raise TypeError(
"Can't use expression {} as a fill value. Did you mean to "
"append '.latest?'".format(fill_value)
)
elif isinstance(fill_value, ComputableTerm):
if_false = fill_value
else:
# Assume we got a scalar value. Make sure it's compatible with our
# dtype.
try:
fill_value = _coerce_to_dtype(fill_value, self.dtype)
except TypeError as e:
raise TypeError(
"Fill value {value!r} is not a valid choice "
"for term {termname} with dtype {dtype}.\n\n"
"Coercion attempt failed with: {error}".format(
termname=type(self).__name__,
value=fill_value,
dtype=self.dtype,
error=e,
)
)
if_false = self._constant_type(
const=fill_value,
dtype=self.dtype,
missing_value=self.missing_value,
)
return self.notnull().if_else(if_true=self, if_false=if_false)
@classlazyval
def _constant_type(cls):
from .mixins import ConstantMixin
return cls._with_mixin(ConstantMixin)
@classlazyval
def _if_else_type(cls):
from .mixins import IfElseMixin
return cls._with_mixin(IfElseMixin)
def __repr__(self):
return (
"{type}([{inputs}], {window_length})"
).format(
type=type(self).__name__,
inputs=', '.join(i.recursive_repr() for i in self.inputs),
window_length=self.window_length,
)
def recursive_repr(self):
return type(self).__name__ + '(...)'
@classmethod
def _with_mixin(cls, mixin_type):
return mixin_type.universal_mixin_specialization(
cls._principal_computable_term_type(),
)
def validate_dtype(termname, dtype, missing_value):
"""
Validate a `dtype` and `missing_value` passed to Term.__new__.
Ensures that we know how to represent ``dtype``, and that missing_value
is specified for types without default missing values.
Returns
-------
validated_dtype, validated_missing_value : np.dtype, any
The dtype and missing_value to use for the new term.
Raises
------
DTypeNotSpecified
When no dtype was passed to the instance, and the class doesn't
provide a default.
NotDType
When either the class or the instance provides a value not
coercible to a numpy dtype.
NoDefaultMissingValue
When dtype requires an explicit missing_value, but
``missing_value`` is NotSpecified.
"""
if dtype is NotSpecified:
raise DTypeNotSpecified(termname=termname)
try:
dtype = dtype_class(dtype)
except TypeError:
raise NotDType(dtype=dtype, termname=termname)
if not can_represent_dtype(dtype):
raise UnsupportedDType(dtype=dtype, termname=termname)
if missing_value is NotSpecified:
missing_value = default_missing_value_for_dtype(dtype)
try:
_coerce_to_dtype(missing_value, dtype)
except TypeError as e:
raise TypeError(
"Missing value {value!r} is not a valid choice "
"for term {termname} with dtype {dtype}.\n\n"
"Coercion attempt failed with: {error}".format(
termname=termname,
value=missing_value,
dtype=dtype,
error=e,
)
)
return dtype, missing_value
def _assert_valid_categorical_missing_value(value):
"""
Check that value is a valid categorical missing_value.
    Raises a TypeError if the value cannot be used as the missing_value for
a categorical_dtype Term.
"""
label_types = LabelArray.SUPPORTED_SCALAR_TYPES
if not isinstance(value, label_types):
raise TypeError(
"String-dtype classifiers can only produce strings or None."
.format(types=' or '.join([t.__name__ for t in label_types]))
)
def _coerce_to_dtype(value, dtype):
if dtype == categorical_dtype:
# This check is necessary because we use object dtype for
# categoricals, and numpy will allow us to promote numerical
# values to object even though we don't support them.
_assert_valid_categorical_missing_value(value)
return value
else:
# For any other type, cast using the same rules as numpy's astype
# function with casting='same_kind'.
#
# 'same_kind' allows casting between things like float32 and float64,
# but not between str and int. Note that the name is somewhat
# misleading, since it does allow conversion between different dtype
# kinds in some cases. In particular, conversion from int to float is
# allowed.
return array([value]).astype(dtype=dtype, casting='same_kind')[0] | zipline-trader | /zipline_trader-1.6.1-cp36-cp36m-win32.whl/zipline/pipeline/term.py | term.py |
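# Editor's sketch (assumed example, not part of the original module): the
# 'same_kind' casting rule used in _coerce_to_dtype allows numeric widening
# such as int -> float64 but rejects cross-kind casts such as str -> float64,
# which is what ultimately surfaces as the "not a valid choice" TypeErrors
# raised by validate_dtype() and ComputableTerm.fillna() above.
def _example_coerce_to_dtype():
    # int -> float64 is allowed under 'same_kind'.
    assert _coerce_to_dtype(3, dtype_class('float64')) == 3.0
    # str -> float64 is not, so a TypeError propagates to the caller.
    try:
        _coerce_to_dtype('not-a-number', dtype_class('float64'))
    except TypeError:
        return True
    return False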
from interface import implements
from zipline.utils.compat import ExitStack, contextmanager, wraps
from .iface import PipelineHooks, PIPELINE_HOOKS_CONTEXT_MANAGERS
from .no import NoHooks
def delegating_hooks_method(method_name):
"""Factory function for making DelegatingHooks methods.
"""
if method_name in PIPELINE_HOOKS_CONTEXT_MANAGERS:
# Generate a contextmanager that enters the context of all child hooks.
@wraps(getattr(PipelineHooks, method_name))
@contextmanager
def ctx(self, *args, **kwargs):
with ExitStack() as stack:
for hook in self._hooks:
sub_ctx = getattr(hook, method_name)(*args, **kwargs)
stack.enter_context(sub_ctx)
yield stack
return ctx
else:
# Generate a method that calls methods of all child hooks.
@wraps(getattr(PipelineHooks, method_name))
def method(self, *args, **kwargs):
for hook in self._hooks:
sub_method = getattr(hook, method_name)
sub_method(*args, **kwargs)
return method
class DelegatingHooks(implements(PipelineHooks)):
"""A PipelineHooks that delegates to one or more other hooks.
Parameters
----------
hooks : list[implements(PipelineHooks)]
Sequence of hooks to delegate to.
"""
def __new__(cls, hooks):
if len(hooks) == 0:
# OPTIMIZATION: Short-circuit to a NoHooks if we don't have any
# sub-hooks.
return NoHooks()
elif len(hooks) == 1:
# OPTIMIZATION: Unwrap delegation layer if we only have one
# sub-hook.
return hooks[0]
else:
self = super(DelegatingHooks, cls).__new__(cls)
self._hooks = hooks
return self
# Implement all interface methods by delegating to corresponding methods on
# input hooks.
locals().update({
name: delegating_hooks_method(name)
# TODO: Expose this publicly on interface.
for name in PipelineHooks._signatures
})
del delegating_hooks_method | zipline-trader | /zipline_trader-1.6.1-cp36-cp36m-win32.whl/zipline/pipeline/hooks/delegate.py | delegate.py |
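# Editor's sketch (illustrative, not part of the original module): because of
# the __new__ shortcuts above, DelegatingHooks does not always hand back a
# DelegatingHooks instance -- an empty list collapses to NoHooks and a single
# sub-hook is returned unwrapped.
def _example_delegating_hooks_shortcuts():
    assert isinstance(DelegatingHooks([]), NoHooks)
    only_hook = NoHooks()
    assert DelegatingHooks([only_hook]) is only_hook
    combined = DelegatingHooks([NoHooks(), NoHooks()])
    assert isinstance(combined, DelegatingHooks)
    return combined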
from collections import namedtuple
import time
from interface import implements
from zipline.utils.compat import contextmanager, escape_html
from zipline.utils.string_formatting import bulleted_list
from .iface import PipelineHooks
class ProgressHooks(implements(PipelineHooks)):
"""
Hooks implementation for displaying progress.
Parameters
----------
publisher_factory : callable
Function producing a new object with a ``publish()`` method that takes
a ``ProgressModel`` and publishes progress to a consumer.
"""
def __init__(self, publisher_factory):
self._publisher_factory = publisher_factory
self._reset_transient_state()
def _reset_transient_state(self):
self._start_date = None
self._end_date = None
self._model = None
self._publisher = None
@classmethod
def with_widget_publisher(cls):
"""
Construct a ProgressHooks that publishes to Jupyter via
``IPython.display``.
"""
return cls(publisher_factory=IPythonWidgetProgressPublisher)
@classmethod
def with_static_publisher(cls, publisher):
"""Construct a ProgressHooks that uses an already-constructed publisher.
"""
return cls(publisher_factory=lambda: publisher)
def _publish(self):
self._publisher.publish(self._model)
@contextmanager
def running_pipeline(self, pipeline, start_date, end_date):
self._start_date = start_date
self._end_date = end_date
try:
yield
except Exception:
if self._model is None:
# This will only happen if an error happens in the Pipeline
                # Engine between entering `running_pipeline` and the first
# `computing_chunk` call. If that happens, just propagate the
# exception.
raise
self._model.finish(success=False)
self._publish()
raise
else:
self._model.finish(success=True)
self._publish()
finally:
self._reset_transient_state()
@contextmanager
def computing_chunk(self, terms, start_date, end_date):
# Set up model on first compute_chunk call.
if self._model is None:
self._publisher = self._publisher_factory()
self._model = ProgressModel(
start_date=self._start_date,
end_date=self._end_date,
)
try:
self._model.start_chunk(terms, start_date, end_date)
self._publish()
yield
finally:
self._model.finish_chunk(terms, start_date, end_date)
self._publish()
@contextmanager
def loading_terms(self, terms):
try:
self._model.start_load_terms(terms)
self._publish()
yield
finally:
self._model.finish_load_terms(terms)
self._publish()
@contextmanager
def computing_term(self, term):
try:
self._model.start_compute_term(term)
self._publish()
yield
finally:
self._model.finish_compute_term(term)
self._publish()
class ProgressModel(object):
"""
Model object for tracking progress of a Pipeline execution.
Parameters
----------
nterms : int
Number of terms in the execution plan of the Pipeline being run.
start_date : pd.Timestamp
Start date of the range over which ``plan`` will be computed.
end_date : pd.Timestamp
End date of the range over which ``plan`` will be computed.
Methods
-------
    start_chunk(terms, start_date, end_date)
    finish_chunk(terms, start_date, end_date)
start_load_terms(terms)
finish_load_terms(terms)
start_compute_term(term)
finish_compute_term(term)
finish(success)
Attributes
----------
state : {'init', 'loading', 'computing', 'error', 'success'}
Current state of the execution.
percent_complete : float
Percent of execution that has been completed, on a scale from 0 to 100.
execution_time : float
Number of seconds that the execution required. Only available if state
is 'error' or 'success'.
execution_bounds : (pd.Timestamp, pd.Timestamp)
Pair of (start_date, end_date) for the entire execution.
current_chunk_bounds : (pd.Timestamp, pd.Timestamp)
Pair of (start_date, end_date) for the currently executing chunk.
current_work : [zipline.pipeline.Term]
List of terms currently being loaded or computed.
"""
def __init__(self, start_date, end_date):
self._start_date = start_date
self._end_date = end_date
# +1 to be inclusive of end_date.
self._total_days = (end_date - start_date).days + 1
self._progress = 0.0
self._days_completed = 0
self._state = 'init'
# Number of days in current chunk.
self._current_chunk_size = None
# (start_date, end_date) of current chunk.
self._current_chunk_bounds = None
# How much should we increment progress by after completing a term?
self._completed_term_increment = None
# How much should we increment progress by after completing a chunk?
# This is zero unless we compute a pipeline with no terms, in which
# case it will be the full chunk percentage.
self._completed_chunk_increment = None
# Terms currently being computed.
self._current_work = None
# Tracking state for total elapsed time.
self._start_time = time.time()
self._end_time = None
# These properties form the interface for Publishers.
@property
def state(self):
return self._state
@property
def percent_complete(self):
return round(self._progress * 100.0, 3)
@property
def execution_time(self):
if self._end_time is None:
raise ValueError(
"Can't get execution_time until execution is complete."
)
return self._end_time - self._start_time
@property
def execution_bounds(self):
return (self._start_date, self._end_date)
@property
def current_chunk_bounds(self):
return self._current_chunk_bounds
@property
def current_work(self):
return self._current_work
# These methods form the interface for ProgressHooks.
def start_chunk(self, terms, start_date, end_date):
days_since_start = (end_date - self._start_date).days + 1
self._current_chunk_size = days_since_start - self._days_completed
self._current_chunk_bounds = (start_date, end_date)
# What percent of our overall progress will happen in this chunk?
chunk_percent = float(self._current_chunk_size) / self._total_days
# How much of that is associated with each completed term?
nterms = len(terms)
if nterms:
self._completed_term_increment = chunk_percent / len(terms)
self._completed_chunk_increment = 0.0
else:
# Special case. If we don't have any terms, increment the entire
# chunk's worth of progress when we finish the chunk.
self._completed_term_increment = 0.0
self._completed_chunk_increment = chunk_percent
def finish_chunk(self, terms, start_date, end_date):
self._days_completed += self._current_chunk_size
self._progress += self._completed_chunk_increment
def start_load_terms(self, terms):
self._state = 'loading'
self._current_work = terms
def finish_load_terms(self, terms):
self._finish_terms(nterms=len(terms))
def start_compute_term(self, term):
self._state = 'computing'
self._current_work = [term]
def finish_compute_term(self, term):
self._finish_terms(nterms=1)
def finish(self, success):
self._end_time = time.time()
if success:
self._state = 'success'
else:
self._state = 'error'
def _finish_terms(self, nterms):
self._progress += nterms * self._completed_term_increment
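# Editor's sketch of the progress arithmetic above (hypothetical numbers, not
# part of the original module): a 10-day execution whose first chunk spans 5
# days and loads two terms advances by 5/10 * 1/2 = 0.25 per finished term.
def _example_progress_arithmetic():
    from datetime import date
    model = ProgressModel(start_date=date(2016, 1, 1),
                          end_date=date(2016, 1, 10))      # 10 days total
    model.start_chunk(['term_a', 'term_b'],
                      date(2016, 1, 1), date(2016, 1, 5))  # 5-day chunk
    model.start_load_terms(['term_a', 'term_b'])
    model.finish_load_terms(['term_a', 'term_b'])
    return model.percent_complete                          # 50.0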
try:
import ipywidgets
HAVE_WIDGETS = True
    # This VBox subclass exists to work around a strange display issue
# where the repr of the progress bar sometimes gets re-displayed upon
# re-opening the notebook, even after the bar has closed. The repr of VBox
# is somewhat noisy, so we replace it here with a version that just returns
# an empty string.
class ProgressBarContainer(ipywidgets.VBox):
def __repr__(self):
return ""
except ImportError:
HAVE_WIDGETS = False
try:
from IPython.display import display, HTML as IPython_HTML
HAVE_IPYTHON = True
except ImportError:
HAVE_IPYTHON = False
# XXX: This class is currently untested, because we don't require ipywidgets as
# a test dependency. Be careful if you make changes to this.
class IPythonWidgetProgressPublisher(object):
"""A progress publisher that publishes to an IPython/Jupyter widget.
"""
def __init__(self):
missing = []
if not HAVE_WIDGETS:
missing.append('ipywidgets')
elif not HAVE_IPYTHON:
missing.append('IPython')
if missing:
raise ValueError(
"IPythonWidgetProgressPublisher needs ipywidgets and IPython:"
"\nMissing:\n{}".format(bulleted_list(missing))
)
# Heading for progress display.
self._heading = ipywidgets.HTML()
# Percent Complete Indicator to the left of the bar.
indicator_width = '120px'
self._percent_indicator = ipywidgets.HTML(
layout={'width': indicator_width},
)
# The progress bar itself.
self._bar = ipywidgets.FloatProgress(
value=0.0,
min=0.0,
max=100.0,
bar_style='info',
# Leave enough space for the percent indicator.
layout={'width': 'calc(100% - {})'.format(indicator_width)},
)
bar_and_percent = ipywidgets.HBox([self._percent_indicator, self._bar])
# Collapsable details tab underneath the progress bar.
self._details_body = ipywidgets.HTML()
self._details_tab = ipywidgets.Accordion(
children=[self._details_body],
selected_index=None, # Start in collapsed state.
layout={
# Override default border settings to make details tab less
# heavy.
'border': '1px',
},
)
# There's no public interface for setting title in the constructor :/.
self._details_tab.set_title(0, 'Details')
# Container for the combined widget.
self._layout = ProgressBarContainer(
[
self._heading,
bar_and_percent,
self._details_tab,
],
# Overall layout consumes 75% of the page.
layout={'width': '75%'},
)
self._displayed = False
def publish(self, model):
if model.state == 'init':
self._heading.value = '<b>Analyzing Pipeline...</b>'
self._set_progress(0.0)
self._ensure_displayed()
elif model.state in ('loading', 'computing'):
term_list = self._render_term_list(model.current_work)
if model.state == 'loading':
details_heading = '<b>Loading Inputs:</b>'
else:
details_heading = '<b>Computing Expression:</b>'
self._details_body.value = details_heading + term_list
chunk_start, chunk_end = model.current_chunk_bounds
self._heading.value = (
"<b>Running Pipeline</b>: Chunk Start={}, Chunk End={}"
.format(chunk_start.date(), chunk_end.date())
)
self._set_progress(model.percent_complete)
self._ensure_displayed()
elif model.state == 'success':
# Replace widget layout with html that can be persisted.
self._stop_displaying()
display(
IPython_HTML("<b>Pipeline Execution Time:</b> {}".format(
self._format_execution_time(model.execution_time)
)),
)
elif model.state == 'error':
self._bar.bar_style = 'danger'
self._stop_displaying()
else:
self._layout.close()
raise ValueError('Unknown display state: {!r}'.format(model.state))
def _ensure_displayed(self):
if not self._displayed:
display(self._layout)
self._displayed = True
def _stop_displaying(self):
self._layout.close()
@staticmethod
def _render_term_list(terms):
list_elements = ''.join([
'<li><pre>{}</pre></li>'.format(repr_htmlsafe(t))
for t in terms
])
return '<ul>{}</ul>'.format(list_elements)
def _set_progress(self, percent_complete):
self._bar.value = percent_complete
self._percent_indicator.value = (
"<b>{:.2f}% Complete</b>".format(percent_complete)
)
@staticmethod
def _format_execution_time(total_seconds):
"""Helper method for displaying total execution time of a Pipeline.
Parameters
----------
total_seconds : float
Number of seconds elapsed.
Returns
-------
formatted : str
User-facing text representation of elapsed time.
"""
def maybe_s(n):
if n == 1:
return ''
return 's'
minutes, seconds = divmod(total_seconds, 60)
minutes = int(minutes)
if minutes >= 60:
hours, minutes = divmod(minutes, 60)
t = "{hours} Hour{hs}, {minutes} Minute{ms}, {seconds:.2f} Seconds"
return t.format(
hours=hours, hs=maybe_s(hours),
minutes=minutes, ms=maybe_s(minutes),
seconds=seconds,
)
elif minutes >= 1:
t = "{minutes} Minute{ms}, {seconds:.2f} Seconds"
return t.format(
minutes=minutes,
ms=maybe_s(minutes),
seconds=seconds,
)
else:
return "{seconds:.2f} Seconds".format(seconds=seconds)
class TestingProgressPublisher(object):
"""A progress publisher that records a trace of model states for testing.
"""
TraceState = namedtuple('TraceState', [
'state',
'percent_complete',
'execution_bounds',
'current_chunk_bounds',
'current_work',
])
def __init__(self):
self.trace = []
def publish(self, model):
self.trace.append(
self.TraceState(
state=model.state,
percent_complete=model.percent_complete,
execution_bounds=model.execution_bounds,
current_chunk_bounds=model.current_chunk_bounds,
current_work=model.current_work
),
)
def repr_htmlsafe(t):
"""Repr a value and html-escape the result.
If an error is thrown by the repr, show a placeholder.
"""
try:
r = repr(t)
except Exception:
r = "(Error Displaying {})".format(type(t).__name__)
return escape_html(str(r), quote=True) | zipline-trader | /zipline_trader-1.6.1-cp36-cp36m-win32.whl/zipline/pipeline/hooks/progress.py | progress.py |
from zipline.utils.compat import contextmanager as _contextmanager
from interface import Interface
# Keep track of which methods of PipelineHooks are contextmanagers. Used by
# DelegatingHooks to properly delegate to sub-hooks.
PIPELINE_HOOKS_CONTEXT_MANAGERS = set()
def contextmanager(f):
"""
Wrapper for contextlib.contextmanager that tracks which methods of
PipelineHooks are contextmanagers in CONTEXT_MANAGER_METHODS.
"""
PIPELINE_HOOKS_CONTEXT_MANAGERS.add(f.__name__)
return _contextmanager(f)
class PipelineHooks(Interface):
"""
Interface for instrumenting SimplePipelineEngine executions.
Methods with names like 'on_event()' should be normal methods. They will be
called by the engine after the corresponding event.
Methods with names like 'doing_thing()' should be context managers. They
will be entered by the engine around the corresponding event.
Methods
-------
    running_pipeline(self, pipeline, start_date, end_date)
computing_chunk(self, terms, start_date, end_date)
loading_terms(self, terms)
    computing_term(self, term)
"""
@contextmanager
def running_pipeline(self, pipeline, start_date, end_date):
"""
Contextmanager entered during execution of run_pipeline or
run_chunked_pipeline.
Parameters
----------
pipeline : zipline.pipeline.Pipeline
The pipeline being executed.
start_date : pd.Timestamp
First date of the execution.
end_date : pd.Timestamp
Last date of the execution.
"""
@contextmanager
def computing_chunk(self, terms, start_date, end_date):
"""
Contextmanager entered during execution of compute_chunk.
Parameters
----------
terms : list[zipline.pipeline.Term]
List of terms, in execution order, that will be computed. This
value may change between chunks if ``populate_initial_workspace``
prepopulates different terms at different times.
start_date : pd.Timestamp
First date of the chunk.
end_date : pd.Timestamp
Last date of the chunk.
"""
@contextmanager
def loading_terms(self, terms):
"""Contextmanager entered when loading a batch of LoadableTerms.
Parameters
----------
terms : list[zipline.pipeline.LoadableTerm]
Terms being loaded.
"""
@contextmanager
def computing_term(self, term):
"""Contextmanager entered when computing a ComputableTerm.
Parameters
----------
        term : zipline.pipeline.ComputableTerm
            Term being computed.
""" | zipline-trader | /zipline_trader-1.6.1-cp36-cp36m-win32.whl/zipline/pipeline/hooks/iface.py | iface.py |
from __future__ import division
from numpy import (
abs,
average,
clip,
diff,
dstack,
inf,
)
from numexpr import evaluate
from zipline.pipeline.data import EquityPricing
from zipline.pipeline.factors import CustomFactor
from zipline.pipeline.mixins import SingleInputMixin
from zipline.utils.input_validation import expect_bounded
from zipline.utils.math_utils import (
nanargmax,
nanargmin,
nanmax,
nanmean,
nanstd,
nanmin,
)
from zipline.utils.numpy_utils import rolling_window
from .basic import exponential_weights
from .basic import ( # noqa reexport
# These are re-exported here for backwards compatibility with the old
# definition site.
LinearWeightedMovingAverage,
MaxDrawdown,
SimpleMovingAverage,
VWAP,
WeightedAverageValue
)
class RSI(SingleInputMixin, CustomFactor):
"""
Relative Strength Index
**Default Inputs**: :data:`zipline.pipeline.data.EquityPricing.close`
**Default Window Length**: 15
"""
window_length = 15
inputs = (EquityPricing.close,)
window_safe = True
def compute(self, today, assets, out, closes):
diffs = diff(closes, axis=0)
ups = nanmean(clip(diffs, 0, inf), axis=0)
downs = abs(nanmean(clip(diffs, -inf, 0), axis=0))
return evaluate(
"100 - (100 / (1 + (ups / downs)))",
local_dict={'ups': ups, 'downs': downs},
global_dict={},
out=out,
)
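# Example usage (editor's sketch, not part of the original module): the class
# defaults above supply both the input column and the 15-day window, so RSI
# can be constructed with no arguments and screened on like any other factor:
#
#     from zipline.pipeline import Pipeline
#     rsi = RSI()
#     oversold = Pipeline(columns={'rsi': rsi}, screen=(rsi < 30))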
class BollingerBands(CustomFactor):
"""
Bollinger Bands technical indicator.
https://en.wikipedia.org/wiki/Bollinger_Bands
**Default Inputs:** :data:`zipline.pipeline.data.EquityPricing.close`
Parameters
----------
inputs : length-1 iterable[BoundColumn]
The expression over which to compute bollinger bands.
window_length : int > 0
Length of the lookback window over which to compute the bollinger
bands.
k : float
The number of standard deviations to add or subtract to create the
upper and lower bands.
"""
params = ('k',)
inputs = (EquityPricing.close,)
outputs = 'lower', 'middle', 'upper'
def compute(self, today, assets, out, close, k):
difference = k * nanstd(close, axis=0)
out.middle = middle = nanmean(close, axis=0)
out.upper = middle + difference
out.lower = middle - difference
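# Editor's sketch (not part of the original module): each entry in ``outputs``
# is addressable as an attribute of the factor, so the three bands can be
# requested as separate pipeline columns.
def _example_bollinger_columns():
    bbands = BollingerBands(window_length=20, k=2)
    return {
        'lower': bbands.lower,
        'middle': bbands.middle,
        'upper': bbands.upper,
    }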
class Aroon(CustomFactor):
"""
Aroon technical indicator.
https://www.fidelity.com/learning-center/trading-investing/technical-analysis/technical-indicator-guide/aroon-indicator
    **Default Inputs:** :data:`zipline.pipeline.data.EquityPricing.low`, \
:data:`zipline.pipeline.data.EquityPricing.high`
Parameters
----------
window_length : int > 0
Length of the lookback window over which to compute the Aroon
indicator.
""" # noqa
inputs = (EquityPricing.low, EquityPricing.high)
outputs = ('down', 'up')
def compute(self, today, assets, out, lows, highs):
wl = self.window_length
high_date_index = nanargmax(highs, axis=0)
low_date_index = nanargmin(lows, axis=0)
evaluate(
'(100 * high_date_index) / (wl - 1)',
local_dict={
'high_date_index': high_date_index,
'wl': wl,
},
out=out.up,
)
evaluate(
'(100 * low_date_index) / (wl - 1)',
local_dict={
'low_date_index': low_date_index,
'wl': wl,
},
out=out.down,
)
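# Worked example (editor's sketch with hypothetical data): with
# window_length=25 (a common choice), wl - 1 == 24. If the highest high falls
# on the most recent bar, high_date_index == 24 and ``up`` is
# 100 * 24 / 24 == 100; if the lowest low fell on the oldest bar,
# low_date_index == 0 and ``down`` is 0.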
class FastStochasticOscillator(CustomFactor):
"""
Fast Stochastic Oscillator Indicator [%K, Momentum Indicator]
https://wiki.timetotrade.eu/Stochastic
This stochastic is considered volatile, and varies a lot when used in
market analysis. It is recommended to use the slow stochastic oscillator
or a moving average of the %K [%D].
**Default Inputs:** :data:`zipline.pipeline.data.EquityPricing.close`, \
:data:`zipline.pipeline.data.EquityPricing.low`, \
:data:`zipline.pipeline.data.EquityPricing.high`
**Default Window Length:** 14
Returns
-------
out: %K oscillator
"""
inputs = (EquityPricing.close, EquityPricing.low, EquityPricing.high)
window_safe = True
window_length = 14
def compute(self, today, assets, out, closes, lows, highs):
highest_highs = nanmax(highs, axis=0)
lowest_lows = nanmin(lows, axis=0)
today_closes = closes[-1]
evaluate(
'((tc - ll) / (hh - ll)) * 100',
local_dict={
'tc': today_closes,
'll': lowest_lows,
'hh': highest_highs,
},
global_dict={},
out=out,
)
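# Worked example (editor's sketch with hypothetical prices): if over the
# 14-day window the lowest low is 100, the highest high is 110, and today's
# close is 105, then %K == ((105 - 100) / (110 - 100)) * 100 == 50.0.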
class IchimokuKinkoHyo(CustomFactor):
"""Compute the various metrics for the Ichimoku Kinko Hyo (Ichimoku Cloud).
http://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:ichimoku_cloud
**Default Inputs:** :data:`zipline.pipeline.data.EquityPricing.high`, \
:data:`zipline.pipeline.data.EquityPricing.low`, \
:data:`zipline.pipeline.data.EquityPricing.close`
**Default Window Length:** 52
Parameters
----------
window_length : int > 0
        The length of the window for the senkou span b.
tenkan_sen_length : int >= 0, <= window_length
The length of the window for the tenkan-sen.
kijun_sen_length : int >= 0, <= window_length
        The length of the window for the kijun-sen.
chikou_span_length : int >= 0, <= window_length
The lag for the chikou span.
""" # noqa
params = {
'tenkan_sen_length': 9,
'kijun_sen_length': 26,
'chikou_span_length': 26,
}
inputs = (EquityPricing.high, EquityPricing.low, EquityPricing.close)
outputs = (
'tenkan_sen',
'kijun_sen',
'senkou_span_a',
'senkou_span_b',
'chikou_span',
)
window_length = 52
def _validate(self):
super(IchimokuKinkoHyo, self)._validate()
for k, v in self.params.items():
if v > self.window_length:
raise ValueError(
'%s must be <= the window_length: %s > %s' % (
k, v, self.window_length,
),
)
def compute(self,
today,
assets,
out,
high,
low,
close,
tenkan_sen_length,
kijun_sen_length,
chikou_span_length):
out.tenkan_sen = tenkan_sen = (
high[-tenkan_sen_length:].max(axis=0) +
low[-tenkan_sen_length:].min(axis=0)
) / 2
out.kijun_sen = kijun_sen = (
high[-kijun_sen_length:].max(axis=0) +
low[-kijun_sen_length:].min(axis=0)
) / 2
out.senkou_span_a = (tenkan_sen + kijun_sen) / 2
out.senkou_span_b = (high.max(axis=0) + low.min(axis=0)) / 2
out.chikou_span = close[chikou_span_length]
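# Editor's note (illustrative, not part of the original module): the defaults
# above give the conventional (9, 26, 52) Ichimoku configuration. Each span
# length can be overridden per instance as long as it stays within the
# window, e.g.
#
#     IchimokuKinkoHyo(window_length=60, tenkan_sen_length=10,
#                      kijun_sen_length=30, chikou_span_length=30)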
class RateOfChangePercentage(CustomFactor):
"""
Rate of change Percentage
ROC measures the percentage change in price from one period to the next.
The ROC calculation compares the current price with the price `n`
periods ago.
Formula for calculation: ((price - prevPrice) / prevPrice) * 100
price - the current price
prevPrice - the price n days ago, equals window length
"""
def compute(self, today, assets, out, close):
today_close = close[-1]
prev_close = close[0]
evaluate('((tc - pc) / pc) * 100',
local_dict={
'tc': today_close,
'pc': prev_close
},
global_dict={},
out=out,
)
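# Worked example (editor's sketch with hypothetical values): this factor
# declares no default inputs or window_length, so both must be supplied, e.g.
#
#     roc = RateOfChangePercentage(inputs=[EquityPricing.close],
#                                  window_length=10)
#
# If an asset closed at 100 at the start of the 10-day window (close[0]) and
# at 110 on the most recent day (close[-1]), the output is
# ((110 - 100) / 100) * 100 == 10.0.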
class TrueRange(CustomFactor):
"""
True Range
A technical indicator originally developed by J. Welles Wilder, Jr.
Indicates the true degree of daily price change in an underlying.
**Default Inputs:** :data:`zipline.pipeline.data.EquityPricing.high`, \
:data:`zipline.pipeline.data.EquityPricing.low`, \
:data:`zipline.pipeline.data.EquityPricing.close`
**Default Window Length:** 2
"""
inputs = (
EquityPricing.high,
EquityPricing.low,
EquityPricing.close,
)
window_length = 2
def compute(self, today, assets, out, highs, lows, closes):
high_to_low = highs[1:] - lows[1:]
high_to_prev_close = abs(highs[1:] - closes[:-1])
low_to_prev_close = abs(lows[1:] - closes[:-1])
out[:] = nanmax(
dstack((
high_to_low,
high_to_prev_close,
low_to_prev_close,
)),
2
)
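# Worked example (editor's sketch with hypothetical prices): with the default
# two-day window, if yesterday's close was 7 and today's high/low are 10/8,
# the true range is max(10 - 8, |10 - 7|, |8 - 7|) == 3.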
class MovingAverageConvergenceDivergenceSignal(CustomFactor):
"""
Moving Average Convergence/Divergence (MACD) Signal line
https://en.wikipedia.org/wiki/MACD
A technical indicator originally developed by Gerald Appel in the late
1970's. MACD shows the relationship between two moving averages and
reveals changes in the strength, direction, momentum, and duration of a
trend in a stock's price.
**Default Inputs:** :data:`zipline.pipeline.data.EquityPricing.close`
Parameters
----------
fast_period : int > 0, optional
The window length for the "fast" EWMA. Default is 12.
slow_period : int > 0, > fast_period, optional
The window length for the "slow" EWMA. Default is 26.
signal_period : int > 0, < fast_period, optional
The window length for the signal line. Default is 9.
Notes
-----
Unlike most pipeline expressions, this factor does not accept a
``window_length`` parameter. ``window_length`` is inferred from
``slow_period`` and ``signal_period``.
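    Examples
    --------
    A rough construction sketch; the values shown are just the defaults
    restated, and they imply a ``window_length`` of ``26 + 9 - 1 = 34``::
    >>> macd = MACDSignal(
    ...     fast_period=12,
    ...     slow_period=26,
    ...     signal_period=9,
    ... ) # doctest: +SKIP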
"""
inputs = (EquityPricing.close,)
# We don't use the default form of `params` here because we want to
# dynamically calculate `window_length` from the period lengths in our
# __new__.
params = ('fast_period', 'slow_period', 'signal_period')
@expect_bounded(
__funcname='MACDSignal',
fast_period=(1, None), # These must all be >= 1.
slow_period=(1, None),
signal_period=(1, None),
)
def __new__(cls,
fast_period=12,
slow_period=26,
signal_period=9,
*args,
**kwargs):
if slow_period <= fast_period:
raise ValueError(
"'slow_period' must be greater than 'fast_period', but got\n"
"slow_period={slow}, fast_period={fast}".format(
slow=slow_period,
fast=fast_period,
)
)
return super(MovingAverageConvergenceDivergenceSignal, cls).__new__(
cls,
fast_period=fast_period,
slow_period=slow_period,
signal_period=signal_period,
window_length=slow_period + signal_period - 1,
*args, **kwargs
)
def _ewma(self, data, length):
decay_rate = 1.0 - (2.0 / (1.0 + length))
return average(
data,
axis=1,
weights=exponential_weights(length, decay_rate)
)
def compute(self, today, assets, out, close, fast_period, slow_period,
signal_period):
slow_EWMA = self._ewma(
rolling_window(close, slow_period),
slow_period
)
fast_EWMA = self._ewma(
rolling_window(close, fast_period)[-signal_period:],
fast_period
)
macd = fast_EWMA - slow_EWMA
out[:] = self._ewma(macd.T, signal_period)
# Convenience aliases.
MACDSignal = MovingAverageConvergenceDivergenceSignal | zipline-trader | /zipline_trader-1.6.1-cp36-cp36m-win32.whl/zipline/pipeline/factors/technical.py | technical.py |
from numexpr import evaluate
import numpy as np
from numpy import broadcast_arrays
from scipy.stats import (
linregress,
spearmanr,
)
from zipline.assets import Asset
from zipline.errors import IncompatibleTerms
from zipline.pipeline.factors import CustomFactor
from zipline.pipeline.filters import SingleAsset
from zipline.pipeline.mixins import StandardOutputs
from zipline.pipeline.sentinels import NotSpecified
from zipline.pipeline.term import AssetExists
from zipline.utils.input_validation import (
expect_bounded,
expect_dtypes,
expect_types,
)
from zipline.utils.math_utils import nanmean
from zipline.utils.numpy_utils import (
float64_dtype,
int64_dtype,
)
from .basic import Returns
ALLOWED_DTYPES = (float64_dtype, int64_dtype)
class _RollingCorrelation(CustomFactor):
@expect_dtypes(base_factor=ALLOWED_DTYPES, target=ALLOWED_DTYPES)
@expect_bounded(correlation_length=(2, None))
def __new__(cls,
base_factor,
target,
correlation_length,
mask=NotSpecified):
if target.ndim == 2 and base_factor.mask is not target.mask:
raise IncompatibleTerms(term_1=base_factor, term_2=target)
return super(_RollingCorrelation, cls).__new__(
cls,
inputs=[base_factor, target],
window_length=correlation_length,
mask=mask,
)
class RollingPearson(_RollingCorrelation):
"""
    A Factor that computes Pearson correlation coefficients between the columns
of a given Factor and either the columns of another Factor/BoundColumn or a
slice/single column of data.
Parameters
----------
base_factor : zipline.pipeline.Factor
The factor for which to compute correlations of each of its columns
with `target`.
target : zipline.pipeline.Term with a numeric dtype
The term with which to compute correlations against each column of data
produced by `base_factor`. This term may be a Factor, a BoundColumn or
a Slice. If `target` is two-dimensional, correlations are computed
asset-wise.
correlation_length : int
Length of the lookback window over which to compute each correlation
coefficient.
mask : zipline.pipeline.Filter, optional
A Filter describing which assets (columns) of `base_factor` should have
their correlation with `target` computed each day.
See Also
--------
:func:`scipy.stats.pearsonr`
:meth:`Factor.pearsonr`
:class:`zipline.pipeline.factors.RollingPearsonOfReturns`
Notes
-----
Most users should call Factor.pearsonr rather than directly construct an
instance of this class.
"""
window_safe = True
def compute(self, today, assets, out, base_data, target_data):
vectorized_pearson_r(
base_data,
target_data,
allowed_missing=0,
out=out,
)
class RollingSpearman(_RollingCorrelation):
"""
    A Factor that computes Spearman rank correlation coefficients between the
columns of a given Factor and either the columns of another
Factor/BoundColumn or a slice/single column of data.
Parameters
----------
base_factor : zipline.pipeline.Factor
The factor for which to compute correlations of each of its columns
with `target`.
target : zipline.pipeline.Term with a numeric dtype
The term with which to compute correlations against each column of data
produced by `base_factor`. This term may be a Factor, a BoundColumn or
a Slice. If `target` is two-dimensional, correlations are computed
asset-wise.
correlation_length : int
Length of the lookback window over which to compute each correlation
coefficient.
mask : zipline.pipeline.Filter, optional
A Filter describing which assets (columns) of `base_factor` should have
their correlation with `target` computed each day.
See Also
--------
:func:`scipy.stats.spearmanr`
:meth:`Factor.spearmanr`
:class:`zipline.pipeline.factors.RollingSpearmanOfReturns`
Notes
-----
Most users should call Factor.spearmanr rather than directly construct an
instance of this class.
"""
window_safe = True
def compute(self, today, assets, out, base_data, target_data):
# If `target_data` is a Slice or single column of data, broadcast it
# out to the same shape as `base_data`, then compute column-wise. This
# is efficient because each column of the broadcasted array only refers
# to a single memory location.
target_data = broadcast_arrays(target_data, base_data)[0]
for i in range(len(out)):
out[i] = spearmanr(base_data[:, i], target_data[:, i])[0]
class RollingLinearRegression(CustomFactor):
"""
A Factor that performs an ordinary least-squares regression predicting the
columns of a given Factor from either the columns of another
Factor/BoundColumn or a slice/single column of data.
Parameters
----------
dependent : zipline.pipeline.Factor
The factor whose columns are the predicted/dependent variable of each
regression with `independent`.
independent : zipline.pipeline.slice.Slice or zipline.pipeline.Factor
The factor/slice whose columns are the predictor/independent variable
of each regression with `dependent`. If `independent` is a Factor,
regressions are computed asset-wise.
regression_length : int
Length of the lookback window over which to compute each regression.
mask : zipline.pipeline.Filter, optional
A Filter describing which assets (columns) of `dependent` should be
regressed against `independent` each day.
See Also
--------
:func:`scipy.stats.linregress`
:meth:`Factor.linear_regression`
:class:`zipline.pipeline.factors.RollingLinearRegressionOfReturns`
Notes
-----
Most users should call Factor.linear_regression rather than directly
construct an instance of this class.
"""
outputs = ['alpha', 'beta', 'r_value', 'p_value', 'stderr']
@expect_dtypes(dependent=ALLOWED_DTYPES, independent=ALLOWED_DTYPES)
@expect_bounded(regression_length=(2, None))
def __new__(cls,
dependent,
independent,
regression_length,
mask=NotSpecified):
if independent.ndim == 2 and dependent.mask is not independent.mask:
raise IncompatibleTerms(term_1=dependent, term_2=independent)
return super(RollingLinearRegression, cls).__new__(
cls,
inputs=[dependent, independent],
window_length=regression_length,
mask=mask,
)
def compute(self, today, assets, out, dependent, independent):
alpha = out.alpha
beta = out.beta
r_value = out.r_value
p_value = out.p_value
stderr = out.stderr
def regress(y, x):
regr_results = linregress(y=y, x=x)
# `linregress` returns its results in the following order:
# slope, intercept, r-value, p-value, stderr
alpha[i] = regr_results[1]
beta[i] = regr_results[0]
r_value[i] = regr_results[2]
p_value[i] = regr_results[3]
stderr[i] = regr_results[4]
# If `independent` is a Slice or single column of data, broadcast it
# out to the same shape as `dependent`, then compute column-wise. This
# is efficient because each column of the broadcasted array only refers
# to a single memory location.
independent = broadcast_arrays(independent, dependent)[0]
for i in range(len(out)):
regress(y=dependent[:, i], x=independent[:, i])
class RollingPearsonOfReturns(RollingPearson):
"""
Calculates the Pearson product-moment correlation coefficient of the
returns of the given asset with the returns of all other assets.
Pearson correlation is what most people mean when they say "correlation
coefficient" or "R-value".
Parameters
----------
target : zipline.assets.Asset
The asset to correlate with all other assets.
returns_length : int >= 2
Length of the lookback window over which to compute returns. Daily
returns require a window length of 2.
correlation_length : int >= 1
Length of the lookback window over which to compute each correlation
coefficient.
mask : zipline.pipeline.Filter, optional
A Filter describing which assets should have their correlation with the
target asset computed each day.
Notes
-----
Computing this factor over many assets can be time consuming. It is
recommended that a mask be used in order to limit the number of assets over
which correlations are computed.
Examples
--------
Let the following be example 10-day returns for three different assets::
SPY MSFT FB
2017-03-13 -.03 .03 .04
2017-03-14 -.02 -.03 .02
2017-03-15 -.01 .02 .01
2017-03-16 0 -.02 .01
2017-03-17 .01 .04 -.01
2017-03-20 .02 -.03 -.02
2017-03-21 .03 .01 -.02
2017-03-22 .04 -.02 -.02
Suppose we are interested in SPY's rolling returns correlation with each
stock from 2017-03-17 to 2017-03-22, using a 5-day look back window (that
is, we calculate each correlation coefficient over 5 days of data). We can
achieve this by doing::
rolling_correlations = RollingPearsonOfReturns(
target=sid(8554),
returns_length=10,
correlation_length=5,
)
The result of computing ``rolling_correlations`` from 2017-03-17 to
2017-03-22 gives::
SPY MSFT FB
2017-03-17 1 .15 -.96
2017-03-20 1 .10 -.96
2017-03-21 1 -.16 -.94
2017-03-22 1 -.16 -.85
Note that the column for SPY is all 1's, as the correlation of any data
series with itself is always 1. To understand how each of the other values
were calculated, take for example the .15 in MSFT's column. This is the
correlation coefficient between SPY's returns looking back from 2017-03-17
(-.03, -.02, -.01, 0, .01) and MSFT's returns (.03, -.03, .02, -.02, .04).
See Also
--------
:class:`zipline.pipeline.factors.RollingSpearmanOfReturns`
:class:`zipline.pipeline.factors.RollingLinearRegressionOfReturns`
"""
def __new__(cls,
target,
returns_length,
correlation_length,
mask=NotSpecified):
# Use the `SingleAsset` filter here because it protects against
# inputting a non-existent target asset.
returns = Returns(
window_length=returns_length,
mask=(AssetExists() | SingleAsset(asset=target)),
)
return super(RollingPearsonOfReturns, cls).__new__(
cls,
base_factor=returns,
target=returns[target],
correlation_length=correlation_length,
mask=mask,
)
class RollingSpearmanOfReturns(RollingSpearman):
"""
Calculates the Spearman rank correlation coefficient of the returns of the
given asset with the returns of all other assets.
Parameters
----------
target : zipline.assets.Asset
The asset to correlate with all other assets.
returns_length : int >= 2
Length of the lookback window over which to compute returns. Daily
returns require a window length of 2.
correlation_length : int >= 1
Length of the lookback window over which to compute each correlation
coefficient.
mask : zipline.pipeline.Filter, optional
A Filter describing which assets should have their correlation with the
target asset computed each day.
Notes
-----
Computing this factor over many assets can be time consuming. It is
recommended that a mask be used in order to limit the number of assets over
which correlations are computed.
See Also
--------
:class:`zipline.pipeline.factors.RollingPearsonOfReturns`
:class:`zipline.pipeline.factors.RollingLinearRegressionOfReturns`
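    Examples
    --------
    A rough sketch mirroring the :class:`RollingPearsonOfReturns` example
    above, computing each rank correlation over a 5-day window (``sid`` is
    assumed to be available, e.g. inside an algorithm)::
    >>> rolling_rank_correlations = RollingSpearmanOfReturns(
    ...     target=sid(8554),
    ...     returns_length=10,
    ...     correlation_length=5,
    ... ) # doctest: +SKIP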
"""
def __new__(cls,
target,
returns_length,
correlation_length,
mask=NotSpecified):
# Use the `SingleAsset` filter here because it protects against
# inputting a non-existent target asset.
returns = Returns(
window_length=returns_length,
mask=(AssetExists() | SingleAsset(asset=target)),
)
return super(RollingSpearmanOfReturns, cls).__new__(
cls,
base_factor=returns,
target=returns[target],
correlation_length=correlation_length,
mask=mask,
)
class RollingLinearRegressionOfReturns(RollingLinearRegression):
"""
Perform an ordinary least-squares regression predicting the returns of all
other assets on the given asset.
Parameters
----------
target : zipline.assets.Asset
The asset to regress against all other assets.
returns_length : int >= 2
Length of the lookback window over which to compute returns. Daily
returns require a window length of 2.
regression_length : int >= 1
Length of the lookback window over which to compute each regression.
mask : zipline.pipeline.Filter, optional
A Filter describing which assets should be regressed against the target
asset each day.
Notes
-----
Computing this factor over many assets can be time consuming. It is
recommended that a mask be used in order to limit the number of assets over
which regressions are computed.
This factor is designed to return five outputs:
- alpha, a factor that computes the intercepts of each regression.
- beta, a factor that computes the slopes of each regression.
- r_value, a factor that computes the correlation coefficient of each
regression.
- p_value, a factor that computes, for each regression, the two-sided
p-value for a hypothesis test whose null hypothesis is that the slope is
zero.
- stderr, a factor that computes the standard error of the estimate of each
regression.
For more help on factors with multiple outputs, see
:class:`zipline.pipeline.CustomFactor`.
Examples
--------
Let the following be example 10-day returns for three different assets::
SPY MSFT FB
2017-03-13 -.03 .03 .04
2017-03-14 -.02 -.03 .02
2017-03-15 -.01 .02 .01
2017-03-16 0 -.02 .01
2017-03-17 .01 .04 -.01
2017-03-20 .02 -.03 -.02
2017-03-21 .03 .01 -.02
2017-03-22 .04 -.02 -.02
Suppose we are interested in predicting each stock's returns from SPY's
over rolling 5-day look back windows. We can compute rolling regression
coefficients (alpha and beta) from 2017-03-17 to 2017-03-22 by doing::
        regression_factor = RollingLinearRegressionOfReturns(
target=sid(8554),
returns_length=10,
regression_length=5,
)
alpha = regression_factor.alpha
beta = regression_factor.beta
The result of computing ``alpha`` from 2017-03-17 to 2017-03-22 gives::
SPY MSFT FB
2017-03-17 0 .011 .003
2017-03-20 0 -.004 .004
2017-03-21 0 .007 .006
2017-03-22 0 .002 .008
And the result of computing ``beta`` from 2017-03-17 to 2017-03-22 gives::
SPY MSFT FB
2017-03-17 1 .3 -1.1
2017-03-20 1 .2 -1
2017-03-21 1 -.3 -1
2017-03-22 1 -.3 -.9
Note that SPY's column for alpha is all 0's and for beta is all 1's, as the
regression line of SPY with itself is simply the function y = x.
To understand how each of the other values were calculated, take for
example MSFT's ``alpha`` and ``beta`` values on 2017-03-17 (.011 and .3,
respectively). These values are the result of running a linear regression
predicting MSFT's returns from SPY's returns, using values starting at
2017-03-17 and looking back 5 days. That is, the regression was run with
x = [-.03, -.02, -.01, 0, .01] and y = [.03, -.03, .02, -.02, .04], and it
produced a slope of .3 and an intercept of .011.
See Also
--------
:class:`zipline.pipeline.factors.RollingPearsonOfReturns`
:class:`zipline.pipeline.factors.RollingSpearmanOfReturns`
"""
window_safe = True
def __new__(cls,
target,
returns_length,
regression_length,
mask=NotSpecified):
# Use the `SingleAsset` filter here because it protects against
# inputting a non-existent target asset.
returns = Returns(
window_length=returns_length,
mask=(AssetExists() | SingleAsset(asset=target)),
)
return super(RollingLinearRegressionOfReturns, cls).__new__(
cls,
dependent=returns,
independent=returns[target],
regression_length=regression_length,
mask=mask,
)
class SimpleBeta(CustomFactor, StandardOutputs):
"""
Factor producing the slope of a regression line between each asset's daily
returns to the daily returns of a single "target" asset.
Parameters
----------
target : zipline.Asset
Asset against which other assets should be regressed.
regression_length : int
Number of days of daily returns to use for the regression.
allowed_missing_percentage : float, optional
Percentage of returns observations (between 0 and 1) that are allowed
to be missing when calculating betas. Assets with more than this
percentage of returns observations missing will produce values of
NaN. Default behavior is that 25% of inputs can be missing.
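    Examples
    --------
    A rough construction sketch, assuming ``spy`` is a
    :class:`zipline.assets.Asset` for the benchmark that was looked up
    elsewhere::
    >>> beta = SimpleBeta(
    ...     target=spy,
    ...     regression_length=252,
    ...     allowed_missing_percentage=0.25,
    ... ) # doctest: +SKIP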
"""
window_safe = True
dtype = float64_dtype
params = ('allowed_missing_count',)
@expect_types(
target=Asset,
regression_length=int,
allowed_missing_percentage=(int, float),
__funcname='SimpleBeta',
)
@expect_bounded(
regression_length=(3, None),
allowed_missing_percentage=(0.0, 1.0),
__funcname='SimpleBeta',
)
def __new__(cls,
target,
regression_length,
allowed_missing_percentage=0.25):
daily_returns = Returns(
window_length=2,
mask=(AssetExists() | SingleAsset(asset=target)),
)
allowed_missing_count = int(
allowed_missing_percentage * regression_length
)
return super(SimpleBeta, cls).__new__(
cls,
inputs=[daily_returns, daily_returns[target]],
window_length=regression_length,
allowed_missing_count=allowed_missing_count,
)
def compute(self,
today,
assets,
out,
all_returns,
target_returns,
allowed_missing_count):
vectorized_beta(
dependents=all_returns,
independent=target_returns,
allowed_missing=allowed_missing_count,
out=out,
)
def graph_repr(self):
return "{}({!r}, {}, {})".format(
type(self).__name__,
str(self.target.symbol), # coerce from unicode to str in py2.
self.window_length,
self.params['allowed_missing_count'],
)
@property
def target(self):
"""Get the target of the beta calculation.
"""
return self.inputs[1].asset
def __repr__(self):
return "{}({}, length={}, allowed_missing={})".format(
type(self).__name__,
self.target,
self.window_length,
self.params['allowed_missing_count'],
)
def vectorized_beta(dependents, independent, allowed_missing, out=None):
"""
Compute slopes of linear regressions between columns of ``dependents`` and
``independent``.
Parameters
----------
dependents : np.array[N, M]
Array with columns of data to be regressed against ``independent``.
independent : np.array[N, 1]
Independent variable of the regression
allowed_missing : int
        Number of allowed missing (NaN) observations per column. Columns with
        more than this many missing observations in either ``dependents`` or
        ``independent`` will output NaN as the regression coefficient.
out : np.array[M] or None, optional
Output array into which to write results. If None, a new array is
created and returned.
Returns
-------
slopes : np.array[M]
Linear regression coefficients for each column of ``dependents``.
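    Examples
    --------
    A small synthetic sketch: column 0 of ``y`` equals ``x`` and column 1 is
    ``2 * x``, so the expected slopes are ``[1.0, 2.0]``::
    >>> x = np.linspace(0.0, 1.0, 10).reshape(10, 1) # doctest: +SKIP
    >>> y = np.hstack([x, 2 * x]) # doctest: +SKIP
    >>> vectorized_beta(y, x, allowed_missing=0) # doctest: +SKIP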
"""
# Cache these as locals since we're going to call them multiple times.
nan = np.nan
isnan = np.isnan
N, M = dependents.shape
if out is None:
out = np.full(M, nan)
# Copy N times as a column vector and fill with nans to have the same
# missing value pattern as the dependent variable.
#
# PERF_TODO: We could probably avoid the space blowup by doing this in
# Cython.
# shape: (N, M)
independent = np.where(
isnan(dependents),
nan,
independent,
)
# Calculate beta as Cov(X, Y) / Cov(X, X).
# https://en.wikipedia.org/wiki/Simple_linear_regression#Fitting_the_regression_line # noqa
#
# NOTE: The usual formula for covariance is::
#
# mean((X - mean(X)) * (Y - mean(Y)))
#
# However, we don't actually need to take the mean of both sides of the
    # product, because of the following equivalence::
#
# Let X_res = (X - mean(X)).
# We have:
#
# mean(X_res * (Y - mean(Y))) = mean(X_res * (Y - mean(Y)))
# (1) = mean((X_res * Y) - (X_res * mean(Y)))
# (2) = mean(X_res * Y) - mean(X_res * mean(Y))
# (3) = mean(X_res * Y) - mean(X_res) * mean(Y)
# (4) = mean(X_res * Y) - 0 * mean(Y)
# (5) = mean(X_res * Y)
#
#
# The tricky step in the above derivation is step (4). We know that
# mean(X_res) is zero because, for any X:
#
# mean(X - mean(X)) = mean(X) - mean(X) = 0.
#
# The upshot of this is that we only have to center one of `independent`
# and `dependent` when calculating covariances. Since we need the centered
# `independent` to calculate its variance in the next step, we choose to
# center `independent`.
# shape: (N, M)
ind_residual = independent - nanmean(independent, axis=0)
# shape: (M,)
covariances = nanmean(ind_residual * dependents, axis=0)
# We end up with different variances in each column here because each
# column may have a different subset of the data dropped due to missing
# data in the corresponding dependent column.
# shape: (M,)
independent_variances = nanmean(ind_residual ** 2, axis=0)
# shape: (M,)
np.divide(covariances, independent_variances, out=out)
    # Write nans back to locations where we have more than the allowed number of
# missing entries.
nanlocs = isnan(independent).sum(axis=0) > allowed_missing
out[nanlocs] = nan
return out
def vectorized_pearson_r(dependents, independents, allowed_missing, out=None):
"""
Compute Pearson's r between columns of ``dependents`` and ``independents``.
Parameters
----------
dependents : np.array[N, M]
Array with columns of data to be regressed against ``independent``.
independents : np.array[N, M] or np.array[N, 1]
Independent variable(s) of the regression. If a single column is
passed, it is broadcast to the shape of ``dependents``.
allowed_missing : int
        Number of allowed missing (NaN) observations per column. Columns with
        more than this many missing observations in either ``dependents`` or
        ``independents`` will output NaN as the correlation coefficient.
out : np.array[M] or None, optional
Output array into which to write results. If None, a new array is
created and returned.
Returns
-------
correlations : np.array[M]
Pearson correlation coefficients for each column of ``dependents``.
See Also
--------
:class:`zipline.pipeline.factors.RollingPearson`
:class:`zipline.pipeline.factors.RollingPearsonOfReturns`
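    Examples
    --------
    A small synthetic sketch: column 0 of ``y`` is a positive linear function
    of ``x`` and column 1 is a negative one, so the expected correlations are
    ``[1.0, -1.0]``::
    >>> x = np.linspace(0.0, 1.0, 10).reshape(10, 1) # doctest: +SKIP
    >>> y = np.hstack([3 * x + 1, -x]) # doctest: +SKIP
    >>> vectorized_pearson_r(y, x, allowed_missing=0) # doctest: +SKIP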
"""
nan = np.nan
isnan = np.isnan
N, M = dependents.shape
if out is None:
out = np.full(M, nan)
if allowed_missing > 0:
# If we're handling nans robustly, we need to mask both arrays to
# locations where either was nan.
either_nan = isnan(dependents) | isnan(independents)
independents = np.where(either_nan, nan, independents)
dependents = np.where(either_nan, nan, dependents)
mean = nanmean
else:
# Otherwise, we can just use mean, which will give us a nan for any
# column where there's ever a nan.
mean = np.mean
    # Pearson R is Cov(X, Y) / (StdDev(X) * StdDev(Y))
# c.f. https://en.wikipedia.org/wiki/Pearson_correlation_coefficient
ind_residual = independents - mean(independents, axis=0)
dep_residual = dependents - mean(dependents, axis=0)
ind_variance = mean(ind_residual ** 2, axis=0)
dep_variance = mean(dep_residual ** 2, axis=0)
covariances = mean(ind_residual * dep_residual, axis=0)
evaluate(
'where(mask, nan, cov / sqrt(ind_variance * dep_variance))',
local_dict={'cov': covariances,
'mask': isnan(independents).sum(axis=0) > allowed_missing,
'nan': np.nan,
'ind_variance': ind_variance,
'dep_variance': dep_variance},
global_dict={},
out=out,
)
return out | zipline-trader | /zipline_trader-1.6.1-cp36-cp36m-win32.whl/zipline/pipeline/factors/statistical.py | statistical.py |
from operator import attrgetter
from numbers import Number
from math import ceil
from textwrap import dedent
from numpy import empty_like, inf, isnan, nan, where
from scipy.stats import rankdata
from zipline.utils.compat import wraps
from zipline.errors import (
BadPercentileBounds,
UnknownRankMethod,
UnsupportedDataType,
)
from zipline.lib.normalize import naive_grouped_rowwise_apply
from zipline.lib.rank import masked_rankdata_2d, rankdata_1d_descending
from zipline.pipeline.api_utils import restrict_to_dtype
from zipline.pipeline.classifiers import Classifier, Everything, Quantiles
from zipline.pipeline.dtypes import (
CLASSIFIER_DTYPES,
FACTOR_DTYPES,
FILTER_DTYPES,
)
from zipline.pipeline.expression import (
BadBinaryOperator,
COMPARISONS,
is_comparison,
MATH_BINOPS,
method_name_for_op,
NumericalExpression,
NUMEXPR_MATH_FUNCS,
UNARY_OPS,
unary_op_name,
)
from zipline.pipeline.filters import (
Filter,
NumExprFilter,
PercentileFilter,
MaximumFilter,
)
from zipline.pipeline.mixins import (
CustomTermMixin,
LatestMixin,
PositiveWindowLengthMixin,
RestrictedDTypeMixin,
SingleInputMixin,
)
from zipline.pipeline.sentinels import NotSpecified, NotSpecifiedType
from zipline.pipeline.term import AssetExists, ComputableTerm, Term
from zipline.utils.functional import with_doc, with_name
from zipline.utils.input_validation import expect_types
from zipline.utils.math_utils import (
nanmax,
nanmean,
nanmedian,
nanmin,
nanstd,
nansum,
)
from zipline.utils.numpy_utils import (
as_column,
bool_dtype,
coerce_to_dtype,
float64_dtype,
is_missing,
)
from zipline.utils.sharedoc import templated_docstring
_RANK_METHODS = frozenset(['average', 'min', 'max', 'dense', 'ordinal'])
def coerce_numbers_to_my_dtype(f):
"""
A decorator for methods whose signature is f(self, other) that coerces
``other`` to ``self.dtype``.
This is used to make comparison operations between numbers and `Factor`
instances work independently of whether the user supplies a float or
integer literal.
For example, if I write::
my_filter = my_factor > 3
my_factor probably has dtype float64, but 3 is an int, so we want to coerce
to float64 before doing the comparison.
"""
@wraps(f)
def method(self, other):
if isinstance(other, Number):
other = coerce_to_dtype(self.dtype, other)
return f(self, other)
return method
def binop_return_dtype(op, left, right):
"""
Compute the expected return dtype for the given binary operator.
Parameters
----------
op : str
Operator symbol, (e.g. '+', '-', ...).
left : numpy.dtype
Dtype of left hand side.
right : numpy.dtype
Dtype of right hand side.
Returns
-------
outdtype : numpy.dtype
The dtype of the result of `left <op> right`.
"""
if is_comparison(op):
if left != right:
raise TypeError(
"Don't know how to compute {left} {op} {right}.\n"
"Comparisons are only supported between Factors of equal "
"dtypes.".format(left=left, op=op, right=right)
)
return bool_dtype
elif left != float64_dtype or right != float64_dtype:
raise TypeError(
"Don't know how to compute {left} {op} {right}.\n"
"Arithmetic operators are only supported between Factors of "
"dtype 'float64'.".format(
left=left.name,
op=op,
right=right.name,
)
)
return float64_dtype
BINOP_DOCSTRING_TEMPLATE = """
Construct a :class:`~zipline.pipeline.{rtype}` computing ``self {op} other``.
Parameters
----------
other : zipline.pipeline.Factor, float
Right-hand side of the expression.
Returns
-------
{ret}
"""
BINOP_RETURN_FILTER = """\
filter : zipline.pipeline.Filter
Filter computing ``self {op} other`` with the outputs of ``self`` and
``other``.
"""
BINOP_RETURN_FACTOR = """\
factor : zipline.pipeline.Factor
Factor computing ``self {op} other`` with outputs of ``self`` and
``other``.
"""
def binary_operator(op):
"""
Factory function for making binary operator methods on a Factor subclass.
Returns a function, "binary_operator" suitable for implementing functions
like __add__.
"""
# When combining a Factor with a NumericalExpression, we use this
# attrgetter instance to defer to the commuted implementation of the
# NumericalExpression operator.
commuted_method_getter = attrgetter(method_name_for_op(op, commute=True))
is_compare = is_comparison(op)
if is_compare:
ret_doc = BINOP_RETURN_FILTER.format(op=op)
rtype = 'Filter'
else:
ret_doc = BINOP_RETURN_FACTOR.format(op=op)
rtype = 'Factor'
docstring = BINOP_DOCSTRING_TEMPLATE.format(
op=op,
ret=ret_doc,
rtype=rtype,
)
@with_doc(docstring)
@with_name(method_name_for_op(op))
@coerce_numbers_to_my_dtype
def binary_operator(self, other):
# This can't be hoisted up a scope because the types returned by
# binop_return_type aren't defined when the top-level function is
# invoked in the class body of Factor.
return_type = NumExprFilter if is_compare else NumExprFactor
if isinstance(self, NumExprFactor):
self_expr, other_expr, new_inputs = self.build_binary_op(
op, other,
)
return return_type(
"({left}) {op} ({right})".format(
left=self_expr,
op=op,
right=other_expr,
),
new_inputs,
dtype=binop_return_dtype(op, self.dtype, other.dtype),
)
elif isinstance(other, NumExprFactor):
# NumericalExpression overrides ops to correctly handle merging of
# inputs. Look up and call the appropriate reflected operator with
# ourself as the input.
return commuted_method_getter(other)(self)
elif isinstance(other, Term):
if self is other:
return return_type(
"x_0 {op} x_0".format(op=op),
(self,),
dtype=binop_return_dtype(op, self.dtype, other.dtype),
)
return return_type(
"x_0 {op} x_1".format(op=op),
(self, other),
dtype=binop_return_dtype(op, self.dtype, other.dtype),
)
elif isinstance(other, Number):
return return_type(
"x_0 {op} ({constant})".format(op=op, constant=other),
binds=(self,),
# .dtype access is safe here because coerce_numbers_to_my_dtype
# will convert any input numbers to numpy equivalents.
dtype=binop_return_dtype(op, self.dtype, other.dtype)
)
raise BadBinaryOperator(op, self, other)
return binary_operator
def reflected_binary_operator(op):
"""
Factory function for making binary operator methods on a Factor.
Returns a function, "reflected_binary_operator" suitable for implementing
functions like __radd__.
"""
assert not is_comparison(op)
@with_name(method_name_for_op(op, commute=True))
@coerce_numbers_to_my_dtype
def reflected_binary_operator(self, other):
if isinstance(self, NumericalExpression):
self_expr, other_expr, new_inputs = self.build_binary_op(
op, other
)
return NumExprFactor(
"({left}) {op} ({right})".format(
left=other_expr,
right=self_expr,
op=op,
),
new_inputs,
dtype=binop_return_dtype(op, other.dtype, self.dtype)
)
# Only have to handle the numeric case because in all other valid cases
# the corresponding left-binding method will be called.
elif isinstance(other, Number):
return NumExprFactor(
"{constant} {op} x_0".format(op=op, constant=other),
binds=(self,),
dtype=binop_return_dtype(op, other.dtype, self.dtype),
)
raise BadBinaryOperator(op, other, self)
return reflected_binary_operator
def unary_operator(op):
"""
Factory function for making unary operator methods for Factors.
"""
# Only negate is currently supported.
valid_ops = {'-'}
if op not in valid_ops:
raise ValueError("Invalid unary operator %s." % op)
@with_doc("Unary Operator: '%s'" % op)
@with_name(unary_op_name(op))
def unary_operator(self):
if self.dtype != float64_dtype:
raise TypeError(
"Can't apply unary operator {op!r} to instance of "
"{typename!r} with dtype {dtypename!r}.\n"
"{op!r} is only supported for Factors of dtype "
"'float64'.".format(
op=op,
typename=type(self).__name__,
dtypename=self.dtype.name,
)
)
# This can't be hoisted up a scope because the types returned by
# unary_op_return_type aren't defined when the top-level function is
# invoked.
if isinstance(self, NumericalExpression):
return NumExprFactor(
"{op}({expr})".format(op=op, expr=self._expr),
self.inputs,
dtype=float64_dtype,
)
else:
return NumExprFactor(
"{op}x_0".format(op=op),
(self,),
dtype=float64_dtype,
)
return unary_operator
def function_application(func):
"""
Factory function for producing function application methods for Factor
subclasses.
"""
if func not in NUMEXPR_MATH_FUNCS:
raise ValueError("Unsupported mathematical function '%s'" % func)
docstring = dedent(
"""\
Construct a Factor that computes ``{}()`` on each output of ``self``.
Returns
-------
factor : zipline.pipeline.Factor
""".format(func)
)
@with_doc(docstring)
@with_name(func)
def mathfunc(self):
if isinstance(self, NumericalExpression):
return NumExprFactor(
"{func}({expr})".format(func=func, expr=self._expr),
self.inputs,
dtype=float64_dtype,
)
else:
return NumExprFactor(
"{func}(x_0)".format(func=func),
(self,),
dtype=float64_dtype,
)
return mathfunc
# Decorators for Factor methods.
if_not_float64_tell_caller_to_use_isnull = restrict_to_dtype(
dtype=float64_dtype,
message_template=(
"{method_name}() was called on a factor of dtype {received_dtype}.\n"
"{method_name}() is only defined for dtype {expected_dtype}."
"To filter missing data, use isnull() or notnull()."
)
)
float64_only = restrict_to_dtype(
dtype=float64_dtype,
message_template=(
"{method_name}() is only defined on Factors of dtype {expected_dtype},"
" but it was called on a Factor of dtype {received_dtype}."
)
)
CORRELATION_METHOD_NOTE = dedent(
"""\
This method can only be called on expressions which are deemed safe for use
as inputs to windowed :class:`~zipline.pipeline.Factor` objects. Examples
    of such expressions include :class:`~zipline.pipeline.data.BoundColumn`,
    :class:`~zipline.pipeline.factors.Returns`, and any factors created from
:meth:`~zipline.pipeline.Factor.rank` or
:meth:`~zipline.pipeline.Factor.zscore`.
"""
)
class summary_funcs(object):
"""Namespace of functions meant to be used with DailySummary.
"""
@staticmethod
def mean(a, missing_value):
return nanmean(a, axis=1)
@staticmethod
def stddev(a, missing_value):
return nanstd(a, axis=1)
@staticmethod
def max(a, missing_value):
return nanmax(a, axis=1)
@staticmethod
def min(a, missing_value):
return nanmin(a, axis=1)
@staticmethod
def median(a, missing_value):
return nanmedian(a, axis=1)
@staticmethod
def sum(a, missing_value):
return nansum(a, axis=1)
@staticmethod
def notnull_count(a, missing_value):
return (~is_missing(a, missing_value)).sum(axis=1)
names = {k for k in locals() if not k.startswith('_')}
def summary_method(name):
func = getattr(summary_funcs, name)
@expect_types(mask=(Filter, NotSpecifiedType))
@float64_only
def f(self, mask=NotSpecified):
"""Create a 1-dimensional factor computing the {} of self, each day.
Parameters
----------
mask : zipline.pipeline.Filter, optional
A Filter representing assets to consider when computing results.
If supplied, we ignore asset/date pairs where ``mask`` produces
``False``.
Returns
-------
result : zipline.pipeline.Factor
"""
return DailySummary(
func,
self,
mask=mask,
dtype=self.dtype,
)
f.__name__ = func.__name__
f.__doc__ = f.__doc__.format(f.__name__)
return f
class Factor(RestrictedDTypeMixin, ComputableTerm):
"""
Pipeline API expression producing a numerical or date-valued output.
Factors are the most commonly-used Pipeline term, representing the result
of any computation producing a numerical result.
Factors can be combined, both with other Factors and with scalar values,
via any of the builtin mathematical operators (``+``, ``-``, ``*``, etc).
This makes it easy to write complex expressions that combine multiple
Factors. For example, constructing a Factor that computes the average of
two other Factors is simply::
>>> f1 = SomeFactor(...) # doctest: +SKIP
>>> f2 = SomeOtherFactor(...) # doctest: +SKIP
>>> average = (f1 + f2) / 2.0 # doctest: +SKIP
Factors can also be converted into :class:`zipline.pipeline.Filter` objects
via comparison operators: (``<``, ``<=``, ``!=``, ``eq``, ``>``, ``>=``).
There are many natural operators defined on Factors besides the basic
numerical operators. These include methods for identifying missing or
extreme-valued outputs (:meth:`isnull`, :meth:`notnull`, :meth:`isnan`,
:meth:`notnan`), methods for normalizing outputs (:meth:`rank`,
:meth:`demean`, :meth:`zscore`), and methods for constructing Filters based
on rank-order properties of results (:meth:`top`, :meth:`bottom`,
:meth:`percentile_between`).
"""
ALLOWED_DTYPES = FACTOR_DTYPES # Used by RestrictedDTypeMixin
# Dynamically add functions for creating NumExprFactor/NumExprFilter
# instances.
clsdict = locals()
clsdict.update(
{
method_name_for_op(op): binary_operator(op)
# Don't override __eq__ because it breaks comparisons on tuples of
# Factors.
for op in MATH_BINOPS.union(COMPARISONS - {'=='})
}
)
clsdict.update(
{
method_name_for_op(op, commute=True): reflected_binary_operator(op)
for op in MATH_BINOPS
}
)
clsdict.update(
{
unary_op_name(op): unary_operator(op)
for op in UNARY_OPS
}
)
clsdict.update(
{
funcname: function_application(funcname)
for funcname in NUMEXPR_MATH_FUNCS
}
)
__truediv__ = clsdict['__div__']
__rtruediv__ = clsdict['__rdiv__']
# Add summary functions.
clsdict.update(
{name: summary_method(name) for name in summary_funcs.names},
)
del clsdict # don't pollute the class namespace with this.
eq = binary_operator('==')
@expect_types(
mask=(Filter, NotSpecifiedType),
groupby=(Classifier, NotSpecifiedType),
)
@float64_only
def demean(self, mask=NotSpecified, groupby=NotSpecified):
"""
        Construct a Factor that computes ``self`` and subtracts the mean from
        each row of the result.
If ``mask`` is supplied, ignore values where ``mask`` returns False
when computing row means, and output NaN anywhere the mask is False.
If ``groupby`` is supplied, compute by partitioning each row based on
the values produced by ``groupby``, de-meaning the partitioned arrays,
and stitching the sub-results back together.
Parameters
----------
mask : zipline.pipeline.Filter, optional
A Filter defining values to ignore when computing means.
groupby : zipline.pipeline.Classifier, optional
A classifier defining partitions over which to compute means.
Examples
--------
Let ``f`` be a Factor which would produce the following output::
AAPL MSFT MCD BK
2017-03-13 1.0 2.0 3.0 4.0
2017-03-14 1.5 2.5 3.5 1.0
2017-03-15 2.0 3.0 4.0 1.5
2017-03-16 2.5 3.5 1.0 2.0
Let ``c`` be a Classifier producing the following output::
AAPL MSFT MCD BK
2017-03-13 1 1 2 2
2017-03-14 1 1 2 2
2017-03-15 1 1 2 2
2017-03-16 1 1 2 2
Let ``m`` be a Filter producing the following output::
AAPL MSFT MCD BK
2017-03-13 False True True True
2017-03-14 True False True True
2017-03-15 True True False True
2017-03-16 True True True False
Then ``f.demean()`` will subtract the mean from each row produced by
``f``.
::
AAPL MSFT MCD BK
2017-03-13 -1.500 -0.500 0.500 1.500
2017-03-14 -0.625 0.375 1.375 -1.125
2017-03-15 -0.625 0.375 1.375 -1.125
2017-03-16 0.250 1.250 -1.250 -0.250
``f.demean(mask=m)`` will subtract the mean from each row, but means
        will be calculated ignoring values on the diagonal, and NaNs will be
        written to the diagonal in the output. Diagonal values are ignored
because they are the locations where the mask ``m`` produced False.
::
AAPL MSFT MCD BK
2017-03-13 NaN -1.000 0.000 1.000
2017-03-14 -0.500 NaN 1.500 -1.000
2017-03-15 -0.166 0.833 NaN -0.666
2017-03-16 0.166 1.166 -1.333 NaN
``f.demean(groupby=c)`` will subtract the group-mean of AAPL/MSFT and
MCD/BK from their respective entries. The AAPL/MSFT are grouped
together because both assets always produce 1 in the output of the
classifier ``c``. Similarly, MCD/BK are grouped together because they
always produce 2.
::
AAPL MSFT MCD BK
2017-03-13 -0.500 0.500 -0.500 0.500
2017-03-14 -0.500 0.500 1.250 -1.250
2017-03-15 -0.500 0.500 1.250 -1.250
2017-03-16 -0.500 0.500 -0.500 0.500
``f.demean(mask=m, groupby=c)`` will also subtract the group-mean of
AAPL/MSFT and MCD/BK, but means will be calculated ignoring values on
        the diagonal, and NaNs will be written to the diagonal in the output.
::
AAPL MSFT MCD BK
2017-03-13 NaN 0.000 -0.500 0.500
2017-03-14 0.000 NaN 1.250 -1.250
2017-03-15 -0.500 0.500 NaN 0.000
2017-03-16 -0.500 0.500 0.000 NaN
Notes
-----
Mean is sensitive to the magnitudes of outliers. When working with
        a factor that can potentially produce large outliers, it is often useful
to use the ``mask`` parameter to discard values at the extremes of the
distribution::
>>> base = MyFactor(...) # doctest: +SKIP
>>> normalized = base.demean(
... mask=base.percentile_between(1, 99),
... ) # doctest: +SKIP
``demean()`` is only supported on Factors of dtype float64.
See Also
--------
:meth:`pandas.DataFrame.groupby`
"""
return GroupedRowTransform(
transform=demean,
transform_args=(),
factor=self,
groupby=groupby,
dtype=self.dtype,
missing_value=self.missing_value,
window_safe=self.window_safe,
mask=mask,
)
@expect_types(
mask=(Filter, NotSpecifiedType),
groupby=(Classifier, NotSpecifiedType),
)
@float64_only
def zscore(self, mask=NotSpecified, groupby=NotSpecified):
"""
Construct a Factor that Z-Scores each day's results.
The Z-Score of a row is defined as::
(row - row.mean()) / row.stddev()
If ``mask`` is supplied, ignore values where ``mask`` returns False
when computing row means and standard deviations, and output NaN
anywhere the mask is False.
If ``groupby`` is supplied, compute by partitioning each row based on
the values produced by ``groupby``, z-scoring the partitioned arrays,
and stitching the sub-results back together.
Parameters
----------
mask : zipline.pipeline.Filter, optional
A Filter defining values to ignore when Z-Scoring.
groupby : zipline.pipeline.Classifier, optional
A classifier defining partitions over which to compute Z-Scores.
Returns
-------
zscored : zipline.pipeline.Factor
            A Factor that z-scores the output of ``self``.
Notes
-----
Mean and standard deviation are sensitive to the magnitudes of
        outliers. When working with a factor that can potentially produce large
outliers, it is often useful to use the ``mask`` parameter to discard
values at the extremes of the distribution::
>>> base = MyFactor(...) # doctest: +SKIP
>>> normalized = base.zscore(
... mask=base.percentile_between(1, 99),
... ) # doctest: +SKIP
``zscore()`` is only supported on Factors of dtype float64.
Examples
--------
See :meth:`~zipline.pipeline.Factor.demean` for an in-depth
example of the semantics for ``mask`` and ``groupby``.
See Also
--------
:meth:`pandas.DataFrame.groupby`
"""
return GroupedRowTransform(
transform=zscore,
transform_args=(),
factor=self,
groupby=groupby,
dtype=self.dtype,
missing_value=self.missing_value,
mask=mask,
window_safe=True,
)
def rank(self,
method='ordinal',
ascending=True,
mask=NotSpecified,
groupby=NotSpecified):
"""
Construct a new Factor representing the sorted rank of each column
within each row.
Parameters
----------
method : str, {'ordinal', 'min', 'max', 'dense', 'average'}
The method used to assign ranks to tied elements. See
`scipy.stats.rankdata` for a full description of the semantics for
each ranking method. Default is 'ordinal'.
ascending : bool, optional
Whether to return sorted rank in ascending or descending order.
Default is True.
mask : zipline.pipeline.Filter, optional
A Filter representing assets to consider when computing ranks.
If mask is supplied, ranks are computed ignoring any asset/date
pairs for which `mask` produces a value of False.
groupby : zipline.pipeline.Classifier, optional
A classifier defining partitions over which to perform ranking.
Returns
-------
ranks : zipline.pipeline.Factor
A new factor that will compute the ranking of the data produced by
`self`.
Notes
-----
The default value for `method` is different from the default for
`scipy.stats.rankdata`. See that function's documentation for a full
description of the valid inputs to `method`.
Missing or non-existent data on a given day will cause an asset to be
given a rank of NaN for that day.
See Also
--------
:func:`scipy.stats.rankdata`
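        Examples
        --------
        A rough usage sketch ranking within groups; ``my_factor`` and
        ``sector`` stand in for a Factor and a Classifier defined elsewhere::
        >>> ranks = my_factor.rank(ascending=False, groupby=sector) # doctest: +SKIP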
"""
if groupby is NotSpecified:
return Rank(self, method=method, ascending=ascending, mask=mask)
return GroupedRowTransform(
transform=rankdata if ascending else rankdata_1d_descending,
transform_args=(method,),
factor=self,
groupby=groupby,
dtype=float64_dtype,
missing_value=nan,
mask=mask,
window_safe=True,
)
@expect_types(
target=Term, correlation_length=int, mask=(Filter, NotSpecifiedType),
)
@templated_docstring(CORRELATION_METHOD_NOTE=CORRELATION_METHOD_NOTE)
def pearsonr(self, target, correlation_length, mask=NotSpecified):
"""
Construct a new Factor that computes rolling pearson correlation
coefficients between ``target`` and the columns of ``self``.
Parameters
----------
target : zipline.pipeline.Term
The term used to compute correlations against each column of data
produced by `self`. This may be a Factor, a BoundColumn or a Slice.
If `target` is two-dimensional, correlations are computed
asset-wise.
correlation_length : int
Length of the lookback window over which to compute each
correlation coefficient.
mask : zipline.pipeline.Filter, optional
A Filter describing which assets should have their correlation with
the target slice computed each day.
Returns
-------
correlations : zipline.pipeline.Factor
A new Factor that will compute correlations between ``target`` and
the columns of ``self``.
Notes
-----
{CORRELATION_METHOD_NOTE}
Examples
--------
Suppose we want to create a factor that computes the correlation
between AAPL's 10-day returns and the 10-day returns of all other
assets, computing each correlation over 30 days. This can be achieved
by doing the following::
returns = Returns(window_length=10)
returns_slice = returns[sid(24)]
aapl_correlations = returns.pearsonr(
target=returns_slice, correlation_length=30,
)
This is equivalent to doing::
aapl_correlations = RollingPearsonOfReturns(
target=sid(24), returns_length=10, correlation_length=30,
)
See Also
--------
:func:`scipy.stats.pearsonr`
:class:`zipline.pipeline.factors.RollingPearsonOfReturns`
:meth:`Factor.spearmanr`
"""
from .statistical import RollingPearson
return RollingPearson(
base_factor=self,
target=target,
correlation_length=correlation_length,
mask=mask,
)
@expect_types(
target=Term, correlation_length=int, mask=(Filter, NotSpecifiedType),
)
@templated_docstring(CORRELATION_METHOD_NOTE=CORRELATION_METHOD_NOTE)
def spearmanr(self, target, correlation_length, mask=NotSpecified):
"""
Construct a new Factor that computes rolling spearman rank correlation
coefficients between ``target`` and the columns of ``self``.
Parameters
----------
target : zipline.pipeline.Term
The term used to compute correlations against each column of data
produced by `self`. This may be a Factor, a BoundColumn or a Slice.
If `target` is two-dimensional, correlations are computed
asset-wise.
correlation_length : int
Length of the lookback window over which to compute each
correlation coefficient.
mask : zipline.pipeline.Filter, optional
A Filter describing which assets should have their correlation with
the target slice computed each day.
Returns
-------
correlations : zipline.pipeline.Factor
A new Factor that will compute correlations between ``target`` and
the columns of ``self``.
Notes
-----
{CORRELATION_METHOD_NOTE}
Examples
--------
Suppose we want to create a factor that computes the correlation
between AAPL's 10-day returns and the 10-day returns of all other
assets, computing each correlation over 30 days. This can be achieved
by doing the following::
returns = Returns(window_length=10)
returns_slice = returns[sid(24)]
aapl_correlations = returns.spearmanr(
target=returns_slice, correlation_length=30,
)
This is equivalent to doing::
aapl_correlations = RollingSpearmanOfReturns(
target=sid(24), returns_length=10, correlation_length=30,
)
See Also
--------
:func:`scipy.stats.spearmanr`
:meth:`Factor.pearsonr`
"""
from .statistical import RollingSpearman
return RollingSpearman(
base_factor=self,
target=target,
correlation_length=correlation_length,
mask=mask,
)
@expect_types(
target=Term, regression_length=int, mask=(Filter, NotSpecifiedType),
)
@templated_docstring(CORRELATION_METHOD_NOTE=CORRELATION_METHOD_NOTE)
def linear_regression(self, target, regression_length, mask=NotSpecified):
"""
Construct a new Factor that performs an ordinary least-squares
regression predicting the columns of `self` from `target`.
Parameters
----------
target : zipline.pipeline.Term
The term to use as the predictor/independent variable in each
regression. This may be a Factor, a BoundColumn or a Slice. If
`target` is two-dimensional, regressions are computed asset-wise.
regression_length : int
Length of the lookback window over which to compute each
regression.
mask : zipline.pipeline.Filter, optional
A Filter describing which assets should be regressed with the
target slice each day.
Returns
-------
regressions : zipline.pipeline.Factor
A new Factor that will compute linear regressions of `target`
against the columns of `self`.
Notes
-----
{CORRELATION_METHOD_NOTE}
Examples
--------
Suppose we want to create a factor that regresses AAPL's 10-day returns
against the 10-day returns of all other assets, computing each
regression over 30 days. This can be achieved by doing the following::
returns = Returns(window_length=10)
returns_slice = returns[sid(24)]
aapl_regressions = returns.linear_regression(
target=returns_slice, regression_length=30,
)
This is equivalent to doing::
aapl_regressions = RollingLinearRegressionOfReturns(
target=sid(24), returns_length=10, regression_length=30,
)
See Also
--------
:func:`scipy.stats.linregress`
"""
from .statistical import RollingLinearRegression
return RollingLinearRegression(
dependent=self,
independent=target,
regression_length=regression_length,
mask=mask,
)
@expect_types(
min_percentile=(int, float),
max_percentile=(int, float),
mask=(Filter, NotSpecifiedType),
groupby=(Classifier, NotSpecifiedType),
)
@float64_only
def winsorize(self,
min_percentile,
max_percentile,
mask=NotSpecified,
groupby=NotSpecified):
"""
Construct a new factor that winsorizes the result of this factor.
Winsorizing changes values ranked less than the minimum percentile to
the value at the minimum percentile. Similarly, values ranking above
the maximum percentile are changed to the value at the maximum
percentile.
Winsorizing is useful for limiting the impact of extreme data points
without completely removing those points.
If ``mask`` is supplied, ignore values where ``mask`` returns False
when computing percentile cutoffs, and output NaN anywhere the mask is
False.
        If ``groupby`` is supplied, winsorization is applied separately
        to each group defined by ``groupby``.
Parameters
----------
min_percentile: float, int
Entries with values at or below this percentile will be replaced
with the (len(input) * min_percentile)th lowest value. If low
values should not be clipped, use 0.
max_percentile: float, int
Entries with values at or above this percentile will be replaced
with the (len(input) * max_percentile)th lowest value. If high
values should not be clipped, use 1.
mask : zipline.pipeline.Filter, optional
A Filter defining values to ignore when winsorizing.
groupby : zipline.pipeline.Classifier, optional
A classifier defining partitions over which to winsorize.
Returns
-------
winsorized : zipline.pipeline.Factor
A Factor producing a winsorized version of self.
Examples
--------
.. code-block:: python
price = USEquityPricing.close.latest
columns={
'PRICE': price,
                'WINSOR_1': price.winsorize(
min_percentile=0.25, max_percentile=0.75
),
'WINSOR_2': price.winsorize(
min_percentile=0.50, max_percentile=1.0
),
'WINSOR_3': price.winsorize(
min_percentile=0.0, max_percentile=0.5
),
}
Given a pipeline with columns, defined above, the result for a
given day could look like:
::
'PRICE' 'WINSOR_1' 'WINSOR_2' 'WINSOR_3'
Asset_1 1 2 4 3
Asset_2 2 2 4 3
Asset_3 3 3 4 3
Asset_4 4 4 4 4
Asset_5 5 5 5 4
Asset_6 6 5 5 4
See Also
--------
:func:`scipy.stats.mstats.winsorize`
:meth:`pandas.DataFrame.groupby`
"""
if not 0.0 <= min_percentile < max_percentile <= 1.0:
raise BadPercentileBounds(
min_percentile=min_percentile,
max_percentile=max_percentile,
upper_bound=1.0,
)
return GroupedRowTransform(
transform=winsorize,
transform_args=(min_percentile, max_percentile),
factor=self,
groupby=groupby,
dtype=self.dtype,
missing_value=self.missing_value,
mask=mask,
window_safe=self.window_safe,
)
@expect_types(bins=int, mask=(Filter, NotSpecifiedType))
def quantiles(self, bins, mask=NotSpecified):
"""
Construct a Classifier computing quantiles of the output of ``self``.
        Every non-NaN data point in the output is labelled with an integer value
from 0 to (bins - 1). NaNs are labelled with -1.
If ``mask`` is supplied, ignore data points in locations for which
``mask`` produces False, and emit a label of -1 at those locations.
Parameters
----------
bins : int
Number of bins labels to compute.
mask : zipline.pipeline.Filter, optional
Mask of values to ignore when computing quantiles.
Returns
-------
quantiles : zipline.pipeline.Classifier
A classifier producing integer labels ranging from 0 to (bins - 1).
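        Examples
        --------
        A rough sketch bucketing a factor into terciles (labels 0, 1, and 2,
        with -1 for missing data); ``my_factor`` stands in for a Factor
        defined elsewhere::
        >>> terciles = my_factor.quantiles(bins=3) # doctest: +SKIP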
"""
if mask is NotSpecified:
mask = self.mask
return Quantiles(inputs=(self,), bins=bins, mask=mask)
@expect_types(mask=(Filter, NotSpecifiedType))
def quartiles(self, mask=NotSpecified):
"""
Construct a Classifier computing quartiles over the output of ``self``.
        Every non-NaN data point in the output is labelled with a value of either
0, 1, 2, or 3, corresponding to the first, second, third, or fourth
quartile over each row. NaN data points are labelled with -1.
If ``mask`` is supplied, ignore data points in locations for which
``mask`` produces False, and emit a label of -1 at those locations.
Parameters
----------
mask : zipline.pipeline.Filter, optional
Mask of values to ignore when computing quartiles.
Returns
-------
quartiles : zipline.pipeline.Classifier
A classifier producing integer labels ranging from 0 to 3.
"""
return self.quantiles(bins=4, mask=mask)
@expect_types(mask=(Filter, NotSpecifiedType))
def quintiles(self, mask=NotSpecified):
"""
Construct a Classifier computing quintile labels on ``self``.
        Every non-NaN data point in the output is labelled with a value of either
        0, 1, 2, 3, or 4, corresponding to quintiles over each row. NaN data
points are labelled with -1.
If ``mask`` is supplied, ignore data points in locations for which
``mask`` produces False, and emit a label of -1 at those locations.
Parameters
----------
mask : zipline.pipeline.Filter, optional
Mask of values to ignore when computing quintiles.
Returns
-------
quintiles : zipline.pipeline.Classifier
A classifier producing integer labels ranging from 0 to 4.
"""
return self.quantiles(bins=5, mask=mask)
@expect_types(mask=(Filter, NotSpecifiedType))
def deciles(self, mask=NotSpecified):
"""
Construct a Classifier computing decile labels on ``self``.
        Every non-NaN data point in the output is labelled with a value from 0 to
        9, corresponding to deciles over each row. NaN data points are labelled
with -1.
If ``mask`` is supplied, ignore data points in locations for which
``mask`` produces False, and emit a label of -1 at those locations.
Parameters
----------
mask : zipline.pipeline.Filter, optional
Mask of values to ignore when computing deciles.
Returns
-------
deciles : zipline.pipeline.Classifier
A classifier producing integer labels ranging from 0 to 9.
"""
return self.quantiles(bins=10, mask=mask)
def top(self, N, mask=NotSpecified, groupby=NotSpecified):
"""
Construct a Filter matching the top N asset values of self each day.
If ``groupby`` is supplied, returns a Filter matching the top N asset
values for each group.
Parameters
----------
N : int
Number of assets passing the returned filter each day.
mask : zipline.pipeline.Filter, optional
A Filter representing assets to consider when computing ranks.
If mask is supplied, top values are computed ignoring any
asset/date pairs for which `mask` produces a value of False.
groupby : zipline.pipeline.Classifier, optional
A classifier defining partitions over which to perform ranking.
Returns
-------
filter : zipline.pipeline.Filter
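        Examples
        --------
        A rough sketch selecting the 50 highest values per group; ``my_factor``
        and ``sector`` stand in for a Factor and a Classifier defined
        elsewhere::
        >>> longs = my_factor.top(50, groupby=sector) # doctest: +SKIP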
"""
if N == 1:
# Special case: if N == 1, we can avoid doing a full sort on every
# group, which is a big win.
return self._maximum(mask=mask, groupby=groupby)
return self.rank(ascending=False, mask=mask, groupby=groupby) <= N
def bottom(self, N, mask=NotSpecified, groupby=NotSpecified):
"""
Construct a Filter matching the bottom N asset values of self each day.
If ``groupby`` is supplied, returns a Filter matching the bottom N
asset values **for each group** defined by ``groupby``.
Parameters
----------
N : int
Number of assets passing the returned filter each day.
mask : zipline.pipeline.Filter, optional
A Filter representing assets to consider when computing ranks.
If mask is supplied, bottom values are computed ignoring any
asset/date pairs for which `mask` produces a value of False.
groupby : zipline.pipeline.Classifier, optional
A classifier defining partitions over which to perform ranking.
Returns
-------
filter : zipline.pipeline.Filter
"""
return self.rank(ascending=True, mask=mask, groupby=groupby) <= N
def _maximum(self, mask=NotSpecified, groupby=NotSpecified):
return MaximumFilter(self, groupby=groupby, mask=mask)
def percentile_between(self,
min_percentile,
max_percentile,
mask=NotSpecified):
"""
Construct a Filter matching values of self that fall within the range
defined by ``min_percentile`` and ``max_percentile``.
Parameters
----------
min_percentile : float [0.0, 100.0]
Return True for assets falling above this percentile in the data.
max_percentile : float [0.0, 100.0]
Return True for assets falling below this percentile in the data.
mask : zipline.pipeline.Filter, optional
            A Filter representing assets to consider when calculating
            percentile thresholds. If mask is supplied, percentile cutoffs
are computed each day using only assets for which ``mask`` returns
True. Assets for which ``mask`` produces False will produce False
in the output of this Factor as well.
Returns
-------
out : zipline.pipeline.Filter
A new filter that will compute the specified percentile-range mask.
"""
return PercentileFilter(
self,
min_percentile=min_percentile,
max_percentile=max_percentile,
mask=mask,
)
@if_not_float64_tell_caller_to_use_isnull
def isnan(self):
"""
A Filter producing True for all values where this Factor is NaN.
Returns
-------
nanfilter : zipline.pipeline.Filter
"""
return self != self
@if_not_float64_tell_caller_to_use_isnull
def notnan(self):
"""
A Filter producing True for values where this Factor is not NaN.
Returns
-------
nanfilter : zipline.pipeline.Filter
"""
return ~self.isnan()
@if_not_float64_tell_caller_to_use_isnull
def isfinite(self):
"""
A Filter producing True for values where this Factor is anything but
NaN, inf, or -inf.
"""
return (-inf < self) & (self < inf)
def clip(self, min_bound, max_bound, mask=NotSpecified):
"""
Clip (limit) the values in a factor.
Given an interval, values outside the interval are clipped to the
interval edges. For example, if an interval of ``[0, 1]`` is specified,
values smaller than 0 become 0, and values larger than 1 become 1.
Parameters
----------
min_bound : float
The minimum value to use.
max_bound : float
The maximum value to use.
mask : zipline.pipeline.Filter, optional
A Filter representing assets to consider when clipping.
Notes
-----
        To only clip values on one side, ``-np.inf`` and ``np.inf`` may be
passed. For example, to only clip the maximum value but not clip a
minimum value:
.. code-block:: python
factor.clip(min_bound=-np.inf, max_bound=user_provided_max)
See Also
--------
numpy.clip
"""
from .basic import Clip
return Clip(
inputs=[self],
min_bound=min_bound,
max_bound=max_bound,
)
@classmethod
def _principal_computable_term_type(cls):
return Factor
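
# Illustrative sketch (not part of the original module): how the ranking and
# bucketing helpers documented on ``Factor`` above (``deciles``, ``top``,
# ``rank``) are typically combined inside a Pipeline definition. The column
# names and the 500-asset cutoff are arbitrary choices for the example.
def _example_factor_buckets():
    from zipline.pipeline import Pipeline
    from zipline.pipeline.factors import AverageDollarVolume

    adv = AverageDollarVolume(window_length=30)
    return Pipeline(
        columns={
            'adv_decile': adv.deciles(),            # int labels 0-9, -1 for NaN
            'adv_rank': adv.rank(ascending=False),  # 1.0 = highest dollar volume
        },
        screen=adv.top(500),                        # keep the 500 most liquid names
    )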
class NumExprFactor(NumericalExpression, Factor):
"""
Factor computed from a numexpr expression.
Parameters
----------
expr : string
A string suitable for passing to numexpr. All variables in 'expr'
should be of the form "x_i", where i is the index of the corresponding
factor input in 'binds'.
binds : tuple
A tuple of factors to use as inputs.
Notes
-----
NumExprFactors are constructed by numerical operators like `+` and `-`.
Users should rarely need to construct a NumExprFactor directly.
"""
pass
class GroupedRowTransform(Factor):
"""
A Factor that transforms an input factor by applying a row-wise
shape-preserving transformation on classifier-defined groups of that
Factor.
This is most often useful for normalization operators like ``zscore`` or
``demean`` or for performing ranking using ``rank``.
Parameters
----------
transform : function[ndarray[ndim=1] -> ndarray[ndim=1]]
Function to apply over each row group.
factor : zipline.pipeline.Factor
The factor providing baseline data to transform.
mask : zipline.pipeline.Filter
Mask of entries to ignore when calculating transforms.
groupby : zipline.pipeline.Classifier
Classifier partitioning ``factor`` into groups to use when calculating
means.
transform_args : tuple[hashable]
Additional positional arguments to forward to ``transform``.
Notes
-----
Users should rarely construct instances of this factor directly. Instead,
they should construct instances via factor normalization methods like
``zscore`` and ``demean`` or using ``rank`` with ``groupby``.
See Also
--------
zipline.pipeline.Factor.zscore
zipline.pipeline.Factor.demean
zipline.pipeline.Factor.rank
"""
window_length = 0
def __new__(cls,
transform,
transform_args,
factor,
groupby,
dtype,
missing_value,
mask,
**kwargs):
if mask is NotSpecified:
mask = factor.mask
else:
mask = mask & factor.mask
if groupby is NotSpecified:
groupby = Everything(mask=mask)
return super(GroupedRowTransform, cls).__new__(
GroupedRowTransform,
transform=transform,
transform_args=transform_args,
inputs=(factor, groupby),
missing_value=missing_value,
mask=mask,
dtype=dtype,
**kwargs
)
def _init(self, transform, transform_args, *args, **kwargs):
self._transform = transform
self._transform_args = transform_args
return super(GroupedRowTransform, self)._init(*args, **kwargs)
@classmethod
def _static_identity(cls, transform, transform_args, *args, **kwargs):
return (
super(GroupedRowTransform, cls)._static_identity(*args, **kwargs),
transform,
transform_args,
)
def _compute(self, arrays, dates, assets, mask):
data = arrays[0]
group_labels, null_label = self.inputs[1]._to_integral(arrays[1])
# Make a copy with the null code written to masked locations.
group_labels = where(mask, group_labels, null_label)
return where(
group_labels != null_label,
naive_grouped_rowwise_apply(
data=data,
group_labels=group_labels,
func=self._transform,
func_args=self._transform_args,
out=empty_like(data, dtype=self.dtype),
),
self.missing_value,
)
@property
def transform_name(self):
return self._transform.__name__
def graph_repr(self):
"""Short repr to use when rendering Pipeline graphs."""
return type(self).__name__ + '(%r)' % self.transform_name
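
# Illustrative sketch (not part of the original module): GroupedRowTransform is
# normally reached through ``Factor.demean``/``Factor.zscore``/``Factor.rank``
# with a ``groupby`` classifier rather than constructed directly. Here monthly
# returns are demeaned within liquidity deciles; the factor choices are
# arbitrary.
def _example_grouped_demean():
    from zipline.pipeline.factors import AverageDollarVolume, Returns

    monthly_returns = Returns(window_length=21)
    liquidity_decile = AverageDollarVolume(window_length=30).deciles()
    # Subtract each asset's group mean, where groups are liquidity deciles.
    return monthly_returns.demean(groupby=liquidity_decile)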
class Rank(SingleInputMixin, Factor):
"""
A Factor representing the row-wise rank data of another Factor.
Parameters
----------
factor : zipline.pipeline.Factor
The factor on which to compute ranks.
method : str, {'average', 'min', 'max', 'dense', 'ordinal'}
The method used to assign ranks to tied elements. See
`scipy.stats.rankdata` for a full description of the semantics for each
ranking method.
See Also
--------
:func:`scipy.stats.rankdata`
:class:`Factor.rank`
Notes
-----
Most users should call Factor.rank rather than directly construct an
instance of this class.
"""
window_length = 0
dtype = float64_dtype
window_safe = True
def __new__(cls, factor, method, ascending, mask):
return super(Rank, cls).__new__(
cls,
inputs=(factor,),
method=method,
ascending=ascending,
mask=mask,
)
def _init(self, method, ascending, *args, **kwargs):
self._method = method
self._ascending = ascending
return super(Rank, self)._init(*args, **kwargs)
@classmethod
def _static_identity(cls, method, ascending, *args, **kwargs):
return (
super(Rank, cls)._static_identity(*args, **kwargs),
method,
ascending,
)
def _validate(self):
"""
Verify that the stored rank method is valid.
"""
if self._method not in _RANK_METHODS:
raise UnknownRankMethod(
method=self._method,
choices=set(_RANK_METHODS),
)
return super(Rank, self)._validate()
def _compute(self, arrays, dates, assets, mask):
"""
For each row in the input, compute a like-shaped array of per-row
ranks.
"""
return masked_rankdata_2d(
arrays[0],
mask,
self.inputs[0].missing_value,
self._method,
self._ascending,
)
def __repr__(self):
if self.mask is AssetExists():
# Don't include mask in repr if it's the default.
mask_info = ""
else:
mask_info = ", mask={}".format(self.mask.recursive_repr())
return "{type}({input_}, method='{method}'{mask_info})".format(
type=type(self).__name__,
input_=self.inputs[0].recursive_repr(),
method=self._method,
mask_info=mask_info,
)
def graph_repr(self):
# Graphviz interprets `\l` as "divide label into lines, left-justified"
return "Rank:\\l method: {!r}\\l mask: {}\\l".format(
self._method,
type(self.mask).__name__,
)
class CustomFactor(PositiveWindowLengthMixin, CustomTermMixin, Factor):
'''
Base class for user-defined Factors.
Parameters
----------
inputs : iterable, optional
An iterable of `BoundColumn` instances (e.g. USEquityPricing.close),
describing the data to load and pass to `self.compute`. If this
argument is not passed to the CustomFactor constructor, we look for a
class-level attribute named `inputs`.
outputs : iterable[str], optional
An iterable of strings which represent the names of each output this
factor should compute and return. If this argument is not passed to the
CustomFactor constructor, we look for a class-level attribute named
`outputs`.
window_length : int, optional
Number of rows to pass for each input. If this argument is not passed
to the CustomFactor constructor, we look for a class-level attribute
named `window_length`.
mask : zipline.pipeline.Filter, optional
A Filter describing the assets on which we should compute each day.
Each call to ``CustomFactor.compute`` will only receive assets for
which ``mask`` produced True on the day for which compute is being
called.
Notes
-----
Users implementing their own Factors should subclass CustomFactor and
implement a method named `compute` with the following signature:
.. code-block:: python
def compute(self, today, assets, out, *inputs):
...
On each simulation date, ``compute`` will be called with the current date,
an array of sids, an output array, and an input array for each expression
passed as inputs to the CustomFactor constructor.
The specific types of the values passed to `compute` are as follows::
today : np.datetime64[ns]
Row label for the last row of all arrays passed as `inputs`.
assets : np.array[int64, ndim=1]
          Column labels for `out` and `inputs`.
out : np.array[self.dtype, ndim=1]
Output array of the same shape as `assets`. `compute` should write
its desired return values into `out`. If multiple outputs are
specified, `compute` should write its desired return values into
`out.<output_name>` for each output name in `self.outputs`.
*inputs : tuple of np.array
Raw data arrays corresponding to the values of `self.inputs`.
``compute`` functions should expect to be passed NaN values for dates on
which no data was available for an asset. This may include dates on which
an asset did not yet exist.
For example, if a CustomFactor requires 10 rows of close price data, and
asset A started trading on Monday June 2nd, 2014, then on Tuesday, June
3rd, 2014, the column of input data for asset A will have 9 leading NaNs
for the preceding days on which data was not yet available.
Examples
--------
A CustomFactor with pre-declared defaults:
.. code-block:: python
class TenDayRange(CustomFactor):
"""
Computes the difference between the highest high in the last 10
days and the lowest low.
Pre-declares high and low as default inputs and `window_length` as
10.
"""
inputs = [USEquityPricing.high, USEquityPricing.low]
window_length = 10
def compute(self, today, assets, out, highs, lows):
from numpy import nanmin, nanmax
highest_highs = nanmax(highs, axis=0)
lowest_lows = nanmin(lows, axis=0)
out[:] = highest_highs - lowest_lows
# Doesn't require passing inputs or window_length because they're
# pre-declared as defaults for the TenDayRange class.
ten_day_range = TenDayRange()
A CustomFactor without defaults:
.. code-block:: python
class MedianValue(CustomFactor):
"""
Computes the median value of an arbitrary single input over an
            arbitrary window.
Does not declare any defaults, so values for `window_length` and
`inputs` must be passed explicitly on every construction.
"""
def compute(self, today, assets, out, data):
from numpy import nanmedian
                out[:] = nanmedian(data, axis=0)
# Values for `inputs` and `window_length` must be passed explicitly to
# MedianValue.
median_close10 = MedianValue([USEquityPricing.close], window_length=10)
median_low15 = MedianValue([USEquityPricing.low], window_length=15)
A CustomFactor with multiple outputs:
.. code-block:: python
class MultipleOutputs(CustomFactor):
inputs = [USEquityPricing.close]
outputs = ['alpha', 'beta']
window_length = N
def compute(self, today, assets, out, close):
computed_alpha, computed_beta = some_function(close)
out.alpha[:] = computed_alpha
out.beta[:] = computed_beta
# Each output is returned as its own Factor upon instantiation.
alpha, beta = MultipleOutputs()
# Equivalently, we can create a single factor instance and access each
# output as an attribute of that instance.
multiple_outputs = MultipleOutputs()
alpha = multiple_outputs.alpha
beta = multiple_outputs.beta
Note: If a CustomFactor has multiple outputs, all outputs must have the
same dtype. For instance, in the example above, if alpha is a float then
beta must also be a float.
'''
dtype = float64_dtype
def _validate(self):
try:
super(CustomFactor, self)._validate()
except UnsupportedDataType:
if self.dtype in CLASSIFIER_DTYPES:
raise UnsupportedDataType(
typename=type(self).__name__,
dtype=self.dtype,
hint='Did you mean to create a CustomClassifier?',
)
elif self.dtype in FILTER_DTYPES:
raise UnsupportedDataType(
typename=type(self).__name__,
dtype=self.dtype,
hint='Did you mean to create a CustomFilter?',
)
raise
def __getattribute__(self, name):
outputs = object.__getattribute__(self, 'outputs')
if outputs is NotSpecified:
return super(CustomFactor, self).__getattribute__(name)
elif name in outputs:
return RecarrayField(factor=self, attribute=name)
else:
try:
return super(CustomFactor, self).__getattribute__(name)
except AttributeError:
raise AttributeError(
'Instance of {factor} has no output named {attr!r}. '
'Possible choices are: {choices}.'.format(
factor=type(self).__name__,
attr=name,
choices=self.outputs,
)
)
def __iter__(self):
if self.outputs is NotSpecified:
raise ValueError(
'{factor} does not have multiple outputs.'.format(
factor=type(self).__name__,
)
)
return (RecarrayField(self, attr) for attr in self.outputs)
class RecarrayField(SingleInputMixin, Factor):
"""
A single field from a multi-output factor.
"""
def __new__(cls, factor, attribute):
return super(RecarrayField, cls).__new__(
cls,
attribute=attribute,
inputs=[factor],
window_length=0,
mask=factor.mask,
dtype=factor.dtype,
missing_value=factor.missing_value,
window_safe=factor.window_safe
)
def _init(self, attribute, *args, **kwargs):
self._attribute = attribute
return super(RecarrayField, self)._init(*args, **kwargs)
@classmethod
def _static_identity(cls, attribute, *args, **kwargs):
return (
super(RecarrayField, cls)._static_identity(*args, **kwargs),
attribute,
)
def _compute(self, windows, dates, assets, mask):
return windows[0][self._attribute]
def graph_repr(self):
return "{}.{}".format(self.inputs[0].recursive_repr(), self._attribute)
class Latest(LatestMixin, CustomFactor):
"""
Factor producing the most recently-known value of `inputs[0]` on each day.
The `.latest` attribute of DataSet columns returns an instance of this
Factor.
"""
window_length = 1
def compute(self, today, assets, out, data):
out[:] = data[-1]
class DailySummary(SingleInputMixin, Factor):
"""1D Factor that computes a summary statistic across all assets.
"""
ndim = 1
window_length = 0
params = ('func',)
def __new__(cls, func, input_, mask, dtype):
# TODO: We should be able to support datetime64 as well, but that
# requires extra care for handling NaT.
if dtype != float64_dtype:
raise AssertionError(
"DailySummary only supports float64 dtype, got {}"
.format(dtype),
)
return super(DailySummary, cls).__new__(
cls,
inputs=[input_],
dtype=dtype,
missing_value=nan,
window_safe=input_.window_safe,
func=func,
mask=mask,
)
def _compute(self, arrays, dates, assets, mask):
func = self.params['func']
data = arrays[0]
data[~mask] = nan
if not isnan(self.inputs[0].missing_value):
data[data == self.inputs[0].missing_value] = nan
return as_column(func(data, self.inputs[0].missing_value))
def __repr__(self):
return "{}.{}()".format(
self.inputs[0].recursive_repr(),
self.params['func'].__name__,
)
graph_repr = recursive_repr = __repr__
# Functions to be passed to GroupedRowTransform. These aren't defined inline
# because the transformation function is part of the instance hash key.
def demean(row):
return row - nanmean(row)
def zscore(row):
return (row - nanmean(row)) / nanstd(row)
def winsorize(row, min_percentile, max_percentile):
"""
This implementation is based on scipy.stats.mstats.winsorize
"""
a = row.copy()
nan_count = isnan(row).sum()
nonnan_count = a.size - nan_count
# NOTE: argsort() sorts nans to the end of the array.
idx = a.argsort()
# Set values at indices below the min percentile to the value of the entry
# at the cutoff.
if min_percentile > 0:
lower_cutoff = int(min_percentile * nonnan_count)
a[idx[:lower_cutoff]] = a[idx[lower_cutoff]]
# Set values at indices above the max percentile to the value of the entry
# at the cutoff.
if max_percentile < 1:
upper_cutoff = int(ceil(nonnan_count * max_percentile))
# if max_percentile is close to 1, then upper_cutoff might not
# remove any values.
if upper_cutoff < nonnan_count:
start_of_nans = (-nan_count) if nan_count else None
a[idx[upper_cutoff:start_of_nans]] = a[idx[upper_cutoff - 1]]
return a | zipline-trader | /zipline_trader-1.6.1-cp36-cp36m-win32.whl/zipline/pipeline/factors/factor.py | factor.py |
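
# Tiny numeric sketch (illustrative, not part of the original module): the
# winsorize helper above clips the extremes of a row to the values at the
# requested percentile cutoffs while leaving NaNs in place.
def _example_winsorize():
    from numpy import array, nan

    row = array([1.0, 2.0, 3.0, 4.0, 100.0, nan])
    # With max_percentile=0.8, the largest non-NaN value (100.0) is pulled
    # down to the value at the cutoff (4.0); the NaN is untouched.
    return winsorize(row, min_percentile=0.0, max_percentile=0.8)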
from numbers import Number
from numpy import (
arange,
average,
clip,
copyto,
exp,
fmax,
full,
isnan,
log,
NINF,
sqrt,
sum as np_sum,
unique,
)
from zipline.pipeline.data import EquityPricing
from zipline.utils.input_validation import expect_types
from zipline.utils.math_utils import (
nanargmax,
nanmax,
nanmean,
nanstd,
nansum,
)
from zipline.utils.numpy_utils import (
float64_dtype,
ignore_nanwarnings,
)
from .factor import CustomFactor
from ..mixins import SingleInputMixin
class Returns(CustomFactor):
"""
Calculates the percent change in close price over the given window_length.
**Default Inputs**: [EquityPricing.close]
"""
inputs = [EquityPricing.close]
window_safe = True
def _validate(self):
super(Returns, self)._validate()
if self.window_length < 2:
raise ValueError(
"'Returns' expected a window length of at least 2, but was "
"given {window_length}. For daily returns, use a window "
"length of 2.".format(window_length=self.window_length)
)
def compute(self, today, assets, out, close):
out[:] = (close[-1] - close[0]) / close[0]
class PercentChange(SingleInputMixin, CustomFactor):
"""
Calculates the percent change over the given window_length.
**Default Inputs:** None
**Default Window Length:** None
Notes
-----
Percent change is calculated as ``(new - old) / abs(old)``.
"""
window_safe = True
def _validate(self):
super(PercentChange, self)._validate()
if self.window_length < 2:
raise ValueError(
"'PercentChange' expected a window length"
"of at least 2, but was given {window_length}. "
"For daily percent change, use a window "
"length of 2.".format(window_length=self.window_length)
)
def compute(self, today, assets, out, values):
out[:] = (values[-1] - values[0]) / abs(values[0])
class DailyReturns(Returns):
"""
Calculates daily percent change in close price.
**Default Inputs**: [EquityPricing.close]
"""
inputs = [EquityPricing.close]
window_safe = True
window_length = 2
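
# Illustrative sketch (not part of the original module): ``Returns`` measures
# the percent change between the first and last close in its lookback window,
# so ``window_length=6`` on daily data gives a roughly weekly return. The
# column names here are arbitrary.
def _example_returns_pipeline():
    from zipline.pipeline import Pipeline

    return Pipeline(
        columns={
            'daily': DailyReturns(),
            'weekly': Returns(window_length=6),
        },
    )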
class SimpleMovingAverage(SingleInputMixin, CustomFactor):
"""
Average Value of an arbitrary column
**Default Inputs**: None
**Default Window Length**: None
"""
# numpy's nan functions throw warnings when passed an array containing only
    # nans, but they still return the desired value (nan), so we ignore the
# warning.
ctx = ignore_nanwarnings()
def compute(self, today, assets, out, data):
out[:] = nanmean(data, axis=0)
class WeightedAverageValue(CustomFactor):
"""
Helper for VWAP-like computations.
**Default Inputs:** None
**Default Window Length:** None
"""
def compute(self, today, assets, out, base, weight):
out[:] = nansum(base * weight, axis=0) / nansum(weight, axis=0)
class VWAP(WeightedAverageValue):
"""
Volume Weighted Average Price
**Default Inputs:** [EquityPricing.close, EquityPricing.volume]
**Default Window Length:** None
"""
inputs = (EquityPricing.close, EquityPricing.volume)
class MaxDrawdown(SingleInputMixin, CustomFactor):
"""
Max Drawdown
**Default Inputs:** None
**Default Window Length:** None
"""
ctx = ignore_nanwarnings()
def compute(self, today, assets, out, data):
drawdowns = fmax.accumulate(data, axis=0) - data
drawdowns[isnan(drawdowns)] = NINF
drawdown_ends = nanargmax(drawdowns, axis=0)
# TODO: Accelerate this loop in Cython or Numba.
for i, end in enumerate(drawdown_ends):
peak = nanmax(data[:end + 1, i])
out[i] = (peak - data[end, i]) / data[end, i]
class AverageDollarVolume(CustomFactor):
"""
Average Daily Dollar Volume
**Default Inputs:** [EquityPricing.close, EquityPricing.volume]
**Default Window Length:** None
"""
inputs = [EquityPricing.close, EquityPricing.volume]
def compute(self, today, assets, out, close, volume):
out[:] = nansum(close * volume, axis=0) / len(close)
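
# Illustrative sketch (not part of the original module): both VWAP and
# AverageDollarVolume pre-declare close/volume as inputs, so only a lookback
# length is needed at construction time. The 1000-name cutoff is arbitrary.
def _example_liquidity_terms():
    vwap_20 = VWAP(window_length=20)
    adv_20 = AverageDollarVolume(window_length=20)
    # A common pattern: use the dollar-volume factor to build a liquidity
    # screen and report VWAP only for the names that pass it.
    return vwap_20, adv_20.top(1000)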
def exponential_weights(length, decay_rate):
"""
Build a weight vector for an exponentially-weighted statistic.
The resulting ndarray is of the form::
        [decay_rate ** (length + 1), ..., decay_rate ** 3, decay_rate ** 2]
Parameters
----------
length : int
The length of the desired weight vector.
decay_rate : float
The rate at which entries in the weight vector increase or decrease.
Returns
-------
weights : ndarray[float64]
"""
return full(length, decay_rate, float64_dtype) ** arange(length + 1, 1, -1)
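
# Quick numeric check (illustrative, not part of the original module):
# exponential_weights(3, 0.5) yields an increasing weight vector, so when it
# is used with numpy.average the most recent row (the last entry) receives the
# largest weight.
def _example_exponential_weights():
    return exponential_weights(3, 0.5)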
class _ExponentialWeightedFactor(SingleInputMixin, CustomFactor):
"""
Base class for factors implementing exponential-weighted operations.
**Default Inputs:** None
**Default Window Length:** None
Parameters
----------
inputs : length-1 list or tuple of BoundColumn
The expression over which to compute the average.
window_length : int > 0
Length of the lookback window over which to compute the average.
decay_rate : float, 0 < decay_rate <= 1
Weighting factor by which to discount past observations.
When calculating historical averages, rows are multiplied by the
sequence::
decay_rate, decay_rate ** 2, decay_rate ** 3, ...
Methods
-------
weights
from_span
from_halflife
from_center_of_mass
"""
params = ('decay_rate',)
@classmethod
@expect_types(span=Number)
def from_span(cls, inputs, window_length, span, **kwargs):
"""
Convenience constructor for passing `decay_rate` in terms of `span`.
Forwards `decay_rate` as `1 - (2.0 / (1 + span))`. This provides the
behavior equivalent to passing `span` to pandas.ewma.
Examples
--------
.. code-block:: python
# Equivalent to:
# my_ewma = EWMA(
# inputs=[EquityPricing.close],
# window_length=30,
# decay_rate=(1 - (2.0 / (1 + 15.0))),
# )
my_ewma = EWMA.from_span(
inputs=[EquityPricing.close],
window_length=30,
span=15,
)
Notes
-----
This classmethod is provided by both
:class:`ExponentialWeightedMovingAverage` and
:class:`ExponentialWeightedMovingStdDev`.
"""
if span <= 1:
raise ValueError(
"`span` must be a positive number. %s was passed." % span
)
decay_rate = (1.0 - (2.0 / (1.0 + span)))
assert 0.0 < decay_rate <= 1.0
return cls(
inputs=inputs,
window_length=window_length,
decay_rate=decay_rate,
**kwargs
)
@classmethod
@expect_types(halflife=Number)
def from_halflife(cls, inputs, window_length, halflife, **kwargs):
"""
Convenience constructor for passing ``decay_rate`` in terms of half
life.
Forwards ``decay_rate`` as ``exp(log(.5) / halflife)``. This provides
the behavior equivalent to passing `halflife` to pandas.ewma.
Examples
--------
.. code-block:: python
# Equivalent to:
# my_ewma = EWMA(
# inputs=[EquityPricing.close],
# window_length=30,
# decay_rate=np.exp(np.log(0.5) / 15),
# )
my_ewma = EWMA.from_halflife(
inputs=[EquityPricing.close],
window_length=30,
halflife=15,
)
Notes
-----
This classmethod is provided by both
:class:`ExponentialWeightedMovingAverage` and
:class:`ExponentialWeightedMovingStdDev`.
"""
if halflife <= 0:
raise ValueError(
"`span` must be a positive number. %s was passed." % halflife
)
decay_rate = exp(log(.5) / halflife)
assert 0.0 < decay_rate <= 1.0
return cls(
inputs=inputs,
window_length=window_length,
decay_rate=decay_rate,
**kwargs
)
@classmethod
def from_center_of_mass(cls,
inputs,
window_length,
center_of_mass,
**kwargs):
"""
Convenience constructor for passing `decay_rate` in terms of center of
mass.
        Forwards `decay_rate` as `1 - (1 / (1 + center_of_mass))`. This provides
behavior equivalent to passing `center_of_mass` to pandas.ewma.
Examples
--------
.. code-block:: python
# Equivalent to:
# my_ewma = EWMA(
# inputs=[EquityPricing.close],
# window_length=30,
# decay_rate=(1 - (1 / 15.0)),
# )
my_ewma = EWMA.from_center_of_mass(
inputs=[EquityPricing.close],
window_length=30,
center_of_mass=15,
)
Notes
-----
This classmethod is provided by both
:class:`ExponentialWeightedMovingAverage` and
:class:`ExponentialWeightedMovingStdDev`.
"""
return cls(
inputs=inputs,
window_length=window_length,
decay_rate=(1.0 - (1.0 / (1.0 + center_of_mass))),
**kwargs
)
class ExponentialWeightedMovingAverage(_ExponentialWeightedFactor):
"""
Exponentially Weighted Moving Average
**Default Inputs:** None
**Default Window Length:** None
Parameters
----------
inputs : length-1 list/tuple of BoundColumn
The expression over which to compute the average.
window_length : int > 0
Length of the lookback window over which to compute the average.
decay_rate : float, 0 < decay_rate <= 1
Weighting factor by which to discount past observations.
When calculating historical averages, rows are multiplied by the
sequence::
decay_rate, decay_rate ** 2, decay_rate ** 3, ...
Notes
-----
- This class can also be imported under the name ``EWMA``.
See Also
--------
:meth:`pandas.DataFrame.ewm`
"""
def compute(self, today, assets, out, data, decay_rate):
out[:] = average(
data,
axis=0,
weights=exponential_weights(len(data), decay_rate),
)
class ExponentialWeightedMovingStdDev(_ExponentialWeightedFactor):
"""
Exponentially Weighted Moving Standard Deviation
**Default Inputs:** None
**Default Window Length:** None
Parameters
----------
inputs : length-1 list/tuple of BoundColumn
The expression over which to compute the average.
window_length : int > 0
Length of the lookback window over which to compute the average.
decay_rate : float, 0 < decay_rate <= 1
Weighting factor by which to discount past observations.
When calculating historical averages, rows are multiplied by the
sequence::
decay_rate, decay_rate ** 2, decay_rate ** 3, ...
Notes
-----
- This class can also be imported under the name ``EWMSTD``.
See Also
--------
:func:`pandas.DataFrame.ewm`
"""
def compute(self, today, assets, out, data, decay_rate):
weights = exponential_weights(len(data), decay_rate)
mean = average(data, axis=0, weights=weights)
variance = average((data - mean) ** 2, axis=0, weights=weights)
squared_weight_sum = (np_sum(weights) ** 2)
bias_correction = (
squared_weight_sum / (squared_weight_sum - np_sum(weights ** 2))
)
out[:] = sqrt(variance * bias_correction)
class LinearWeightedMovingAverage(SingleInputMixin, CustomFactor):
"""
Weighted Average Value of an arbitrary column
**Default Inputs**: None
**Default Window Length**: None
"""
# numpy's nan functions throw warnings when passed an array containing only
    # nans, but they still return the desired value (nan), so we ignore the
# warning.
ctx = ignore_nanwarnings()
def compute(self, today, assets, out, data):
ndays = data.shape[0]
# Initialize weights array
weights = arange(1, ndays + 1, dtype=float64_dtype).reshape(ndays, 1)
# Compute normalizer
normalizer = (ndays * (ndays + 1)) / 2
# Weight the data
weighted_data = data * weights
# Compute weighted averages
out[:] = nansum(weighted_data, axis=0) / normalizer
class AnnualizedVolatility(CustomFactor):
"""
Volatility. The degree of variation of a series over time as measured by
the standard deviation of daily returns.
https://en.wikipedia.org/wiki/Volatility_(finance)
**Default Inputs:** [Returns(window_length=2)]
Parameters
----------
annualization_factor : float, optional
        The number of time units per year. Default is 252, the number of NYSE
trading days in a normal year.
"""
inputs = [Returns(window_length=2)]
params = {'annualization_factor': 252.0}
window_length = 252
def compute(self, today, assets, out, returns, annualization_factor):
out[:] = nanstd(returns, axis=0) * (annualization_factor ** .5)
class PeerCount(SingleInputMixin, CustomFactor):
"""
Peer Count of distinct categories in a given classifier. This factor
is returned by the classifier instance method peer_count()
**Default Inputs:** None
**Default Window Length:** 1
"""
window_length = 1
def _validate(self):
super(PeerCount, self)._validate()
if self.window_length != 1:
raise ValueError(
"'PeerCount' expected a window length of 1, but was given"
"{window_length}.".format(window_length=self.window_length)
)
def compute(self, today, assets, out, classifier_values):
# Convert classifier array to group label int array
group_labels, null_label = self.inputs[0]._to_integral(
classifier_values[0]
)
_, inverse, counts = unique( # Get counts, idx of unique groups
group_labels,
return_counts=True,
return_inverse=True,
)
copyto(out, counts[inverse], where=(group_labels != null_label))
# Convenience aliases
EWMA = ExponentialWeightedMovingAverage
EWMSTD = ExponentialWeightedMovingStdDev
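
# Illustrative sketch (not part of the original module): the EWMA/EWMSTD
# aliases above are ordinary CustomFactors; ``from_span`` mirrors the ``span``
# parameterization of ``pandas.DataFrame.ewm``. The window length and span
# values are arbitrary.
def _example_ewm_factors():
    ewma_20 = EWMA.from_span(
        inputs=[EquityPricing.close],
        window_length=60,
        span=20,
    )
    ewmstd_20 = EWMSTD.from_span(
        inputs=[EquityPricing.close],
        window_length=60,
        span=20,
    )
    return ewma_20, ewmstd_20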
class Clip(CustomFactor):
"""
Clip (limit) the values in a factor.
Given an interval, values outside the interval are clipped to the interval
edges. For example, if an interval of ``[0, 1]`` is specified, values
smaller than 0 become 0, and values larger than 1 become 1.
**Default Window Length:** 1
Parameters
----------
min_bound : float
The minimum value to use.
max_bound : float
The maximum value to use.
Notes
-----
    To only clip values on one side, ``-np.inf`` and ``np.inf`` may be passed.
For example, to only clip the maximum value but not clip a minimum value:
.. code-block:: python
Clip(inputs=[factor], min_bound=-np.inf, max_bound=user_provided_max)
See Also
--------
numpy.clip
"""
window_length = 1
params = ('min_bound', 'max_bound')
def compute(self, today, assets, out, values, min_bound, max_bound):
clip(values[-1], min_bound, max_bound, out=out) | zipline-trader | /zipline_trader-1.6.1-cp36-cp36m-win32.whl/zipline/pipeline/factors/basic.py | basic.py |
from numpy import newaxis
from zipline.utils.numpy_utils import (
NaTD,
busday_count_mask_NaT,
datetime64D_dtype,
float64_dtype,
)
from .factor import Factor
class BusinessDaysSincePreviousEvent(Factor):
"""
Abstract class for business days since a previous event.
Returns the number of **business days** (not trading days!) since
the most recent event date for each asset.
This doesn't use trading days for symmetry with
BusinessDaysUntilNextEarnings.
Assets which announced or will announce the event today will produce a
value of 0.0. Assets that announced the event on the previous business
day will produce a value of 1.0.
Assets for which the event date is `NaT` will produce a value of `NaN`.
Example
-------
``BusinessDaysSincePreviousEvent`` can be used to create an event-driven
factor. For instance, you may want to only trade assets that have
a data point with an asof_date in the last 5 business days. To do this,
you can create a ``BusinessDaysSincePreviousEvent`` factor, supplying
the relevant asof_date column from your dataset as input, like this::
# Factor computing number of days since most recent asof_date
# per asset.
days_since_event = BusinessDaysSincePreviousEvent(
inputs=[MyDataset.asof_date]
)
# Filter returning True for each asset whose most recent asof_date
# was in the last 5 business days.
recency_filter = (days_since_event <= 5)
"""
window_length = 0
dtype = float64_dtype
def _compute(self, arrays, dates, assets, mask):
# Coerce from [ns] to [D] for numpy busday_count.
announce_dates = arrays[0].astype(datetime64D_dtype)
# Set masked values to NaT.
announce_dates[~mask] = NaTD
# Convert row labels into a column vector for broadcasted comparison.
reference_dates = dates.values.astype(datetime64D_dtype)[:, newaxis]
return busday_count_mask_NaT(announce_dates, reference_dates)
class BusinessDaysUntilNextEvent(Factor):
"""
    Abstract class for business days until the next event.
Returns the number of **business days** (not trading days!) until
the next known event date for each asset.
This doesn't use trading days because the trading calendar includes
information that may not have been available to the algorithm at the time
when `compute` is called.
For example, the NYSE closings September 11th 2001, would not have been
known to the algorithm on September 10th.
Assets that announced or will announce the event today will produce a value
of 0.0. Assets that will announce the event on the next upcoming business
day will produce a value of 1.0.
Assets for which the event date is `NaT` will produce a value of `NaN`.
"""
window_length = 0
dtype = float64_dtype
def _compute(self, arrays, dates, assets, mask):
# Coerce from [ns] to [D] for numpy busday_count.
announce_dates = arrays[0].astype(datetime64D_dtype)
# Set masked values to NaT.
announce_dates[~mask] = NaTD
# Convert row labels into a column vector for broadcasted comparison.
reference_dates = dates.values.astype(datetime64D_dtype)[:, newaxis]
return busday_count_mask_NaT(reference_dates, announce_dates) | zipline-trader | /zipline_trader-1.6.1-cp36-cp36m-win32.whl/zipline/pipeline/factors/events.py | events.py |
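
# Illustrative sketch (not part of the original module): mirrors the usage
# example in BusinessDaysSincePreviousEvent's docstring, but looking forward.
# ``UpcomingEvents`` is a hypothetical dataset defined inline for the example.
def _example_days_until_event():
    from zipline.pipeline.data import Column, DataSet
    from zipline.utils.numpy_utils import datetime64ns_dtype

    class UpcomingEvents(DataSet):
        """Hypothetical dataset with a single known-future event date column."""
        event_date = Column(datetime64ns_dtype)

    days_until = BusinessDaysUntilNextEvent(inputs=[UpcomingEvents.event_date])
    # Filter for assets whose next event falls within the coming business week.
    return days_until <= 5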
from functools import partial
from numbers import Number
import operator
import re
from numpy import where, isnan, nan, zeros
import pandas as pd
from zipline.errors import UnsupportedDataType
from zipline.lib.labelarray import LabelArray
from zipline.lib.quantiles import quantiles
from zipline.pipeline.api_utils import restrict_to_dtype
from zipline.pipeline.dtypes import (
CLASSIFIER_DTYPES,
FACTOR_DTYPES,
FILTER_DTYPES,
)
from zipline.pipeline.sentinels import NotSpecified
from zipline.pipeline.term import ComputableTerm
from zipline.utils.compat import unicode
from zipline.utils.input_validation import expect_types, expect_dtypes
from zipline.utils.numpy_utils import (
categorical_dtype,
int64_dtype,
vectorized_is_element,
)
from ..filters import ArrayPredicate, NumExprFilter
from ..mixins import (
CustomTermMixin,
LatestMixin,
PositiveWindowLengthMixin,
RestrictedDTypeMixin,
SingleInputMixin,
StandardOutputs,
)
string_classifiers_only = restrict_to_dtype(
dtype=categorical_dtype,
message_template=(
"{method_name}() is only defined on Classifiers producing strings"
" but it was called on a Classifier of dtype {received_dtype}."
)
)
class Classifier(RestrictedDTypeMixin, ComputableTerm):
"""
A Pipeline expression computing a categorical output.
Classifiers are most commonly useful for describing grouping keys for
complex transformations on Factor outputs. For example, Factor.demean() and
Factor.zscore() can be passed a Classifier in their ``groupby`` argument,
indicating that means/standard deviations should be computed on assets for
which the classifier produced the same label.
"""
# Used by RestrictedDTypeMixin
ALLOWED_DTYPES = CLASSIFIER_DTYPES
categories = NotSpecified
# We explicitly don't support classifier to classifier comparisons, since
# the stored values likely don't mean the same thing. This may be relaxed
# in the future, but for now we're starting conservatively.
def eq(self, other):
"""
Construct a Filter returning True for asset/date pairs where the output
of ``self`` matches ``other``.
"""
# We treat this as an error because missing_values have NaN semantics,
# which means this would return an array of all False, which is almost
# certainly not what the user wants.
if other == self.missing_value:
raise ValueError(
"Comparison against self.missing_value ({value!r}) in"
" {typename}.eq().\n"
"Missing values have NaN semantics, so the "
"requested comparison would always produce False.\n"
"Use the isnull() method to check for missing values.".format(
value=other,
typename=(type(self).__name__),
)
)
if isinstance(other, Number) != (self.dtype == int64_dtype):
raise InvalidClassifierComparison(self, other)
if isinstance(other, Number):
return NumExprFilter.create(
"x_0 == {other}".format(other=int(other)),
binds=(self,),
)
else:
return ArrayPredicate(
term=self,
op=operator.eq,
opargs=(other,),
)
def __ne__(self, other):
"""
Construct a Filter returning True for asset/date pairs where the output
        of ``self`` does not match ``other``.
"""
if isinstance(other, Number) != (self.dtype == int64_dtype):
raise InvalidClassifierComparison(self, other)
if isinstance(other, Number):
return NumExprFilter.create(
"((x_0 != {other}) & (x_0 != {missing}))".format(
other=int(other),
missing=self.missing_value,
),
binds=(self,),
)
else:
# Numexpr doesn't know how to use LabelArrays.
return ArrayPredicate(term=self, op=operator.ne, opargs=(other,))
def bad_compare(opname, other):
raise TypeError('cannot compare classifiers with %s' % opname)
__gt__ = partial(bad_compare, '>')
__ge__ = partial(bad_compare, '>=')
__le__ = partial(bad_compare, '<=')
__lt__ = partial(bad_compare, '<')
del bad_compare
@string_classifiers_only
@expect_types(prefix=(bytes, unicode))
def startswith(self, prefix):
"""
Construct a Filter matching values starting with ``prefix``.
Parameters
----------
prefix : str
String prefix against which to compare values produced by ``self``.
Returns
-------
matches : Filter
Filter returning True for all sid/date pairs for which ``self``
produces a string starting with ``prefix``.
"""
return ArrayPredicate(
term=self,
op=LabelArray.startswith,
opargs=(prefix,),
)
@string_classifiers_only
@expect_types(suffix=(bytes, unicode))
def endswith(self, suffix):
"""
Construct a Filter matching values ending with ``suffix``.
Parameters
----------
suffix : str
String suffix against which to compare values produced by ``self``.
Returns
-------
matches : Filter
Filter returning True for all sid/date pairs for which ``self``
            produces a string ending with ``suffix``.
"""
return ArrayPredicate(
term=self,
op=LabelArray.endswith,
opargs=(suffix,),
)
@string_classifiers_only
@expect_types(substring=(bytes, unicode))
def has_substring(self, substring):
"""
Construct a Filter matching values containing ``substring``.
Parameters
----------
substring : str
Sub-string against which to compare values produced by ``self``.
Returns
-------
matches : Filter
Filter returning True for all sid/date pairs for which ``self``
produces a string containing ``substring``.
"""
return ArrayPredicate(
term=self,
op=LabelArray.has_substring,
opargs=(substring,),
)
@string_classifiers_only
@expect_types(pattern=(bytes, unicode, type(re.compile(''))))
def matches(self, pattern):
"""
Construct a Filter that checks regex matches against ``pattern``.
Parameters
----------
pattern : str
Regex pattern against which to compare values produced by ``self``.
Returns
-------
matches : Filter
Filter returning True for all sid/date pairs for which ``self``
produces a string matched by ``pattern``.
See Also
--------
:mod:`Python Regular Expressions <re>`
"""
return ArrayPredicate(
term=self,
op=LabelArray.matches,
opargs=(pattern,),
)
# TODO: Support relabeling for integer dtypes.
@string_classifiers_only
def relabel(self, relabeler):
"""
Convert ``self`` into a new classifier by mapping a function over each
element produced by ``self``.
Parameters
----------
relabeler : function[str -> str or None]
A function to apply to each unique value produced by ``self``.
Returns
-------
relabeled : Classifier
A classifier produced by applying ``relabeler`` to each unique
value produced by ``self``.
"""
return Relabel(term=self, relabeler=relabeler)
def element_of(self, choices):
"""
Construct a Filter indicating whether values are in ``choices``.
Parameters
----------
choices : iterable[str or int]
An iterable of choices.
Returns
-------
matches : Filter
Filter returning True for all sid/date pairs for which ``self``
produces an entry in ``choices``.
"""
try:
choices = frozenset(choices)
except Exception as e:
raise TypeError(
"Expected `choices` to be an iterable of hashable values,"
" but got {} instead.\n"
"This caused the following error: {!r}.".format(choices, e)
)
if self.missing_value in choices:
raise ValueError(
"Found self.missing_value ({mv!r}) in choices supplied to"
" {typename}.{meth_name}().\n"
"Missing values have NaN semantics, so the"
" requested comparison would always produce False.\n"
"Use the isnull() method to check for missing values.\n"
"Received choices were {choices}.".format(
mv=self.missing_value,
typename=(type(self).__name__),
choices=sorted(choices),
meth_name=self.element_of.__name__,
)
)
def only_contains(type_, values):
return all(isinstance(v, type_) for v in values)
if self.dtype == int64_dtype:
if only_contains(int, choices):
return ArrayPredicate(
term=self,
op=vectorized_is_element,
opargs=(choices,),
)
else:
raise TypeError(
"Found non-int in choices for {typename}.element_of.\n"
"Supplied choices were {choices}.".format(
typename=type(self).__name__,
choices=choices,
)
)
elif self.dtype == categorical_dtype:
if only_contains((bytes, unicode), choices):
return ArrayPredicate(
term=self,
op=LabelArray.element_of,
opargs=(choices,),
)
else:
raise TypeError(
"Found non-string in choices for {typename}.element_of.\n"
"Supplied choices were {choices}.".format(
typename=type(self).__name__,
choices=choices,
)
)
assert False, "Unknown dtype in Classifier.element_of %s." % self.dtype
def postprocess(self, data):
if self.dtype == int64_dtype:
return data
if not isinstance(data, LabelArray):
raise AssertionError("Expected a LabelArray, got %s." % type(data))
return data.as_categorical()
def to_workspace_value(self, result, assets):
"""
Called with the result of a pipeline. This needs to return an object
which can be put into the workspace to continue doing computations.
This is the inverse of :func:`~zipline.pipeline.term.Term.postprocess`.
"""
if self.dtype == int64_dtype:
return super(Classifier, self).to_workspace_value(result, assets)
assert isinstance(result.values, pd.Categorical), (
'Expected a Categorical, got %r.' % type(result.values)
)
with_missing = pd.Series(
data=pd.Categorical(
result.values,
result.values.categories.union([self.missing_value]),
),
index=result.index,
)
return LabelArray(
super(Classifier, self).to_workspace_value(
with_missing,
assets,
),
self.missing_value,
)
@classmethod
def _principal_computable_term_type(cls):
return Classifier
def _to_integral(self, output_array):
"""
Convert an array produced by this classifier into an array of integer
labels and a missing value label.
"""
if self.dtype == int64_dtype:
group_labels = output_array
null_label = self.missing_value
elif self.dtype == categorical_dtype:
# Coerce LabelArray into an isomorphic array of ints. This is
# necessary because np.where doesn't know about LabelArrays or the
# void dtype.
group_labels = output_array.as_int_array()
null_label = output_array.missing_value_code
else:
raise AssertionError(
"Unexpected Classifier dtype: %s." % self.dtype
)
return group_labels, null_label
def peer_count(self, mask=NotSpecified):
"""
Construct a factor that gives the number of occurrences of
each distinct category in a classifier.
Parameters
----------
mask : zipline.pipeline.Filter, optional
If passed, only count assets passing the filter. Default behavior
is to count all assets.
Examples
--------
Let ``c`` be a Classifier which would produce the following output::
AAPL MSFT MCD BK AMZN FB
2015-05-05 'a' 'a' None 'b' 'a' None
2015-05-06 'b' 'a' 'c' 'b' 'b' 'b'
2015-05-07 None 'a' 'aa' 'aa' 'aa' None
2015-05-08 'c' 'c' 'c' 'c' 'c' 'c'
Then ``c.peer_count()`` will count, for each row, the total number
of assets in each classifier category produced by ``c``. Missing
data will be evaluated to NaN.
::
AAPL MSFT MCD BK AMZN FB
2015-05-05 3.0 3.0 NaN 1.0 3.0 NaN
2015-05-06 4.0 1.0 1.0 4.0 4.0 4.0
2015-05-07 NaN 1.0 3.0 3.0 3.0 NaN
2015-05-08 6.0 6.0 6.0 6.0 6.0 6.0
Returns
-------
factor : CustomFactor
A CustomFactor that counts, for each asset, the total number
of assets with the same classifier category label.
"""
# Lazy import due to cyclic dependencies in factor.py, classifier.py
from ..factors import PeerCount
return PeerCount(inputs=[self], mask=mask)
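
# Illustrative sketch (not part of the original module): typical use of the
# string-classifier helpers defined above. ``Fundamentals`` and its
# ``exchange`` column are hypothetical, defined inline for the example; any
# categorical-dtype classifier behaves the same way.
def _example_exchange_filters():
    from zipline.pipeline.data import Column, DataSet
    from zipline.utils.numpy_utils import categorical_dtype

    class Fundamentals(DataSet):
        """Hypothetical dataset with a categorical exchange column."""
        exchange = Column(categorical_dtype)

    exchange = Fundamentals.exchange.latest
    is_nyse = exchange.eq('NYS')
    is_us_listed = exchange.element_of(['NYS', 'NAS', 'ASE'])
    return is_nyse, is_us_listed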
class Everything(Classifier):
"""
A trivial classifier that classifies everything the same.
"""
dtype = int64_dtype
window_length = 0
inputs = ()
missing_value = -1
def _compute(self, arrays, dates, assets, mask):
return where(
mask,
zeros(shape=mask.shape, dtype=int64_dtype),
self.missing_value,
)
class Quantiles(SingleInputMixin, Classifier):
"""
A classifier computing quantiles over an input.
"""
params = ('bins',)
dtype = int64_dtype
window_length = 0
missing_value = -1
def _compute(self, arrays, dates, assets, mask):
data = arrays[0]
bins = self.params['bins']
to_bin = where(mask, data, nan)
result = quantiles(to_bin, bins)
# Write self.missing_value into nan locations, whether they were
# generated by our input mask or not.
result[isnan(result)] = self.missing_value
return result.astype(int64_dtype)
def graph_repr(self):
"""Short repr to use when rendering Pipeline graphs."""
return type(self).__name__ + '(%d)' % self.params['bins']
class Relabel(SingleInputMixin, Classifier):
"""
A classifier applying a relabeling function on the result of another
classifier.
Parameters
----------
    term : zipline.pipeline.Classifier
        Term producing the input to be relabeled.
    relabeler : function[str -> str or None]
        Function to apply to each unique value produced by ``term``.
"""
window_length = 0
params = ('relabeler',)
# TODO: Support relabeling for integer dtypes.
@expect_dtypes(term=categorical_dtype)
@expect_types(term=Classifier)
def __new__(cls, term, relabeler):
return super(Relabel, cls).__new__(
cls,
inputs=(term,),
dtype=term.dtype,
mask=term.mask,
relabeler=relabeler,
)
def _compute(self, arrays, dates, assets, mask):
relabeler = self.params['relabeler']
data = arrays[0]
if isinstance(data, LabelArray):
result = data.map(relabeler)
result[~mask] = data.missing_value
else:
raise NotImplementedError(
"Relabeling is not currently supported for "
"int-dtype classifiers."
)
return result
class CustomClassifier(PositiveWindowLengthMixin,
StandardOutputs,
CustomTermMixin,
Classifier):
"""
Base class for user-defined Classifiers.
    Does not support multiple outputs.
See Also
--------
zipline.pipeline.CustomFactor
zipline.pipeline.CustomFilter
"""
def _validate(self):
try:
super(CustomClassifier, self)._validate()
except UnsupportedDataType:
if self.dtype in FACTOR_DTYPES:
raise UnsupportedDataType(
typename=type(self).__name__,
dtype=self.dtype,
hint='Did you mean to create a CustomFactor?',
)
elif self.dtype in FILTER_DTYPES:
raise UnsupportedDataType(
typename=type(self).__name__,
dtype=self.dtype,
hint='Did you mean to create a CustomFilter?',
)
raise
def _allocate_output(self, windows, shape):
"""
Override the default array allocation to produce a LabelArray when we
have a string-like dtype.
"""
if self.dtype == int64_dtype:
return super(CustomClassifier, self)._allocate_output(
windows,
shape,
)
# This is a little bit of a hack. We might not know what the
# categories for a LabelArray are until it's actually been loaded, so
# we need to look at the underlying data.
return windows[0].data.empty_like(shape)
class Latest(LatestMixin, CustomClassifier):
"""
A classifier producing the latest value of an input.
See Also
--------
zipline.pipeline.data.dataset.BoundColumn.latest
"""
pass
class InvalidClassifierComparison(TypeError):
def __init__(self, classifier, compval):
super(InvalidClassifierComparison, self).__init__(
"Can't compare classifier of dtype"
" {dtype} to value {value} of type {type}.".format(
dtype=classifier.dtype,
value=compval,
type=type(compval).__name__,
)
) | zipline-trader | /zipline_trader-1.6.1-cp36-cp36m-win32.whl/zipline/pipeline/classifiers/classifier.py | classifier.py |
import os
import requests
import numpy as np
import pandas as pd
from zipline.data import bundles
from zipline.pipeline.classifiers import Classifier
from zipline.utils.numpy_utils import int64_dtype
# This URL returns the company list for all three exchanges (6000+ tickers).
BASE_URL = "http://old.nasdaq.com/screening/companies-by-industry.aspx?&render=download"
BASE_PATH = os.path.dirname(os.path.realpath(__file__))
# This list is provided, but you should refresh its content from time to time
RAW_FILE = "companylist.csv"
INPUT_FILE = os.path.join(BASE_PATH, RAW_FILE)
SID_FILE_NAME = "NASDAQ_sids.npy" # persisted np.array where
SECTOR_PATH = os.path.join(BASE_PATH, SID_FILE_NAME)
# NASDAQ sectors, not the same as Morningstar
SECTOR_CODING = {"Basic Industries": 0,
"Capital Goods": 1,
"Consumer Durables": 2,
"Consumer Non-Durables": 3,
"Consumer Services": 4,
"Energy": 5,
"Finance": 6,
"Health Care": 7,
"Miscellaneous": 8,
"Public Utilities": 9,
"Technology": 10,
"Transportation": 11,
"n/a": -1}
SECTOR_LABELS = dict(zip(SECTOR_CODING.values(), SECTOR_CODING.keys()))
def get_tickers_from_bundle(bundle_name):
"""Gets a list of tickers from a given bundle"""
bundle_data = bundles.load(bundle_name, os.environ, None)
# get a list of all sids
lifetimes = bundle_data.asset_finder._compute_asset_lifetimes("US")
all_sids = lifetimes.sid
    # retrieve all assets in the bundle
all_assets = bundle_data.asset_finder.retrieve_all(all_sids)
# return only tickers
return dict(map(lambda x: (x.symbol, x.sid), all_assets))
def download_nasdaq_company_list():
r = requests.get(BASE_URL, allow_redirects=True)
open(INPUT_FILE, 'wb').write(r.content)
def create_sid_table_from_file(bundle_name='alpaca_api'):
"""reads the raw file, maps tickers -> SIDS,
then maps sector strings to integers, and saves
to the file: SID_FILE"""
df = pd.read_csv(INPUT_FILE, index_col="Symbol")
df = df.drop_duplicates()
coded_sectors_for_ticker = df["Sector"].map(SECTOR_CODING).fillna(-1)
ae_d = get_tickers_from_bundle(bundle_name)
N = max(ae_d.values()) + 1
# create empty 1-D array to hold data where index = SID
sectors = np.full(N, -1, np.dtype('int64'))
# iterate over Assets in the bundle, and fill in sectors
for ticker, sid in ae_d.items():
sectors[sid] = coded_sectors_for_ticker.get(ticker, -1)
np.save(SECTOR_PATH, sectors)
class ZiplineTraderSector(Classifier):
inputs = ()
dtype = int64_dtype
window_length = 0
missing_value = -1
def __init__(self):
create_sid_table_from_file()
self.data = np.load(SECTOR_PATH)
def _compute(self, arrays, dates, assets, mask):
return np.where(
mask,
self.data[assets],
self.missing_value,
)
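
# Illustrative sketch (not part of the original module): how this classifier is
# typically consumed in a pipeline, e.g. counting peers per NASDAQ sector and
# screening down to Technology (code 10 in SECTOR_CODING above). Note that
# constructing ZiplineTraderSector requires the bundle and company list above
# to be available.
def _example_sector_pipeline():
    from zipline.pipeline import Pipeline

    sector = ZiplineTraderSector()
    return Pipeline(
        columns={
            'sector': sector,
            'peers_in_sector': sector.peer_count(),
        },
        screen=sector.eq(SECTOR_CODING["Technology"]),
    )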
if __name__ == '__main__':
    # If you want to refresh the nasdaq asset list:
# download_nasdaq_company_list()
# get_tickers_from_bundle("alpaca_api")
create_sid_table_from_file()
sector = ZiplineTraderSector()
print(sector.data) | zipline-trader | /zipline_trader-1.6.1-cp36-cp36m-win32.whl/zipline/pipeline/classifiers/custom/sector.py | sector.py |
import abc
from collections import namedtuple, OrderedDict
from itertools import repeat
from textwrap import dedent
from weakref import WeakKeyDictionary
from six import (
iteritems,
with_metaclass,
)
from toolz import first
from zipline.currency import Currency
from zipline.data.fx import DEFAULT_FX_RATE
from zipline.pipeline.classifiers import Classifier, Latest as LatestClassifier
from zipline.pipeline.domain import Domain, GENERIC
from zipline.pipeline.factors import Factor, Latest as LatestFactor
from zipline.pipeline.filters import Filter, Latest as LatestFilter
from zipline.pipeline.sentinels import NotSpecified, sentinel
from zipline.pipeline.term import (
AssetExists,
LoadableTerm,
validate_dtype,
)
from zipline.utils.formatting import s, plural
from zipline.utils.input_validation import (
coerce_types,
ensure_dtype,
expect_types,
)
from zipline.utils.numpy_utils import float64_dtype, NoDefaultMissingValue
from zipline.utils.preprocess import preprocess
from zipline.utils.string_formatting import bulleted_list
IsSpecialization = sentinel('IsSpecialization')
class Column(object):
"""
An abstract column of data, not yet associated with a dataset.
"""
@preprocess(dtype=ensure_dtype)
def __init__(self,
dtype,
missing_value=NotSpecified,
doc=None,
metadata=None,
currency_aware=False):
if currency_aware and dtype != float64_dtype:
raise ValueError(
'Columns cannot be constructed with currency_aware={}, '
'dtype={}. Currency aware columns must have a float64 dtype.'
.format(currency_aware, dtype)
)
self.dtype = dtype
self.missing_value = missing_value
self.doc = doc
self.metadata = metadata.copy() if metadata is not None else {}
self.currency_aware = currency_aware
def bind(self, name):
"""
Bind a `Column` object to its name.
"""
return _BoundColumnDescr(
dtype=self.dtype,
missing_value=self.missing_value,
name=name,
doc=self.doc,
metadata=self.metadata,
currency_aware=self.currency_aware,
)
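
# Illustrative sketch (not part of the original module): Column instances are
# declared on DataSet subclasses and only become BoundColumns when accessed on
# the class. ``MyDataSet`` and its columns are hypothetical names.
def _example_custom_dataset():
    from zipline.pipeline.data import DataSet
    from zipline.utils.numpy_utils import float64_dtype, int64_dtype

    class MyDataSet(DataSet):
        """Hypothetical dataset with one float and one int column."""
        value = Column(float64_dtype)
        quality_flag = Column(int64_dtype, missing_value=-1)

    # Accessing the attributes on the class yields BoundColumn instances.
    return MyDataSet.value, MyDataSet.quality_flag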
class _BoundColumnDescr(object):
"""
Intermediate class that sits on `DataSet` objects and returns memoized
`BoundColumn` objects when requested.
This exists so that subclasses of DataSets don't share columns with their
parent classes.
"""
def __init__(self,
dtype,
missing_value,
name,
doc,
metadata,
currency_aware):
# Validating and calculating default missing values here guarantees
        # that we fail quickly if the user passes an unsupported dtype or fails
        # to provide a missing value for a dtype that requires one
        # (e.g. int64), but still enables us to provide an error message that
        # points to the name of the failing column.
try:
self.dtype, self.missing_value = validate_dtype(
termname="Column(name={name!r})".format(name=name),
dtype=dtype,
missing_value=missing_value,
)
except NoDefaultMissingValue:
# Re-raise with a more specific message.
raise NoDefaultMissingValue(
"Failed to create Column with name {name!r} and"
" dtype {dtype} because no missing_value was provided\n\n"
"Columns with dtype {dtype} require a missing_value.\n"
"Please pass missing_value to Column() or use a different"
" dtype.".format(dtype=dtype, name=name)
)
self.name = name
self.doc = doc
self.metadata = metadata
self.currency_aware = currency_aware
def __get__(self, instance, owner):
"""
Produce a concrete BoundColumn object when accessed.
We don't bind to datasets at class creation time so that subclasses of
DataSets produce different BoundColumns.
"""
return BoundColumn(
dtype=self.dtype,
missing_value=self.missing_value,
dataset=owner,
name=self.name,
doc=self.doc,
metadata=self.metadata,
currency_conversion=None,
currency_aware=self.currency_aware,
)
class BoundColumn(LoadableTerm):
"""
A column of data that's been concretely bound to a particular dataset.
Attributes
----------
dtype : numpy.dtype
The dtype of data produced when this column is loaded.
latest : zipline.pipeline.LoadableTerm
A :class:`~zipline.pipeline.Filter`, :class:`~zipline.pipeline.Factor`,
or :class:`~zipline.pipeline.Classifier` computing the most recently
known value of this column on each date.
See :class:`zipline.pipeline.mixins.LatestMixin` for more details.
dataset : zipline.pipeline.data.DataSet
The dataset to which this column is bound.
name : str
The name of this column.
metadata : dict
Extra metadata associated with this column.
currency_aware : bool
Whether or not this column produces currency-denominated data.
Notes
-----
Instances of this class are dynamically created upon access to attributes
of :class:`~zipline.pipeline.data.DataSet`. For example,
:attr:`~zipline.pipeline.data.EquityPricing.close` is an instance of this
class. Pipeline API users should never construct instances of this
directly.
"""
mask = AssetExists()
window_safe = True
def __new__(cls,
dtype,
missing_value,
dataset,
name,
doc,
metadata,
currency_conversion,
currency_aware):
if currency_aware and dtype != float64_dtype:
raise AssertionError(
'The {} column on dataset {} cannot be constructed with '
'currency_aware={}, dtype={}. Currency aware columns must '
'have a float64 dtype.'.format(
name,
dataset,
currency_aware,
dtype,
)
)
return super(BoundColumn, cls).__new__(
cls,
domain=dataset.domain,
dtype=dtype,
missing_value=missing_value,
dataset=dataset,
name=name,
ndim=dataset.ndim,
doc=doc,
metadata=metadata,
currency_conversion=currency_conversion,
currency_aware=currency_aware,
)
def _init(self,
dataset,
name,
doc,
metadata,
currency_conversion,
currency_aware,
*args, **kwargs):
self._dataset = dataset
self._name = name
self.__doc__ = doc
self._metadata = metadata
self._currency_conversion = currency_conversion
self._currency_aware = currency_aware
return super(BoundColumn, self)._init(*args, **kwargs)
@classmethod
def _static_identity(cls,
dataset,
name,
doc,
metadata,
currency_conversion,
currency_aware,
*args, **kwargs):
return (
super(BoundColumn, cls)._static_identity(*args, **kwargs),
dataset,
name,
doc,
frozenset(sorted(metadata.items(), key=first)),
currency_conversion,
currency_aware,
)
def __lt__(self, other):
msg = "Can't compare '{}' with '{}'. (Did you mean to use '.latest'?)"
raise TypeError(msg.format(self.qualname, other.__class__.__name__))
__gt__ = __le__ = __ge__ = __lt__
def _replace(self, **kwargs):
kw = dict(
dtype=self.dtype,
missing_value=self.missing_value,
dataset=self._dataset,
name=self._name,
doc=self.__doc__,
metadata=self._metadata,
currency_conversion=self._currency_conversion,
currency_aware=self._currency_aware,
)
kw.update(kwargs)
return type(self)(**kw)
def specialize(self, domain):
"""Specialize ``self`` to a concrete domain.
"""
if domain == self.domain:
return self
return self._replace(dataset=self._dataset.specialize(domain))
def unspecialize(self):
"""
Unspecialize a column to its generic form.
This is equivalent to ``column.specialize(GENERIC)``.
"""
return self.specialize(GENERIC)
@coerce_types(currency=(str, Currency))
def fx(self, currency):
"""
Construct a currency-converted version of this column.
Parameters
----------
currency : str or zipline.currency.Currency
Currency into which to convert this column's data.
Returns
-------
column : BoundColumn
Column producing the same data as ``self``, but currency-converted
into ``currency``.
"""
conversion = self._currency_conversion
if not self._currency_aware:
raise TypeError(
'The .fx() method cannot be called on {} because it does not '
'produce currency-denominated data.'.format(self.qualname)
)
elif conversion is not None and conversion.currency == currency:
return self
return self._replace(
currency_conversion=CurrencyConversion(
currency=currency,
field=DEFAULT_FX_RATE,
)
)
@property
def currency_conversion(self):
"""Specification for currency conversions applied for this term.
"""
return self._currency_conversion
@property
def currency_aware(self):
"""
Whether or not this column produces currency-denominated data.
"""
return self._currency_aware
@property
def dataset(self):
"""
The dataset to which this column is bound.
"""
return self._dataset
@property
def name(self):
"""
The name of this column.
"""
return self._name
@property
def metadata(self):
"""
A copy of the metadata for this column.
"""
return self._metadata.copy()
@property
def qualname(self):
"""The fully-qualified name of this column.
"""
out = '.'.join([self.dataset.qualname, self.name])
conversion = self._currency_conversion
if conversion is not None:
out += '.fx({!r})'.format(conversion.currency.code)
return out
@property
def latest(self):
dtype = self.dtype
if dtype in Filter.ALLOWED_DTYPES:
Latest = LatestFilter
elif dtype in Classifier.ALLOWED_DTYPES:
Latest = LatestClassifier
else:
assert dtype in Factor.ALLOWED_DTYPES, "Unknown dtype %s." % dtype
Latest = LatestFactor
return Latest(
inputs=(self,),
dtype=dtype,
missing_value=self.missing_value,
ndim=self.ndim,
)
def __repr__(self):
return "{qualname}::{dtype}".format(
qualname=self.qualname,
dtype=self.dtype.name,
)
def graph_repr(self):
"""Short repr to use when rendering Pipeline graphs."""
# Graphviz interprets `\l` as "divide label into lines, left-justified"
return "BoundColumn:\\l Dataset: {}\\l Column: {}\\l".format(
self.dataset.__name__,
self.name
)
def recursive_repr(self):
"""Short repr used to render in recursive contexts."""
return self.qualname
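# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): how BoundColumn
# instances are typically obtained and used. Assumes the public
# zipline.pipeline API is importable and that the pipeline is run with a
# loader that supplies FX rates for the ``.fx()`` conversion; the function
# name below is hypothetical and the column choices are arbitrary.
def _example_bound_column_usage():
    from zipline.pipeline import Pipeline
    from zipline.pipeline.data import EquityPricing

    # Accessing an attribute of a DataSet yields a BoundColumn.
    close = EquityPricing.close
    # ``.latest`` builds a Factor computing the most recent known close.
    latest_close = close.latest
    # ``.fx('USD')`` requests a currency-converted view of the same column.
    usd_close = EquityPricing.close.fx('USD')
    return Pipeline(
        columns={
            'close': latest_close,
            'usd_close': usd_close.latest,
        },
    )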
class DataSetMeta(type):
"""
Metaclass for DataSets
Supplies name and dataset information to Column attributes, and manages
    families of specialized datasets.
"""
def __new__(mcls, name, bases, dict_):
if len(bases) != 1:
# Disallowing multiple inheritance makes it easier for us to
# determine whether a given dataset is the root for its family of
# specializations.
raise TypeError("Multiple dataset inheritance is not supported.")
# This marker is set in the class dictionary by `specialize` below.
is_specialization = dict_.pop(IsSpecialization, False)
newtype = super(DataSetMeta, mcls).__new__(mcls, name, bases, dict_)
if not isinstance(newtype.domain, Domain):
raise TypeError(
"Expected a Domain for {}.domain, but got {} instead.".format(
newtype.__name__,
type(newtype.domain),
)
)
# Collect all of the column names that we inherit from our parents.
column_names = set().union(
*(getattr(base, '_column_names', ()) for base in bases)
)
# Collect any new columns from this dataset.
for maybe_colname, maybe_column in iteritems(dict_):
if isinstance(maybe_column, Column):
# add column names defined on our class
bound_column_descr = maybe_column.bind(maybe_colname)
setattr(newtype, maybe_colname, bound_column_descr)
column_names.add(maybe_colname)
newtype._column_names = frozenset(column_names)
if not is_specialization:
# This is the new root of a family of specializations. Store the
# memoized dictionary for family on this type.
newtype._domain_specializations = WeakKeyDictionary({
newtype.domain: newtype,
})
return newtype
@expect_types(domain=Domain)
def specialize(self, domain):
"""
Specialize a generic DataSet to a concrete domain.
Parameters
----------
domain : zipline.pipeline.domain.Domain
Domain to which we should generate a specialization.
Returns
-------
specialized : type
A new :class:`~zipline.pipeline.data.DataSet` subclass with the
same columns as ``self``, but specialized to ``domain``.
"""
# We're already the specialization to this domain, so just return self.
if domain == self.domain:
return self
try:
return self._domain_specializations[domain]
except KeyError:
if not self._can_create_new_specialization(domain):
# This either means we're already a specialization and trying
# to create a new specialization, or we're the generic version
# of a root-specialized dataset, which we don't want to create
# new specializations of.
raise ValueError(
"Can't specialize {dataset} to new domain {new}.".format(
dataset=self.__name__,
current=self.domain,
new=domain,
)
)
new_type = self._create_specialization(domain)
self._domain_specializations[domain] = new_type
return new_type
def unspecialize(self):
"""
Unspecialize a dataset to its generic form.
This is equivalent to ``dataset.specialize(GENERIC)``.
"""
return self.specialize(GENERIC)
def _can_create_new_specialization(self, domain):
# Always allow specializing to a generic domain.
if domain is GENERIC:
return True
elif '_domain_specializations' in vars(self):
# This branch is True if we're the root of a family.
# Allow specialization if we're generic.
return self.domain is GENERIC
else:
# If we're not the root of a family, we can't create any new
# specializations.
return False
def _create_specialization(self, domain):
# These are all assertions because we should have handled these cases
# already in specialize().
assert isinstance(domain, Domain)
assert domain not in self._domain_specializations, (
"Domain specializations should be memoized!"
)
if domain is not GENERIC:
assert self.domain is GENERIC, (
"Can't specialize dataset with domain {} to domain {}.".format(
self.domain, domain,
)
)
# Create a new subclass of ``self`` with the given domain.
# Mark that it's a specialization so that we know not to create a new
# family for it.
name = self.__name__
bases = (self,)
dict_ = {'domain': domain, IsSpecialization: True}
out = type(name, bases, dict_)
out.__module__ = self.__module__
return out
@property
def columns(self):
return frozenset(
getattr(self, colname) for colname in self._column_names
)
@property
def qualname(self):
if self.domain is GENERIC:
specialization_key = ''
else:
specialization_key = '<' + self.domain.country_code + '>'
return self.__name__ + specialization_key
# NOTE: We used to use `functools.total_ordering` to account for all of the
# other rich comparison methods, but it has issues in python 3 and
# this method is only used for test purposes, so for now we will just
# keep this in isolation. If we ever need any of the other comparison
# methods we will have to implement them individually.
def __lt__(self, other):
return id(self) < id(other)
def __repr__(self):
return '<DataSet: %r, domain=%s>' % (self.__name__, self.domain)
class DataSet(with_metaclass(DataSetMeta, object)):
"""
Base class for Pipeline datasets.
A :class:`DataSet` is defined by two parts:
1. A collection of :class:`~zipline.pipeline.data.Column` objects that
describe the queryable attributes of the dataset.
2. A :class:`~zipline.pipeline.domain.Domain` describing the assets and
calendar of the data represented by the :class:`DataSet`.
To create a new Pipeline dataset, define a subclass of :class:`DataSet` and
set one or more :class:`Column` objects as class-level attributes. Each
column requires a ``np.dtype`` that describes the type of data that should
be produced by a loader for the dataset. Integer columns must also provide
a "missing value" to be used when no value is available for a given
asset/date combination.
By default, the domain of a dataset is the special singleton value,
:data:`~zipline.pipeline.domain.GENERIC`, which means that they can be used
in a Pipeline running on **any** domain.
    In some cases, it may be preferable to restrict a dataset to only
support a single domain. For example, a DataSet may describe data from a
vendor that only covers the US. To restrict a dataset to a specific domain,
define a `domain` attribute at class scope.
You can also define a domain-specific version of a generic DataSet by
calling its ``specialize`` method with the domain of interest.
Examples
--------
The built-in EquityPricing dataset is defined as follows::
class EquityPricing(DataSet):
open = Column(float)
high = Column(float)
low = Column(float)
close = Column(float)
volume = Column(float)
The built-in USEquityPricing dataset is a specialization of
EquityPricing. It is defined as::
from zipline.pipeline.domain import US_EQUITIES
USEquityPricing = EquityPricing.specialize(US_EQUITIES)
Columns can have types other than float. A dataset containing assorted
company metadata might be defined like this::
class CompanyMetadata(DataSet):
# Use float for semantically-numeric data, even if it's always
# integral valued (see Notes section below). The default missing
# value for floats is NaN.
shares_outstanding = Column(float)
# Use object for string columns. The default missing value for
# object-dtype columns is None.
ticker = Column(object)
# Use integers for integer-valued categorical data like sector or
# industry codes. Integer-dtype columns require an explicit missing
# value.
sector_code = Column(int, missing_value=-1)
# Use bool for boolean-valued flags. Note that the default missing
# value for bool-dtype columns is False.
is_primary_share = Column(bool)
Notes
-----
Because numpy has no native support for integers with missing values, users
are strongly encouraged to use floats for any data that's semantically
numeric. Doing so enables the use of `NaN` as a natural missing value,
which has useful propagation semantics.
"""
domain = GENERIC
ndim = 2
@classmethod
def get_column(cls, name):
"""Look up a column by name.
Parameters
----------
name : str
Name of the column to look up.
Returns
-------
column : zipline.pipeline.data.BoundColumn
Column with the given name.
Raises
------
AttributeError
If no column with the given name exists.
"""
clsdict = vars(cls)
try:
maybe_column = clsdict[name]
if not isinstance(maybe_column, _BoundColumnDescr):
raise KeyError(name)
except KeyError:
raise AttributeError(
"{dset} has no column {colname!r}:\n\n"
"Possible choices are:\n"
"{choices}".format(
dset=cls.qualname,
colname=name,
choices=bulleted_list(
sorted(cls._column_names),
max_count=10,
),
)
)
# Resolve column descriptor into a BoundColumn.
return maybe_column.__get__(None, cls)
# This attribute is set by DataSetMeta to mark that a class is the root of a
# family of datasets with different domains. We don't want that behavior for the
# base DataSet class, and we also don't want to accidentally use a shared
# version of this attribute if we fail to set this in a subclass somewhere.
del DataSet._domain_specializations
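# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): defining a DataSet,
# looking up a column by name, and specializing the dataset to a domain. The
# dataset and its columns are hypothetical; a real dataset would also need a
# pipeline loader registered for its columns.
def _example_define_and_specialize_dataset():
    from zipline.pipeline.data import Column, DataSet
    from zipline.pipeline.domain import US_EQUITIES

    class ExampleMetadata(DataSet):
        # Float columns default to NaN as the missing value.
        shares_outstanding = Column(float)
        # Integer columns must declare an explicit missing value.
        sector_code = Column(int, missing_value=-1)

    # Columns can be looked up by name on the dataset that defines them.
    shares = ExampleMetadata.get_column('shares_outstanding')

    # specialize() binds the generic dataset to a concrete domain; results
    # are memoized, so repeated calls return the same class object.
    USExampleMetadata = ExampleMetadata.specialize(US_EQUITIES)
    return shares, USExampleMetadata.shares_outstanding.latest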
class DataSetFamilyLookupError(AttributeError):
"""Exception thrown when a column is accessed on a DataSetFamily
instead of on the result of a slice.
Parameters
----------
family_name : str
The name of the DataSetFamily on which the access occurred.
column_name : str
The name of the column accessed.
"""
def __init__(self, family_name, column_name):
self.family_name = family_name
self.column_name = column_name
def __str__(self):
# NOTE: when ``aggregate`` is added, remember to update this message
return dedent(
"""\
Attempted to access column {c} from DataSetFamily {d}:
To work with dataset families, you must first select a
slice using the ``slice`` method:
{d}.slice(...).{c}
""".format(c=self.column_name, d=self.family_name)
)
class _DataSetFamilyColumn(object):
"""Descriptor used to raise a helpful error when a column is accessed on a
DataSetFamily instead of on the result of a slice.
Parameters
----------
    column_name : str
The name of the column.
"""
def __init__(self, column_name):
self.column_name = column_name
def __get__(self, instance, owner):
raise DataSetFamilyLookupError(
owner.__name__,
self.column_name,
)
class DataSetFamilyMeta(abc.ABCMeta):
def __new__(cls, name, bases, dict_):
columns = {}
for k, v in dict_.items():
if isinstance(v, Column):
# capture all the columns off the DataSetFamily class
# and replace them with a descriptor that will raise a helpful
# error message. The columns will get added to the BaseSlice
# for this type.
columns[k] = v
dict_[k] = _DataSetFamilyColumn(k)
is_abstract = dict_.pop('_abstract', False)
self = super(DataSetFamilyMeta, cls).__new__(
cls,
name,
bases,
dict_,
)
if not is_abstract:
self.extra_dims = extra_dims = OrderedDict([
(k, frozenset(v))
for k, v in OrderedDict(self.extra_dims).items()
])
if not extra_dims:
raise ValueError(
'DataSetFamily must be defined with non-empty'
' extra_dims, or with `_abstract = True`',
)
class BaseSlice(self._SliceType):
dataset_family = self
ndim = self.slice_ndim
domain = self.domain
locals().update(columns)
BaseSlice.__name__ = '%sBaseSlice' % self.__name__
self._SliceType = BaseSlice
# each type gets a unique cache
self._slice_cache = {}
return self
def __repr__(self):
return '<DataSetFamily: %r, extra_dims=%r>' % (
self.__name__,
list(self.extra_dims),
)
class DataSetFamilySlice(DataSet):
"""Marker type for slices of a
:class:`zipline.pipeline.data.dataset.DataSetFamily` objects
"""
# XXX: This docstring was mostly written when the abstraction here was
# "MultiDimensionalDataSet". It probably needs some rewriting.
class DataSetFamily(with_metaclass(DataSetFamilyMeta)):
"""
Base class for Pipeline dataset families.
Dataset families are used to represent data where the unique identifier for
a row requires more than just asset and date coordinates. A
:class:`DataSetFamily` can also be thought of as a collection of
:class:`~zipline.pipeline.data.DataSet` objects, each of which has the same
columns, domain, and ndim.
:class:`DataSetFamily` objects are defined with one or more
:class:`~zipline.pipeline.data.Column` objects, plus one additional field:
``extra_dims``.
The ``extra_dims`` field defines coordinates other than asset and date that
must be fixed to produce a logical timeseries. The column objects determine
columns that will be shared by slices of the family.
``extra_dims`` are represented as an ordered dictionary where the keys are
the dimension name, and the values are a set of unique values along that
dimension.
To work with a :class:`DataSetFamily` in a pipeline expression, one must
choose a specific value for each of the extra dimensions using the
:meth:`~zipline.pipeline.data.DataSetFamily.slice` method.
For example, given a :class:`DataSetFamily`:
.. code-block:: python
class SomeDataSet(DataSetFamily):
extra_dims = [
('dimension_0', {'a', 'b', 'c'}),
('dimension_1', {'d', 'e', 'f'}),
]
column_0 = Column(float)
column_1 = Column(bool)
This dataset might represent a table with the following columns:
::
sid :: int64
asof_date :: datetime64[ns]
timestamp :: datetime64[ns]
dimension_0 :: str
dimension_1 :: str
column_0 :: float64
column_1 :: bool
Here we see the implicit ``sid``, ``asof_date`` and ``timestamp`` columns
as well as the extra dimensions columns.
This :class:`DataSetFamily` can be converted to a regular :class:`DataSet`
with:
.. code-block:: python
DataSetSlice = SomeDataSet.slice(dimension_0='a', dimension_1='e')
This sliced dataset represents the rows from the higher dimensional dataset
where ``(dimension_0 == 'a') & (dimension_1 == 'e')``.
"""
_abstract = True # Removed by metaclass
domain = GENERIC
slice_ndim = 2
_SliceType = DataSetFamilySlice
@type.__call__
class extra_dims(object):
"""OrderedDict[str, frozenset] of dimension name -> unique values
May be defined on subclasses as an iterable of pairs: the
metaclass converts this attribute to an OrderedDict.
"""
__isabstractmethod__ = True
def __get__(self, instance, owner):
return []
@classmethod
def _canonical_key(cls, args, kwargs):
extra_dims = cls.extra_dims
dimensions_set = set(extra_dims)
if not set(kwargs) <= dimensions_set:
extra = sorted(set(kwargs) - dimensions_set)
raise TypeError(
'%s does not have the following %s: %s\n'
'Valid dimensions are: %s' % (
cls.__name__,
s('dimension', extra),
', '.join(extra),
', '.join(extra_dims),
),
)
if len(args) > len(extra_dims):
raise TypeError(
'%s has %d extra %s but %d %s given' % (
cls.__name__,
len(extra_dims),
s('dimension', extra_dims),
len(args),
plural('was', 'were', args),
),
)
missing = object()
coords = OrderedDict(zip(extra_dims, repeat(missing)))
to_add = dict(zip(extra_dims, args))
coords.update(to_add)
added = set(to_add)
for key, value in kwargs.items():
if key in added:
raise TypeError(
'%s got multiple values for dimension %r' % (
cls.__name__,
                        key,
),
)
coords[key] = value
added.add(key)
missing = {k for k, v in coords.items() if v is missing}
if missing:
missing = sorted(missing)
raise TypeError(
'no coordinate provided to %s for the following %s: %s' % (
cls.__name__,
s('dimension', missing),
', '.join(missing),
),
)
# validate that all of the provided values exist along their given
# dimensions
for key, value in coords.items():
if value not in cls.extra_dims[key]:
raise ValueError(
'%r is not a value along the %s dimension of %s' % (
value,
key,
cls.__name__,
),
)
return coords, tuple(coords.items())
@classmethod
def _make_dataset(cls, coords):
"""Construct a new dataset given the coordinates.
"""
class Slice(cls._SliceType):
extra_coords = coords
Slice.__name__ = '%s.slice(%s)' % (
cls.__name__,
', '.join('%s=%r' % item for item in coords.items()),
)
return Slice
@classmethod
def slice(cls, *args, **kwargs):
"""Take a slice of a DataSetFamily to produce a dataset
indexed by asset and date.
Parameters
----------
*args
**kwargs
The coordinates to fix along each extra dimension.
Returns
-------
dataset : DataSet
A regular pipeline dataset indexed by asset and date.
Notes
-----
The extra dimensions coords used to produce the result are available
under the ``extra_coords`` attribute.
"""
coords, hash_key = cls._canonical_key(args, kwargs)
try:
return cls._slice_cache[hash_key]
except KeyError:
pass
Slice = cls._make_dataset(coords)
cls._slice_cache[hash_key] = Slice
return Slice
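# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): defining a
# DataSetFamily and slicing it down to a regular DataSet, mirroring the
# SomeDataSet example in the class docstring. The family, its dimension
# values, and the loader wiring are hypothetical.
def _example_dataset_family_slice():
    from zipline.pipeline.data import Column, DataSetFamily

    class ExampleEstimates(DataSetFamily):
        extra_dims = [
            ('report_type', {'annual', 'quarterly'}),
        ]
        estimate = Column(float)

    # slice() fixes every extra dimension and returns a DataSet subclass
    # indexed only by asset and date; results are cached per coordinate set.
    QuarterlyEstimates = ExampleEstimates.slice(report_type='quarterly')
    return QuarterlyEstimates.estimate.latest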
CurrencyConversion = namedtuple(
'CurrencyConversion',
['currency', 'field'],
) | zipline-trader | /zipline_trader-1.6.1-cp36-cp36m-win32.whl/zipline/pipeline/data/dataset.py | dataset.py |
from itertools import chain
from operator import attrgetter
from numpy import (
any as np_any,
float64,
nan,
nanpercentile,
uint8,
)
from zipline.errors import (
BadPercentileBounds,
NonExistentAssetInTimeFrame,
UnsupportedDataType,
)
from zipline.lib.labelarray import LabelArray
from zipline.lib.rank import is_missing, grouped_masked_is_maximal
from zipline.pipeline.dtypes import (
CLASSIFIER_DTYPES,
FACTOR_DTYPES,
FILTER_DTYPES,
)
from zipline.pipeline.expression import (
BadBinaryOperator,
FILTER_BINOPS,
method_name_for_op,
NumericalExpression,
)
from zipline.pipeline.mixins import (
CustomTermMixin,
IfElseMixin,
LatestMixin,
PositiveWindowLengthMixin,
RestrictedDTypeMixin,
SingleInputMixin,
StandardOutputs,
)
from zipline.pipeline.term import ComputableTerm, Term
from zipline.utils.input_validation import expect_types
from zipline.utils.numpy_utils import (
same,
bool_dtype,
int64_dtype,
repeat_first_axis,
)
from ..sentinels import NotSpecified
def concat_tuples(*tuples):
"""
Concatenate a sequence of tuples into one tuple.
"""
return tuple(chain(*tuples))
def binary_operator(op):
"""
Factory function for making binary operator methods on a Filter subclass.
Returns a function "binary_operator" suitable for implementing functions
like __and__ or __or__.
"""
# When combining a Filter with a NumericalExpression, we use this
# attrgetter instance to defer to the commuted interpretation of the
# NumericalExpression operator.
commuted_method_getter = attrgetter(method_name_for_op(op, commute=True))
def binary_operator(self, other):
if isinstance(self, NumericalExpression):
self_expr, other_expr, new_inputs = self.build_binary_op(
op, other,
)
return NumExprFilter.create(
"({left}) {op} ({right})".format(
left=self_expr,
op=op,
right=other_expr,
),
new_inputs,
)
elif isinstance(other, NumericalExpression):
# NumericalExpression overrides numerical ops to correctly handle
# merging of inputs. Look up and call the appropriate
# right-binding operator with ourself as the input.
return commuted_method_getter(other)(self)
elif isinstance(other, Term):
if other.dtype != bool_dtype:
raise BadBinaryOperator(op, self, other)
if self is other:
return NumExprFilter.create(
"x_0 {op} x_0".format(op=op),
(self,),
)
return NumExprFilter.create(
"x_0 {op} x_1".format(op=op),
(self, other),
)
elif isinstance(other, int): # Note that this is true for bool as well
return NumExprFilter.create(
"x_0 {op} {constant}".format(op=op, constant=int(other)),
binds=(self,),
)
raise BadBinaryOperator(op, self, other)
binary_operator.__doc__ = "Binary Operator: '%s'" % op
return binary_operator
def unary_operator(op):
"""
Factory function for making unary operator methods for Filters.
"""
valid_ops = {'~'}
if op not in valid_ops:
raise ValueError("Invalid unary operator %s." % op)
def unary_operator(self):
# This can't be hoisted up a scope because the types returned by
# unary_op_return_type aren't defined when the top-level function is
# invoked.
if isinstance(self, NumericalExpression):
return NumExprFilter.create(
"{op}({expr})".format(op=op, expr=self._expr),
self.inputs,
)
else:
return NumExprFilter.create("{op}x_0".format(op=op), (self,))
unary_operator.__doc__ = "Unary Operator: '%s'" % op
return unary_operator
class Filter(RestrictedDTypeMixin, ComputableTerm):
"""
Pipeline expression computing a boolean output.
Filters are most commonly useful for describing sets of assets to include
or exclude for some particular purpose. Many Pipeline API functions accept
a ``mask`` argument, which can be supplied a Filter indicating that only
values passing the Filter should be considered when performing the
requested computation. For example, :meth:`zipline.pipeline.Factor.top`
accepts a mask indicating that ranks should be computed only on assets that
passed the specified Filter.
The most common way to construct a Filter is via one of the comparison
operators (``<``, ``<=``, ``!=``, ``eq``, ``>``, ``>=``) of
:class:`~zipline.pipeline.Factor`. For example, a natural way to construct
a Filter for stocks with a 10-day VWAP less than $20.0 is to first
construct a Factor computing 10-day VWAP and compare it to the scalar value
20.0::
>>> from zipline.pipeline.factors import VWAP
>>> vwap_10 = VWAP(window_length=10)
>>> vwaps_under_20 = (vwap_10 <= 20)
Filters can also be constructed via comparisons between two Factors. For
example, to construct a Filter producing True for asset/date pairs where
    the asset's 10-day VWAP was greater than its 30-day VWAP::
>>> short_vwap = VWAP(window_length=10)
>>> long_vwap = VWAP(window_length=30)
>>> higher_short_vwap = (short_vwap > long_vwap)
Filters can be combined via the ``&`` (and) and ``|`` (or) operators.
``&``-ing together two filters produces a new Filter that produces True if
**both** of the inputs produced True.
``|``-ing together two filters produces a new Filter that produces True if
**either** of its inputs produced True.
The ``~`` operator can be used to invert a Filter, swapping all True values
with Falses and vice-versa.
Filters may be set as the ``screen`` attribute of a Pipeline, indicating
asset/date pairs for which the filter produces False should be excluded
from the Pipeline's output. This is useful both for reducing noise in the
output of a Pipeline and for reducing memory consumption of Pipeline
results.
"""
# Filters are window-safe by default, since a yes/no decision means the
# same thing from all temporal perspectives.
window_safe = True
# Used by RestrictedDTypeMixin
ALLOWED_DTYPES = FILTER_DTYPES
dtype = bool_dtype
clsdict = locals()
clsdict.update(
{
method_name_for_op(op): binary_operator(op)
for op in FILTER_BINOPS
}
)
clsdict.update(
{
method_name_for_op(op, commute=True): binary_operator(op)
for op in FILTER_BINOPS
}
)
__invert__ = unary_operator('~')
def _validate(self):
# Run superclass validation first so that we handle `dtype not passed`
# before this.
retval = super(Filter, self)._validate()
if self.dtype != bool_dtype:
raise UnsupportedDataType(
typename=type(self).__name__,
dtype=self.dtype
)
return retval
@classmethod
def _principal_computable_term_type(cls):
return Filter
@expect_types(if_true=ComputableTerm, if_false=ComputableTerm)
def if_else(self, if_true, if_false):
"""
Create a term that selects values from one of two choices.
Parameters
----------
if_true : zipline.pipeline.term.ComputableTerm
Expression whose values should be used at locations where this
filter outputs True.
if_false : zipline.pipeline.term.ComputableTerm
Expression whose values should be used at locations where this
filter outputs False.
Returns
-------
merged : zipline.pipeline.term.ComputableTerm
A term that computes by taking values from either ``if_true`` or
``if_false``, depending on the values produced by ``self``.
            The returned term draws from ``if_true`` at locations where ``self``
produces True, and it draws from ``if_false`` at locations where
``self`` produces False.
Example
-------
Let ``f`` be a Factor that produces the following output::
AAPL MSFT MCD BK
2017-03-13 1.0 2.0 3.0 4.0
2017-03-14 5.0 6.0 7.0 8.0
Let ``g`` be another Factor that produces the following output::
AAPL MSFT MCD BK
2017-03-13 10.0 20.0 30.0 40.0
2017-03-14 50.0 60.0 70.0 80.0
Finally, let ``condition`` be a Filter that produces the following
output::
AAPL MSFT MCD BK
2017-03-13 True False True False
2017-03-14 True True False False
Then, the expression ``condition.if_else(f, g)`` produces the following
output::
AAPL MSFT MCD BK
2017-03-13 1.0 20.0 3.0 40.0
2017-03-14 5.0 6.0 70.0 80.0
See Also
--------
numpy.where
Factor.fillna
"""
true_type = if_true._principal_computable_term_type()
false_type = if_false._principal_computable_term_type()
if true_type is not false_type:
raise TypeError(
"Mismatched types in if_else(): if_true={}, but if_false={}"
.format(true_type.__name__, false_type.__name__)
)
if if_true.dtype != if_false.dtype:
raise TypeError(
"Mismatched dtypes in if_else(): "
"if_true.dtype = {}, if_false.dtype = {}"
.format(if_true.dtype, if_false.dtype)
)
if if_true.outputs != if_false.outputs:
raise ValueError(
"Mismatched outputs in if_else(): "
"if_true.outputs = {}, if_false.outputs = {}"
.format(if_true.outputs, if_false.outputs),
)
if not same(if_true.missing_value, if_false.missing_value):
raise ValueError(
"Mismatched missing values in if_else(): "
"if_true.missing_value = {!r}, if_false.missing_value = {!r}"
.format(if_true.missing_value, if_false.missing_value)
)
return_type = type(if_true)._with_mixin(IfElseMixin)
return return_type(
condition=self,
if_true=if_true,
if_false=if_false,
)
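# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): building Filters
# from Factor comparisons and combining them with ``&``, ``|`` and ``~``, as
# described in the Filter docstring above. Window lengths and the $20
# threshold are arbitrary choices.
def _example_combine_filters():
    from zipline.pipeline import Pipeline
    from zipline.pipeline.data import EquityPricing
    from zipline.pipeline.factors import SimpleMovingAverage, VWAP

    sma_10 = SimpleMovingAverage(inputs=[EquityPricing.close], window_length=10)
    vwap_10 = VWAP(window_length=10)

    cheap = vwap_10 <= 20.0      # Factor vs. scalar comparison -> Filter
    rising = sma_10 > vwap_10    # Factor vs. Factor comparison -> Filter
    tradeable = cheap & rising   # True only where both inputs are True
    expensive = ~cheap           # invert a Filter

    # Rows for which the screen is False are dropped from the output.
    return Pipeline(
        columns={'vwap': vwap_10, 'expensive': expensive},
        screen=tradeable | expensive,
    )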
class NumExprFilter(NumericalExpression, Filter):
"""
A Filter computed from a numexpr expression.
"""
@classmethod
def create(cls, expr, binds):
"""
        Helper for creating new NumExprFilters.
This is just a wrapper around NumericalExpression.__new__ that always
forwards `bool` as the dtype, since Filters can only be of boolean
dtype.
"""
return cls(expr=expr, binds=binds, dtype=bool_dtype)
def _compute(self, arrays, dates, assets, mask):
"""
Compute our result with numexpr, then re-apply `mask`.
"""
return super(NumExprFilter, self)._compute(
arrays,
dates,
assets,
mask,
) & mask
class NullFilter(SingleInputMixin, Filter):
"""
A Filter indicating whether input values are missing from an input.
Parameters
----------
factor : zipline.pipeline.Term
The factor to compare against its missing_value.
"""
window_length = 0
def __new__(cls, term):
return super(NullFilter, cls).__new__(
cls,
inputs=(term,),
)
def _compute(self, arrays, dates, assets, mask):
data = arrays[0]
if isinstance(data, LabelArray):
return data.is_missing()
return is_missing(arrays[0], self.inputs[0].missing_value)
class NotNullFilter(SingleInputMixin, Filter):
"""
A Filter indicating whether input values are **not** missing from an input.
Parameters
----------
factor : zipline.pipeline.Term
The factor to compare against its missing_value.
"""
window_length = 0
def __new__(cls, term):
return super(NotNullFilter, cls).__new__(
cls,
inputs=(term,),
)
def _compute(self, arrays, dates, assets, mask):
data = arrays[0]
if isinstance(data, LabelArray):
return ~data.is_missing()
return ~is_missing(arrays[0], self.inputs[0].missing_value)
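# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): NullFilter and
# NotNullFilter are usually constructed indirectly through the ``isnull()``
# and ``notnull()`` methods of a term rather than instantiated directly.
def _example_missing_value_filters():
    from zipline.pipeline.data import EquityPricing

    latest_close = EquityPricing.close.latest
    has_price = latest_close.notnull()     # backed by NotNullFilter
    missing_price = latest_close.isnull()  # backed by NullFilter
    return has_price, missing_price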
class PercentileFilter(SingleInputMixin, Filter):
"""
A Filter representing assets falling between percentile bounds of a Factor.
Parameters
----------
factor : zipline.pipeline.factor.Factor
The factor over which to compute percentile bounds.
    min_percentile : float [0.0, 100.0]
        The minimum percentile rank of an asset that will pass the filter.
    max_percentile : float [0.0, 100.0]
        The maximum percentile rank of an asset that will pass the filter.
"""
window_length = 0
def __new__(cls, factor, min_percentile, max_percentile, mask):
return super(PercentileFilter, cls).__new__(
cls,
inputs=(factor,),
mask=mask,
min_percentile=min_percentile,
max_percentile=max_percentile,
)
def _init(self, min_percentile, max_percentile, *args, **kwargs):
self._min_percentile = min_percentile
self._max_percentile = max_percentile
return super(PercentileFilter, self)._init(*args, **kwargs)
@classmethod
def _static_identity(cls, min_percentile, max_percentile, *args, **kwargs):
return (
super(PercentileFilter, cls)._static_identity(*args, **kwargs),
min_percentile,
max_percentile,
)
def _validate(self):
"""
Ensure that our percentile bounds are well-formed.
"""
if not 0.0 <= self._min_percentile < self._max_percentile <= 100.0:
raise BadPercentileBounds(
min_percentile=self._min_percentile,
max_percentile=self._max_percentile,
upper_bound=100.0
)
return super(PercentileFilter, self)._validate()
def _compute(self, arrays, dates, assets, mask):
"""
For each row in the input, compute a mask of all values falling between
the given percentiles.
"""
# TODO: Review whether there's a better way of handling small numbers
# of columns.
data = arrays[0].copy().astype(float64)
data[~mask] = nan
# FIXME: np.nanpercentile **should** support computing multiple bounds
# at once, but there's a bug in the logic for multiple bounds in numpy
# 1.9.2. It will be fixed in 1.10.
# c.f. https://github.com/numpy/numpy/pull/5981
lower_bounds = nanpercentile(
data,
self._min_percentile,
axis=1,
keepdims=True,
)
upper_bounds = nanpercentile(
data,
self._max_percentile,
axis=1,
keepdims=True,
)
return (lower_bounds <= data) & (data <= upper_bounds)
def graph_repr(self):
# Graphviz interprets `\l` as "divide label into lines, left-justified"
return "{}:\\l min: {}, max: {}\\l".format(
type(self).__name__,
self._min_percentile,
self._max_percentile,
)
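# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): PercentileFilter is
# normally reached through Factor.percentile_between(). This keeps only the
# middle half of a returns distribution; the window length is arbitrary.
def _example_percentile_filter():
    from zipline.pipeline.data import EquityPricing
    from zipline.pipeline.factors import Returns

    weekly_returns = Returns(inputs=[EquityPricing.close], window_length=5)
    middle_half = weekly_returns.percentile_between(25.0, 75.0)
    return middle_half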
class CustomFilter(PositiveWindowLengthMixin, CustomTermMixin, Filter):
"""
Base class for user-defined Filters.
Parameters
----------
inputs : iterable, optional
An iterable of `BoundColumn` instances (e.g. USEquityPricing.close),
describing the data to load and pass to ``self.compute``. If this
        argument is not passed to the CustomFilter constructor, we look for a
class-level attribute named ``inputs``.
window_length : int, optional
Number of rows to pass for each input. If this argument is not passed
to the CustomFilter constructor, we look for a class-level attribute
named `window_length`.
Notes
-----
Users implementing their own Filters should subclass CustomFilter and
implement a method named ``compute`` with the following signature:
.. code-block:: python
def compute(self, today, assets, out, *inputs):
...
On each simulation date, ``compute`` will be called with the current date,
an array of sids, an output array, and an input array for each expression
passed as inputs to the CustomFilter constructor.
The specific types of the values passed to ``compute`` are as follows::
today : np.datetime64[ns]
Row label for the last row of all arrays passed as `inputs`.
assets : np.array[int64, ndim=1]
            Column labels for `out` and `inputs`.
out : np.array[bool, ndim=1]
Output array of the same shape as `assets`. `compute` should write
its desired return values into `out`.
*inputs : tuple of np.array
Raw data arrays corresponding to the values of `self.inputs`.
See the documentation for
:class:`~zipline.pipeline.CustomFactor` for more details on
implementing a custom ``compute`` method.
See Also
--------
zipline.pipeline.CustomFactor
"""
def _validate(self):
try:
super(CustomFilter, self)._validate()
except UnsupportedDataType:
if self.dtype in CLASSIFIER_DTYPES:
raise UnsupportedDataType(
typename=type(self).__name__,
dtype=self.dtype,
hint='Did you mean to create a CustomClassifier?',
)
elif self.dtype in FACTOR_DTYPES:
raise UnsupportedDataType(
typename=type(self).__name__,
dtype=self.dtype,
hint='Did you mean to create a CustomFactor?',
)
raise
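# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): a minimal
# CustomFilter subclass implementing ``compute`` as described in the
# docstring above. The class name, window length, and threshold logic are
# purely illustrative.
def _example_custom_filter():
    from zipline.pipeline.data import EquityPricing
    from zipline.pipeline.filters import CustomFilter

    class CloseAboveWindowMean(CustomFilter):
        inputs = [EquityPricing.close]
        window_length = 20

        def compute(self, today, assets, out, close):
            # ``close`` has shape (window_length, len(assets)); write one
            # boolean per asset into ``out``.
            out[:] = close[-1] > close.mean(axis=0)

    return CloseAboveWindowMean()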
class ArrayPredicate(SingleInputMixin, Filter):
"""
A filter applying a function from (ndarray, *args) -> ndarray[bool].
Parameters
----------
term : zipline.pipeline.Term
Term producing the array over which the predicate will be computed.
op : function(ndarray, *args) -> ndarray[bool]
Function to apply to the result of `term`.
opargs : tuple[hashable]
        Additional arguments to apply to ``op``.
"""
params = ('op', 'opargs')
window_length = 0
@expect_types(term=Term, opargs=tuple)
def __new__(cls, term, op, opargs):
hash(opargs) # fail fast if opargs isn't hashable.
return super(ArrayPredicate, cls).__new__(
ArrayPredicate,
op=op,
opargs=opargs,
inputs=(term,),
mask=term.mask,
)
def _compute(self, arrays, dates, assets, mask):
params = self.params
data = arrays[0]
return params['op'](data, *params['opargs']) & mask
def graph_repr(self):
# Graphviz interprets `\l` as "divide label into lines, left-justified"
return "{}:\\l op: {}.{}()".format(
type(self).__name__,
self.params['op'].__module__,
self.params['op'].__name__,
)
class Latest(LatestMixin, CustomFilter):
"""
Filter producing the most recently-known value of `inputs[0]` on each day.
"""
pass
class SingleAsset(Filter):
"""
A Filter that computes to True only for the given asset.
"""
inputs = []
window_length = 1
def __new__(cls, asset):
return super(SingleAsset, cls).__new__(cls, asset=asset)
def _init(self, asset, *args, **kwargs):
self._asset = asset
return super(SingleAsset, self)._init(*args, **kwargs)
@classmethod
def _static_identity(cls, asset, *args, **kwargs):
return (
super(SingleAsset, cls)._static_identity(*args, **kwargs), asset,
)
def _compute(self, arrays, dates, assets, mask):
is_my_asset = (assets == self._asset.sid)
out = repeat_first_axis(is_my_asset, len(mask))
# Raise an exception if `self._asset` does not exist for the entirety
# of the timeframe over which we are computing.
if (is_my_asset.sum() != 1) or ((out & mask).sum() != len(mask)):
raise NonExistentAssetInTimeFrame(
asset=self._asset, start_date=dates[0], end_date=dates[-1],
)
return out
def graph_repr(self):
# Graphviz interprets `\l` as "divide label into lines, left-justified"
return "SingleAsset:\\l asset: {!r}\\l".format(self._asset)
class StaticSids(Filter):
"""
A Filter that computes True for a specific set of predetermined sids.
``StaticSids`` is mostly useful for debugging or for interactively
computing pipeline terms for a fixed set of sids that are known ahead of
time.
Parameters
----------
sids : iterable[int]
An iterable of sids for which to filter.
"""
inputs = ()
window_length = 0
params = ('sids',)
def __new__(cls, sids):
sids = frozenset(sids)
return super(StaticSids, cls).__new__(cls, sids=sids)
def _compute(self, arrays, dates, sids, mask):
my_columns = sids.isin(self.params['sids'])
return repeat_first_axis(my_columns, len(mask)) & mask
class StaticAssets(StaticSids):
"""
A Filter that computes True for a specific set of predetermined assets.
``StaticAssets`` is mostly useful for debugging or for interactively
computing pipeline terms for a fixed set of assets that are known ahead of
time.
Parameters
----------
assets : iterable[Asset]
An iterable of assets for which to filter.
"""
def __new__(cls, assets):
sids = frozenset(asset.sid for asset in assets)
return super(StaticAssets, cls).__new__(cls, sids)
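# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): using StaticAssets
# as a debugging screen that restricts a pipeline to a known universe. The
# ``assets_of_interest`` argument is assumed to be an iterable of Asset
# objects obtained elsewhere (e.g. via symbol lookup inside initialize()).
def _example_static_assets_screen(assets_of_interest):
    from zipline.pipeline import Pipeline
    from zipline.pipeline.data import EquityPricing
    from zipline.pipeline.filters import StaticAssets

    screen = StaticAssets(assets_of_interest)
    return Pipeline(
        columns={'close': EquityPricing.close.latest},
        screen=screen,
    )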
class AllPresent(CustomFilter, SingleInputMixin, StandardOutputs):
"""Pipeline filter indicating input term has data for a given window.
"""
def _validate(self):
if isinstance(self.inputs[0], Filter):
raise TypeError(
"Input to filter `AllPresent` cannot be a Filter."
)
return super(AllPresent, self)._validate()
def compute(self, today, assets, out, value):
if isinstance(value, LabelArray):
out[:] = ~np_any(value.is_missing(), axis=0)
else:
out[:] = ~np_any(
is_missing(value, self.inputs[0].missing_value),
axis=0,
)
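# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): because AllPresent
# is a CustomFilter, it can be constructed directly from an input column and
# a window length, producing True where no value was missing over the
# trailing window. The 20-day window is arbitrary, and the import path
# assumes AllPresent is re-exported from zipline.pipeline.filters.
def _example_all_present():
    from zipline.pipeline.data import EquityPricing
    from zipline.pipeline.filters import AllPresent

    return AllPresent(inputs=[EquityPricing.close], window_length=20)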
class MaximumFilter(Filter, StandardOutputs):
"""Pipeline filter that selects the top asset, possibly grouped and masked.
"""
window_length = 0
def __new__(cls, factor, groupby, mask):
if groupby is NotSpecified:
from zipline.pipeline.classifiers import Everything
groupby = Everything()
return super(MaximumFilter, cls).__new__(
cls,
inputs=(factor, groupby),
mask=mask,
)
def _compute(self, arrays, dates, assets, mask):
        # XXX: We're doing a lot of unnecessary work here if `groupby` isn't
# specified.
data = arrays[0]
group_labels, null_label = self.inputs[1]._to_integral(arrays[1])
effective_mask = (
mask
& (group_labels != null_label)
& ~is_missing(data, self.inputs[0].missing_value)
).view(uint8)
return grouped_masked_is_maximal(
# Unconditionally view the data as int64.
# This is safe because casting from float64 to int64 is an
# order-preserving operation.
data.view(int64_dtype),
# PERF: Consider supporting different sizes of group labels.
group_labels.astype(int64_dtype),
effective_mask,
)
def __repr__(self):
return "Maximum({}, groupby={}, mask={})".format(
self.inputs[0].recursive_repr(),
self.inputs[1].recursive_repr(),
self.mask.recursive_repr(),
)
def graph_repr(self):
# Graphviz interprets `\l` as "divide label into lines, left-justified"
return "Maximum:\\l groupby: {}\\l mask: {}\\l".format(
self.inputs[1].recursive_repr(),
self.mask.recursive_repr(),
) | zipline-trader | /zipline_trader-1.6.1-cp36-cp36m-win32.whl/zipline/pipeline/filters/filter.py | filter.py |
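# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): MaximumFilter is
# normally reached through Factor.top(1, ...), which special-cases N == 1 to
# this filter. The factor and window length below are arbitrary.
def _example_maximum_filter():
    from zipline.pipeline.factors import AverageDollarVolume

    adv_20 = AverageDollarVolume(window_length=20)
    # The single asset with the highest 20-day average dollar volume each day.
    return adv_20.top(1)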