from collections import defaultdict from interface import implements from numpy import iinfo, uint32, multiply from zipline.data.fx import ExplodingFXRateReader from zipline.lib.adjusted_array import AdjustedArray from zipline.utils.numpy_utils import repeat_first_axis from .base import PipelineLoader from .utils import shift_dates from ..data.equity_pricing import EquityPricing UINT32_MAX = iinfo(uint32).max class EquityPricingLoader(implements(PipelineLoader)): """A PipelineLoader for loading daily OHLCV data. Parameters ---------- raw_price_reader : zipline.data.session_bars.SessionBarReader Reader providing raw prices. adjustments_reader : zipline.data.adjustments.SQLiteAdjustmentReader Reader providing price/volume adjustments. fx_reader : zipline.data.fx.FXRateReader Reader providing currency conversions. """ def __init__(self, raw_price_reader, adjustments_reader, fx_reader): self.raw_price_reader = raw_price_reader self.adjustments_reader = adjustments_reader self.fx_reader = fx_reader @classmethod def without_fx(cls, raw_price_reader, adjustments_reader): """ Construct an EquityPricingLoader without support for fx rates. The returned loader will raise an error if requested to load currency-converted columns. Parameters ---------- raw_price_reader : zipline.data.session_bars.SessionBarReader Reader providing raw prices. adjustments_reader : zipline.data.adjustments.SQLiteAdjustmentReader Reader providing price/volume adjustments. Returns ------- loader : EquityPricingLoader A loader that can only provide currency-naive data. """ return cls( raw_price_reader=raw_price_reader, adjustments_reader=adjustments_reader, fx_reader=ExplodingFXRateReader(), ) def load_adjusted_array(self, domain, columns, dates, sids, mask): # load_adjusted_array is called with dates on which the user's algo # will be shown data, which means we need to return the data that would # be known at the **start** of each date. We assume that the latest # data known on day N is the data from day (N - 1), so we shift all # query dates back by a trading session. sessions = domain.all_sessions() shifted_dates = shift_dates(sessions, dates[0], dates[-1], shift=1) ohlcv_cols, currency_cols = self._split_column_types(columns) del columns # From here on we should use ohlcv_cols or currency_cols. ohlcv_colnames = [c.name for c in ohlcv_cols] raw_ohlcv_arrays = self.raw_price_reader.load_raw_arrays( ohlcv_colnames, shifted_dates[0], shifted_dates[-1], sids, ) # Currency convert raw_arrays in place if necessary. We use shifted # dates to load currency conversion rates to make them line up with # dates used to fetch prices. self._inplace_currency_convert( ohlcv_cols, raw_ohlcv_arrays, shifted_dates, sids, ) adjustments = self.adjustments_reader.load_pricing_adjustments( ohlcv_colnames, dates, sids, ) out = {} for c, c_raw, c_adjs in zip(ohlcv_cols, raw_ohlcv_arrays, adjustments): out[c] = AdjustedArray( c_raw.astype(c.dtype), c_adjs, c.missing_value, ) for c in currency_cols: codes_1d = self.raw_price_reader.currency_codes(sids) codes = repeat_first_axis(codes_1d, len(dates)) out[c] = AdjustedArray( codes, adjustments={}, missing_value=None, ) return out @property def currency_aware(self): # Tell the pipeline engine that this loader supports currency # conversion if we have a non-dummy fx rates reader. return not isinstance(self.fx_reader, ExplodingFXRateReader) def _inplace_currency_convert(self, columns, arrays, dates, sids): """ Currency convert raw data loaded for ``column``. 
Parameters ---------- columns : list[zipline.pipeline.data.BoundColumn] List of columns whose raw data has been loaded. arrays : list[np.array] List of arrays, parallel to ``columns`` containing data for the column. dates : pd.DatetimeIndex Labels for rows of ``arrays``. These are the dates that should be used to fetch fx rates for conversion. sids : np.array[int64] Labels for columns of ``arrays``. Returns ------- None Side Effects ------------ Modifies ``arrays`` in place by applying currency conversions. """ # Group columns by currency conversion spec. by_spec = defaultdict(list) for column, array in zip(columns, arrays): by_spec[column.currency_conversion].append(array) # Nothing to do for terms with no currency conversion. by_spec.pop(None, None) if not by_spec: return fx_reader = self.fx_reader base_currencies = self.raw_price_reader.currency_codes(sids) # Columns with the same conversion spec will use the same multipliers. for spec, arrays in by_spec.items(): rates = fx_reader.get_rates( rate=spec.field, quote=spec.currency.code, bases=base_currencies, dts=dates, ) for arr in arrays: multiply(arr, rates, out=arr) def _split_column_types(self, columns): """Split out currency columns from OHLCV columns. Parameters ---------- columns : list[zipline.pipeline.data.BoundColumn] Columns to be loaded by ``load_adjusted_array``. Returns ------- ohlcv_columns : list[zipline.pipeline.data.BoundColumn] Price and volume columns from ``columns``. currency_columns : list[zipline.pipeline.data.BoundColumn] Currency code column from ``columns``, if present. """ currency_name = EquityPricing.currency.name ohlcv = [] currency = [] for c in columns: if c.name == currency_name: currency.append(c) else: ohlcv.append(c) return ohlcv, currency # Backwards compat alias. USEquityPricingLoader = EquityPricingLoader
Package: zipline-trader | Path: /zipline_trader-1.6.1-cp36-cp36m-win32.whl/zipline/pipeline/loaders/equity_pricing_loader.py | File: equity_pricing_loader.py
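The key behavioral detail in load_adjusted_array above is the one-session shift: data shown to an algorithm on day N is the raw bar recorded on day N - 1. The standalone sketch below mirrors that shift with a plain business-day index standing in for domain.all_sessions(); the dates and variable names are illustrative only, not part of the module.

import pandas as pd

# Business days standing in for the trading sessions of the pipeline domain.
sessions = pd.bdate_range("2021-01-04", "2021-01-15")

# Dates on which the user's algorithm is shown data.
query_dates = sessions[3:6]            # 2021-01-07 .. 2021-01-11

# What shift_dates(sessions, query_dates[0], query_dates[-1], shift=1)
# would hand to the raw price reader: the same window moved back one session.
shifted = sessions[2:5]                # 2021-01-06 .. 2021-01-08

for shown, loaded in zip(query_dates, shifted):
    print(f"shown on {shown.date()} -> loaded from raw bar of {loaded.date()}")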
from interface import implements from numpy import ( arange, array, eye, float64, full, iinfo, nan, uint32, ) from numpy.random import RandomState from pandas import DataFrame, Timestamp from six import iteritems from sqlite3 import connect as sqlite3_connect from .base import PipelineLoader from .frame import DataFrameLoader from zipline.data.adjustments import ( SQLiteAdjustmentReader, SQLiteAdjustmentWriter, ) from zipline.data.bcolz_daily_bars import US_EQUITY_PRICING_BCOLZ_COLUMNS from zipline.utils.numpy_utils import ( bool_dtype, datetime64ns_dtype, float64_dtype, int64_dtype, object_dtype, ) UINT_32_MAX = iinfo(uint32).max def nanos_to_seconds(nanos): return nanos / (1000 * 1000 * 1000) class PrecomputedLoader(implements(PipelineLoader)): """ Synthetic PipelineLoader that uses a pre-computed array for each column. Parameters ---------- values : dict Map from column to values to use for that column. Values can be anything that can be passed as the first positional argument to a DataFrame whose indices are ``dates`` and ``sids`` dates : iterable[datetime-like] Row labels for input data. Can be anything that pd.DataFrame will coerce to a DatetimeIndex. sids : iterable[int-like] Column labels for input data. Can be anything that pd.DataFrame will coerce to an Int64Index. Notes ----- Adjustments are unsupported by this loader. """ def __init__(self, constants, dates, sids): loaders = {} for column, const in iteritems(constants): frame = DataFrame( const, index=dates, columns=sids, dtype=column.dtype, ) loaders[column] = DataFrameLoader( column=column, baseline=frame, adjustments=None, ) self._loaders = loaders def load_adjusted_array(self, domain, columns, dates, sids, mask): """ Load by delegating to sub-loaders. """ out = {} for col in columns: try: loader = self._loaders.get(col) if loader is None: loader = self._loaders[col.unspecialize()] except KeyError: raise ValueError("Couldn't find loader for %s" % col) out.update( loader.load_adjusted_array(domain, [col], dates, sids, mask) ) return out class EyeLoader(PrecomputedLoader): """ A PrecomputedLoader that emits arrays containing 1s on the diagonal and 0s elsewhere. Parameters ---------- columns : list[BoundColumn] Columns that this loader should know about. dates : iterable[datetime-like] Same as PrecomputedLoader. sids : iterable[int-like] Same as PrecomputedLoader """ def __init__(self, columns, dates, sids): shape = (len(dates), len(sids)) super(EyeLoader, self).__init__( {column: eye(shape, dtype=column.dtype) for column in columns}, dates, sids, ) class SeededRandomLoader(PrecomputedLoader): """ A PrecomputedLoader that emits arrays randomly-generated with a given seed. Parameters ---------- seed : int Seed for numpy.random.RandomState. columns : list[BoundColumn] Columns that this loader should know about. dates : iterable[datetime-like] Same as PrecomputedLoader. sids : iterable[int-like] Same as PrecomputedLoader """ def __init__(self, seed, columns, dates, sids): self._seed = seed super(SeededRandomLoader, self).__init__( {c: self.values(c.dtype, dates, sids) for c in columns}, dates, sids, ) def values(self, dtype, dates, sids): """ Make a random array of shape (len(dates), len(sids)) with ``dtype``. """ shape = (len(dates), len(sids)) return { datetime64ns_dtype: self._datetime_values, float64_dtype: self._float_values, int64_dtype: self._int_values, bool_dtype: self._bool_values, object_dtype: self._object_values, }[dtype](shape) @property def state(self): """ Make a new RandomState from our seed. 
This ensures that every call to _*_values produces the same output every time for a given SeededRandomLoader instance. """ return RandomState(self._seed) def _float_values(self, shape): """ Return uniformly-distributed floats between -0.0 and 100.0. """ return self.state.uniform(low=0.0, high=100.0, size=shape) def _int_values(self, shape): """ Return uniformly-distributed integers between 0 and 100. """ return (self.state.randint(low=0, high=100, size=shape) .astype('int64')) # default is system int def _datetime_values(self, shape): """ Return uniformly-distributed dates in 2014. """ start = Timestamp('2014', tz='UTC').asm8 offsets = self.state.randint( low=0, high=364, size=shape, ).astype('timedelta64[D]') return start + offsets def _bool_values(self, shape): """ Return uniformly-distributed True/False values. """ return self.state.randn(*shape) < 0 def _object_values(self, shape): res = self._int_values(shape).astype(str).astype(object) return res OHLCV = ('open', 'high', 'low', 'close', 'volume') OHLC = ('open', 'high', 'low', 'close') PSEUDO_EPOCH = Timestamp('2000-01-01', tz='UTC') def asset_start(asset_info, asset): ret = asset_info.loc[asset]['start_date'] if ret.tz is None: ret = ret.tz_localize('UTC') assert ret.tzname() == 'UTC', "Unexpected non-UTC timestamp" return ret def asset_end(asset_info, asset): ret = asset_info.loc[asset]['end_date'] if ret.tz is None: ret = ret.tz_localize('UTC') assert ret.tzname() == 'UTC', "Unexpected non-UTC timestamp" return ret def make_bar_data(asset_info, calendar, holes=None): """ For a given asset/date/column combination, we generate a corresponding raw value using the following formula for OHLCV columns: data(asset, date, column) = (100,000 * asset_id) + (10,000 * column_num) + (date - Jan 1 2000).days # ~6000 for 2015 where: column_num('open') = 0 column_num('high') = 1 column_num('low') = 2 column_num('close') = 3 column_num('volume') = 4 We use days since Jan 1, 2000 to guarantee that there are no collisions while also the produced values smaller than UINT32_MAX / 1000. For 'day' and 'id', we use the standard format expected by the base class. Parameters ---------- asset_info : DataFrame DataFrame with asset_id as index and 'start_date'/'end_date' columns. calendar : pd.DatetimeIndex The trading calendar to use. holes : dict[int -> tuple[pd.Timestamps]], optional A dict mapping asset ids to the tuple of dates that should have no data for that asset in the output. Default is no holes. Yields ------ p : (int, pd.DataFrame) A sid, data pair to be passed to BcolzDailyDailyBarWriter.write """ assert ( # Using .value here to avoid having to care about UTC-aware dates. PSEUDO_EPOCH.value < calendar.normalize().min().value <= asset_info['start_date'].min().value ), "calendar.min(): %s\nasset_info['start_date'].min(): %s" % ( calendar.min(), asset_info['start_date'].min(), ) assert (asset_info['start_date'] < asset_info['end_date']).all() def _raw_data_for_asset(asset_id): """ Generate 'raw' data that encodes information about the asset. See docstring for a description of the data format. """ # Get the dates for which this asset existed according to our asset # info. datetimes = calendar[calendar.slice_indexer( asset_start(asset_info, asset_id), asset_end(asset_info, asset_id), )] data = full( (len(datetimes), len(US_EQUITY_PRICING_BCOLZ_COLUMNS)), asset_id * 100 * 1000, dtype=uint32, ) # Add 10,000 * column-index to OHLCV columns data[:, :5] += arange(5, dtype=uint32) * 1000 # Add days since Jan 1 2001 for OHLCV columns. 
data[:, :5] += (datetimes - PSEUDO_EPOCH).days[:, None].astype(uint32) frame = DataFrame( data, index=datetimes, columns=US_EQUITY_PRICING_BCOLZ_COLUMNS, ) if holes is not None and asset_id in holes: for dt in holes[asset_id]: frame.loc[dt, OHLC] = nan frame.loc[dt, ['volume']] = 0 frame['day'] = nanos_to_seconds(datetimes.asi8) frame['id'] = asset_id return frame for asset in asset_info.index: yield asset, _raw_data_for_asset(asset) def expected_bar_value(asset_id, date, colname): """ Check that the raw value for an asset/date/column triple is as expected. Used by tests to verify data written by a writer. """ from_asset = asset_id * 100000 from_colname = OHLCV.index(colname) * 1000 from_date = (date - PSEUDO_EPOCH).days return from_asset + from_colname + from_date def expected_bar_value_with_holes(asset_id, date, colname, holes, missing_value): # Explicit holes are filled with the missing value. if asset_id in holes and date in holes[asset_id]: return missing_value return expected_bar_value(asset_id, date, colname) def expected_bar_values_2d(dates, assets, asset_info, colname, holes=None): """ Return an 2D array containing cls.expected_value(asset_id, date, colname) for each date/asset pair in the inputs. Missing locs are filled with 0 for volume and NaN for price columns: - Values before/after an asset's lifetime. - Values for asset_ids not contained in asset_info. - Locs defined in `holes`. """ if colname == 'volume': dtype = uint32 missing = 0 else: dtype = float64 missing = float('nan') data = full((len(dates), len(assets)), missing, dtype=dtype) for j, asset in enumerate(assets): # Use missing values when asset_id is not contained in asset_info. if asset not in asset_info.index: continue start = asset_start(asset_info, asset) end = asset_end(asset_info, asset) for i, date in enumerate(dates): # No value expected for dates outside the asset's start/end # date. if not (start <= date <= end): continue if holes is not None: expected = expected_bar_value_with_holes( asset, date, colname, holes, missing, ) else: expected = expected_bar_value(asset, date, colname) data[i, j] = expected return data class NullAdjustmentReader(SQLiteAdjustmentReader): """ A SQLiteAdjustmentReader that stores no adjustments and uses in-memory SQLite. """ def __init__(self): conn = sqlite3_connect(':memory:') writer = SQLiteAdjustmentWriter(conn, None, None) empty = DataFrame({ 'sid': array([], dtype=uint32), 'effective_date': array([], dtype=uint32), 'ratio': array([], dtype=float), }) empty_dividends = DataFrame({ 'sid': array([], dtype=uint32), 'amount': array([], dtype=float64), 'record_date': array([], dtype='datetime64[ns]'), 'ex_date': array([], dtype='datetime64[ns]'), 'declared_date': array([], dtype='datetime64[ns]'), 'pay_date': array([], dtype='datetime64[ns]'), }) writer.write(splits=empty, mergers=empty, dividends=empty_dividends) super(NullAdjustmentReader, self).__init__(conn)
Package: zipline-trader | Path: /zipline_trader-1.6.1-cp36-cp36m-win32.whl/zipline/pipeline/loaders/synthetic.py | File: synthetic.py
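The synthetic OHLCV values written by make_bar_data are meant to be reverse-engineered by tests through expected_bar_value. As a concrete check, the formula is restated here verbatim (note that the code applies 1,000 per column index, even though the module docstring mentions 10,000) together with one worked value:

import pandas as pd

PSEUDO_EPOCH = pd.Timestamp("2000-01-01", tz="UTC")
OHLCV = ("open", "high", "low", "close", "volume")

def expected_bar_value(asset_id, date, colname):
    # value = 100,000 * asset_id + 1,000 * column_index + days since 2000-01-01
    return (asset_id * 100_000
            + OHLCV.index(colname) * 1_000
            + (date - PSEUDO_EPOCH).days)

date = pd.Timestamp("2014-06-02", tz="UTC")
print(expected_bar_value(2, date, "close"))   # 200,000 + 3,000 + 5,266 = 208,266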
from functools import partial from interface import implements from numpy import ( ix_, zeros, ) from pandas import ( DataFrame, DatetimeIndex, Index, Int64Index, ) from zipline.lib.adjusted_array import AdjustedArray from zipline.lib.adjustment import make_adjustment_from_labels from zipline.utils.numpy_utils import as_column from .base import PipelineLoader ADJUSTMENT_COLUMNS = Index([ 'sid', 'value', 'kind', 'start_date', 'end_date', 'apply_date', ]) class DataFrameLoader(implements(PipelineLoader)): """ A PipelineLoader that reads its input from DataFrames. Mostly useful for testing, but can also be used for real work if your data fits in memory. Parameters ---------- column : zipline.pipeline.data.BoundColumn The column whose data is loadable by this loader. baseline : pandas.DataFrame A DataFrame with index of type DatetimeIndex and columns of type Int64Index. Dates should be labelled with the first date on which a value would be **available** to an algorithm. This means that OHLCV data should generally be shifted back by a trading day before being supplied to this class. adjustments : pandas.DataFrame, default=None A DataFrame with the following columns: sid : int value : any kind : int (zipline.pipeline.loaders.frame.ADJUSTMENT_TYPES) start_date : datetime64 (can be NaT) end_date : datetime64 (must be set) apply_date : datetime64 (must be set) The default of None is interpreted as "no adjustments to the baseline". """ def __init__(self, column, baseline, adjustments=None): self.column = column self.baseline = baseline.values.astype(self.column.dtype) self.dates = baseline.index self.assets = baseline.columns if adjustments is None: adjustments = DataFrame( index=DatetimeIndex([]), columns=ADJUSTMENT_COLUMNS, ) else: # Ensure that columns are in the correct order. adjustments = adjustments.reindex(ADJUSTMENT_COLUMNS, axis=1) adjustments.sort_values(['apply_date', 'sid'], inplace=True) self.adjustments = adjustments self.adjustment_apply_dates = DatetimeIndex(adjustments.apply_date) self.adjustment_end_dates = DatetimeIndex(adjustments.end_date) self.adjustment_sids = Int64Index(adjustments.sid) def format_adjustments(self, dates, assets): """ Build a dict of Adjustment objects in the format expected by AdjustedArray. Returns a dict of the form: { # Integer index into `dates` for the date on which we should # apply the list of adjustments. 1 : [ Float64Multiply(first_row=2, last_row=4, col=3, value=0.5), Float64Overwrite(first_row=3, last_row=5, col=1, value=2.0), ... ], ... } """ make_adjustment = partial(make_adjustment_from_labels, dates, assets) min_date, max_date = dates[[0, -1]] # TODO: Consider porting this to Cython. if len(self.adjustments) == 0: return {} # Mask for adjustments whose apply_dates are in the requested window of # dates. date_bounds = self.adjustment_apply_dates.slice_indexer( min_date, max_date, ) dates_filter = zeros(len(self.adjustments), dtype='bool') dates_filter[date_bounds] = True # Ignore adjustments whose apply_date is in range, but whose end_date # is out of range. dates_filter &= (self.adjustment_end_dates >= min_date) # Mask for adjustments whose sids are in the requested assets. sids_filter = self.adjustment_sids.isin(assets.values) adjustments_to_use = self.adjustments.loc[ dates_filter & sids_filter ].set_index('apply_date') # For each apply_date on which we have an adjustment, compute # the integer index of that adjustment's apply_date in `dates`. # Then build a list of Adjustment objects for that apply_date. 
# This logic relies on the sorting applied on the previous line. out = {} previous_apply_date = object() for row in adjustments_to_use.itertuples(): # This expansion depends on the ordering of the DataFrame columns, # defined above. apply_date, sid, value, kind, start_date, end_date = row if apply_date != previous_apply_date: # Get the next apply date if no exact match. row_loc = dates.get_loc(apply_date, method='bfill') current_date_adjustments = out[row_loc] = [] previous_apply_date = apply_date # Look up the approprate Adjustment constructor based on the value # of `kind`. current_date_adjustments.append( make_adjustment(start_date, end_date, sid, kind, value) ) return out def load_adjusted_array(self, domain, columns, dates, sids, mask): """ Load data from our stored baseline. """ if len(columns) != 1: raise ValueError( "Can't load multiple columns with DataFrameLoader" ) column = columns[0] self._validate_input_column(column) date_indexer = self.dates.get_indexer(dates) assets_indexer = self.assets.get_indexer(sids) # Boolean arrays with True on matched entries good_dates = (date_indexer != -1) good_assets = (assets_indexer != -1) data = self.baseline[ix_(date_indexer, assets_indexer)] mask = (good_assets & as_column(good_dates)) & mask # Mask out requested columns/rows that didn't match. data[~mask] = column.missing_value return { column: AdjustedArray( # Pull out requested columns/rows from our baseline data. data=data, adjustments=self.format_adjustments(dates, sids), missing_value=column.missing_value, ), } def _validate_input_column(self, column): """Make sure a passed column is our column. """ if column != self.column and column.unspecialize() != self.column: raise ValueError("Can't load unknown column %s" % column)
Package: zipline-trader | Path: /zipline_trader-1.6.1-cp36-cp36m-win32.whl/zipline/pipeline/loaders/frame.py | File: frame.py
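The core of DataFrameLoader.load_adjusted_array above is an indexer lookup against the stored baseline followed by masking of unmatched rows and columns. Below is a self-contained pandas/numpy sketch of that pattern, with a made-up 4x3 baseline and NaN standing in for column.missing_value:

import numpy as np
import pandas as pd

# Made-up baseline: 4 dates x 3 sids.
baseline = pd.DataFrame(
    np.arange(12, dtype=float).reshape(4, 3),
    index=pd.date_range("2021-01-04", periods=4),
    columns=[1, 2, 3],   # sids
)

requested_dates = pd.DatetimeIndex(["2021-01-05", "2021-01-06", "2021-02-01"])
requested_sids = pd.Index([2, 99])   # sid 99 is unknown to the baseline

date_indexer = baseline.index.get_indexer(requested_dates)    # [1, 2, -1]
sid_indexer = baseline.columns.get_indexer(requested_sids)    # [1, -1]

# ix_ builds the cross-product lookup, as in load_adjusted_array.
data = baseline.values[np.ix_(date_indexer, sid_indexer)]

# -1 marks "not found"; those cells are overwritten with the missing value.
good = (date_indexer != -1)[:, None] & (sid_indexer != -1)[None, :]
data[~good] = np.nan
print(data)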
import numpy as np import pandas as pd from zipline.errors import NoFurtherDataError from zipline.pipeline.common import TS_FIELD_NAME, SID_FIELD_NAME from zipline.utils.numpy_utils import categorical_dtype def is_sorted_ascending(a): """Check if a numpy array is sorted.""" return (np.fmax.accumulate(a) <= a).all() def validate_event_metadata(event_dates, event_timestamps, event_sids): assert is_sorted_ascending(event_dates), "event dates must be sorted" assert len(event_sids) == len(event_dates) == len(event_timestamps), \ "mismatched arrays: %d != %d != %d" % ( len(event_sids), len(event_dates), len(event_timestamps), ) def next_event_indexer(all_dates, data_query_cutoff, all_sids, event_dates, event_timestamps, event_sids): """ Construct an index array that, when applied to an array of values, produces a 2D array containing the values associated with the next event for each sid at each moment in time. Locations where no next event was known will be filled with -1. Parameters ---------- all_dates : ndarray[datetime64[ns], ndim=1] Row labels for the target output. data_query_cutoff : pd.DatetimeIndex The boundaries for the given trading sessions in ``all_dates``. all_sids : ndarray[int, ndim=1] Column labels for the target output. event_dates : ndarray[datetime64[ns], ndim=1] Dates on which each input events occurred/will occur. ``event_dates`` must be in sorted order, and may not contain any NaT values. event_timestamps : ndarray[datetime64[ns], ndim=1] Dates on which we learned about each input event. event_sids : ndarray[int, ndim=1] Sids assocated with each input event. Returns ------- indexer : ndarray[int, ndim=2] An array of shape (len(all_dates), len(all_sids)) of indices into ``event_{dates,timestamps,sids}``. """ validate_event_metadata(event_dates, event_timestamps, event_sids) out = np.full((len(all_dates), len(all_sids)), -1, dtype=np.int64) sid_ixs = all_sids.searchsorted(event_sids) # side='right' here ensures that we include the event date itself # if it's in all_dates. dt_ixs = all_dates.searchsorted(event_dates, side='right') ts_ixs = data_query_cutoff.searchsorted(event_timestamps, side='right') # Walk backward through the events, writing the index of the event into # slots ranging from the event's timestamp to its asof. This depends for # correctness on the fact that event_dates is sorted in ascending order, # because we need to overwrite later events with earlier ones if their # eligible windows overlap. for i in range(len(event_sids) - 1, -1, -1): start_ix = ts_ixs[i] end_ix = dt_ixs[i] out[start_ix:end_ix, sid_ixs[i]] = i return out def previous_event_indexer(data_query_cutoff_times, all_sids, event_dates, event_timestamps, event_sids): """ Construct an index array that, when applied to an array of values, produces a 2D array containing the values associated with the previous event for each sid at each moment in time. Locations where no previous event was known will be filled with -1. Parameters ---------- data_query_cutoff : pd.DatetimeIndex The boundaries for the given trading sessions. all_dates : ndarray[datetime64[ns], ndim=1] Row labels for the target output. all_sids : ndarray[int, ndim=1] Column labels for the target output. event_dates : ndarray[datetime64[ns], ndim=1] Dates on which each input events occurred/will occur. ``event_dates`` must be in sorted order, and may not contain any NaT values. event_timestamps : ndarray[datetime64[ns], ndim=1] Dates on which we learned about each input event. 
event_sids : ndarray[int, ndim=1] Sids assocated with each input event. Returns ------- indexer : ndarray[int, ndim=2] An array of shape (len(all_dates), len(all_sids)) of indices into ``event_{dates,timestamps,sids}``. """ validate_event_metadata(event_dates, event_timestamps, event_sids) out = np.full( (len(data_query_cutoff_times), len(all_sids)), -1, dtype=np.int64, ) eff_dts = np.maximum(event_dates, event_timestamps) sid_ixs = all_sids.searchsorted(event_sids) dt_ixs = data_query_cutoff_times.searchsorted(eff_dts, side='right') # Walk backwards through the events, writing the index of the event into # slots ranging from max(event_date, event_timestamp) to the start of the # previously-written event. This depends for correctness on the fact that # event_dates is sorted in ascending order, because we need to have written # later events so we know where to stop forward-filling earlier events. last_written = {} for i in range(len(event_dates) - 1, -1, -1): sid_ix = sid_ixs[i] dt_ix = dt_ixs[i] out[dt_ix:last_written.get(sid_ix, None), sid_ix] = i last_written[sid_ix] = dt_ix return out def last_in_date_group(df, data_query_cutoff_times, assets, reindex=True, have_sids=True, extra_groupers=None): """ Determine the last piece of information known on each date in the date index for each group. Input df MUST be sorted such that the correct last item is chosen from each group. Parameters ---------- df : pd.DataFrame The DataFrame containing the data to be grouped. Must be sorted so that the correct last item is chosen from each group. data_query_cutoff_times : pd.DatetimeIndex The dates to use for grouping and reindexing. assets : pd.Int64Index The assets that should be included in the column multiindex. reindex : bool Whether or not the DataFrame should be reindexed against the date index. This will add back any dates to the index that were grouped away. have_sids : bool Whether or not the DataFrame has sids. If it does, they will be used in the groupby. extra_groupers : list of str Any extra field names that should be included in the groupby. Returns ------- last_in_group : pd.DataFrame A DataFrame with dates as the index and fields used in the groupby as levels of a multiindex of columns. """ idx = [data_query_cutoff_times[data_query_cutoff_times.searchsorted( pd.DatetimeIndex(df[TS_FIELD_NAME]), )]] if have_sids: idx += [SID_FIELD_NAME] if extra_groupers is None: extra_groupers = [] idx += extra_groupers last_in_group = df.drop(TS_FIELD_NAME, axis=1).groupby( idx, sort=False, ).last() # For the number of things that we're grouping by (except TS), unstack # the df. Done this way because of an unresolved pandas bug whereby # passing a list of levels with mixed dtypes to unstack causes the # resulting DataFrame to have all object-type columns. for _ in range(len(idx) - 1): last_in_group = last_in_group.unstack(-1) if reindex: if have_sids: cols = last_in_group.columns last_in_group = last_in_group.reindex( index=data_query_cutoff_times, columns=pd.MultiIndex.from_product( tuple(cols.levels[0:len(extra_groupers) + 1]) + (assets,), names=cols.names, ), ) else: last_in_group = last_in_group.reindex(data_query_cutoff_times) return last_in_group def ffill_across_cols(df, columns, name_map): """ Forward fill values in a DataFrame with special logic to handle cases that pd.DataFrame.ffill cannot and cast columns to appropriate types. Parameters ---------- df : pd.DataFrame The DataFrame to do forward-filling on. 
columns : list of BoundColumn The BoundColumns that correspond to columns in the DataFrame to which special filling and/or casting logic should be applied. name_map: map of string -> string Mapping from the name of each BoundColumn to the associated column name in `df`. """ df.ffill(inplace=True) # Fill in missing values specified by each column. This is made # significantly more complex by the fact that we need to work around # two pandas issues: # 1) When we have sids, if there are no records for a given sid for any # dates, pandas will generate a column full of NaNs for that sid. # This means that some of the columns in `dense_output` are now # float instead of the intended dtype, so we have to coerce back to # our expected type and convert NaNs into the desired missing value. # 2) DataFrame.ffill assumes that receiving None as a fill-value means # that no value was passed. Consequently, there's no way to tell # pandas to replace NaNs in an object column with None using fillna, # so we have to roll our own instead using df.where. for column in columns: column_name = name_map[column.name] # Special logic for strings since `fillna` doesn't work if the # missing value is `None`. if column.dtype == categorical_dtype: df[column_name] = df[ column.name ].where(pd.notnull(df[column_name]), column.missing_value) else: # We need to execute `fillna` before `astype` in case the # column contains NaNs and needs to be cast to bool or int. # This is so that the NaNs are replaced first, since pandas # can't convert NaNs for those types. df[column_name] = df[ column_name ].fillna(column.missing_value) #].fillna(column.missing_value).astype(column.dtype) def shift_dates(dates, start_date, end_date, shift): """ Shift dates of a pipeline query back by ``shift`` days. Parameters ---------- dates : DatetimeIndex All known dates. start_date : pd.Timestamp Start date of the pipeline query. end_date : pd.Timestamp End date of the pipeline query. shift : int The number of days to shift back the query dates. Returns ------- shifted : pd.DatetimeIndex The range [start_date, end_date] from ``dates``, shifted backwards by ``shift`` days. Raises ------ ValueError If ``start_date`` or ``end_date`` is not in ``dates``. NoFurtherDataError If shifting ``start_date`` back by ``shift`` days would push it off the end of ``dates``. """ try: start = dates.get_loc(start_date) except KeyError: if start_date < dates[0]: raise NoFurtherDataError( msg=( "Pipeline Query requested data starting on {query_start}, " "but first known date is {calendar_start}" ).format( query_start=str(start_date), calendar_start=str(dates[0]), ) ) else: raise ValueError("Query start %s not in calendar" % start_date) # Make sure that shifting doesn't push us out of the calendar. if start < shift: raise NoFurtherDataError( msg=( "Pipeline Query requested data from {shift}" " days before {query_start}, but first known date is only " "{start} days earlier." ).format(shift=shift, query_start=start_date, start=start), ) try: end = dates.get_loc(end_date) except KeyError: if end_date > dates[-1]: raise NoFurtherDataError( msg=( "Pipeline Query requesting data up to {query_end}, " "but last known date is {calendar_end}" ).format( query_end=end_date, calendar_end=dates[-1], ) ) else: raise ValueError("Query end %s not in calendar" % end_date) return dates[start - shift:end - shift + 1] # +1 to be inclusive
Package: zipline-trader | Path: /zipline_trader-1.6.1-cp36-cp36m-win32.whl/zipline/pipeline/loaders/utils.py | File: utils.py
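previous_event_indexer is easiest to understand on a tiny input. The sketch below (which assumes zipline-trader is importable in the current environment) builds one sid with two events and prints the resulting index column: -1 before any event has occurred and become visible, then the index of the most recent such event, forward-filled.

import numpy as np
import pandas as pd
from zipline.pipeline.loaders.utils import previous_event_indexer

sessions = pd.date_range("2021-01-04", periods=6)   # stand-in data-query cutoff times
all_sids = np.array([1])

event_dates = np.array(["2021-01-05", "2021-01-07"], dtype="datetime64[ns]")
event_timestamps = np.array(["2021-01-04", "2021-01-06"], dtype="datetime64[ns]")
event_sids = np.array([1, 1])

indexer = previous_event_indexer(
    sessions, all_sids, event_dates, event_timestamps, event_sids,
)
print(indexer.ravel())    # [-1 -1  0  0  1  1]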
from abc import abstractmethod, abstractproperty from interface import implements import numpy as np import pandas as pd from six import viewvalues from toolz import groupby from zipline.lib.adjusted_array import AdjustedArray from zipline.lib.adjustment import ( Datetime641DArrayOverwrite, Datetime64Overwrite, Float641DArrayOverwrite, Float64Multiply, Float64Overwrite, ) from zipline.pipeline.common import ( EVENT_DATE_FIELD_NAME, FISCAL_QUARTER_FIELD_NAME, FISCAL_YEAR_FIELD_NAME, SID_FIELD_NAME, TS_FIELD_NAME, ) from zipline.pipeline.loaders.base import PipelineLoader from zipline.utils.numpy_utils import datetime64ns_dtype, float64_dtype from zipline.pipeline.loaders.utils import ( ffill_across_cols, last_in_date_group, ) INVALID_NUM_QTRS_MESSAGE = "Passed invalid number of quarters %s; " \ "must pass a number of quarters >= 0" NEXT_FISCAL_QUARTER = 'next_fiscal_quarter' NEXT_FISCAL_YEAR = 'next_fiscal_year' NORMALIZED_QUARTERS = 'normalized_quarters' PREVIOUS_FISCAL_QUARTER = 'previous_fiscal_quarter' PREVIOUS_FISCAL_YEAR = 'previous_fiscal_year' SHIFTED_NORMALIZED_QTRS = 'shifted_normalized_quarters' SIMULATION_DATES = 'dates' def normalize_quarters(years, quarters): return years * 4 + quarters - 1 def split_normalized_quarters(normalized_quarters): years = normalized_quarters // 4 quarters = normalized_quarters % 4 return years, quarters + 1 # These metadata columns are used to align event indexers. metadata_columns = frozenset({ TS_FIELD_NAME, SID_FIELD_NAME, EVENT_DATE_FIELD_NAME, FISCAL_QUARTER_FIELD_NAME, FISCAL_YEAR_FIELD_NAME, }) def required_estimates_fields(columns): """ Compute the set of resource columns required to serve `columns`. """ # We also expect any of the field names that our loadable columns # are mapped to. return metadata_columns.union(viewvalues(columns)) def validate_column_specs(events, columns): """ Verify that the columns of ``events`` can be used by a EarningsEstimatesLoader to serve the BoundColumns described by `columns`. """ required = required_estimates_fields(columns) received = set(events.columns) missing = required - received if missing: raise ValueError( "EarningsEstimatesLoader missing required columns {missing}.\n" "Got Columns: {received}\n" "Expected Columns: {required}".format( missing=sorted(missing), received=sorted(received), required=sorted(required), ) ) def add_new_adjustments(adjustments_dict, adjustments, column_name, ts): try: adjustments_dict[column_name][ts].extend(adjustments) except KeyError: adjustments_dict[column_name][ts] = adjustments class EarningsEstimatesLoader(implements(PipelineLoader)): """ An abstract pipeline loader for estimates data that can load data a variable number of quarters forwards/backwards from calendar dates depending on the `num_announcements` attribute of the columns' dataset. If split adjustments are to be applied, a loader, split-adjusted columns, and the split-adjusted asof-date must be supplied. Parameters ---------- estimates : pd.DataFrame The raw estimates data. ``estimates`` must contain at least 5 columns: sid : int64 The asset id associated with each estimate. event_date : datetime64[ns] The date on which the event that the estimate is for will/has occurred.. timestamp : datetime64[ns] The datetime where we learned about the estimate. fiscal_quarter : int64 The quarter during which the event has/will occur. fiscal_year : int64 The year during which the event has/will occur. 
name_map : dict[str -> str] A map of names of BoundColumns that this loader will load to the names of the corresponding columns in `events`. """ def __init__(self, estimates, name_map): validate_column_specs( estimates, name_map ) self.estimates = estimates[ estimates[EVENT_DATE_FIELD_NAME].notnull() & estimates[FISCAL_QUARTER_FIELD_NAME].notnull() & estimates[FISCAL_YEAR_FIELD_NAME].notnull() ] self.estimates[NORMALIZED_QUARTERS] = normalize_quarters( self.estimates[FISCAL_YEAR_FIELD_NAME], self.estimates[FISCAL_QUARTER_FIELD_NAME], ) self.array_overwrites_dict = { datetime64ns_dtype: Datetime641DArrayOverwrite, float64_dtype: Float641DArrayOverwrite, } self.scalar_overwrites_dict = { datetime64ns_dtype: Datetime64Overwrite, float64_dtype: Float64Overwrite, } self.name_map = name_map @abstractmethod def get_zeroth_quarter_idx(self, stacked_last_per_qtr): raise NotImplementedError('get_zeroth_quarter_idx') @abstractmethod def get_shifted_qtrs(self, zero_qtrs, num_announcements): raise NotImplementedError('get_shifted_qtrs') @abstractmethod def create_overwrite_for_estimate(self, column, column_name, last_per_qtr, next_qtr_start_idx, requested_quarter, sid, sid_idx, col_to_split_adjustments, split_adjusted_asof_idx): raise NotImplementedError('create_overwrite_for_estimate') @abstractproperty def searchsorted_side(self): return NotImplementedError('searchsorted_side') def get_requested_quarter_data(self, zero_qtr_data, zeroth_quarter_idx, stacked_last_per_qtr, num_announcements, dates): """ Selects the requested data for each date. Parameters ---------- zero_qtr_data : pd.DataFrame The 'time zero' data for each calendar date per sid. zeroth_quarter_idx : pd.Index An index of calendar dates, sid, and normalized quarters, for only the rows that have a next or previous earnings estimate. stacked_last_per_qtr : pd.DataFrame The latest estimate known with the dates, normalized quarter, and sid as the index. num_announcements : int The number of annoucements out the user requested relative to each date in the calendar dates. dates : pd.DatetimeIndex The calendar dates for which estimates data is requested. Returns -------- requested_qtr_data : pd.DataFrame The DataFrame with the latest values for the requested quarter for all columns; `dates` are the index and columns are a MultiIndex with sids at the top level and the dataset columns on the bottom. """ zero_qtr_data_idx = zero_qtr_data.index requested_qtr_idx = pd.MultiIndex.from_arrays( [ zero_qtr_data_idx.get_level_values(0), zero_qtr_data_idx.get_level_values(1), self.get_shifted_qtrs( zeroth_quarter_idx.get_level_values( NORMALIZED_QUARTERS, ), num_announcements, ), ], names=[ zero_qtr_data_idx.names[0], zero_qtr_data_idx.names[1], SHIFTED_NORMALIZED_QTRS, ], ) requested_qtr_data = stacked_last_per_qtr.loc[requested_qtr_idx] requested_qtr_data = requested_qtr_data.reset_index( SHIFTED_NORMALIZED_QTRS, ) # Calculate the actual year/quarter being requested and add those in # as columns. (requested_qtr_data[FISCAL_YEAR_FIELD_NAME], requested_qtr_data[FISCAL_QUARTER_FIELD_NAME]) = \ split_normalized_quarters( requested_qtr_data[SHIFTED_NORMALIZED_QTRS] ) # Once we're left with just dates as the index, we can reindex by all # dates so that we have a value for each calendar date. return requested_qtr_data.unstack(SID_FIELD_NAME).reindex(dates) def get_split_adjusted_asof_idx(self, dates): """ Compute the index in `dates` where the split-adjusted-asof-date falls. 
This is the date up to which, and including which, we will need to unapply all adjustments for and then re-apply them as they come in. After this date, adjustments are applied as normal. Parameters ---------- dates : pd.DatetimeIndex The calendar dates over which the Pipeline is being computed. Returns ------- split_adjusted_asof_idx : int The index in `dates` at which the data should be split. """ split_adjusted_asof_idx = dates.searchsorted( self._split_adjusted_asof ) # The split-asof date is after the date index. if split_adjusted_asof_idx == len(dates): split_adjusted_asof_idx = len(dates) - 1 elif self._split_adjusted_asof < dates[0].tz_localize(None): split_adjusted_asof_idx = -1 return split_adjusted_asof_idx def collect_overwrites_for_sid(self, group, dates, requested_qtr_data, last_per_qtr, sid_idx, columns, all_adjustments_for_sid, sid): """ Given a sid, collect all overwrites that should be applied for this sid at each quarter boundary. Parameters ---------- group : pd.DataFrame The data for `sid`. dates : pd.DatetimeIndex The calendar dates for which estimates data is requested. requested_qtr_data : pd.DataFrame The DataFrame with the latest values for the requested quarter for all columns. last_per_qtr : pd.DataFrame A DataFrame with a column MultiIndex of [self.estimates.columns, normalized_quarters, sid] that allows easily getting the timeline of estimates for a particular sid for a particular quarter. sid_idx : int The sid's index in the asset index. columns : list of BoundColumn The columns for which the overwrites should be computed. all_adjustments_for_sid : dict[int -> AdjustedArray] A dictionary of the integer index of each timestamp into the date index, mapped to adjustments that should be applied at that index for the given sid (`sid`). This dictionary is modified as adjustments are collected. sid : int The sid for which overwrites should be computed. """ # If data was requested for only 1 date, there can never be any # overwrites, so skip the extra work. if len(dates) == 1: return next_qtr_start_indices = dates.searchsorted( group[EVENT_DATE_FIELD_NAME].values, side=self.searchsorted_side, ) qtrs_with_estimates = group.index.get_level_values( NORMALIZED_QUARTERS ).values for idx in next_qtr_start_indices: if 0 < idx < len(dates): # Find the quarter being requested in the quarter we're # crossing into. requested_quarter = requested_qtr_data[ SHIFTED_NORMALIZED_QTRS, sid, ].iloc[idx] # Only add adjustments if the next quarter starts somewhere # in our date index for this sid. Our 'next' quarter can # never start at index 0; a starting index of 0 means that # the next quarter's event date was NaT. self.create_overwrites_for_quarter( all_adjustments_for_sid, idx, last_per_qtr, qtrs_with_estimates, requested_quarter, sid, sid_idx, columns ) def get_adjustments_for_sid(self, group, dates, requested_qtr_data, last_per_qtr, sid_to_idx, columns, col_to_all_adjustments, **kwargs): """ Parameters ---------- group : pd.DataFrame The data for the given sid. dates : pd.DatetimeIndex The calendar dates for which estimates data is requested. requested_qtr_data : pd.DataFrame The DataFrame with the latest values for the requested quarter for all columns. last_per_qtr : pd.DataFrame A DataFrame with a column MultiIndex of [self.estimates.columns, normalized_quarters, sid] that allows easily getting the timeline of estimates for a particular sid for a particular quarter. sid_to_idx : dict[int -> int] A dictionary mapping sid to he sid's index in the asset index. 
columns : list of BoundColumn The columns for which the overwrites should be computed. col_to_all_adjustments : dict[int -> AdjustedArray] A dictionary of the integer index of each timestamp into the date index, mapped to adjustments that should be applied at that index. This dictionary is for adjustments for ALL sids. It is modified as adjustments are collected. kwargs : Additional arguments used in collecting adjustments; unused here. """ # Collect all adjustments for a given sid. all_adjustments_for_sid = {} sid = int(group.name) self.collect_overwrites_for_sid(group, dates, requested_qtr_data, last_per_qtr, sid_to_idx[sid], columns, all_adjustments_for_sid, sid) self.merge_into_adjustments_for_all_sids( all_adjustments_for_sid, col_to_all_adjustments ) def merge_into_adjustments_for_all_sids(self, all_adjustments_for_sid, col_to_all_adjustments): """ Merge adjustments for a particular sid into a dictionary containing adjustments for all sids. Parameters ---------- all_adjustments_for_sid : dict[int -> AdjustedArray] All adjustments for a particular sid. col_to_all_adjustments : dict[int -> AdjustedArray] All adjustments for all sids. """ for col_name in all_adjustments_for_sid: if col_name not in col_to_all_adjustments: col_to_all_adjustments[col_name] = {} for ts in all_adjustments_for_sid[col_name]: adjs = all_adjustments_for_sid[col_name][ts] add_new_adjustments(col_to_all_adjustments, adjs, col_name, ts) def get_adjustments(self, zero_qtr_data, requested_qtr_data, last_per_qtr, dates, assets, columns, **kwargs): """ Creates an AdjustedArray from the given estimates data for the given dates. Parameters ---------- zero_qtr_data : pd.DataFrame The 'time zero' data for each calendar date per sid. requested_qtr_data : pd.DataFrame The requested quarter data for each calendar date per sid. last_per_qtr : pd.DataFrame A DataFrame with a column MultiIndex of [self.estimates.columns, normalized_quarters, sid] that allows easily getting the timeline of estimates for a particular sid for a particular quarter. dates : pd.DatetimeIndex The calendar dates for which estimates data is requested. assets : pd.Int64Index An index of all the assets from the raw data. columns : list of BoundColumn The columns for which adjustments need to be calculated. kwargs : Additional keyword arguments that should be forwarded to `get_adjustments_for_sid` and to be used in computing adjustments for each sid. Returns ------- col_to_all_adjustments : dict[int -> AdjustedArray] A dictionary of all adjustments that should be applied. """ zero_qtr_data.sort_index(inplace=True) # Here we want to get the LAST record from each group of records # corresponding to a single quarter. This is to ensure that we select # the most up-to-date event date in case the event date changes. quarter_shifts = zero_qtr_data.groupby( level=[SID_FIELD_NAME, NORMALIZED_QUARTERS] ).nth(-1) col_to_all_adjustments = {} sid_to_idx = dict(zip(assets, range(len(assets)))) quarter_shifts.groupby(level=SID_FIELD_NAME).apply( self.get_adjustments_for_sid, dates, requested_qtr_data, last_per_qtr, sid_to_idx, columns, col_to_all_adjustments, **kwargs ) return col_to_all_adjustments def create_overwrites_for_quarter(self, col_to_overwrites, next_qtr_start_idx, last_per_qtr, quarters_with_estimates_for_sid, requested_quarter, sid, sid_idx, columns): """ Add entries to the dictionary of columns to adjustments for the given sid and the given quarter. 
Parameters ---------- col_to_overwrites : dict [column_name -> list of ArrayAdjustment] A dictionary mapping column names to all overwrites for those columns. next_qtr_start_idx : int The index of the first day of the next quarter in the calendar dates. last_per_qtr : pd.DataFrame A DataFrame with a column MultiIndex of [self.estimates.columns, normalized_quarters, sid] that allows easily getting the timeline of estimates for a particular sid for a particular quarter; this is particularly useful for getting adjustments for 'next' estimates. quarters_with_estimates_for_sid : np.array An array of all quarters for which there are estimates for the given sid. requested_quarter : float The quarter for which the overwrite should be created. sid : int The sid for which to create overwrites. sid_idx : int The index of the sid in `assets`. columns : list of BoundColumn The columns for which to create overwrites. """ for col in columns: column_name = self.name_map[col.name] if column_name not in col_to_overwrites: col_to_overwrites[column_name] = {} # If there are estimates for the requested quarter, # overwrite all values going up to the starting index of # that quarter with estimates for that quarter. if requested_quarter in quarters_with_estimates_for_sid: adjs = self.create_overwrite_for_estimate( col, column_name, last_per_qtr, next_qtr_start_idx, requested_quarter, sid, sid_idx, ) add_new_adjustments(col_to_overwrites, adjs, column_name, next_qtr_start_idx) # There are no estimates for the quarter. Overwrite all # values going up to the starting index of that quarter # with the missing value for this column. else: adjs = [self.overwrite_with_null( col, next_qtr_start_idx, sid_idx)] add_new_adjustments(col_to_overwrites, adjs, column_name, next_qtr_start_idx) def overwrite_with_null(self, column, next_qtr_start_idx, sid_idx): return self.scalar_overwrites_dict[column.dtype]( 0, next_qtr_start_idx - 1, sid_idx, sid_idx, column.missing_value ) def load_adjusted_array(self, domain, columns, dates, sids, mask): # Separate out getting the columns' datasets and the datasets' # num_announcements attributes to ensure that we're catching the right # AttributeError. col_to_datasets = {col: col.dataset for col in columns} try: groups = groupby(lambda col: col_to_datasets[col].num_announcements, col_to_datasets) except AttributeError: raise AttributeError("Datasets loaded via the " "EarningsEstimatesLoader must define a " "`num_announcements` attribute that defines " "how many quarters out the loader should load" " the data relative to `dates`.") if any(num_qtr < 0 for num_qtr in groups): raise ValueError( INVALID_NUM_QTRS_MESSAGE % ','.join( str(qtr) for qtr in groups if qtr < 0 ) ) out = {} # To optimize performance, only work below on assets that are # actually in the raw data. data_query_cutoff_times = domain.data_query_cutoff_for_sessions(dates) assets_with_data = set(sids) & set(self.estimates[SID_FIELD_NAME]) last_per_qtr, stacked_last_per_qtr = self.get_last_data_per_qtr( assets_with_data, columns, dates, data_query_cutoff_times, ) # Determine which quarter is immediately next/previous for each # date. 
zeroth_quarter_idx = self.get_zeroth_quarter_idx(stacked_last_per_qtr) zero_qtr_data = stacked_last_per_qtr.loc[zeroth_quarter_idx] for num_announcements, columns in groups.items(): requested_qtr_data = self.get_requested_quarter_data( zero_qtr_data, zeroth_quarter_idx, stacked_last_per_qtr, num_announcements, dates, ) # Calculate all adjustments for the given quarter and accumulate # them for each column. col_to_adjustments = self.get_adjustments( zero_qtr_data, requested_qtr_data, last_per_qtr, dates, sids, columns ) # Lookup the asset indexer once, this is so we can reindex # the assets returned into the assets requested for each column. # This depends on the fact that our column multiindex has the same # sids for each field. This allows us to do the lookup once on # level 1 instead of doing the lookup each time per value in # level 0. asset_indexer = sids.get_indexer_for( requested_qtr_data.columns.levels[1], ) for col in columns: column_name = self.name_map[col.name] # allocate the empty output with the correct missing value output_array = np.full( (len(dates), len(sids)), col.missing_value, dtype=col.dtype, ) # overwrite the missing value with values from the computed # data output_array[ :, asset_indexer, ] = requested_qtr_data[column_name].values out[col] = AdjustedArray( output_array, # There may not be any adjustments at all (e.g. if # len(date) == 1), so provide a default. dict(col_to_adjustments.get(column_name, {})), col.missing_value, ) return out def get_last_data_per_qtr(self, assets_with_data, columns, dates, data_query_cutoff_times): """ Determine the last piece of information we know for each column on each date in the index for each sid and quarter. Parameters ---------- assets_with_data : pd.Index Index of all assets that appear in the raw data given to the loader. columns : iterable of BoundColumn The columns that need to be loaded from the raw data. data_query_cutoff_times : pd.DatetimeIndex The calendar of dates for which data should be loaded. Returns ------- stacked_last_per_qtr : pd.DataFrame A DataFrame indexed by [dates, sid, normalized_quarters] that has the latest information for each row of the index, sorted by event date. last_per_qtr : pd.DataFrame A DataFrame with columns that are a MultiIndex of [ self.estimates.columns, normalized_quarters, sid]. """ # Get a DataFrame indexed by date with a MultiIndex of columns of # [self.estimates.columns, normalized_quarters, sid], where each cell # contains the latest data for that day. last_per_qtr = last_in_date_group( self.estimates, data_query_cutoff_times, assets_with_data, reindex=True, extra_groupers=[NORMALIZED_QUARTERS], ) last_per_qtr.index = dates # Forward fill values for each quarter/sid/dataset column. ffill_across_cols(last_per_qtr, columns, self.name_map) # Stack quarter and sid into the index. 
stacked_last_per_qtr = last_per_qtr.stack( [SID_FIELD_NAME, NORMALIZED_QUARTERS], ) # Set date index name for ease of reference stacked_last_per_qtr.index.set_names( SIMULATION_DATES, level=0, inplace=True, ) stacked_last_per_qtr = stacked_last_per_qtr.sort_values( EVENT_DATE_FIELD_NAME, ) stacked_last_per_qtr[EVENT_DATE_FIELD_NAME] = pd.to_datetime( stacked_last_per_qtr[EVENT_DATE_FIELD_NAME] ) return last_per_qtr, stacked_last_per_qtr class NextEarningsEstimatesLoader(EarningsEstimatesLoader): searchsorted_side = 'right' def create_overwrite_for_estimate(self, column, column_name, last_per_qtr, next_qtr_start_idx, requested_quarter, sid, sid_idx, col_to_split_adjustments=None, split_adjusted_asof_idx=None): return [self.array_overwrites_dict[column.dtype]( 0, next_qtr_start_idx - 1, sid_idx, sid_idx, last_per_qtr[ column_name, requested_quarter, sid, ].values[:next_qtr_start_idx], )] def get_shifted_qtrs(self, zero_qtrs, num_announcements): return zero_qtrs + (num_announcements - 1) def get_zeroth_quarter_idx(self, stacked_last_per_qtr): """ Filters for releases that are on or after each simulation date and determines the next quarter by picking out the upcoming release for each date in the index. Parameters ---------- stacked_last_per_qtr : pd.DataFrame A DataFrame with index of calendar dates, sid, and normalized quarters with each row being the latest estimate for the row's index values, sorted by event date. Returns ------- next_releases_per_date_index : pd.MultiIndex An index of calendar dates, sid, and normalized quarters, for only the rows that have a next event. """ next_releases_per_date = stacked_last_per_qtr.loc[ stacked_last_per_qtr[EVENT_DATE_FIELD_NAME] >= stacked_last_per_qtr.index.get_level_values(SIMULATION_DATES) ].groupby( level=[SIMULATION_DATES, SID_FIELD_NAME], as_index=False, # Here we take advantage of the fact that `stacked_last_per_qtr` is # sorted by event date. ).nth(0) return next_releases_per_date.index class PreviousEarningsEstimatesLoader(EarningsEstimatesLoader): searchsorted_side = 'left' def create_overwrite_for_estimate(self, column, column_name, dates, next_qtr_start_idx, requested_quarter, sid, sid_idx, col_to_split_adjustments=None, split_adjusted_asof_idx=None, split_dict=None): return [self.overwrite_with_null( column, next_qtr_start_idx, sid_idx, )] def get_shifted_qtrs(self, zero_qtrs, num_announcements): return zero_qtrs - (num_announcements - 1) def get_zeroth_quarter_idx(self, stacked_last_per_qtr): """ Filters for releases that are on or after each simulation date and determines the previous quarter by picking out the most recent release relative to each date in the index. Parameters ---------- stacked_last_per_qtr : pd.DataFrame A DataFrame with index of calendar dates, sid, and normalized quarters with each row being the latest estimate for the row's index values, sorted by event date. Returns ------- previous_releases_per_date_index : pd.MultiIndex An index of calendar dates, sid, and normalized quarters, for only the rows that have a previous event. """ previous_releases_per_date = stacked_last_per_qtr.loc[ stacked_last_per_qtr[EVENT_DATE_FIELD_NAME] <= stacked_last_per_qtr.index.get_level_values(SIMULATION_DATES) ].groupby( level=[SIMULATION_DATES, SID_FIELD_NAME], as_index=False, # Here we take advantage of the fact that `stacked_last_per_qtr` is # sorted by event date. 
).nth(-1) return previous_releases_per_date.index def validate_split_adjusted_column_specs(name_map, columns): to_be_split = set(columns) available = set(name_map.keys()) extra = to_be_split - available if extra: raise ValueError( "EarningsEstimatesLoader got the following extra columns to be " "split-adjusted: {extra}.\n" "Got Columns: {to_be_split}\n" "Available Columns: {available}".format( extra=sorted(extra), to_be_split=sorted(to_be_split), available=sorted(available), ) ) class SplitAdjustedEstimatesLoader(EarningsEstimatesLoader): """ Estimates loader that loads data that needs to be split-adjusted. Parameters ---------- split_adjustments_loader : SQLiteAdjustmentReader The loader to use for reading split adjustments. split_adjusted_column_names : iterable of str The column names that should be split-adjusted. split_adjusted_asof : pd.Timestamp The date that separates data into 2 halves: the first half is the set of dates up to and including the split_adjusted_asof date. All adjustments occurring during this first half are applied to all dates in this first half. The second half is the set of dates after the split_adjusted_asof date. All adjustments occurring during this second half are applied sequentially as they appear in the timeline. """ def __init__(self, estimates, name_map, split_adjustments_loader, split_adjusted_column_names, split_adjusted_asof): validate_split_adjusted_column_specs(name_map, split_adjusted_column_names) self._split_adjustments = split_adjustments_loader self._split_adjusted_column_names = split_adjusted_column_names self._split_adjusted_asof = split_adjusted_asof self._split_adjustment_dict = {} super(SplitAdjustedEstimatesLoader, self).__init__( estimates, name_map ) @abstractmethod def collect_split_adjustments(self, adjustments_for_sid, requested_qtr_data, dates, sid, sid_idx, sid_estimates, split_adjusted_asof_idx, pre_adjustments, post_adjustments, requested_split_adjusted_columns): raise NotImplementedError('collect_split_adjustments') def get_adjustments_for_sid(self, group, dates, requested_qtr_data, last_per_qtr, sid_to_idx, columns, col_to_all_adjustments, split_adjusted_asof_idx=None, split_adjusted_cols_for_group=None): """ Collects both overwrites and adjustments for a particular sid. Parameters ---------- split_adjusted_asof_idx : int The integer index of the date on which the data was split-adjusted. split_adjusted_cols_for_group : list of str The names of requested columns that should also be split-adjusted. """ all_adjustments_for_sid = {} sid = int(group.name) self.collect_overwrites_for_sid(group, dates, requested_qtr_data, last_per_qtr, sid_to_idx[sid], columns, all_adjustments_for_sid, sid) (pre_adjustments, post_adjustments) = self.retrieve_split_adjustment_data_for_sid( dates, sid, split_adjusted_asof_idx ) sid_estimates = self.estimates[ self.estimates[SID_FIELD_NAME] == sid ] # We might not have any overwrites but still have # adjustments, and we will need to manually add columns if # that is the case. 
for col_name in split_adjusted_cols_for_group: if col_name not in all_adjustments_for_sid: all_adjustments_for_sid[col_name] = {} self.collect_split_adjustments( all_adjustments_for_sid, requested_qtr_data, dates, sid, sid_to_idx[sid], sid_estimates, split_adjusted_asof_idx, pre_adjustments, post_adjustments, split_adjusted_cols_for_group ) self.merge_into_adjustments_for_all_sids( all_adjustments_for_sid, col_to_all_adjustments ) def get_adjustments(self, zero_qtr_data, requested_qtr_data, last_per_qtr, dates, assets, columns, **kwargs): """ Calculates both split adjustments and overwrites for all sids. """ split_adjusted_cols_for_group = [ self.name_map[col.name] for col in columns if self.name_map[col.name] in self._split_adjusted_column_names ] # Add all splits to the adjustment dict for this sid. split_adjusted_asof_idx = self.get_split_adjusted_asof_idx( dates ) return super(SplitAdjustedEstimatesLoader, self).get_adjustments( zero_qtr_data, requested_qtr_data, last_per_qtr, dates, assets, columns, split_adjusted_cols_for_group=split_adjusted_cols_for_group, split_adjusted_asof_idx=split_adjusted_asof_idx ) def determine_end_idx_for_adjustment(self, adjustment_ts, dates, upper_bound, requested_quarter, sid_estimates): """ Determines the date until which the adjustment at the given date index should be applied for the given quarter. Parameters ---------- adjustment_ts : pd.Timestamp The timestamp at which the adjustment occurs. dates : pd.DatetimeIndex The calendar dates over which the Pipeline is being computed. upper_bound : int The index of the upper bound in the calendar dates. This is the index until which the adjusment will be applied unless there is information for the requested quarter that comes in on or before that date. requested_quarter : float The quarter for which we are determining how the adjustment should be applied. sid_estimates : pd.DataFrame The DataFrame of estimates data for the sid for which we're applying the given adjustment. Returns ------- end_idx : int The last index to which the adjustment should be applied for the given quarter/sid. """ end_idx = upper_bound # Find the next newest kd that happens on or after # the date of this adjustment newest_kd_for_qtr = sid_estimates[ (sid_estimates[NORMALIZED_QUARTERS] == requested_quarter) & (sid_estimates[TS_FIELD_NAME] >= adjustment_ts) ][TS_FIELD_NAME].min() if pd.notnull(newest_kd_for_qtr): newest_kd_idx = dates.searchsorted( newest_kd_for_qtr ) # We have fresh information that comes in # before the end of the overwrite and # presumably is already split-adjusted to the # current split. We should stop applying the # adjustment the day before this new # information comes in. if newest_kd_idx <= upper_bound: end_idx = newest_kd_idx - 1 return end_idx def collect_pre_split_asof_date_adjustments( self, split_adjusted_asof_date_idx, sid_idx, pre_adjustments, requested_split_adjusted_columns ): """ Collect split adjustments that occur before the split-adjusted-asof-date. All those adjustments must first be UN-applied at the first date index and then re-applied on the appropriate dates in order to match point in time share pricing data. Parameters ---------- split_adjusted_asof_date_idx : int The index in the calendar dates as-of which all data was split-adjusted. sid_idx : int The index of the sid for which adjustments should be collected in the adjusted array. 
pre_adjustments : tuple(list(float), list(int)) The adjustment values, indexes in `dates`, and timestamps for adjustments that happened after the split-asof-date. requested_split_adjusted_columns : list of str The requested split adjusted columns. Returns ------- col_to_split_adjustments : dict[str -> dict[int -> list of Adjustment]] The adjustments for this sid that occurred on or before the split-asof-date. """ col_to_split_adjustments = {} if len(pre_adjustments[0]): adjustment_values, date_indexes = pre_adjustments for column_name in requested_split_adjusted_columns: col_to_split_adjustments[column_name] = {} # We need to undo all adjustments that happen before the # split_asof_date here by reversing the split ratio. col_to_split_adjustments[column_name][0] = [Float64Multiply( 0, split_adjusted_asof_date_idx, sid_idx, sid_idx, 1 / future_adjustment ) for future_adjustment in adjustment_values] for adjustment, date_index in zip(adjustment_values, date_indexes): adj = Float64Multiply( 0, split_adjusted_asof_date_idx, sid_idx, sid_idx, adjustment ) add_new_adjustments(col_to_split_adjustments, [adj], column_name, date_index) return col_to_split_adjustments def collect_post_asof_split_adjustments(self, post_adjustments, requested_qtr_data, sid, sid_idx, sid_estimates, requested_split_adjusted_columns): """ Collect split adjustments that occur after the split-adjusted-asof-date. Each adjustment needs to be applied to all dates on which knowledge for the requested quarter was older than the date of the adjustment. Parameters ---------- post_adjustments : tuple(list(float), list(int), pd.DatetimeIndex) The adjustment values, indexes in `dates`, and timestamps for adjustments that happened after the split-asof-date. requested_qtr_data : pd.DataFrame The requested quarter data for each calendar date per sid. sid : int The sid for which adjustments need to be collected. sid_idx : int The index of `sid` in the adjusted array. sid_estimates : pd.DataFrame The raw estimates data for this sid. requested_split_adjusted_columns : list of str The requested split adjusted columns. Returns ------- col_to_split_adjustments : dict[str -> dict[int -> list of Adjustment]] The adjustments for this sid that occurred after the split-asof-date. """ col_to_split_adjustments = {} if post_adjustments: # Get an integer index requested_qtr_timeline = requested_qtr_data[ SHIFTED_NORMALIZED_QTRS ][sid].reset_index() requested_qtr_timeline = requested_qtr_timeline[ requested_qtr_timeline[sid].notnull() ] # Split the data into range by quarter and determine which quarter # was being requested in each range. # Split integer indexes up by quarter range qtr_ranges_idxs = np.split( requested_qtr_timeline.index, np.where(np.diff(requested_qtr_timeline[sid]) != 0)[0] + 1 ) requested_quarters_per_range = [requested_qtr_timeline[sid][r[0]] for r in qtr_ranges_idxs] # Try to apply each adjustment to each quarter range. for i, qtr_range in enumerate(qtr_ranges_idxs): for adjustment, date_index, timestamp in zip( *post_adjustments ): # In the default case, apply through the end of the quarter upper_bound = qtr_range[-1] # Find the smallest KD in estimates that is on or after the # date of the given adjustment. Apply the given adjustment # until that KD. end_idx = self.determine_end_idx_for_adjustment( timestamp, requested_qtr_data.index, upper_bound, requested_quarters_per_range[i], sid_estimates ) # In the default case, apply adjustment on the first day of # the quarter. 
start_idx = qtr_range[0] # If the adjustment happens during this quarter, apply the # adjustment on the day it happens. if date_index > start_idx: start_idx = date_index # We only want to apply the adjustment if we have any stale # data to apply it to. if qtr_range[0] <= end_idx: for column_name in requested_split_adjusted_columns: if column_name not in col_to_split_adjustments: col_to_split_adjustments[column_name] = {} adj = Float64Multiply( # Always apply from first day of qtr qtr_range[0], end_idx, sid_idx, sid_idx, adjustment ) add_new_adjustments( col_to_split_adjustments, [adj], column_name, start_idx ) return col_to_split_adjustments def retrieve_split_adjustment_data_for_sid(self, dates, sid, split_adjusted_asof_idx): """ dates : pd.DatetimeIndex The calendar dates. sid : int The sid for which we want to retrieve adjustments. split_adjusted_asof_idx : int The index in `dates` as-of which the data is split adjusted. Returns ------- pre_adjustments : tuple(list(float), list(int), pd.DatetimeIndex) The adjustment values and indexes in `dates` for adjustments that happened before the split-asof-date. post_adjustments : tuple(list(float), list(int), pd.DatetimeIndex) The adjustment values, indexes in `dates`, and timestamps for adjustments that happened after the split-asof-date. """ adjustments = self._split_adjustments.get_adjustments_for_sid( 'splits', sid ) sorted(adjustments, key=lambda adj: adj[0]) # Get rid of any adjustments that happen outside of our date index. adjustments = list(filter(lambda x: dates[0] <= x[0] <= dates[-1], adjustments)) adjustment_values = np.array([adj[1] for adj in adjustments]) timestamps = pd.DatetimeIndex([adj[0] for adj in adjustments]) # We need the first date on which we would have known about each # adjustment. date_indexes = dates.searchsorted(timestamps) pre_adjustment_idxs = np.where( date_indexes <= split_adjusted_asof_idx )[0] last_adjustment_split_asof_idx = -1 if len(pre_adjustment_idxs): last_adjustment_split_asof_idx = pre_adjustment_idxs.max() pre_adjustments = ( adjustment_values[:last_adjustment_split_asof_idx + 1], date_indexes[:last_adjustment_split_asof_idx + 1] ) post_adjustments = ( adjustment_values[last_adjustment_split_asof_idx + 1:], date_indexes[last_adjustment_split_asof_idx + 1:], timestamps[last_adjustment_split_asof_idx + 1:] ) return pre_adjustments, post_adjustments def _collect_adjustments(self, requested_qtr_data, sid, sid_idx, sid_estimates, split_adjusted_asof_idx, pre_adjustments, post_adjustments, requested_split_adjusted_columns): pre_adjustments_dict = self.collect_pre_split_asof_date_adjustments( split_adjusted_asof_idx, sid_idx, pre_adjustments, requested_split_adjusted_columns ) post_adjustments_dict = self.collect_post_asof_split_adjustments( post_adjustments, requested_qtr_data, sid, sid_idx, sid_estimates, requested_split_adjusted_columns ) return pre_adjustments_dict, post_adjustments_dict def merge_split_adjustments_with_overwrites( self, pre, post, overwrites, requested_split_adjusted_columns ): """ Merge split adjustments with the dict containing overwrites. Parameters ---------- pre : dict[str -> dict[int -> list]] The adjustments that occur before the split-adjusted-asof-date. post : dict[str -> dict[int -> list]] The adjustments that occur after the split-adjusted-asof-date. overwrites : dict[str -> dict[int -> list]] The overwrites across all time. Adjustments will be merged into this dictionary. 
requested_split_adjusted_columns : list of str List of names of split adjusted columns that are being requested. """ for column_name in requested_split_adjusted_columns: # We can do a merge here because the timestamps in 'pre' and # 'post' are guaranteed to not overlap. if pre: # Either empty or contains all columns. for ts in pre[column_name]: add_new_adjustments( overwrites, pre[column_name][ts], column_name, ts ) if post: # Either empty or contains all columns. for ts in post[column_name]: add_new_adjustments( overwrites, post[column_name][ts], column_name, ts ) class PreviousSplitAdjustedEarningsEstimatesLoader( SplitAdjustedEstimatesLoader, PreviousEarningsEstimatesLoader ): def collect_split_adjustments(self, adjustments_for_sid, requested_qtr_data, dates, sid, sid_idx, sid_estimates, split_adjusted_asof_idx, pre_adjustments, post_adjustments, requested_split_adjusted_columns): """ Collect split adjustments for previous quarters and apply them to the given dictionary of splits for the given sid. Since overwrites just replace all estimates before the new quarter with NaN, we don't need to worry about re-applying split adjustments. Parameters ---------- adjustments_for_sid : dict[str -> dict[int -> list]] The dictionary of adjustments to which splits need to be added. Initially it contains only overwrites. requested_qtr_data : pd.DataFrame The requested quarter data for each calendar date per sid. dates : pd.DatetimeIndex The calendar dates for which estimates data is requested. sid : int The sid for which adjustments need to be collected. sid_idx : int The index of `sid` in the adjusted array. sid_estimates : pd.DataFrame The raw estimates data for the given sid. split_adjusted_asof_idx : int The index in `dates` as-of which the data is split adjusted. pre_adjustments : tuple(list(float), list(int), pd.DatetimeIndex) The adjustment values and indexes in `dates` for adjustments that happened before the split-asof-date. post_adjustments : tuple(list(float), list(int), pd.DatetimeIndex) The adjustment values, indexes in `dates`, and timestamps for adjustments that happened after the split-asof-date. requested_split_adjusted_columns : list of str List of requested split adjusted column names. """ (pre_adjustments_dict, post_adjustments_dict) = self._collect_adjustments( requested_qtr_data, sid, sid_idx, sid_estimates, split_adjusted_asof_idx, pre_adjustments, post_adjustments, requested_split_adjusted_columns ) self.merge_split_adjustments_with_overwrites( pre_adjustments_dict, post_adjustments_dict, adjustments_for_sid, requested_split_adjusted_columns ) class NextSplitAdjustedEarningsEstimatesLoader( SplitAdjustedEstimatesLoader, NextEarningsEstimatesLoader ): def collect_split_adjustments(self, adjustments_for_sid, requested_qtr_data, dates, sid, sid_idx, sid_estimates, split_adjusted_asof_idx, pre_adjustments, post_adjustments, requested_split_adjusted_columns): """ Collect split adjustments for future quarters. Re-apply adjustments that would be overwritten by overwrites. Merge split adjustments with overwrites into the given dictionary of splits for the given sid. Parameters ---------- adjustments_for_sid : dict[str -> dict[int -> list]] The dictionary of adjustments to which splits need to be added. Initially it contains only overwrites. requested_qtr_data : pd.DataFrame The requested quarter data for each calendar date per sid. dates : pd.DatetimeIndex The calendar dates for which estimates data is requested. sid : int The sid for which adjustments need to be collected. 
sid_idx : int The index of `sid` in the adjusted array. sid_estimates : pd.DataFrame The raw estimates data for the given sid. split_adjusted_asof_idx : int The index in `dates` as-of which the data is split adjusted. pre_adjustments : tuple(list(float), list(int), pd.DatetimeIndex) The adjustment values and indexes in `dates` for adjustments that happened before the split-asof-date. post_adjustments : tuple(list(float), list(int), pd.DatetimeIndex) The adjustment values, indexes in `dates`, and timestamps for adjustments that happened after the split-asof-date. requested_split_adjusted_columns : list of str List of requested split adjusted column names. """ (pre_adjustments_dict, post_adjustments_dict) = self._collect_adjustments( requested_qtr_data, sid, sid_idx, sid_estimates, split_adjusted_asof_idx, pre_adjustments, post_adjustments, requested_split_adjusted_columns, ) for column_name in requested_split_adjusted_columns: for overwrite_ts in adjustments_for_sid[column_name]: # We need to cumulatively re-apply all adjustments up to the # split-adjusted-asof-date. We might not have any # pre-adjustments, so we should check for that. if overwrite_ts <= split_adjusted_asof_idx \ and pre_adjustments_dict: for split_ts in pre_adjustments_dict[column_name]: # The split has to have occurred during the span of # the overwrite. if split_ts < overwrite_ts: # Create new adjustments here so that we can # re-apply all applicable adjustments to ONLY # the dates being overwritten. adjustments_for_sid[ column_name ][overwrite_ts].extend([ Float64Multiply( 0, overwrite_ts - 1, sid_idx, sid_idx, adjustment.value ) for adjustment in pre_adjustments_dict[ column_name ][split_ts] ]) # After the split-adjusted-asof-date, we need to re-apply all # adjustments that occur after that date and within the # bounds of the overwrite. They need to be applied starting # from the first date and until an end date. The end date is # the date of the newest information we get about # `requested_quarter` that is >= `split_ts`, or if there is no # new knowledge before `overwrite_ts`, then it is the date # before `overwrite_ts`. else: # Overwrites happen at the first index of a new quarter, # so determine here which quarter that is. requested_quarter = requested_qtr_data[ SHIFTED_NORMALIZED_QTRS, sid ].iloc[overwrite_ts] for adjustment_value, date_index, timestamp in zip( *post_adjustments ): if split_adjusted_asof_idx < date_index < overwrite_ts: # Assume the entire overwrite contains stale data upper_bound = overwrite_ts - 1 end_idx = self.determine_end_idx_for_adjustment( timestamp, dates, upper_bound, requested_quarter, sid_estimates ) adjustments_for_sid[ column_name ][overwrite_ts].append( Float64Multiply( 0, end_idx, sid_idx, sid_idx, adjustment_value ) ) self.merge_split_adjustments_with_overwrites( pre_adjustments_dict, post_adjustments_dict, adjustments_for_sid, requested_split_adjusted_columns )
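

# ---------------------------------------------------------------------------
# Usage sketch (hypothetical, not part of the upstream module): a minimal
# example of constructing the quarter-based loaders above from a raw
# estimates frame. The single-sid frame, its values, and the 'estimate'
# column/name_map are invented for illustration; the metadata column names
# ('sid', 'timestamp', 'event_date', 'fiscal_year', 'fiscal_quarter') follow
# the field constants from zipline.pipeline.common used in this module.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    # One sid with two quarterly estimates: `timestamp` is when each estimate
    # became known, `event_date` is the expected report date.
    example_estimates = pd.DataFrame({
        'sid': [1, 1],
        'timestamp': pd.to_datetime(['2015-01-05', '2015-04-06']),
        'event_date': pd.to_datetime(['2015-01-20', '2015-04-21']),
        'fiscal_year': [2015.0, 2015.0],
        'fiscal_quarter': [1.0, 2.0],
        'estimate': [1.10, 1.25],
    })
    # name_map maps dataset column names to raw column names; the dataset
    # column is assumed to also be called 'estimate' here.
    previous_loader = PreviousEarningsEstimatesLoader(
        example_estimates, {'estimate': 'estimate'},
    )
    next_loader = NextEarningsEstimatesLoader(
        example_estimates, {'estimate': 'estimate'},
    )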
zipline-trader
/zipline_trader-1.6.1-cp36-cp36m-win32.whl/zipline/pipeline/loaders/earnings_estimates.py
earnings_estimates.py
import numpy as np import pandas as pd from interface import implements from six import viewvalues from toolz import groupby, merge from .base import PipelineLoader from zipline.pipeline.common import ( EVENT_DATE_FIELD_NAME, SID_FIELD_NAME, TS_FIELD_NAME, ) from zipline.pipeline.loaders.frame import DataFrameLoader from zipline.pipeline.loaders.utils import ( next_event_indexer, previous_event_indexer, ) def required_event_fields(next_value_columns, previous_value_columns): """ Compute the set of resource columns required to serve ``next_value_columns`` and ``previous_value_columns``. """ # These metadata columns are used to align event indexers. return { TS_FIELD_NAME, SID_FIELD_NAME, EVENT_DATE_FIELD_NAME, }.union( # We also expect any of the field names that our loadable columns # are mapped to. viewvalues(next_value_columns), viewvalues(previous_value_columns), ) def validate_column_specs(events, next_value_columns, previous_value_columns): """ Verify that the columns of ``events`` can be used by an EventsLoader to serve the BoundColumns described by ``next_value_columns`` and ``previous_value_columns``. """ required = required_event_fields(next_value_columns, previous_value_columns) received = set(events.columns) missing = required - received if missing: raise ValueError( "EventsLoader missing required columns {missing}.\n" "Got Columns: {received}\n" "Expected Columns: {required}".format( missing=sorted(missing), received=sorted(received), required=sorted(required), ) ) class EventsLoader(implements(PipelineLoader)): """ Base class for PipelineLoaders that supports loading the next and previous value of an event field. Does not currently support adjustments. Parameters ---------- events : pd.DataFrame A DataFrame representing events (e.g. share buybacks or earnings announcements) associated with particular companies. ``events`` must contain at least three columns:: sid : int64 The asset id associated with each event. event_date : datetime64[ns] The date on which the event occurred. timestamp : datetime64[ns] The date on which we learned about the event. next_value_columns : dict[BoundColumn -> str] Map from dataset columns to raw field names that should be used when searching for a next event value. previous_value_columns : dict[BoundColumn -> str] Map from dataset columns to raw field names that should be used when searching for a previous event value. """ def __init__(self, events, next_value_columns, previous_value_columns): validate_column_specs( events, next_value_columns, previous_value_columns, ) events = events[events[EVENT_DATE_FIELD_NAME].notnull()] # We always work with entries from ``events`` directly as numpy arrays, # so we coerce from a frame to a dict of arrays here. self.events = { name: np.asarray(series) for name, series in ( events.sort_values(EVENT_DATE_FIELD_NAME).iteritems() ) } # Columns to load with self.load_next_events. self.next_value_columns = next_value_columns # Columns to load with self.load_previous_events. self.previous_value_columns = previous_value_columns def split_next_and_previous_event_columns(self, requested_columns): """ Split requested columns into columns that should load the next known value and columns that should load the previous known value. 
Parameters ---------- requested_columns : iterable[BoundColumn] Returns ------- next_cols, previous_cols : iterable[BoundColumn], iterable[BoundColumn] ``requested_columns``, partitioned into sub-sequences based on whether the column should produce values from the next event or the previous event """ def next_or_previous(c): if c in self.next_value_columns: return 'next' elif c in self.previous_value_columns: return 'previous' raise ValueError( "{c} not found in next_value_columns " "or previous_value_columns".format(c=c) ) groups = groupby(next_or_previous, requested_columns) return groups.get('next', ()), groups.get('previous', ()) def next_event_indexer(self, dates, data_query_cutoff, sids): return next_event_indexer( dates, data_query_cutoff, sids, self.events[EVENT_DATE_FIELD_NAME], self.events[TS_FIELD_NAME], self.events[SID_FIELD_NAME], ) def previous_event_indexer(self, data_query_time, sids): return previous_event_indexer( data_query_time, sids, self.events[EVENT_DATE_FIELD_NAME], self.events[TS_FIELD_NAME], self.events[SID_FIELD_NAME], ) def load_next_events(self, domain, columns, dates, data_query_time, sids, mask): if not columns: return {} return self._load_events( name_map=self.next_value_columns, indexer=self.next_event_indexer(dates, data_query_time, sids), domain=domain, columns=columns, dates=dates, sids=sids, mask=mask, ) def load_previous_events(self, domain, columns, dates, data_query_time, sids, mask): if not columns: return {} return self._load_events( name_map=self.previous_value_columns, indexer=self.previous_event_indexer(data_query_time, sids), domain=domain, columns=columns, dates=dates, sids=sids, mask=mask, ) def _load_events(self, name_map, indexer, domain, columns, dates, sids, mask): def to_frame(array): return pd.DataFrame(array, index=dates, columns=sids) assert indexer.shape == (len(dates), len(sids)) out = {} for c in columns: # Array holding the value for column `c` for every event we have. col_array = self.events[name_map[c]] if not len(col_array): # We don't have **any** events, so return col.missing_value # every day for every sid. We have to special case empty events # because in normal branch we depend on being able to index # with -1 for missing values, which fails if there are no # events at all. raw = np.full( (len(dates), len(sids)), c.missing_value, dtype=c.dtype, ) else: # Slot event values into sid/date locations using `indexer`. # This produces a 2D array of the same shape as `indexer`, # which must be (len(dates), len(sids))`. raw = col_array[indexer] # indexer will be -1 for locations where we don't have a known # value. Overwrite those locations with c.missing_value. raw[indexer < 0] = c.missing_value # Delegate the actual array formatting logic to a DataFrameLoader. loader = DataFrameLoader(c, to_frame(raw), adjustments=None) out[c] = loader.load_adjusted_array( domain, [c], dates, sids, mask, )[c] return out def load_adjusted_array(self, domain, columns, dates, sids, mask): data_query = domain.data_query_cutoff_for_sessions(dates) n, p = self.split_next_and_previous_event_columns(columns) return merge( self.load_next_events(domain, n, dates, data_query, sids, mask), self.load_previous_events(domain, p, dates, data_query, sids, mask) )
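

# ---------------------------------------------------------------------------
# Usage sketch (hypothetical, not part of the upstream module): a minimal
# example of wiring an EventsLoader to a small events frame. The ``Buybacks``
# dataset and the 'amount' column are invented for illustration; the three
# metadata columns follow the field names required by
# ``validate_column_specs`` above.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    from zipline.pipeline.data import Column, DataSet
    from zipline.utils.numpy_utils import float64_dtype

    class Buybacks(DataSet):
        # One column exposing the next known 'amount', one exposing the
        # previous known 'amount'.
        next_amount = Column(float64_dtype)
        previous_amount = Column(float64_dtype)

    example_events = pd.DataFrame({
        SID_FIELD_NAME: [1, 1],
        TS_FIELD_NAME: pd.to_datetime(['2015-01-05', '2015-07-06']),
        EVENT_DATE_FIELD_NAME: pd.to_datetime(['2015-02-02', '2015-08-03']),
        'amount': [100.0, 150.0],
    })

    loader = EventsLoader(
        example_events,
        next_value_columns={Buybacks.next_amount: 'amount'},
        previous_value_columns={Buybacks.previous_amount: 'amount'},
    )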
zipline-trader
/zipline_trader-1.6.1-cp36-cp36m-win32.whl/zipline/pipeline/loaders/events.py
events.py
from __future__ import division, absolute_import from abc import ABCMeta, abstractproperty from functools import partial from itertools import count import warnings from weakref import WeakKeyDictionary import blaze as bz from datashape import ( Date, DateTime, Option, String, isrecord, isscalar, integral, ) from interface import implements import numpy as np from odo import odo import pandas as pd from six import with_metaclass, PY2, itervalues, iteritems from toolz import ( complement, compose, first, flip, groupby, memoize, merge, ) import toolz.curried.operator as op from toolz.curried.operator import getitem from zipline.pipeline.common import ( AD_FIELD_NAME, SID_FIELD_NAME, TS_FIELD_NAME ) from zipline.pipeline.data.dataset import DataSet, Column from zipline.pipeline.domain import GENERIC from zipline.pipeline.loaders.base import PipelineLoader from zipline.pipeline.sentinels import NotSpecified from zipline.lib.adjusted_array import can_represent_dtype from zipline.utils.input_validation import expect_element from zipline.utils.pandas_utils import ignore_pandas_nan_categorical_warning from zipline.utils.pool import SequentialPool try: from ._core import ( # noqa adjusted_arrays_from_rows_with_assets, adjusted_arrays_from_rows_without_assets, baseline_arrays_from_rows_with_assets, # reexport baseline_arrays_from_rows_without_assets, # reexport getname, ) except ImportError: def getname(column): return column.get('blaze_column_name', column.name) def barf(*args, **kwargs): raise RuntimeError( "zipline.pipeline.loaders.blaze._core failed to import" ) adjusted_arrays_from_rows_with_assets = barf adjusted_arrays_from_rows_without_assets = barf baseline_arrays_from_rows_with_assets = barf baseline_arrays_from_rows_without_assets = barf valid_deltas_node_types = ( bz.expr.Field, bz.expr.ReLabel, bz.expr.Symbol, ) traversable_nodes = ( bz.expr.Field, bz.expr.Label, ) is_invalid_deltas_node = complement(flip(isinstance, valid_deltas_node_types)) get__name__ = op.attrgetter('__name__') class InvalidField(with_metaclass(ABCMeta)): """A field that raises an exception indicating that the field was invalid. Parameters ---------- field : str The name of the field. type_ : dshape The shape of the field. """ @abstractproperty def error_format(self): # pragma: no cover raise NotImplementedError('error_format') def __init__(self, field, type_): self._field = field self._type = type_ def __get__(self, instance, owner): raise AttributeError( self.error_format.format(field=self._field, type_=self._type), ) class NonNumpyField(InvalidField): error_format = ( "field '{field}' was a non numpy compatible type: '{type_}'" ) class NonPipelineField(InvalidField): error_format = ( "field '{field}' was a non Pipeline API compatible type: '{type_}'" ) _new_names = ('BlazeDataSet_%d' % n for n in count()) def datashape_type_to_numpy(type_): """ Given a datashape type, return the associated numpy type. Maps datashape's DateTime type to numpy's `datetime64[ns]` dtype, since the numpy datetime returned by datashape isn't supported by pipeline. Parameters ---------- type_: datashape.coretypes.Type The datashape type. Returns ------- type_ np.dtype The numpy dtype. 
""" if isinstance(type_, Option): type_ = type_.ty if isinstance(type_, DateTime): return np.dtype('datetime64[ns]') if isinstance(type_, String): return np.dtype(object) if type_ in integral: return np.dtype('int64') else: return type_.to_numpy_dtype() @memoize def new_dataset(expr, missing_values, domain): """ Creates or returns a dataset from a blaze expression. Parameters ---------- expr : Expr The blaze expression representing the values. missing_values : frozenset((name, value) pairs Association pairs column name and missing_value for that column. This needs to be a frozenset rather than a dict or tuple of tuples because we want a collection that's unordered but still hashable. domain : zipline.pipeline.domain.Domain Domain of the dataset to be created. Returns ------- ds : type A new dataset type. Notes ----- This function is memoized. repeated calls with the same inputs will return the same type. """ missing_values = dict(missing_values) class_dict = {'ndim': 2 if SID_FIELD_NAME in expr.fields else 1} for name, type_ in expr.dshape.measure.fields: # Don't generate a column for sid or timestamp, since they're # implicitly the labels if the arrays that will be passed to pipeline # Terms. if name in (SID_FIELD_NAME, TS_FIELD_NAME): continue type_ = datashape_type_to_numpy(type_) if can_represent_dtype(type_): col = Column( type_, missing_values.get(name, NotSpecified), ) else: col = NonPipelineField(name, type_) class_dict[name] = col if 'domain' in class_dict: raise ValueError("Got a column named 'domain' in new_dataset(). " "'domain' is reserved.") class_dict['domain'] = domain name = expr._name if name is None: name = next(_new_names) # unicode is a name error in py3 but the branch is only hit # when we are in python 2. if PY2 and isinstance(name, unicode): # pragma: no cover # noqa name = name.encode('utf-8') return type(name, (DataSet,), class_dict) def _check_resources(name, expr, resources): """Validate that the expression and resources passed match up. Parameters ---------- name : str The name of the argument we are checking. expr : Expr The potentially bound expr. resources The explicitly passed resources to compute expr. Raises ------ ValueError If the resources do not match for an expression. """ if expr is None: return bound = expr._resources() if not bound and resources is None: raise ValueError('no resources provided to compute %s' % name) if bound and resources: raise ValueError( 'explicit and implicit resources provided to compute %s' % name, ) def _check_datetime_field(name, measure): """Check that a field is a datetime inside some measure. Parameters ---------- name : str The name of the field to check. measure : Record The record to check the field of. Raises ------ TypeError If the field is not a datetime inside ``measure``. """ if not isinstance(measure[name], (Date, DateTime)): raise TypeError( "'{name}' field must be a '{dt}', not: '{dshape}'".format( name=name, dt=DateTime(), dshape=measure[name], ), ) class NoMetaDataWarning(UserWarning): """Warning used to signal that no deltas or checkpoints could be found and none were provided. Parameters ---------- expr : Expr The expression that was searched. field : {'deltas', 'checkpoints'} The field that was looked up. 
""" def __init__(self, expr, field): self._expr = expr self._field = field def __str__(self): return 'No %s could be inferred from expr: %s' % ( self._field, self._expr, ) no_metadata_rules = frozenset({'warn', 'raise', 'ignore'}) def _get_metadata(field, expr, metadata_expr, no_metadata_rule): """Find the correct metadata expression for the expression. Parameters ---------- field : {'deltas', 'checkpoints'} The kind of metadata expr to lookup. expr : Expr The baseline expression. metadata_expr : Expr, 'auto', or None The metadata argument. If this is 'auto', then the metadata table will be searched for by walking up the expression tree. If this cannot be reflected, then an action will be taken based on the ``no_metadata_rule``. no_metadata_rule : {'warn', 'raise', 'ignore'} How to handle the case where the metadata_expr='auto' but no expr could be found. Returns ------- metadata : Expr or None The deltas or metadata table to use. """ if isinstance(metadata_expr, bz.Expr) or metadata_expr is None: return metadata_expr try: # The error produced by expr[field_name] when field_name doesn't exist # is very expensive. Avoid that cost by doing the check ourselves. field_name = '_'.join(((expr._name or ''), field)) child = expr._child if field_name not in child.fields: raise AttributeError(field_name) return child[field_name] except (ValueError, AttributeError): if no_metadata_rule == 'raise': raise ValueError( "no %s table could be reflected for %s" % (field, expr) ) elif no_metadata_rule == 'warn': warnings.warn(NoMetaDataWarning(expr, field), stacklevel=4) return None def _ad_as_ts(expr): """Duplicate the asof_date column as the timestamp column. Parameters ---------- expr : Expr or None The expression to change the columns of. Returns ------- transformed : Expr or None The transformed expression or None if ``expr`` is None. """ return ( None if expr is None else bz.transform(expr, **{TS_FIELD_NAME: expr[AD_FIELD_NAME]}) ) def _ensure_timestamp_field(dataset_expr, deltas, checkpoints): """Verify that the baseline and deltas expressions have a timestamp field. If there is not a ``TS_FIELD_NAME`` on either of the expressions, it will be copied from the ``AD_FIELD_NAME``. If one is provided, then we will verify that it is the correct dshape. Parameters ---------- dataset_expr : Expr The baseline expression. deltas : Expr or None The deltas expression if any was provided. checkpoints : Expr or None The checkpoints expression if any was provided. Returns ------- dataset_expr, deltas : Expr The new baseline and deltas expressions to use. """ measure = dataset_expr.dshape.measure if TS_FIELD_NAME not in measure.names: dataset_expr = bz.transform( dataset_expr, **{TS_FIELD_NAME: dataset_expr[AD_FIELD_NAME]} ) deltas = _ad_as_ts(deltas) checkpoints = _ad_as_ts(checkpoints) else: _check_datetime_field(TS_FIELD_NAME, measure) return dataset_expr, deltas, checkpoints @expect_element( no_deltas_rule=no_metadata_rules, no_checkpoints_rule=no_metadata_rules, ) def from_blaze(expr, deltas='auto', checkpoints='auto', loader=None, resources=None, odo_kwargs=None, missing_values=None, domain=GENERIC, no_deltas_rule='warn', no_checkpoints_rule='warn'): """Create a Pipeline API object from a blaze expression. Parameters ---------- expr : Expr The blaze expression to use. deltas : Expr, 'auto' or None, optional The expression to use for the point in time adjustments. 
If the string 'auto' is passed, a deltas expr will be looked up by stepping up the expression tree and looking for another field with the name of ``expr._name`` + '_deltas'. If None is passed, no deltas will be used. checkpoints : Expr, 'auto' or None, optional The expression to use for the forward fill checkpoints. If the string 'auto' is passed, a checkpoints expr will be looked up by stepping up the expression tree and looking for another field with the name of ``expr._name`` + '_checkpoints'. If None is passed, no checkpoints will be used. loader : BlazeLoader, optional The blaze loader to attach this pipeline dataset to. If None is passed, the global blaze loader is used. resources : dict or any, optional The data to execute the blaze expressions against. This is used as the scope for ``bz.compute``. odo_kwargs : dict, optional The keyword arguments to pass to odo when evaluating the expressions. domain : zipline.pipeline.domain.Domain Domain of the dataset to be created. missing_values : dict[str -> any], optional A dict mapping column names to missing values for those columns. Missing values are required for integral columns. no_deltas_rule : {'warn', 'raise', 'ignore'}, optional What should happen if ``deltas='auto'`` but no deltas can be found. 'warn' says to raise a warning but continue. 'raise' says to raise an exception if no deltas can be found. 'ignore' says take no action and proceed with no deltas. no_checkpoints_rule : {'warn', 'raise', 'ignore'}, optional What should happen if ``checkpoints='auto'`` but no checkpoints can be found. 'warn' says to raise a warning but continue. 'raise' says to raise an exception if no deltas can be found. 'ignore' says take no action and proceed with no deltas. Returns ------- pipeline_api_obj : DataSet or BoundColumn Either a new dataset or bound column based on the shape of the expr passed in. If a table shaped expression is passed, this will return a ``DataSet`` that represents the whole table. If an array-like shape is passed, a ``BoundColumn`` on the dataset that would be constructed from passing the parent is returned. """ if 'auto' in {deltas, checkpoints}: invalid_nodes = tuple(filter(is_invalid_deltas_node, expr._subterms())) if invalid_nodes: raise TypeError( 'expression with auto %s may only contain (%s) nodes,' " found: %s" % ( ' or '.join( ['deltas'] if deltas is not None else [] + ['checkpoints'] if checkpoints is not None else [], ), ', '.join(map(get__name__, valid_deltas_node_types)), ', '.join( set(map(compose(get__name__, type), invalid_nodes)), ), ), ) deltas = _get_metadata( 'deltas', expr, deltas, no_deltas_rule, ) checkpoints = _get_metadata( 'checkpoints', expr, checkpoints, no_checkpoints_rule, ) # Check if this is a single column out of a dataset. if bz.ndim(expr) != 1: raise TypeError( 'expression was not tabular or array-like,' ' %s dimensions: %d' % ( 'too many' if bz.ndim(expr) > 1 else 'not enough', bz.ndim(expr), ), ) single_column = None if isscalar(expr.dshape.measure): # This is a single column. Record which column we are to return # but create the entire dataset. 
single_column = rename = expr._name field_hit = False if not isinstance(expr, traversable_nodes): raise TypeError( "expression '%s' was array-like but not a simple field of" " some larger table" % str(expr), ) while isinstance(expr, traversable_nodes): if isinstance(expr, bz.expr.Field): if not field_hit: field_hit = True else: break rename = expr._name expr = expr._child dataset_expr = expr.relabel({rename: single_column}) else: dataset_expr = expr measure = dataset_expr.dshape.measure if not isrecord(measure) or AD_FIELD_NAME not in measure.names: raise TypeError( "The dataset must be a collection of records with at least an" " '{ad}' field. Fields provided: '{fields}'\nhint: maybe you need" " to use `relabel` to change your field names".format( ad=AD_FIELD_NAME, fields=measure, ), ) _check_datetime_field(AD_FIELD_NAME, measure) dataset_expr, deltas, checkpoints = _ensure_timestamp_field( dataset_expr, deltas, checkpoints, ) if deltas is not None and (sorted(deltas.dshape.measure.fields) != sorted(measure.fields)): raise TypeError( 'baseline measure != deltas measure:\n%s != %s' % ( measure, deltas.dshape.measure, ), ) if (checkpoints is not None and (sorted(checkpoints.dshape.measure.fields) != sorted(measure.fields))): raise TypeError( 'baseline measure != checkpoints measure:\n%s != %s' % ( measure, checkpoints.dshape.measure, ), ) # Ensure that we have a data resource to execute the query against. _check_resources('expr', dataset_expr, resources) _check_resources('deltas', deltas, resources) _check_resources('checkpoints', checkpoints, resources) # Create or retrieve the Pipeline API dataset. if missing_values is None: missing_values = {} ds = new_dataset(dataset_expr, frozenset(missing_values.items()), domain) # Register our new dataset with the loader. (loader if loader is not None else global_loader).register_dataset( ds, bind_expression_to_resources(dataset_expr, resources), bind_expression_to_resources(deltas, resources) if deltas is not None else None, bind_expression_to_resources(checkpoints, resources) if checkpoints is not None else None, odo_kwargs=odo_kwargs, ) if single_column is not None: # We were passed a single column, extract and return it. return getattr(ds, single_column) return ds getdataset = op.attrgetter('dataset') class ExprData(object): """A pair of expressions and data resources. The expressions will be computed using the resources as the starting scope. Parameters ---------- expr : Expr The baseline values. deltas : Expr, optional The deltas for the data. checkpoints : Expr, optional The forward fill checkpoints for the data. odo_kwargs : dict, optional The keyword arguments to forward to the odo calls internally. """ def __init__(self, expr, deltas=None, checkpoints=None, odo_kwargs=None): self.expr = expr self.deltas = deltas self.checkpoints = checkpoints self._odo_kwargs = odo_kwargs def replace(self, **kwargs): base_kwargs = { 'expr': self.expr, 'deltas': self.deltas, 'checkpoints': self.checkpoints, 'odo_kwargs': self._odo_kwargs, } invalid_kwargs = set(kwargs) - set(base_kwargs) if invalid_kwargs: raise TypeError('invalid param(s): %s' % sorted(invalid_kwargs)) base_kwargs.update(kwargs) return type(self)(**base_kwargs) def __iter__(self): yield self.expr yield self.deltas yield self.checkpoints yield self.odo_kwargs @property def odo_kwargs(self): out = self._odo_kwargs if out is None: out = {} return out def __repr__(self): # If the expressions have _resources() then the repr will # drive computation so we take the str here. 
return ( 'ExprData(expr=%s, deltas=%s, checkpoints=%s, odo_kwargs=%r)' % ( self.expr, self.deltas, self.checkpoints, self.odo_kwargs, ) ) @staticmethod def _expr_eq(a, b): return a is b is None or a.isidentical(b) def __hash__(self): return hash(( self.expr, self.deltas, self.checkpoints, id(self._odo_kwargs), )) def __eq__(self, other): if not isinstance(other, ExprData): return NotImplemented return ( self._expr_eq(self.expr, other.expr) and self._expr_eq(self.deltas, other.deltas) and self._expr_eq(self.checkpoints, other.checkpoints) and self._odo_kwargs is other._odo_kwargs ) class BlazeLoader(implements(PipelineLoader)): """A PipelineLoader for datasets constructed with ``from_blaze``. Parameters ---------- dsmap : mapping, optional An initial mapping of datasets to ``ExprData`` objects. NOTE: Further mutations to this map will not be reflected by this object. pool : Pool, optional The pool to use to run blaze queries concurrently. This object must support ``imap_unordered``, ``apply`` and ``apply_async`` methods. Attributes ---------- pool : Pool The pool to use to run blaze queries concurrently. This object must support ``imap_unordered``, ``apply`` and ``apply_async`` methods. It is possible to change the pool after the loader has been constructed. This allows us to set a new pool for the ``global_loader`` like: ``global_loader.pool = multiprocessing.Pool(4)``. See Also -------- :class:`zipline.utils.pool.SequentialPool` :class:`multiprocessing.Pool` """ def __init__(self, dsmap=None, pool=SequentialPool()): # explicitly public self.pool = pool self._table_expressions = (dsmap or {}).copy() @classmethod @memoize(cache=WeakKeyDictionary()) def global_instance(cls): return cls() def __hash__(self): return id(self) def __contains__(self, column): return column in self._table_expressions def __getitem__(self, column): return self._table_expressions[column] def __iter__(self): return iter(self._table_expressions) def __len__(self): return len(self._table_expressions) def __call__(self, column): if column in self: return self raise KeyError(column) def register_dataset(self, dataset, expr, deltas=None, checkpoints=None, odo_kwargs=None): """Explicitly map a datset to a collection of blaze expressions. Parameters ---------- dataset : DataSet The pipeline dataset to map to the given expressions. expr : Expr The baseline values. deltas : Expr, optional The deltas for the data. checkpoints : Expr, optional The forward fill checkpoints for the data. odo_kwargs : dict, optional The keyword arguments to forward to the odo calls internally. See Also -------- :func:`zipline.pipeline.loaders.blaze.from_blaze` """ expr_data = ExprData( expr, deltas, checkpoints, odo_kwargs, ) for column in dataset.columns: self._table_expressions[column] = expr_data def register_column(self, column, expr, deltas=None, checkpoints=None, odo_kwargs=None): """Explicitly map a single bound column to a collection of blaze expressions. The expressions need to have ``timestamp`` and ``as_of`` columns. Parameters ---------- column : BoundColumn The pipeline dataset to map to the given expressions. expr : Expr The baseline values. deltas : Expr, optional The deltas for the data. checkpoints : Expr, optional The forward fill checkpoints for the data. odo_kwargs : dict, optional The keyword arguments to forward to the odo calls internally. 
See Also -------- :func:`zipline.pipeline.loaders.blaze.from_blaze` """ self._table_expressions[column] = ExprData( expr, deltas, checkpoints, odo_kwargs, ) def load_adjusted_array(self, domain, columns, dates, sids, mask): data_query_cutoff_times = domain.data_query_cutoff_for_sessions( dates, ) return merge( self.pool.imap_unordered( partial( self._load_dataset, dates, data_query_cutoff_times, sids, mask, ), itervalues(groupby(getitem(self._table_expressions), columns)), ), ) def _load_dataset(self, dates, data_query_cutoff_times, assets, mask, columns): try: (expr_data,) = {self._table_expressions[c] for c in columns} except ValueError: raise AssertionError( 'all columns must share the same expression data', ) expr, deltas, checkpoints, odo_kwargs = expr_data have_sids = (first(columns).dataset.ndim == 2) added_query_fields = {AD_FIELD_NAME, TS_FIELD_NAME} | ( {SID_FIELD_NAME} if have_sids else set() ) requested_columns = set(map(getname, columns)) colnames = sorted(added_query_fields | requested_columns) lower_dt, upper_dt = data_query_cutoff_times[[0, -1]] def collect_expr(e, lower): """Materialize the expression as a dataframe. Parameters ---------- e : Expr The baseline or deltas expression. lower : datetime The lower time bound to query. Returns ------- result : pd.DataFrame The resulting dataframe. Notes ----- This can return more data than needed. The in memory reindex will handle this. """ predicate = e[TS_FIELD_NAME] < upper_dt if lower is not None: predicate &= e[TS_FIELD_NAME] >= lower return odo(e[predicate][colnames], pd.DataFrame, **odo_kwargs) lower, materialized_checkpoints = get_materialized_checkpoints( checkpoints, colnames, lower_dt, odo_kwargs ) materialized_expr_deferred = self.pool.apply_async( collect_expr, (expr, lower), ) materialized_deltas = ( self.pool.apply(collect_expr, (deltas, lower)) if deltas is not None else None ) # If the rows that come back from the blaze backend are constructed # from LabelArrays with Nones in the categories, pandas # complains. Ignore those warnings for now until we have a story for # updating our categorical missing values to NaN. with ignore_pandas_nan_categorical_warning(): all_rows = pd.concat( filter( lambda df: df is not None, ( materialized_checkpoints, materialized_expr_deferred.get(), materialized_deltas, ), ), ignore_index=True, copy=False, ) all_rows[TS_FIELD_NAME] = all_rows[TS_FIELD_NAME].astype( 'datetime64[ns]', ) all_rows.sort_values([TS_FIELD_NAME, AD_FIELD_NAME], inplace=True) if have_sids: return adjusted_arrays_from_rows_with_assets( dates, data_query_cutoff_times, assets, columns, all_rows, ) else: return adjusted_arrays_from_rows_without_assets( dates, data_query_cutoff_times, columns, all_rows, ) global_loader = BlazeLoader.global_instance() def bind_expression_to_resources(expr, resources): """ Bind a Blaze expression to resources. Parameters ---------- expr : bz.Expr The expression to which we want to bind resources. resources : dict[bz.Symbol -> any] Mapping from the loadable terms of ``expr`` to actual data resources. Returns ------- bound_expr : bz.Expr ``expr`` with bound resources. """ # bind the resources into the expression if resources is None: resources = {} # _subs stands for substitute. It's not actually private, blaze just # prefixes symbol-manipulation methods with underscores to prevent # collisions with data column names. 
return expr._subs({ k: bz.data(v, dshape=k.dshape) for k, v in iteritems(resources) }) def get_materialized_checkpoints(checkpoints, colnames, lower_dt, odo_kwargs): """ Computes a lower bound and a DataFrame checkpoints. Parameters ---------- checkpoints : Expr Bound blaze expression for a checkpoints table from which to get a computed lower bound. colnames : iterable of str The names of the columns for which checkpoints should be computed. lower_dt : pd.Timestamp The lower date being queried for that serves as an upper bound for checkpoints. odo_kwargs : dict, optional The extra keyword arguments to pass to ``odo``. """ if checkpoints is not None: ts = checkpoints[TS_FIELD_NAME] checkpoints_ts = odo( ts[ts < lower_dt].max(), pd.Timestamp, **odo_kwargs ) if pd.isnull(checkpoints_ts): # We don't have a checkpoint for before our start date so just # don't constrain the lower date. materialized_checkpoints = pd.DataFrame(columns=colnames) lower = None else: materialized_checkpoints = odo( checkpoints[ts == checkpoints_ts][colnames], pd.DataFrame, **odo_kwargs ) lower = checkpoints_ts else: materialized_checkpoints = pd.DataFrame(columns=colnames) lower = None # we don't have a good lower date constraint return lower, materialized_checkpoints def ffill_query_in_range(expr, lower, upper, checkpoints=None, odo_kwargs=None, ts_field=TS_FIELD_NAME): """Query a blaze expression in a given time range properly forward filling from values that fall before the lower date. Parameters ---------- expr : Expr Bound blaze expression. lower : datetime The lower date to query for. upper : datetime The upper date to query for. checkpoints : Expr, optional Bound blaze expression for a checkpoints table from which to get a computed lower bound. odo_kwargs : dict, optional The extra keyword arguments to pass to ``odo``. ts_field : str, optional The name of the timestamp field in the given blaze expression. Returns ------- raw : pd.DataFrame A strict dataframe for the data in the given date range. This may start before the requested start date if a value is needed to ffill. """ odo_kwargs = odo_kwargs or {} computed_lower, materialized_checkpoints = get_materialized_checkpoints( checkpoints, expr.fields, lower, odo_kwargs, ) pred = expr[ts_field] <= upper if computed_lower is not None: # only constrain the lower date if we computed a new lower date pred &= expr[ts_field] >= computed_lower raw = pd.concat( ( materialized_checkpoints, odo( expr[pred], pd.DataFrame, **odo_kwargs ), ), ignore_index=True, ) raw.loc[:, ts_field] = raw.loc[:, ts_field].astype('datetime64[ns]') return raw
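

# ---------------------------------------------------------------------------
# Usage sketch (hypothetical, not part of the upstream module): the typical
# entry point here is ``from_blaze``, applied below to a small pandas-backed
# blaze expression. The toy frame and its 'value' column are invented for
# illustration; 'asof_date', 'timestamp' and 'sid' follow the AD/TS/SID field
# names used throughout this module.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    baseline = pd.DataFrame({
        'sid': np.array([1, 1], dtype='int64'),
        'asof_date': pd.to_datetime(['2015-01-02', '2015-01-05']),
        'timestamp': pd.to_datetime(['2015-01-02', '2015-01-05']),
        'value': [1.0, 2.0],
    })
    expr = bz.data(baseline)
    # The toy frame has no deltas/checkpoints tables, so pass None explicitly
    # instead of relying on the 'auto' lookup.
    ToyDataSet = from_blaze(
        expr,
        deltas=None,
        checkpoints=None,
        loader=global_loader,
    )
    # ToyDataSet.value is now a BoundColumn that can be used in a Pipeline
    # whose engine dispatches to ``global_loader``.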
zipline-trader
/zipline_trader-1.6.1-cp36-cp36m-win32.whl/zipline/pipeline/loaders/blaze/core.py
core.py
from interface import implements from datashape import istabular from .core import ( bind_expression_to_resources, ) from zipline.pipeline.common import ( EVENT_DATE_FIELD_NAME, FISCAL_QUARTER_FIELD_NAME, FISCAL_YEAR_FIELD_NAME, SID_FIELD_NAME, TS_FIELD_NAME, ) from zipline.pipeline.loaders.base import PipelineLoader from zipline.pipeline.loaders.blaze.utils import load_raw_data from zipline.pipeline.loaders.earnings_estimates import ( NextEarningsEstimatesLoader, PreviousEarningsEstimatesLoader, required_estimates_fields, metadata_columns, PreviousSplitAdjustedEarningsEstimatesLoader, NextSplitAdjustedEarningsEstimatesLoader, ) class BlazeEstimatesLoader(implements(PipelineLoader)): """An abstract pipeline loader for the estimates datasets that loads data from a blaze expression. Parameters ---------- expr : Expr The expression representing the data to load. columns : dict[str -> str] A dict mapping BoundColumn names to the associated names in `expr`. resources : dict, optional Mapping from the loadable terms of ``expr`` to actual data resources. odo_kwargs : dict, optional Extra keyword arguments to pass to odo when executing the expression. checkpoints : Expr, optional The expression representing checkpointed data to be used for faster forward-filling of data from `expr`. Notes ----- The expression should have a tabular dshape of:: Dim * {{ {SID_FIELD_NAME}: int64, {TS_FIELD_NAME}: datetime, {FISCAL_YEAR_FIELD_NAME}: float64, {FISCAL_QUARTER_FIELD_NAME}: float64, {EVENT_DATE_FIELD_NAME}: datetime, }} And other dataset-specific fields, where each row of the table is a record including the sid to identify the company, the timestamp where we learned about the announcement, and the date of the event. If the '{TS_FIELD_NAME}' field is not included it is assumed that we start the backtest with knowledge of all announcements. """ __doc__ = __doc__.format( SID_FIELD_NAME=SID_FIELD_NAME, TS_FIELD_NAME=TS_FIELD_NAME, FISCAL_YEAR_FIELD_NAME=FISCAL_YEAR_FIELD_NAME, FISCAL_QUARTER_FIELD_NAME=FISCAL_QUARTER_FIELD_NAME, EVENT_DATE_FIELD_NAME=EVENT_DATE_FIELD_NAME, ) def __init__(self, expr, columns, resources=None, odo_kwargs=None, checkpoints=None): dshape = expr.dshape if not istabular(dshape): raise ValueError( 'expression dshape must be tabular, got: %s' % dshape, ) required_cols = list( required_estimates_fields(columns) ) self._expr = bind_expression_to_resources( expr[required_cols], resources, ) self._columns = columns self._odo_kwargs = odo_kwargs if odo_kwargs is not None else {} self._checkpoints = checkpoints def load_adjusted_array(self, domain, columns, dates, sids, mask): # Only load requested columns. 
        requested_column_names = [self._columns[column.name]
                                  for column in columns]

        raw = load_raw_data(
            sids,
            dates,
            self._expr[sorted(metadata_columns.union(requested_column_names))],
            self._odo_kwargs,
            checkpoints=self._checkpoints,
        )

        return self.loader(
            raw,
            {column.name: self._columns[column.name] for column in columns},
        ).load_adjusted_array(
            domain,
            columns,
            dates,
            sids,
            mask,
        )


class BlazeNextEstimatesLoader(BlazeEstimatesLoader):
    loader = NextEarningsEstimatesLoader


class BlazePreviousEstimatesLoader(BlazeEstimatesLoader):
    loader = PreviousEarningsEstimatesLoader


class BlazeSplitAdjustedEstimatesLoader(BlazeEstimatesLoader):
    def __init__(self,
                 expr,
                 columns,
                 split_adjustments_loader,
                 split_adjusted_column_names,
                 split_adjusted_asof,
                 **kwargs):
        self._split_adjustments = split_adjustments_loader
        self._split_adjusted_column_names = split_adjusted_column_names
        self._split_adjusted_asof = split_adjusted_asof
        super(BlazeSplitAdjustedEstimatesLoader, self).__init__(
            expr,
            columns,
            **kwargs
        )

    def load_adjusted_array(self, domain, columns, dates, sids, mask):
        # Only load requested columns.
        requested_column_names = [self._columns[column.name]
                                  for column in columns]

        requested_split_adjusted_columns = [
            column_name
            for column_name in self._split_adjusted_column_names
            if column_name in requested_column_names
        ]

        raw = load_raw_data(
            sids,
            domain.data_query_cutoff_for_sessions(dates),
            self._expr[sorted(metadata_columns.union(requested_column_names))],
            self._odo_kwargs,
            checkpoints=self._checkpoints,
        )

        return self.loader(
            raw,
            {column.name: self._columns[column.name] for column in columns},
            self._split_adjustments,
            requested_split_adjusted_columns,
            self._split_adjusted_asof,
        ).load_adjusted_array(
            domain,
            columns,
            dates,
            sids,
            mask,
        )


class BlazeNextSplitAdjustedEstimatesLoader(BlazeSplitAdjustedEstimatesLoader):
    loader = NextSplitAdjustedEarningsEstimatesLoader


class BlazePreviousSplitAdjustedEstimatesLoader(
    BlazeSplitAdjustedEstimatesLoader
):
    loader = PreviousSplitAdjustedEarningsEstimatesLoader
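

# ---------------------------------------------------------------------------
# Usage sketch (hypothetical, not part of the upstream module): constructing
# one of the concrete loaders above from a blaze expression over a raw
# estimates frame. The single-row frame and the 'estimate' column/mapping are
# invented for illustration.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    import blaze as bz
    import pandas as pd

    estimates_df = pd.DataFrame({
        SID_FIELD_NAME: [1],
        TS_FIELD_NAME: pd.to_datetime(['2015-01-05']),
        EVENT_DATE_FIELD_NAME: pd.to_datetime(['2015-01-20']),
        FISCAL_YEAR_FIELD_NAME: [2015.0],
        FISCAL_QUARTER_FIELD_NAME: [1.0],
        'estimate': [1.10],
    })
    # `columns` maps dataset column names to the matching names in the
    # expression; the dataset column is assumed to also be called 'estimate'.
    loader = BlazeNextEstimatesLoader(
        bz.data(estimates_df),
        columns={'estimate': 'estimate'},
    )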
zipline-trader
/zipline_trader-1.6.1-cp36-cp36m-win32.whl/zipline/pipeline/loaders/blaze/estimates.py
estimates.py
from interface import implements from datashape import istabular from .core import ( bind_expression_to_resources, ) from zipline.pipeline.common import ( SID_FIELD_NAME, TS_FIELD_NAME, EVENT_DATE_FIELD_NAME, ) from zipline.pipeline.loaders.base import PipelineLoader from zipline.pipeline.loaders.blaze.utils import load_raw_data from zipline.pipeline.loaders.events import ( EventsLoader, required_event_fields, ) class BlazeEventsLoader(implements(PipelineLoader)): """An abstract pipeline loader for the events datasets that loads data from a blaze expression. Parameters ---------- expr : Expr The expression representing the data to load. next_value_columns : dict[BoundColumn -> raw column name] A dict mapping 'next' BoundColumns to their column names in `expr`. previous_value_columns : dict[BoundColumn -> raw column name] A dict mapping 'previous' BoundColumns to their column names in `expr`. resources : dict, optional Mapping from the loadable terms of ``expr`` to actual data resources. odo_kwargs : dict, optional Extra keyword arguments to pass to odo when executing the expression. Notes ----- The expression should have a tabular dshape of:: Dim * {{ {SID_FIELD_NAME}: int64, {TS_FIELD_NAME}: datetime, {EVENT_DATE_FIELD_NAME}: datetime, }} And other dataset-specific fields, where each row of the table is a record including the sid to identify the company, the timestamp where we learned about the announcement, and the event date. If the '{TS_FIELD_NAME}' field is not included it is assumed that we start the backtest with knowledge of all announcements. """ __doc__ = __doc__.format(SID_FIELD_NAME=SID_FIELD_NAME, TS_FIELD_NAME=TS_FIELD_NAME, EVENT_DATE_FIELD_NAME=EVENT_DATE_FIELD_NAME) def __init__(self, expr, next_value_columns, previous_value_columns, resources=None, odo_kwargs=None): dshape = expr.dshape if not istabular(dshape): raise ValueError( 'expression dshape must be tabular, got: %s' % dshape, ) required_cols = list( required_event_fields(next_value_columns, previous_value_columns) ) self._expr = bind_expression_to_resources( expr[required_cols], resources, ) self._next_value_columns = next_value_columns self._previous_value_columns = previous_value_columns self._odo_kwargs = odo_kwargs if odo_kwargs is not None else {} def load_adjusted_array(self, domain, columns, dates, sids, mask): raw = load_raw_data( sids, domain.data_query_cutoff_for_sessions(dates), self._expr, self._odo_kwargs, ) return EventsLoader( events=raw, next_value_columns=self._next_value_columns, previous_value_columns=self._previous_value_columns, ).load_adjusted_array( domain, columns, dates, sids, mask, )
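

# ---------------------------------------------------------------------------
# Usage sketch (hypothetical, not part of the upstream module): the blaze
# counterpart of the EventsLoader example, with an invented ``Buybacks``
# dataset and 'amount' column.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    import blaze as bz
    import pandas as pd
    from zipline.pipeline.data import Column, DataSet
    from zipline.utils.numpy_utils import float64_dtype

    class Buybacks(DataSet):
        next_amount = Column(float64_dtype)
        previous_amount = Column(float64_dtype)

    events_df = pd.DataFrame({
        SID_FIELD_NAME: [1],
        TS_FIELD_NAME: pd.to_datetime(['2015-01-05']),
        EVENT_DATE_FIELD_NAME: pd.to_datetime(['2015-02-02']),
        'amount': [100.0],
    })
    loader = BlazeEventsLoader(
        bz.data(events_df),
        next_value_columns={Buybacks.next_amount: 'amount'},
        previous_value_columns={Buybacks.previous_amount: 'amount'},
    )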
zipline-trader
/zipline_trader-1.6.1-cp36-cp36m-win32.whl/zipline/pipeline/loaders/blaze/events.py
events.py
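The class docstring above fixes the minimal schema the blaze expression must expose: a sid, the timestamp at which the event became known, and the event date. A small sketch of input data in that shape, not from the package: the sids, dates, and the extra ``announcement`` field are made up, and it assumes the blaze package is importable.

# Sketch only. Builds a frame matching the documented dshape and wraps it in
# a blaze expression; all values are illustrative.
import blaze as bz
import pandas as pd
from zipline.pipeline.common import (
    SID_FIELD_NAME,
    TS_FIELD_NAME,
    EVENT_DATE_FIELD_NAME,
)

events_df = pd.DataFrame({
    SID_FIELD_NAME: [1, 1, 2],
    TS_FIELD_NAME: pd.to_datetime(['2014-01-02', '2014-01-10', '2014-01-05']),
    EVENT_DATE_FIELD_NAME: pd.to_datetime(
        ['2014-01-15', '2014-02-15', '2014-01-20']
    ),
    'announcement': [0.51, 0.62, 1.10],  # hypothetical dataset-specific field
})

events_expr = bz.data(events_df)  # tabular dshape, as required above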
import pandas as pd
from sklearn.model_selection import train_test_split
from zipline.data import bundles
import dateutil.parser
import pandas_datareader.data as yahoo_reader
from zipline.pipeline.loaders import USEquityPricingLoader
from zipline.data.data_portal import DataPortal
from zipline.utils.calendars import get_calendar
from zipline.pipeline.data import USEquityPricing
from zipline.pipeline.engine import SimplePipelineEngine

BUNDLE_DATA = None
PRICING_LOADER = None
END_DT = None


def set_bundle_data(bundle_name='alpaca_api'):
    global BUNDLE_DATA, PRICING_LOADER
    BUNDLE_DATA = bundles.load(bundle_name)
    PRICING_LOADER = USEquityPricingLoader.without_fx(
        BUNDLE_DATA.equity_daily_bar_reader,
        BUNDLE_DATA.adjustment_reader,
    )


def choose_loader(column):
    """Loader dispatch function for the ``get_loader`` parameter of
    SimplePipelineEngine. Only USEquityPricing columns are supported.
    """
    if column not in USEquityPricing.columns:
        raise Exception('Column not in USEquityPricing')
    return PRICING_LOADER


def create_data_portal(_bundle_name, _trading_calendar, start_date):
    global BUNDLE_DATA
    if not BUNDLE_DATA:
        set_bundle_data(_bundle_name)

    # Create a data portal
    data_portal = DataPortal(
        BUNDLE_DATA.asset_finder,
        trading_calendar=_trading_calendar,
        first_trading_day=start_date,
        equity_daily_reader=BUNDLE_DATA.equity_daily_bar_reader,
        adjustment_reader=BUNDLE_DATA.adjustment_reader,
    )
    return data_portal


def get_pricing(data_portal, trading_calendar, assets, start_date, end_date,
                field='close'):
    global END_DT
    END_DT = end_date
    start_dt = start_date

    # Get the locations of the start and end dates on the trading calendar
    end_loc = trading_calendar.closes.index.get_loc(END_DT)
    start_loc = trading_calendar.closes.index.get_loc(start_dt)

    # return the historical data for the given window
    return data_portal.get_history_window(
        assets=assets,
        end_dt=END_DT,
        bar_count=end_loc - start_loc,
        frequency='1d',
        field=field,
        data_frequency='daily',
    )


def create_pipeline_engine(bundle_name='alpaca_api'):
    global BUNDLE_DATA
    if not BUNDLE_DATA:
        set_bundle_data(bundle_name)

    # Create a Pipeline engine
    engine = SimplePipelineEngine(get_loader=choose_loader,
                                  asset_finder=BUNDLE_DATA.asset_finder)
    return engine


def get_equity(symbol):
    return BUNDLE_DATA.asset_finder.lookup_symbol(symbol, END_DT)


def get_pipeline_output_for_equity(df, symbol, drop_level=False):
    """Slice a pipeline output frame down to a single equity.

    Pipeline output contains many equities; if you want to view the pipeline
    for just one equity, you can use this method, which slices a MultiIndex
    df (dates and equities).

    :param df: pipeline output frame indexed by (date, equity).
    :param symbol: ticker symbol of the equity to keep.
    :param drop_level: if True, drop the equity (level 1) index and return a
        df with a single-level (date) index.
    :return: the sliced DataFrame.
    """
    equity = get_equity(symbol)
    df = df[df.index.get_level_values(1) == equity]
    if drop_level:
        df.index = df.index.droplevel(1)
    return df


def pipeline_train_test_split(X, y, test_size=0.3, validate_size=0.3,
                              should_validate=False):
    """Split (date, equity) indexed pipeline frames by date using sklearn's
    ``train_test_split``.

    :param X: features frame with a (date, equity) MultiIndex.
    :param y: labels frame with a matching MultiIndex.
    :param test_size: fraction of dates reserved for the test set.
    :param validate_size: fraction of training dates reserved for validation.
    :param should_validate: if True, also return a validation split.
    :return: (X_train, X_test, y_train, y_test), plus validation frames when
        ``should_validate`` is True.
    """
    a, b, c, d = train_test_split(X.index.levels[0],
                                  y.index.levels[0],
                                  test_size=test_size,
                                  random_state=101)
    X_train = X.loc[list(a)]
    X_test = X.loc[list(b)]
    y_train = y.loc[list(c)]
    y_test = y.loc[list(d)]
    if should_validate:
        a, b, c, d = train_test_split(X_train.index.levels[0],
                                      y_train.index.levels[0],
                                      test_size=validate_size,
                                      random_state=101)
        X_train = X.loc[list(a)]
        X_validate = X.loc[list(b)]
        y_train = y.loc[list(c)]
        y_validate = y.loc[list(d)]
        return X_train, X_validate, X_test, y_train, y_validate, y_test
    return X_train, X_test, y_train, y_test


class DATE(str):
    """Date string in the format YYYY-MM-DD."""

    def __new__(cls, value):
        if not value:
            raise ValueError('Unexpected empty string')
        if not isinstance(value, str):
            raise TypeError(f'Unexpected type for DATE: "{type(value)}"')
        if value.count("-") != 2:
            raise ValueError(f'Unexpected date structure. expected '
                             f'"YYYY-MM-DD" got {value}')
        try:
            dateutil.parser.parse(value)
        except Exception as e:
            msg = f"{value} is not a valid date string: {e}"
            raise Exception(msg)
        return str.__new__(cls, value)


def get_benchmark(symbol=None, start: DATE = None, end: DATE = None,
                  other_file_path=None):
    bm = yahoo_reader.DataReader(symbol,
                                 'yahoo',
                                 pd.Timestamp(DATE(start)),
                                 pd.Timestamp(DATE(end)))['Close']
    bm.index = bm.index.tz_localize('UTC')
    return bm.pct_change(periods=1).fillna(0)
zipline-trader
/zipline_trader-1.6.1-cp36-cp36m-win32.whl/zipline/research/utils.py
utils.py
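A short usage sketch for the helpers above, not part of the module itself: it assumes the 'alpaca_api' bundle has been ingested, that 'AAPL' maps to a single asset in that bundle, and the date range and pipeline column are arbitrary examples.

# Sketch only; bundle name, ticker, and dates are assumptions.
import pandas as pd
from zipline.pipeline import Pipeline
from zipline.pipeline.data import USEquityPricing
from zipline.research.utils import (
    set_bundle_data,
    create_pipeline_engine,
    get_pipeline_output_for_equity,
)

set_bundle_data('alpaca_api')
engine = create_pipeline_engine('alpaca_api')

pipe = Pipeline(columns={'close': USEquityPricing.close.latest})
result = engine.run_pipeline(
    pipe,
    pd.Timestamp('2020-01-02', tz='utc'),
    pd.Timestamp('2020-06-30', tz='utc'),
)

# Slice the (date, asset) MultiIndex frame down to a single ticker.
aapl_only = get_pipeline_output_for_equity(result, 'AAPL', drop_level=True)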
from zipline.utils.calendars import get_calendar


class ExchangeInfo(object):
    """An exchange where assets are traded.

    Parameters
    ----------
    name : str or None
        The full name of the exchange, for example 'NEW YORK STOCK EXCHANGE'
        or 'NASDAQ GLOBAL MARKET'.
    canonical_name : str
        The canonical name of the exchange, for example 'NYSE' or 'NASDAQ'.
        If None this will be the same as the name.
    country_code : str
        The country code where the exchange is located.

    Attributes
    ----------
    name : str or None
        The full name of the exchange, for example 'NEW YORK STOCK EXCHANGE'
        or 'NASDAQ GLOBAL MARKET'.
    canonical_name : str
        The canonical name of the exchange, for example 'NYSE' or 'NASDAQ'.
        If None this will be the same as the name.
    country_code : str
        The country code where the exchange is located.
    calendar : TradingCalendar
        The trading calendar the exchange uses.
    """

    def __init__(self, name, canonical_name, country_code):
        self.name = name

        if canonical_name is None:
            canonical_name = name

        self.canonical_name = canonical_name
        self.country_code = country_code.upper()

    def __repr__(self):
        return '%s(%r, %r, %r)' % (
            type(self).__name__,
            self.name,
            self.canonical_name,
            self.country_code,
        )

    @property
    def calendar(self):
        """The trading calendar that this exchange uses.
        """
        return get_calendar(self.canonical_name)

    def __eq__(self, other):
        if not isinstance(other, ExchangeInfo):
            return NotImplemented

        return all(
            getattr(self, attr) == getattr(other, attr)
            for attr in ('name', 'canonical_name', 'country_code')
        )

    def __ne__(self, other):
        eq = self == other
        if eq is NotImplemented:
            return NotImplemented
        return not eq
zipline-trader
/zipline_trader-1.6.1-cp36-cp36m-win32.whl/zipline/assets/exchange_info.py
exchange_info.py
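A quick illustration of the canonical-name fallback and value equality, not part of the module; the exchange names are just examples, and the ``calendar`` property is only noted in a comment because it needs registered trading calendars.

# Sketch only; exchange names are illustrative.
from zipline.assets.exchange_info import ExchangeInfo

nyse = ExchangeInfo('NEW YORK STOCK EXCHANGE', 'NYSE', 'us')
bare = ExchangeInfo('NYSE', None, 'US')

assert nyse.canonical_name == 'NYSE'
assert bare.canonical_name == 'NYSE'   # canonical_name falls back to name
assert nyse.country_code == 'US'       # country code is upper-cased
assert nyse != bare                    # equality compares all three fields

# nyse.calendar would call get_calendar('NYSE') and requires trading
# calendar data to be available in the environment.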
from abc import ABCMeta import array import binascii from collections import deque, namedtuple from functools import partial from numbers import Integral from operator import itemgetter, attrgetter import struct from logbook import Logger import numpy as np import pandas as pd from pandas import isnull from six import with_metaclass, string_types, viewkeys, iteritems import sqlalchemy as sa from sqlalchemy.sql import text from toolz import ( compose, concat, concatv, curry, groupby, merge, partition_all, sliding_window, valmap, ) from zipline.errors import ( EquitiesNotFound, FutureContractsNotFound, MultipleSymbolsFound, MultipleSymbolsFoundForFuzzySymbol, MultipleValuesFoundForField, MultipleValuesFoundForSid, NoValueForSid, ValueNotFoundForField, SameSymbolUsedAcrossCountries, SidsNotFound, SymbolNotFound, ) from . import ( Asset, Equity, Future, ) from . continuous_futures import ( ADJUSTMENT_STYLES, CHAIN_PREDICATES, ContinuousFuture, OrderedContracts, ) from .asset_writer import ( check_version_info, split_delimited_symbol, asset_db_table_names, symbol_columns, SQLITE_MAX_VARIABLE_NUMBER, ) from .asset_db_schema import ( ASSET_DB_VERSION ) from .exchange_info import ExchangeInfo from zipline.utils.functional import invert from zipline.utils.memoize import lazyval from zipline.utils.numpy_utils import as_column from zipline.utils.preprocess import preprocess from zipline.utils.db_utils import group_into_chunks, coerce_string_to_eng log = Logger('assets.py') # A set of fields that need to be converted to strings before building an # Asset to avoid unicode fields _asset_str_fields = frozenset({ 'symbol', 'asset_name', 'exchange', }) # A set of fields that need to be converted to timestamps in UTC _asset_timestamp_fields = frozenset({ 'start_date', 'end_date', 'first_traded', 'notice_date', 'expiration_date', 'auto_close_date', }) OwnershipPeriod = namedtuple('OwnershipPeriod', 'start end sid value') def merge_ownership_periods(mappings): """ Given a dict of mappings where the values are lists of OwnershipPeriod objects, returns a dict with the same structure with new OwnershipPeriod objects adjusted so that the periods have no gaps. Orders the periods chronologically, and pushes forward the end date of each period to match the start date of the following period. The end date of the last period pushed forward to the max Timestamp. """ return valmap( lambda v: tuple( OwnershipPeriod( a.start, b.start, a.sid, a.value, ) for a, b in sliding_window( 2, concatv( sorted(v), # concat with a fake ownership object to make the last # end date be max timestamp [OwnershipPeriod( pd.Timestamp.max.tz_localize('utc'), None, None, None, )], ), ) ), mappings, ) def _build_ownership_map_from_rows(rows, key_from_row, value_from_row): mappings = {} for row in rows: mappings.setdefault( key_from_row(row), [], ).append( OwnershipPeriod( pd.Timestamp(row.start_date, unit='ns', tz='utc'), pd.Timestamp(row.end_date, unit='ns', tz='utc'), row.sid, value_from_row(row), ), ) return merge_ownership_periods(mappings) def build_ownership_map(table, key_from_row, value_from_row): """ Builds a dict mapping to lists of OwnershipPeriods, from a db table. """ return _build_ownership_map_from_rows( sa.select(table.c).execute().fetchall(), key_from_row, value_from_row, ) def build_grouped_ownership_map(table, key_from_row, value_from_row, group_key): """ Builds a dict mapping group keys to maps of keys to lists of OwnershipPeriods, from a db table. 
""" grouped_rows = groupby( group_key, sa.select(table.c).execute().fetchall(), ) return { key: _build_ownership_map_from_rows( rows, key_from_row, value_from_row, ) for key, rows in grouped_rows.items() } @curry def _filter_kwargs(names, dict_): """Filter out kwargs from a dictionary. Parameters ---------- names : set[str] The names to select from ``dict_``. dict_ : dict[str, any] The dictionary to select from. Returns ------- kwargs : dict[str, any] ``dict_`` where the keys intersect with ``names`` and the values are not None. """ return {k: v for k, v in dict_.items() if k in names and v is not None} _filter_future_kwargs = _filter_kwargs(Future._kwargnames) _filter_equity_kwargs = _filter_kwargs(Equity._kwargnames) def _convert_asset_timestamp_fields(dict_): """ Takes in a dict of Asset init args and converts dates to pd.Timestamps """ for key in _asset_timestamp_fields & viewkeys(dict_): value = pd.Timestamp(dict_[key], tz='UTC') dict_[key] = None if isnull(value) else value return dict_ SID_TYPE_IDS = { # Asset would be 0, ContinuousFuture: 1, } CONTINUOUS_FUTURE_ROLL_STYLE_IDS = { 'calendar': 0, 'volume': 1, } CONTINUOUS_FUTURE_ADJUSTMENT_STYLE_IDS = { None: 0, 'div': 1, 'add': 2, } def _encode_continuous_future_sid(root_symbol, offset, roll_style, adjustment_style): s = struct.Struct("B 2B B B B 2B") # B - sid type # 2B - root symbol # B - offset (could be packed smaller since offsets of greater than 12 are # probably unneeded.) # B - roll type # B - adjustment # 2B - empty space left for parameterized roll types # The root symbol currently supports 2 characters. If 3 char root symbols # are needed, the size of the root symbol does not need to change, however # writing the string directly will need to change to a scheme of writing # the A-Z values in 5-bit chunks. a = array.array('B', [0] * s.size) rs = bytearray(root_symbol, 'ascii') values = (SID_TYPE_IDS[ContinuousFuture], rs[0], rs[1], offset, CONTINUOUS_FUTURE_ROLL_STYLE_IDS[roll_style], CONTINUOUS_FUTURE_ADJUSTMENT_STYLE_IDS[adjustment_style], 0, 0) s.pack_into(a, 0, *values) return int(binascii.hexlify(a), 16) Lifetimes = namedtuple('Lifetimes', 'sid start end') class AssetFinder(object): """ An AssetFinder is an interface to a database of Asset metadata written by an ``AssetDBWriter``. This class provides methods for looking up assets by unique integer id or by symbol. For historical reasons, we refer to these unique ids as 'sids'. Parameters ---------- engine : str or SQLAlchemy.engine An engine with a connection to the asset database to use, or a string that can be parsed by SQLAlchemy as a URI. future_chain_predicates : dict A dict mapping future root symbol to a predicate function which accepts a contract as a parameter and returns whether or not the contract should be included in the chain. See Also -------- :class:`zipline.assets.AssetDBWriter` """ @preprocess(engine=coerce_string_to_eng(require_exists=True)) def __init__(self, engine, future_chain_predicates=CHAIN_PREDICATES): self.engine = engine metadata = sa.MetaData(bind=engine) metadata.reflect(only=asset_db_table_names) for table_name in asset_db_table_names: setattr(self, table_name, metadata.tables[table_name]) # Check the version info of the db for compatibility check_version_info(engine, self.version_info, ASSET_DB_VERSION) # Cache for lookup of assets by sid, the objects in the asset lookup # may be shared with the results from equity and future lookup caches. # # The top level cache exists to minimize lookups on the asset type # routing. 
# # The caches are read through, i.e. accessing an asset through # retrieve_asset will populate the cache on first retrieval. self._asset_cache = {} self._asset_type_cache = {} self._caches = (self._asset_cache, self._asset_type_cache) self._future_chain_predicates = future_chain_predicates \ if future_chain_predicates is not None else {} self._ordered_contracts = {} # Populated on first call to `lifetimes`. self._asset_lifetimes = {} @lazyval def exchange_info(self): es = sa.select(self.exchanges.c).execute().fetchall() return { name: ExchangeInfo(name, canonical_name, country_code) for name, canonical_name, country_code in es } @lazyval def symbol_ownership_map(self): out = {} for mappings in self.symbol_ownership_maps_by_country_code.values(): for key, ownership_periods in mappings.items(): out.setdefault(key, []).extend(ownership_periods) return out @lazyval def symbol_ownership_maps_by_country_code(self): sid_to_country_code = dict( sa.select(( self.equities.c.sid, self.exchanges.c.country_code, )).where( self.equities.c.exchange == self.exchanges.c.exchange ).execute().fetchall(), ) return build_grouped_ownership_map( table=self.equity_symbol_mappings, key_from_row=( lambda row: (row.company_symbol, row.share_class_symbol) ), value_from_row=lambda row: row.symbol, group_key=lambda row: sid_to_country_code[row.sid], ) @lazyval def country_codes(self): return tuple(self.symbol_ownership_maps_by_country_code) @staticmethod def _fuzzify_symbol_ownership_map(ownership_map): fuzzy_mappings = {} for (cs, scs), owners in iteritems(ownership_map): fuzzy_owners = fuzzy_mappings.setdefault( cs + scs, [], ) fuzzy_owners.extend(owners) fuzzy_owners.sort() return fuzzy_mappings @lazyval def fuzzy_symbol_ownership_map(self): return self._fuzzify_symbol_ownership_map(self.symbol_ownership_map) @lazyval def fuzzy_symbol_ownership_maps_by_country_code(self): return valmap( self._fuzzify_symbol_ownership_map, self.symbol_ownership_maps_by_country_code, ) @lazyval def equity_supplementary_map(self): return build_ownership_map( table=self.equity_supplementary_mappings, key_from_row=lambda row: (row.field, row.value), value_from_row=lambda row: row.value, ) @lazyval def equity_supplementary_map_by_sid(self): return build_ownership_map( table=self.equity_supplementary_mappings, key_from_row=lambda row: (row.field, row.sid), value_from_row=lambda row: row.value, ) def lookup_asset_types(self, sids): """ Retrieve asset types for a list of sids. Parameters ---------- sids : list[int] Returns ------- types : dict[sid -> str or None] Asset types for the provided sids. """ found = {} missing = set() for sid in sids: try: found[sid] = self._asset_type_cache[sid] except KeyError: missing.add(sid) if not missing: return found router_cols = self.asset_router.c for assets in group_into_chunks(missing): query = sa.select((router_cols.sid, router_cols.asset_type)).where( self.asset_router.c.sid.in_(map(int, assets)) ) for sid, type_ in query.execute().fetchall(): missing.remove(sid) found[sid] = self._asset_type_cache[sid] = type_ for sid in missing: found[sid] = self._asset_type_cache[sid] = None return found def group_by_type(self, sids): """ Group a list of sids by asset type. Parameters ---------- sids : list[int] Returns ------- types : dict[str or None -> list[int]] A dict mapping unique asset types to lists of sids drawn from sids. If we fail to look up an asset, we assign it a key of None. 
""" return invert(self.lookup_asset_types(sids)) def retrieve_asset(self, sid, default_none=False): """ Retrieve the Asset for a given sid. """ try: asset = self._asset_cache[sid] if asset is None and not default_none: raise SidsNotFound(sids=[sid]) return asset except KeyError: return self.retrieve_all((sid,), default_none=default_none)[0] def retrieve_all(self, sids, default_none=False): """ Retrieve all assets in `sids`. Parameters ---------- sids : iterable of int Assets to retrieve. default_none : bool If True, return None for failed lookups. If False, raise `SidsNotFound`. Returns ------- assets : list[Asset or None] A list of the same length as `sids` containing Assets (or Nones) corresponding to the requested sids. Raises ------ SidsNotFound When a requested sid is not found and default_none=False. """ sids = list(sids) hits, missing, failures = {}, set(), [] for sid in sids: try: asset = self._asset_cache[sid] if not default_none and asset is None: # Bail early if we've already cached that we don't know # about an asset. raise SidsNotFound(sids=[sid]) hits[sid] = asset except KeyError: missing.add(sid) # All requests were cache hits. Return requested sids in order. if not missing: return [hits[sid] for sid in sids] update_hits = hits.update # Look up cache misses by type. type_to_assets = self.group_by_type(missing) # Handle failures failures = {failure: None for failure in type_to_assets.pop(None, ())} update_hits(failures) self._asset_cache.update(failures) if failures and not default_none: raise SidsNotFound(sids=list(failures)) # We don't update the asset cache here because it should already be # updated by `self.retrieve_equities`. update_hits(self.retrieve_equities(type_to_assets.pop('equity', ()))) update_hits( self.retrieve_futures_contracts(type_to_assets.pop('future', ())) ) # We shouldn't know about any other asset types. if type_to_assets: raise AssertionError( "Found asset types: %s" % list(type_to_assets.keys()) ) return [hits[sid] for sid in sids] def retrieve_equities(self, sids): """ Retrieve Equity objects for a list of sids. Users generally shouldn't need to this method (instead, they should prefer the more general/friendly `retrieve_assets`), but it has a documented interface and tests because it's used upstream. Parameters ---------- sids : iterable[int] Returns ------- equities : dict[int -> Equity] Raises ------ EquitiesNotFound When any requested asset isn't found. """ return self._retrieve_assets(sids, self.equities, Equity) def _retrieve_equity(self, sid): return self.retrieve_equities((sid,))[sid] def retrieve_futures_contracts(self, sids): """ Retrieve Future objects for an iterable of sids. Users generally shouldn't need to this method (instead, they should prefer the more general/friendly `retrieve_assets`), but it has a documented interface and tests because it's used upstream. Parameters ---------- sids : iterable[int] Returns ------- equities : dict[int -> Equity] Raises ------ EquitiesNotFound When any requested asset isn't found. """ return self._retrieve_assets(sids, self.futures_contracts, Future) @staticmethod def _select_assets_by_sid(asset_tbl, sids): return sa.select([asset_tbl]).where( asset_tbl.c.sid.in_(map(int, sids)) ) @staticmethod def _select_asset_by_symbol(asset_tbl, symbol): return sa.select([asset_tbl]).where(asset_tbl.c.symbol == symbol) def _select_most_recent_symbols_chunk(self, sid_group): """Retrieve the most recent symbol for a set of sids. Parameters ---------- sid_group : iterable[int] The sids to lookup. 
The length of this sequence must be less than or equal to SQLITE_MAX_VARIABLE_NUMBER because the sids will be passed in as sql bind params. Returns ------- sel : Selectable The sqlalchemy selectable that will query for the most recent symbol for each sid. Notes ----- We search for the values with the biggest end-date to get most recent info about symbol. First, we select the max end-date and sid then we join again to get information for this specific enddate and sid """ cols = self.equity_symbol_mappings.c # These are the columns we actually want. data_cols = [str(cols.sid)] + [str(cols[name]) for name in symbol_columns] # To be compatible with postgres we can't simple do a max and get all wanted fields # from the same row that the maximum came from. Instead we solve this by a subquery. # Sadly sqlalchemy in version < 1.4 does not support subquerys yet natively, we # construct it with string-interpolation for now max_cols = ','.join([str(cols.sid) + ' AS sid', 'MAX(' + str(cols.end_date) + ') AS max_date']) to_select = ','.join(data_cols) + ',max_dates.max_date' sids = ','.join([str(sid) for sid in sid_group]) max_date_select = f'(SELECT {max_cols} FROM {self.equity_symbol_mappings} GROUP BY {cols.sid}) AS max_dates' query = text(f'SELECT {to_select} FROM {max_date_select} ' f'JOIN {self.equity_symbol_mappings} ON {cols.sid} = max_dates.sid ' f' WHERE {cols.sid} IN ({sids}) AND {cols.end_date} = max_dates.max_date') return query def _lookup_most_recent_symbols(self, sids): return { row.sid: {c: row[c] for c in symbol_columns} for row in concat( self.engine.execute( self._select_most_recent_symbols_chunk(sid_group), ).fetchall() for sid_group in partition_all( SQLITE_MAX_VARIABLE_NUMBER, sids ) ) } def _retrieve_asset_dicts(self, sids, asset_tbl, querying_equities): if not sids: return if querying_equities: def mkdict(row, exchanges=self.exchange_info, symbols=self._lookup_most_recent_symbols(sids)): d = dict(row) d['exchange_info'] = exchanges[d.pop('exchange')] # we are not required to have a symbol for every asset, if # we don't have any symbols we will just use the empty string return merge(d, symbols.get(row['sid'], {})) else: def mkdict(row, exchanges=self.exchange_info): d = dict(row) d['exchange_info'] = exchanges[d.pop('exchange')] return d for assets in group_into_chunks(sids): # Load misses from the db. query = self._select_assets_by_sid(asset_tbl, assets) for row in query.execute().fetchall(): yield _convert_asset_timestamp_fields(mkdict(row)) def _retrieve_assets(self, sids, asset_tbl, asset_type): """ Internal function for loading assets from a table. This should be the only method of `AssetFinder` that writes Assets into self._asset_cache. Parameters --------- sids : iterable of int Asset ids to look up. asset_tbl : sqlalchemy.Table Table from which to query assets. asset_type : type Type of asset to be constructed. Returns ------- assets : dict[int -> Asset] Dict mapping requested sids to the retrieved assets. """ # Fastpath for empty request. 
if not sids: return {} cache = self._asset_cache hits = {} querying_equities = issubclass(asset_type, Equity) filter_kwargs = ( _filter_equity_kwargs if querying_equities else _filter_future_kwargs ) rows = self._retrieve_asset_dicts(sids, asset_tbl, querying_equities) for row in rows: sid = row['sid'] asset = asset_type(**filter_kwargs(row)) hits[sid] = cache[sid] = asset # If we get here, it means something in our code thought that a # particular sid was an equity/future and called this function with a # concrete type, but we couldn't actually resolve the asset. This is # an error in our code, not a user-input error. misses = tuple(set(sids) - viewkeys(hits)) if misses: if querying_equities: raise EquitiesNotFound(sids=misses) else: raise FutureContractsNotFound(sids=misses) return hits def _lookup_symbol_strict(self, ownership_map, multi_country, symbol, as_of_date): """ Resolve a symbol to an asset object without fuzzy matching. Parameters ---------- ownership_map : dict[(str, str), list[OwnershipPeriod]] The mapping from split symbols to ownership periods. multi_country : bool Does this mapping span multiple countries? symbol : str The symbol to look up. as_of_date : datetime or None If multiple assets have held this sid, which day should the resolution be checked against? If this value is None and multiple sids have held the ticker, then a MultipleSymbolsFound error will be raised. Returns ------- asset : Asset The asset that held the given symbol. Raises ------ SymbolNotFound Raised when the symbol or symbol as_of_date pair do not map to any assets. MultipleSymbolsFound Raised when multiple assets held the symbol. This happens if multiple assets held the symbol at disjoint times and ``as_of_date`` is None, or if multiple assets held the symbol at the same time and``multi_country`` is True. Notes ----- The resolution algorithm is as follows: - Split the symbol into the company and share class component. - Do a dictionary lookup of the ``(company_symbol, share_class_symbol)`` in the provided ownership map. - If there is no entry in the dictionary, we don't know about this symbol so raise a ``SymbolNotFound`` error. - If ``as_of_date`` is None: - If more there is more than one owner, raise ``MultipleSymbolsFound`` - Otherwise, because the list mapped to a symbol cannot be empty, return the single asset. - Iterate through all of the owners: - If the ``as_of_date`` is between the start and end of the ownership period: - If multi_country is False, return the found asset. - Otherwise, put the asset in a list. - At the end of the loop, if there are no candidate assets, raise a ``SymbolNotFound``. - If there is exactly one candidate, return it. - Othewise, raise ``MultipleSymbolsFound`` because the ticker is not unique across countries. 
""" # split the symbol into the components, if there are no # company/share class parts then share_class_symbol will be empty company_symbol, share_class_symbol = split_delimited_symbol(symbol) try: owners = ownership_map[company_symbol, share_class_symbol] assert owners, 'empty owners list for %r' % symbol except KeyError: # no equity has ever held this symbol raise SymbolNotFound(symbol=symbol) if not as_of_date: # exactly one equity has ever held this symbol, we may resolve # without the date if len(owners) == 1: return self.retrieve_asset(owners[0].sid) options = {self.retrieve_asset(owner.sid) for owner in owners} if multi_country: country_codes = map(attrgetter('country_code'), options) if len(set(country_codes)) > 1: raise SameSymbolUsedAcrossCountries( symbol=symbol, options=dict(zip(country_codes, options)) ) # more than one equity has held this ticker, this # is ambiguous without the date raise MultipleSymbolsFound(symbol=symbol, options=options) options = [] country_codes = [] for start, end, sid, _ in owners: if start <= as_of_date < end: # find the equity that owned it on the given asof date asset = self.retrieve_asset(sid) # if this asset owned the symbol on this asof date and we are # only searching one country, return that asset if not multi_country: return asset else: options.append(asset) country_codes.append(asset.country_code) if not options: # no equity held the ticker on the given asof date raise SymbolNotFound(symbol=symbol) # if there is one valid option given the asof date, return that option if len(options) == 1: return options[0] # if there's more than one option given the asof date, a country code # must be passed to resolve the symbol to an asset raise SameSymbolUsedAcrossCountries( symbol=symbol, options=dict(zip(country_codes, options)) ) def _lookup_symbol_fuzzy(self, ownership_map, multi_country, symbol, as_of_date): symbol = symbol.upper() company_symbol, share_class_symbol = split_delimited_symbol(symbol) try: owners = ownership_map[company_symbol + share_class_symbol] assert owners, 'empty owners list for %r' % symbol except KeyError: # no equity has ever held a symbol matching the fuzzy symbol raise SymbolNotFound(symbol=symbol) if not as_of_date: if len(owners) == 1: # only one valid match return self.retrieve_asset(owners[0].sid) options = [] for _, _, sid, sym in owners: if sym == symbol: # there are multiple options, look for exact matches options.append(self.retrieve_asset(sid)) if len(options) == 1: # there was only one exact match return options[0] # there is more than one exact match for this fuzzy symbol raise MultipleSymbolsFoundForFuzzySymbol( symbol=symbol, options=self.retrieve_all(owner.sid for owner in owners), ) options = {} for start, end, sid, sym in owners: if start <= as_of_date < end: # see which fuzzy symbols were owned on the asof date. options[sid] = sym if not options: # no equity owned the fuzzy symbol on the date requested raise SymbolNotFound(symbol=symbol) sid_keys = list(options.keys()) # If there was only one owner, or there is a fuzzy and non-fuzzy which # map to the same sid, return it. if len(options) == 1: return self.retrieve_asset(sid_keys[0]) exact_options = [] for sid, sym in options.items(): # Possible to have a scenario where multiple fuzzy matches have the # same date. Want to find the one where symbol and share class # match. 
if ((company_symbol, share_class_symbol) == split_delimited_symbol(sym)): asset = self.retrieve_asset(sid) if not multi_country: return asset else: exact_options.append(asset) if len(exact_options) == 1: return exact_options[0] # multiple equities held tickers matching the fuzzy ticker but # there are no exact matches raise MultipleSymbolsFoundForFuzzySymbol( symbol=symbol, options=self.retrieve_all(owner.sid for owner in owners), ) def _choose_fuzzy_symbol_ownership_map(self, country_code): if country_code is None: return self.fuzzy_symbol_ownership_map return self.fuzzy_symbol_ownership_maps_by_country_code.get( country_code, ) def _choose_symbol_ownership_map(self, country_code): if country_code is None: return self.symbol_ownership_map return self.symbol_ownership_maps_by_country_code.get(country_code) def lookup_symbol(self, symbol, as_of_date, fuzzy=False, country_code=None): """Lookup an equity by symbol. Parameters ---------- symbol : str The ticker symbol to resolve. as_of_date : datetime.datetime or None Look up the last owner of this symbol as of this datetime. If ``as_of_date`` is None, then this can only resolve the equity if exactly one equity has ever owned the ticker. fuzzy : bool, optional Should fuzzy symbol matching be used? Fuzzy symbol matching attempts to resolve differences in representations for shareclasses. For example, some people may represent the ``A`` shareclass of ``BRK`` as ``BRK.A``, where others could write ``BRK_A``. country_code : str or None, optional The country to limit searches to. If not provided, the search will span all countries which increases the likelihood of an ambiguous lookup. Returns ------- equity : Equity The equity that held ``symbol`` on the given ``as_of_date``, or the only equity to hold ``symbol`` if ``as_of_date`` is None. Raises ------ SymbolNotFound Raised when no equity has ever held the given symbol. MultipleSymbolsFound Raised when no ``as_of_date`` is given and more than one equity has held ``symbol``. This is also raised when ``fuzzy=True`` and there are multiple candidates for the given ``symbol`` on the ``as_of_date``. Also raised when no ``country_code`` is given and the symbol is ambiguous across multiple countries. """ if symbol is None: raise TypeError("Cannot lookup asset for symbol of None for " "as of date %s." % as_of_date) if fuzzy: f = self._lookup_symbol_fuzzy mapping = self._choose_fuzzy_symbol_ownership_map(country_code) else: f = self._lookup_symbol_strict mapping = self._choose_symbol_ownership_map(country_code) if mapping is None: raise SymbolNotFound(symbol=symbol) return f( mapping, country_code is None, symbol, as_of_date, ) def get_max_sid(self): table = self.equity_symbol_mappings max_id = pd.read_sql(f'SELECT MAX(sid) max_id FROM {table}', self.engine) if len(max_id) == 0 or max_id['max_id'][0] == None: return -1 return max_id['max_id'][0] def lookup_symbols(self, symbols, as_of_date, fuzzy=False, country_code=None): """ Lookup a list of equities by symbol. Equivalent to:: [finder.lookup_symbol(s, as_of, fuzzy) for s in symbols] but potentially faster because repeated lookups are memoized. Parameters ---------- symbols : sequence[str] Sequence of ticker symbols to resolve. as_of_date : pd.Timestamp Forwarded to ``lookup_symbol``. fuzzy : bool, optional Forwarded to ``lookup_symbol``. country_code : str or None, optional The country to limit searches to. If not provided, the search will span all countries which increases the likelihood of an ambiguous lookup. 
Returns ------- equities : list[Equity] """ if not symbols: return [] multi_country = country_code is None if fuzzy: f = self._lookup_symbol_fuzzy mapping = self._choose_fuzzy_symbol_ownership_map(country_code) else: f = self._lookup_symbol_strict mapping = self._choose_symbol_ownership_map(country_code) if mapping is None: raise SymbolNotFound(symbol=symbols[0]) memo = {} out = [] append_output = out.append for sym in symbols: if sym in memo: append_output(memo[sym]) else: equity = memo[sym] = f( mapping, multi_country, sym, as_of_date, ) append_output(equity) return out def lookup_future_symbol(self, symbol): """Lookup a future contract by symbol. Parameters ---------- symbol : str The symbol of the desired contract. Returns ------- future : Future The future contract referenced by ``symbol``. Raises ------ SymbolNotFound Raised when no contract named 'symbol' is found. """ data = self._select_asset_by_symbol(self.futures_contracts, symbol)\ .execute().fetchone() # If no data found, raise an exception if not data: raise SymbolNotFound(symbol=symbol) return self.retrieve_asset(data['sid']) def lookup_by_supplementary_field(self, field_name, value, as_of_date): try: owners = self.equity_supplementary_map[ field_name, value, ] assert owners, 'empty owners list for %r' % (field_name, value) except KeyError: # no equity has ever held this value raise ValueNotFoundForField(field=field_name, value=value) if not as_of_date: if len(owners) > 1: # more than one equity has held this value, this is ambigious # without the date raise MultipleValuesFoundForField( field=field_name, value=value, options=set(map( compose(self.retrieve_asset, attrgetter('sid')), owners, )), ) # exactly one equity has ever held this value, we may resolve # without the date return self.retrieve_asset(owners[0].sid) for start, end, sid, _ in owners: if start <= as_of_date < end: # find the equity that owned it on the given asof date return self.retrieve_asset(sid) # no equity held the value on the given asof date raise ValueNotFoundForField(field=field_name, value=value) def get_supplementary_field(self, sid, field_name, as_of_date): """Get the value of a supplementary field for an asset. Parameters ---------- sid : int The sid of the asset to query. field_name : str Name of the supplementary field. as_of_date : pd.Timestamp, None The last known value on this date is returned. If None, a value is returned only if we've only ever had one value for this sid. If None and we've had multiple values, MultipleValuesFoundForSid is raised. Raises ------ NoValueForSid If we have no values for this asset, or no values was known on this as_of_date. MultipleValuesFoundForSid If we have had multiple values for this asset over time, and None was passed for as_of_date. """ try: periods = self.equity_supplementary_map_by_sid[ field_name, sid, ] assert periods, 'empty periods list for %r' % (field_name, sid) except KeyError: raise NoValueForSid(field=field_name, sid=sid) if not as_of_date: if len(periods) > 1: # This equity has held more than one value, this is ambigious # without the date raise MultipleValuesFoundForSid( field=field_name, sid=sid, options={p.value for p in periods}, ) # this equity has only ever held this value, we may resolve # without the date return periods[0].value for start, end, _, value in periods: if start <= as_of_date < end: return value # Could not find a value for this sid on the as_of_date. 
raise NoValueForSid(field=field_name, sid=sid) def _get_contract_sids(self, root_symbol): fc_cols = self.futures_contracts.c return [r.sid for r in list(sa.select((fc_cols.sid,)).where( (fc_cols.root_symbol == root_symbol) & (fc_cols.start_date != pd.NaT.value)).order_by( fc_cols.sid).execute().fetchall())] def _get_root_symbol_exchange(self, root_symbol): fc_cols = self.futures_root_symbols.c fields = (fc_cols.exchange,) exchange = sa.select(fields).where( fc_cols.root_symbol == root_symbol).execute().scalar() if exchange is not None: return exchange else: raise SymbolNotFound(symbol=root_symbol) def get_ordered_contracts(self, root_symbol): try: return self._ordered_contracts[root_symbol] except KeyError: contract_sids = self._get_contract_sids(root_symbol) contracts = deque(self.retrieve_all(contract_sids)) chain_predicate = self._future_chain_predicates.get(root_symbol, None) oc = OrderedContracts(root_symbol, contracts, chain_predicate) self._ordered_contracts[root_symbol] = oc return oc def create_continuous_future(self, root_symbol, offset, roll_style, adjustment): if adjustment not in ADJUSTMENT_STYLES: raise ValueError( 'Invalid adjustment style {!r}. Allowed adjustment styles are ' '{}.'.format(adjustment, list(ADJUSTMENT_STYLES)) ) oc = self.get_ordered_contracts(root_symbol) exchange = self._get_root_symbol_exchange(root_symbol) sid = _encode_continuous_future_sid(root_symbol, offset, roll_style, None) mul_sid = _encode_continuous_future_sid(root_symbol, offset, roll_style, 'div') add_sid = _encode_continuous_future_sid(root_symbol, offset, roll_style, 'add') cf_template = partial( ContinuousFuture, root_symbol=root_symbol, offset=offset, roll_style=roll_style, start_date=oc.start_date, end_date=oc.end_date, exchange_info=self.exchange_info[exchange], ) cf = cf_template(sid=sid) mul_cf = cf_template(sid=mul_sid, adjustment='mul') add_cf = cf_template(sid=add_sid, adjustment='add') self._asset_cache[cf.sid] = cf self._asset_cache[mul_cf.sid] = mul_cf self._asset_cache[add_cf.sid] = add_cf return {None: cf, 'mul': mul_cf, 'add': add_cf}[adjustment] def _make_sids(tblattr): def _(self): return tuple(map( itemgetter('sid'), sa.select(( getattr(self, tblattr).c.sid, )).execute().fetchall(), )) return _ sids = property( _make_sids('asset_router'), doc='All the sids in the asset finder.', ) equities_sids = property( _make_sids('equities'), doc='All of the sids for equities in the asset finder.', ) futures_sids = property( _make_sids('futures_contracts'), doc='All of the sids for futures consracts in the asset finder.', ) del _make_sids def _lookup_generic_scalar(self, obj, as_of_date, country_code, matches, missing): """ Convert asset_convertible to an asset. On success, append to matches. On failure, append to missing. """ result = self._lookup_generic_scalar_helper( obj, as_of_date, country_code, ) if result is not None: matches.append(result) else: missing.append(obj) def _lookup_generic_scalar_helper(self, obj, as_of_date, country_code): if isinstance(obj, (Asset, ContinuousFuture)): return obj if isinstance(obj, Integral): try: return self.retrieve_asset(int(obj)) except SidsNotFound: return None if isinstance(obj, string_types): # Try to look up as an equity first. try: return self.lookup_symbol( symbol=obj, as_of_date=as_of_date, country_code=country_code ) except SymbolNotFound: # Fall back to lookup as a Future try: # TODO: Support country_code for future_symbols? 
return self.lookup_future_symbol(obj) except SymbolNotFound: return None raise NotAssetConvertible("Input was %s, not AssetConvertible." % obj) def lookup_generic(self, obj, as_of_date, country_code): """ Convert an object into an Asset or sequence of Assets. This method exists primarily as a convenience for implementing user-facing APIs that can handle multiple kinds of input. It should not be used for internal code where we already know the expected types of our inputs. Parameters ---------- obj : int, str, Asset, ContinuousFuture, or iterable The object to be converted into one or more Assets. Integers are interpreted as sids. Strings are interpreted as tickers. Assets and ContinuousFutures are returned unchanged. as_of_date : pd.Timestamp or None Timestamp to use to disambiguate ticker lookups. Has the same semantics as in `lookup_symbol`. country_code : str or None ISO-3166 country code to use to disambiguate ticker lookups. Has the same semantics as in `lookup_symbol`. Returns ------- matches, missing : tuple ``matches`` is the result of the conversion. ``missing`` is a list containing any values that couldn't be resolved. If ``obj`` is not an iterable, ``missing`` will be an empty list. """ matches = [] missing = [] # Interpret input as scalar. if isinstance(obj, (AssetConvertible, ContinuousFuture)): self._lookup_generic_scalar( obj=obj, as_of_date=as_of_date, country_code=country_code, matches=matches, missing=missing, ) try: return matches[0], missing except IndexError: if hasattr(obj, '__int__'): raise SidsNotFound(sids=[obj]) else: raise SymbolNotFound(symbol=obj) # Interpret input as iterable. try: iterator = iter(obj) except TypeError: raise NotAssetConvertible( "Input was not a AssetConvertible " "or iterable of AssetConvertible." ) for obj in iterator: self._lookup_generic_scalar( obj=obj, as_of_date=as_of_date, country_code=country_code, matches=matches, missing=missing, ) return matches, missing def _compute_asset_lifetimes(self, country_codes): """ Compute and cache a recarray of asset lifetimes. """ sids = starts = ends = [] equities_cols = self.equities.c # if country_codes: # results = sa.select(( # equities_cols.sid, # equities_cols.start_date, # equities_cols.end_date, # )).where( # (self.exchanges.c.exchange == equities_cols.exchange) & # (self.exchanges.c.country_code.in_(country_codes)) # ).execute().fetchall() # if results: # sids, starts, ends = zip(*results) # TODO Domain bypass if country_codes: results = sa.select(( equities_cols.sid, equities_cols.start_date, equities_cols.end_date, )).execute().fetchall() if results: sids, starts, ends = zip(*results) sid = np.array(sids, dtype='i8') start = np.array(starts, dtype='f8') end = np.array(ends, dtype='f8') start[np.isnan(start)] = 0 # convert missing starts to 0 end[np.isnan(end)] = np.iinfo(int).max # convert missing end to INTMAX return Lifetimes(sid, start.astype('i8'), end.astype('i8')) def lifetimes(self, dates, include_start_date, country_codes): """ Compute a DataFrame representing asset lifetimes for the specified date range. Parameters ---------- dates : pd.DatetimeIndex The dates for which to compute lifetimes. include_start_date : bool Whether or not to count the asset as alive on its start_date. This is useful in a backtesting context where `lifetimes` is being used to signify "do I have data for this asset as of the morning of this date?" For many financial metrics, (e.g. daily close), data isn't available for an asset until the end of the asset's first day. 
country_codes : iterable[str] The country codes to get lifetimes for. Returns ------- lifetimes : pd.DataFrame A frame of dtype bool with `dates` as index and an Int64Index of assets as columns. The value at `lifetimes.loc[date, asset]` will be True iff `asset` existed on `date`. If `include_start_date` is False, then lifetimes.loc[date, asset] will be false when date == asset.start_date. See Also -------- numpy.putmask zipline.pipeline.engine.SimplePipelineEngine._compute_root_mask """ if isinstance(country_codes, string_types): raise TypeError( "Got string {!r} instead of an iterable of strings in " "AssetFinder.lifetimes.".format(country_codes), ) # normalize to a cache-key so that we can memoize results. country_codes = frozenset(country_codes) lifetimes = self._asset_lifetimes.get(country_codes) if lifetimes is None: self._asset_lifetimes[country_codes] = lifetimes = ( self._compute_asset_lifetimes(country_codes) ) raw_dates = as_column(dates.asi8) if include_start_date: mask = lifetimes.start <= raw_dates else: mask = lifetimes.start < raw_dates mask &= (raw_dates <= lifetimes.end) return pd.DataFrame(mask, index=dates, columns=lifetimes.sid) def equities_sids_for_country_code(self, country_code): """Return all of the sids for a given country. Parameters ---------- country_code : str An ISO 3166 alpha-2 country code. Returns ------- tuple[int] The sids whose exchanges are in this country. """ sids = self._compute_asset_lifetimes([country_code]).sid return tuple(sids.tolist()) class AssetConvertible(with_metaclass(ABCMeta)): """ ABC for types that are convertible to integer-representations of Assets. Includes Asset, six.string_types, and Integral """ pass AssetConvertible.register(Integral) AssetConvertible.register(Asset) # Use six.string_types for Python2/3 compatibility for _type in string_types: AssetConvertible.register(_type) class NotAssetConvertible(ValueError): pass class PricingDataAssociable(with_metaclass(ABCMeta)): """ ABC for types that can be associated with pricing data. Includes Asset, Future, ContinuousFuture """ pass PricingDataAssociable.register(Asset) PricingDataAssociable.register(Future) PricingDataAssociable.register(ContinuousFuture) def was_active(reference_date_value, asset): """ Whether or not `asset` was active at the time corresponding to `reference_date_value`. Parameters ---------- reference_date_value : int Date, represented as nanoseconds since EPOCH, for which we want to know if `asset` was alive. This is generally the result of accessing the `value` attribute of a pandas Timestamp. asset : Asset The asset object to check. Returns ------- was_active : bool Whether or not the `asset` existed at the specified time. """ return ( asset.start_date.value <= reference_date_value <= asset.end_date.value ) def only_active_assets(reference_date_value, assets): """ Filter an iterable of Asset objects down to just assets that were alive at the time corresponding to `reference_date_value`. Parameters ---------- reference_date_value : int Date, represented as nanoseconds since EPOCH, for which we want to know if `asset` was alive. This is generally the result of accessing the `value` attribute of a pandas Timestamp. assets : iterable[Asset] The assets to filter. Returns ------- active_assets : list List of the active assets from `assets` on the requested date. """ return [a for a in assets if was_active(reference_date_value, a)]
zipline-trader
/zipline_trader-1.6.1-cp36-cp36m-win32.whl/zipline/assets/assets.py
assets.py
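A minimal usage sketch for AssetFinder, not from the package: the sqlite path, ticker, dates, and country code are hypothetical, and it assumes the asset database was previously written by an AssetDBWriter.

# Sketch only; the database path and lookup arguments are assumptions.
import pandas as pd
from zipline.assets.assets import AssetFinder

finder = AssetFinder('sqlite:///assets-7.sqlite')  # hypothetical asset db

equity = finder.lookup_symbol(
    'AAPL',
    as_of_date=pd.Timestamp('2020-06-30', tz='UTC'),
    country_code='US',
)
same = finder.retrieve_asset(equity.sid)   # served from the asset cache

alive = finder.lifetimes(
    pd.date_range('2020-06-01', '2020-06-30', tz='UTC'),
    include_start_date=False,
    country_codes=['US'],
)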
from itertools import product from string import ascii_uppercase import pandas as pd from pandas.tseries.offsets import MonthBegin from .futures import CMES_CODE_TO_MONTH def make_rotating_equity_info(num_assets, first_start, frequency, periods_between_starts, asset_lifetime, exchange='TEST'): """ Create a DataFrame representing lifetimes of assets that are constantly rotating in and out of existence. Parameters ---------- num_assets : int How many assets to create. first_start : pd.Timestamp The start date for the first asset. frequency : str or pd.tseries.offsets.Offset (e.g. trading_day) Frequency used to interpret next two arguments. periods_between_starts : int Create a new asset every `frequency` * `periods_between_new` asset_lifetime : int Each asset exists for `frequency` * `asset_lifetime` days. exchange : str, optional The exchange name. Returns ------- info : pd.DataFrame DataFrame representing newly-created assets. """ return pd.DataFrame( { 'symbol': [chr(ord('A') + i) for i in range(num_assets)], # Start a new asset every `periods_between_starts` days. 'start_date': pd.date_range( first_start, freq=(periods_between_starts * frequency), periods=num_assets, ), # Each asset lasts for `asset_lifetime` days. 'end_date': pd.date_range( first_start + (asset_lifetime * frequency), freq=(periods_between_starts * frequency), periods=num_assets, ), 'exchange': exchange, }, index=range(num_assets), ) def make_simple_equity_info(sids, start_date, end_date, symbols=None, names=None, exchange='TEST'): """ Create a DataFrame representing assets that exist for the full duration between `start_date` and `end_date`. Parameters ---------- sids : array-like of int start_date : pd.Timestamp, optional end_date : pd.Timestamp, optional symbols : list, optional Symbols to use for the assets. If not provided, symbols are generated from the sequence 'A', 'B', ... names : list, optional Names to use for the assets. If not provided, names are generated by adding " INC." to each of the symbols (which might also be auto-generated). exchange : str, optional The exchange name. Returns ------- info : pd.DataFrame DataFrame representing newly-created assets. """ num_assets = len(sids) if symbols is None: symbols = list(ascii_uppercase[:num_assets]) else: symbols = list(symbols) if names is None: names = [str(s) + " INC." for s in symbols] return pd.DataFrame( { 'symbol': symbols, 'start_date': pd.to_datetime([start_date] * num_assets), 'end_date': pd.to_datetime([end_date] * num_assets), 'asset_name': list(names), 'exchange': exchange, }, index=sids, columns=( 'start_date', 'end_date', 'symbol', 'exchange', 'asset_name', ), ) def make_simple_multi_country_equity_info(countries_to_sids, countries_to_exchanges, start_date, end_date): """Create a DataFrame representing assets that exist for the full duration between `start_date` and `end_date`, from multiple countries. 
""" sids = [] symbols = [] exchanges = [] for country, country_sids in countries_to_sids.items(): exchange = countries_to_exchanges[country] for i, sid in enumerate(country_sids): sids.append(sid) symbols.append('-'.join([country, str(i)])) exchanges.append(exchange) return pd.DataFrame( { 'symbol': symbols, 'start_date': start_date, 'end_date': end_date, 'asset_name': symbols, 'exchange': exchanges, }, index=sids, columns=( 'start_date', 'end_date', 'symbol', 'exchange', 'asset_name', ), ) def make_jagged_equity_info(num_assets, start_date, first_end, frequency, periods_between_ends, auto_close_delta): """ Create a DataFrame representing assets that all begin at the same start date, but have cascading end dates. Parameters ---------- num_assets : int How many assets to create. start_date : pd.Timestamp The start date for all the assets. first_end : pd.Timestamp The date at which the first equity will end. frequency : str or pd.tseries.offsets.Offset (e.g. trading_day) Frequency used to interpret the next argument. periods_between_ends : int Starting after the first end date, end each asset every `frequency` * `periods_between_ends`. Returns ------- info : pd.DataFrame DataFrame representing newly-created assets. """ frame = pd.DataFrame( { 'symbol': [chr(ord('A') + i) for i in range(num_assets)], 'start_date': start_date, 'end_date': pd.date_range( first_end, freq=(periods_between_ends * frequency), periods=num_assets, ), 'exchange': 'TEST', }, index=range(num_assets), ) # Explicitly pass None to disable setting the auto_close_date column. if auto_close_delta is not None: frame['auto_close_date'] = frame['end_date'] + auto_close_delta return frame def make_future_info(first_sid, root_symbols, years, notice_date_func, expiration_date_func, start_date_func, month_codes=None, multiplier=500): """ Create a DataFrame representing futures for `root_symbols` during `year`. Generates a contract per triple of (symbol, year, month) supplied to `root_symbols`, `years`, and `month_codes`. Parameters ---------- first_sid : int The first sid to use for assigning sids to the created contracts. root_symbols : list[str] A list of root symbols for which to create futures. years : list[int or str] Years (e.g. 2014), for which to produce individual contracts. notice_date_func : (Timestamp) -> Timestamp Function to generate notice dates from first of the month associated with asset month code. Return NaT to simulate futures with no notice date. expiration_date_func : (Timestamp) -> Timestamp Function to generate expiration dates from first of the month associated with asset month code. start_date_func : (Timestamp) -> Timestamp, optional Function to generate start dates from first of the month associated with each asset month code. Defaults to a start_date one year prior to the month_code date. month_codes : dict[str -> [1..12]], optional Dictionary of month codes for which to create contracts. Entries should be strings mapped to values from 1 (January) to 12 (December). Default is zipline.futures.CMES_CODE_TO_MONTH multiplier : int The contract multiplier. Returns ------- futures_info : pd.DataFrame DataFrame of futures data suitable for passing to an AssetDBWriter. """ if month_codes is None: month_codes = CMES_CODE_TO_MONTH year_strs = list(map(str, years)) years = [pd.Timestamp(s, tz='UTC') for s in year_strs] # Pairs of string/date like ('K06', 2006-05-01) sorted by year/month # `MonthBegin(month_num - 1)` since the year already starts at month 1. 
contract_suffix_to_beginning_of_month = tuple( (month_code + year_str[-2:], year + MonthBegin(month_num - 1)) for ((year, year_str), (month_code, month_num)) in product( zip(years, year_strs), sorted(list(month_codes.items()), key=lambda item: item[1]), ) ) contracts = [] parts = product(root_symbols, contract_suffix_to_beginning_of_month) for sid, (root_sym, (suffix, month_begin)) in enumerate(parts, first_sid): contracts.append({ 'sid': sid, 'root_symbol': root_sym, 'symbol': root_sym + suffix, 'start_date': start_date_func(month_begin), 'notice_date': notice_date_func(month_begin), 'expiration_date': expiration_date_func(month_begin), 'multiplier': multiplier, 'exchange': "TEST", }) return pd.DataFrame.from_records(contracts, index='sid') def make_commodity_future_info(first_sid, root_symbols, years, month_codes=None, multiplier=500): """ Make futures testing data that simulates the notice/expiration date behavior of physical commodities like oil. Parameters ---------- first_sid : int The first sid to use for assigning sids to the created contracts. root_symbols : list[str] A list of root symbols for which to create futures. years : list[int or str] Years (e.g. 2014), for which to produce individual contracts. month_codes : dict[str -> [1..12]], optional Dictionary of month codes for which to create contracts. Entries should be strings mapped to values from 1 (January) to 12 (December). Default is zipline.futures.CMES_CODE_TO_MONTH multiplier : int The contract multiplier. Expiration dates are on the 20th of the month prior to the month code. Notice dates are are on the 20th two months prior to the month code. Start dates are one year before the contract month. See Also -------- make_future_info """ nineteen_days = pd.Timedelta(days=19) one_year = pd.Timedelta(days=365) return make_future_info( first_sid=first_sid, root_symbols=root_symbols, years=years, notice_date_func=lambda dt: dt - MonthBegin(2) + nineteen_days, expiration_date_func=lambda dt: dt - MonthBegin(1) + nineteen_days, start_date_func=lambda dt: dt - one_year, month_codes=month_codes, multiplier=multiplier, )
zipline-trader
/zipline_trader-1.6.1-cp36-cp36m-win32.whl/zipline/assets/synthetic.py
synthetic.py
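A brief illustration of the synthetic asset helpers, not part of the module; the sids, dates, and root symbol are arbitrary test values.

# Sketch only; all inputs are illustrative.
import pandas as pd
from zipline.assets.synthetic import (
    make_simple_equity_info,
    make_commodity_future_info,
)

equities = make_simple_equity_info(
    sids=[1, 2, 3],
    start_date=pd.Timestamp('2015-01-02', tz='UTC'),
    end_date=pd.Timestamp('2016-12-30', tz='UTC'),
)
# -> symbols 'A', 'B', 'C' with names 'A INC.', ... on exchange 'TEST'

futures = make_commodity_future_info(
    first_sid=100,
    root_symbols=['CL'],
    years=[2015, 2016],
)
# -> one contract per (root symbol, year, month code), with oil-style
#    notice/expiration date offsets as described in the docstring.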
from collections import namedtuple import re import numpy as np import pandas as pd import sqlalchemy as sa from toolz import first from zipline.errors import AssetDBVersionError from zipline.assets.asset_db_schema import ( ASSET_DB_VERSION, asset_db_table_names, asset_router, equities as equities_table, equity_symbol_mappings, equity_supplementary_mappings as equity_supplementary_mappings_table, futures_contracts as futures_contracts_table, exchanges as exchanges_table, futures_root_symbols, metadata, version_info, ) from sqlalchemy.exc import IntegrityError from zipline.utils.compat import ExitStack from zipline.utils.preprocess import preprocess from zipline.utils.range import from_tuple, intersecting_ranges from zipline.utils.db_utils import coerce_string_to_eng # Define a namedtuple for use with the load_data and _load_data methods AssetData = namedtuple( 'AssetData', ( 'equities', 'equities_mappings', 'futures', 'exchanges', 'root_symbols', 'equity_supplementary_mappings', ), ) SQLITE_MAX_VARIABLE_NUMBER = 999 symbol_columns = frozenset({ 'symbol', 'company_symbol', 'share_class_symbol', }) mapping_columns = symbol_columns | {'start_date', 'end_date'} _index_columns = { 'equities': 'sid', 'equity_supplementary_mappings': 'sid', 'futures': 'sid', 'exchanges': 'exchange', 'root_symbols': 'root_symbol', } def _normalize_index_columns_in_place(equities, equity_supplementary_mappings, futures, exchanges, root_symbols): """ Update dataframes in place to set indentifier columns as indices. For each input frame, if the frame has a column with the same name as its associated index column, set that column as the index. Otherwise, assume the index already contains identifiers. If frames are passed as None, they're ignored. """ for frame, column_name in ((equities, 'sid'), (equity_supplementary_mappings, 'sid'), (futures, 'sid'), (exchanges, 'exchange'), (root_symbols, 'root_symbol')): if frame is not None and column_name in frame: frame.set_index(column_name, inplace=True) if frame is not None: frame.index.rename(column_name, inplace=True) def _default_none(df, column): return None def _no_default(df, column): if not df.empty: raise ValueError('no default value for column %r' % column) # Default values for the equities DataFrame _equities_defaults = { 'symbol': _default_none, 'asset_name': _default_none, 'start_date': lambda df, col: 0, 'end_date': lambda df, col: np.iinfo(np.int64).max, 'first_traded': _default_none, 'auto_close_date': _default_none, # the full exchange name 'exchange': _no_default, } # the defaults for ``equities`` in ``write_direct`` _direct_equities_defaults = _equities_defaults.copy() del _direct_equities_defaults['symbol'] # Default values for the futures DataFrame _futures_defaults = { 'symbol': _default_none, 'root_symbol': _default_none, 'asset_name': _default_none, 'start_date': lambda df, col: 0, 'end_date': lambda df, col: np.iinfo(np.int64).max, 'first_traded': _default_none, 'exchange': _default_none, 'notice_date': _default_none, 'expiration_date': _default_none, 'auto_close_date': _default_none, 'tick_size': _default_none, 'multiplier': lambda df, col: 1, } # Default values for the exchanges DataFrame _exchanges_defaults = { 'canonical_name': lambda df, col: df.index, 'country_code': lambda df, col: '??', } # Default values for the root_symbols DataFrame _root_symbols_defaults = { 'sector': _default_none, 'description': _default_none, 'exchange': _default_none, } # Default values for the equity_supplementary_mappings DataFrame 
_equity_supplementary_mappings_defaults = { 'value': _default_none, 'field': _default_none, 'start_date': lambda df, col: 0, 'end_date': lambda df, col: np.iinfo(np.int64).max, } # Default values for the equity_symbol_mappings DataFrame _equity_symbol_mappings_defaults = { 'sid': _no_default, 'company_symbol': _default_none, 'share_class_symbol': _default_none, 'symbol': _default_none, 'start_date': lambda df, col: 0, 'end_date': lambda df, col: np.iinfo(np.int64).max, } # Fuzzy symbol delimiters that may break up a company symbol and share class _delimited_symbol_delimiters_regex = re.compile(r'[./\-_]') _delimited_symbol_default_triggers = frozenset({np.nan, None, ''}) def split_delimited_symbol(symbol): """ Takes in a symbol that may be delimited and splits it in to a company symbol and share class symbol. Also returns the fuzzy symbol, which is the symbol without any fuzzy characters at all. Parameters ---------- symbol : str The possibly-delimited symbol to be split Returns ------- company_symbol : str The company part of the symbol. share_class_symbol : str The share class part of a symbol. """ # return blank strings for any bad fuzzy symbols, like NaN or None if symbol in _delimited_symbol_default_triggers: return '', '' symbol = symbol.upper() split_list = re.split( pattern=_delimited_symbol_delimiters_regex, string=symbol, maxsplit=1, ) # Break the list up in to its two components, the company symbol and the # share class symbol company_symbol = split_list[0] if len(split_list) > 1: share_class_symbol = split_list[1] else: share_class_symbol = '' return company_symbol, share_class_symbol def _generate_output_dataframe(data_subset, defaults): """ Generates an output dataframe from the given subset of user-provided data, the given column names, and the given default values. Parameters ---------- data_subset : DataFrame A DataFrame, usually from an AssetData object, that contains the user's input metadata for the asset type being processed defaults : dict A dict where the keys are the names of the columns of the desired output DataFrame and the values are a function from dataframe and column name to the default values to insert in the DataFrame if no user data is provided Returns ------- DataFrame A DataFrame containing all user-provided metadata, and default values wherever user-provided metadata was missing """ # The columns provided. cols = set(data_subset.columns) desired_cols = set(defaults) # Drop columns with unrecognised headers. data_subset.drop(cols - desired_cols, axis=1, inplace=True) # Get those columns which we need but # for which no data has been supplied. for col in desired_cols - cols: # write the default value for any missing columns data_subset[col] = defaults[col](data_subset, col) return data_subset def _check_asset_group(group): row = group.sort_values('end_date').iloc[-1] row.start_date = group.start_date.min() row.end_date = group.end_date.max() row.drop(list(symbol_columns), inplace=True) return row def _format_range(r): return ( str(pd.Timestamp(r.start, unit='ns')), str(pd.Timestamp(r.stop, unit='ns')), ) def _check_symbol_mappings(df, exchanges, asset_exchange): """Check that there are no cases where multiple symbols resolve to the same asset at the same time in the same country. Parameters ---------- df : pd.DataFrame The equity symbol mappings table. exchanges : pd.DataFrame The exchanges table. asset_exchange : pd.Series A series that maps sids to the exchange the asset is in. Raises ------ ValueError Raised when there are ambiguous symbol mappings. 
""" mappings = df.set_index('sid')[list(mapping_columns)].copy() if not exchanges.empty and not exchanges.index.name == 'exchange': exchanges.index = exchanges['exchange'] mappings['country_code'] = exchanges['country_code'][ asset_exchange.loc[df['sid']] ].values ambigious = {} def check_intersections(persymbol): intersections = list(intersecting_ranges(map( from_tuple, zip(persymbol.start_date, persymbol.end_date), ))) if intersections: data = persymbol[ ['start_date', 'end_date'] ].astype('datetime64[ns]') # indent the dataframe string, also compute this early because # ``persymbol`` is a view and ``astype`` doesn't copy the index # correctly in pandas 0.22 msg_component = '\n '.join(str(data).splitlines()) ambigious[persymbol.name] = intersections, msg_component mappings.groupby(['symbol', 'country_code']).apply(check_intersections) if ambigious: raise ValueError( 'Ambiguous ownership for %d symbol%s, multiple assets held the' ' following symbols:\n%s' % ( len(ambigious), '' if len(ambigious) == 1 else 's', '\n'.join( '%s (%s):\n intersections: %s\n %s' % ( symbol, country_code, tuple(map(_format_range, intersections)), cs, ) for (symbol, country_code), (intersections, cs) in sorted( ambigious.items(), key=first, ) ), ) ) def _split_symbol_mappings(df, exchanges): """Split out the symbol: sid mappings from the raw data. Parameters ---------- df : pd.DataFrame The dataframe with multiple rows for each symbol: sid pair. exchanges : pd.DataFrame The exchanges table. Returns ------- asset_info : pd.DataFrame The asset info with one row per asset. symbol_mappings : pd.DataFrame The dataframe of just symbol: sid mappings. The index will be the sid, then there will be three columns: symbol, start_date, and end_date. """ mappings = df[list(mapping_columns)] with pd.option_context('mode.chained_assignment', None): mappings['sid'] = mappings.index mappings.reset_index(drop=True, inplace=True) # take the most recent sid->exchange mapping based on end date asset_exchange = df[ ['exchange', 'end_date'] ].sort_values('end_date').groupby(level=0)['exchange'].nth(-1) _check_symbol_mappings(mappings, exchanges, asset_exchange) return ( df.groupby(level=0).apply(_check_asset_group), mappings, ) def _dt_to_epoch_ns(dt_series): """Convert a timeseries into an Int64Index of nanoseconds since the epoch. Parameters ---------- dt_series : pd.Series The timeseries to convert. Returns ------- idx : pd.Int64Index The index converted to nanoseconds since the epoch. """ index = pd.to_datetime(dt_series.values) if index.tzinfo is None: index = index.tz_localize('UTC') else: index = index.tz_convert('UTC') return index.view(np.int64) def check_version_info(conn, version_table, expected_version): """ Checks for a version value in the version table. Parameters ---------- conn : sa.Connection The connection to use to perform the check. version_table : sa.Table The version table of the asset database expected_version : int The expected version of the asset database Raises ------ AssetDBVersionError If the version is in the table and not equal to ASSET_DB_VERSION. 
""" # Read the version out of the table version_from_table = conn.execute( sa.select((version_table.c.version,)), ).scalar() # A db without a version is considered v0 if version_from_table is None: version_from_table = 0 # Raise an error if the versions do not match if (version_from_table != expected_version): raise AssetDBVersionError(db_version=version_from_table, expected_version=expected_version) def write_version_info(conn, version_table, version_value): """ Inserts the version value in to the version table. Parameters ---------- conn : sa.Connection The connection to use to execute the insert. version_table : sa.Table The version table of the asset database version_value : int The version to write in to the database """ conn.execute(sa.insert(version_table, values={'version': version_value})) class _empty(object): columns = () class AssetDBWriter(object): """Class used to write data to an assets db. Parameters ---------- engine : Engine or str An SQLAlchemy engine or path to a SQL database. """ DEFAULT_CHUNK_SIZE = SQLITE_MAX_VARIABLE_NUMBER @preprocess(engine=coerce_string_to_eng(require_exists=False)) def __init__(self, engine, asset_finder=None): self.asset_finder = asset_finder self.engine = engine def _real_write(self, equities, equity_symbol_mappings, equity_supplementary_mappings, futures, exchanges, root_symbols, chunk_size): with self.engine.connect() as conn: # Create SQL tables if they do not exist. self.init_db(conn) if exchanges is not None: self._write_df_to_table( exchanges_table, exchanges, conn, chunk_size, ) if root_symbols is not None: self._write_df_to_table( futures_root_symbols, root_symbols, conn, chunk_size, ) if equity_supplementary_mappings is not None: self._write_df_to_table( equity_supplementary_mappings_table, equity_supplementary_mappings, conn, chunk_size, ) if futures is not None: self._write_assets( 'future', futures, conn, chunk_size, ) if equities is not None: self._write_assets( 'equity', equities, conn, chunk_size, mapping_data=equity_symbol_mappings, ) def write_direct(self, equities=None, equity_symbol_mappings=None, equity_supplementary_mappings=None, futures=None, exchanges=None, root_symbols=None, chunk_size=DEFAULT_CHUNK_SIZE): """Write asset metadata to a sqlite database in the format that it is stored in the assets db. Parameters ---------- equities : pd.DataFrame, optional The equity metadata. The columns for this dataframe are: symbol : str The ticker symbol for this equity. asset_name : str The full name for this asset. start_date : datetime The date when this asset was created. end_date : datetime, optional The last date we have trade data for this asset. first_traded : datetime, optional The first date we have trade data for this asset. auto_close_date : datetime, optional The date on which to close any positions in this asset. exchange : str The exchange where this asset is traded. The index of this dataframe should contain the sids. futures : pd.DataFrame, optional The future contract metadata. The columns for this dataframe are: symbol : str The ticker symbol for this futures contract. root_symbol : str The root symbol, or the symbol with the expiration stripped out. asset_name : str The full name for this asset. start_date : datetime, optional The date when this asset was created. end_date : datetime, optional The last date we have trade data for this asset. first_traded : datetime, optional The first date we have trade data for this asset. exchange : str The exchange where this asset is traded. 
notice_date : datetime The date when the owner of the contract may be forced to take physical delivery of the contract's asset. expiration_date : datetime The date when the contract expires. auto_close_date : datetime The date when the broker will automatically close any positions in this contract. tick_size : float The minimum price movement of the contract. multiplier: float The amount of the underlying asset represented by this contract. exchanges : pd.DataFrame, optional The exchanges where assets can be traded. The columns of this dataframe are: exchange : str The full name of the exchange. canonical_name : str The canonical name of the exchange. country_code : str The ISO 3166 alpha-2 country code of the exchange. root_symbols : pd.DataFrame, optional The root symbols for the futures contracts. The columns for this dataframe are: root_symbol : str The root symbol name. root_symbol_id : int The unique id for this root symbol. sector : string, optional The sector of this root symbol. description : string, optional A short description of this root symbol. exchange : str The exchange where this root symbol is traded. equity_supplementary_mappings : pd.DataFrame, optional Additional mappings from values of abitrary type to assets. chunk_size : int, optional The amount of rows to write to the SQLite table at once. This defaults to the default number of bind params in sqlite. If you have compiled sqlite3 with more bind or less params you may want to pass that value here. """ if equities is not None: equities = _generate_output_dataframe( equities, _direct_equities_defaults, ) if equity_symbol_mappings is None: raise ValueError( 'equities provided with no symbol mapping data', ) equity_symbol_mappings = _generate_output_dataframe( equity_symbol_mappings, _equity_symbol_mappings_defaults, ) _check_symbol_mappings( equity_symbol_mappings, exchanges, equities['exchange'], ) if equity_supplementary_mappings is not None: equity_supplementary_mappings = _generate_output_dataframe( equity_supplementary_mappings, _equity_supplementary_mappings_defaults, ) if futures is not None: futures = _generate_output_dataframe(_futures_defaults, futures) if exchanges is not None: exchanges = _generate_output_dataframe( exchanges.set_index('exchange'), _exchanges_defaults, ) if root_symbols is not None: root_symbols = _generate_output_dataframe( root_symbols, _root_symbols_defaults, ) # Set named identifier columns as indices, if provided. _normalize_index_columns_in_place( equities=equities, equity_supplementary_mappings=equity_supplementary_mappings, futures=futures, exchanges=exchanges, root_symbols=root_symbols, ) self._real_write( equities=equities, equity_symbol_mappings=equity_symbol_mappings, equity_supplementary_mappings=equity_supplementary_mappings, futures=futures, exchanges=exchanges, root_symbols=root_symbols, chunk_size=chunk_size, ) def write(self, equities=None, futures=None, exchanges=None, root_symbols=None, equity_supplementary_mappings=None, chunk_size=DEFAULT_CHUNK_SIZE): """Write asset metadata to a sqlite database. Parameters ---------- equities : pd.DataFrame, optional The equity metadata. The columns for this dataframe are: symbol : str The ticker symbol for this equity. asset_name : str The full name for this asset. start_date : datetime The date when this asset was created. end_date : datetime, optional The last date we have trade data for this asset. first_traded : datetime, optional The first date we have trade data for this asset. 
auto_close_date : datetime, optional The date on which to close any positions in this asset. exchange : str The exchange where this asset is traded. The index of this dataframe should contain the sids. futures : pd.DataFrame, optional The future contract metadata. The columns for this dataframe are: symbol : str The ticker symbol for this futures contract. root_symbol : str The root symbol, or the symbol with the expiration stripped out. asset_name : str The full name for this asset. start_date : datetime, optional The date when this asset was created. end_date : datetime, optional The last date we have trade data for this asset. first_traded : datetime, optional The first date we have trade data for this asset. exchange : str The exchange where this asset is traded. notice_date : datetime The date when the owner of the contract may be forced to take physical delivery of the contract's asset. expiration_date : datetime The date when the contract expires. auto_close_date : datetime The date when the broker will automatically close any positions in this contract. tick_size : float The minimum price movement of the contract. multiplier: float The amount of the underlying asset represented by this contract. exchanges : pd.DataFrame, optional The exchanges where assets can be traded. The columns of this dataframe are: exchange : str The full name of the exchange. canonical_name : str The canonical name of the exchange. country_code : str The ISO 3166 alpha-2 country code of the exchange. root_symbols : pd.DataFrame, optional The root symbols for the futures contracts. The columns for this dataframe are: root_symbol : str The root symbol name. root_symbol_id : int The unique id for this root symbol. sector : string, optional The sector of this root symbol. description : string, optional A short description of this root symbol. exchange : str The exchange where this root symbol is traded. equity_supplementary_mappings : pd.DataFrame, optional Additional mappings from values of abitrary type to assets. chunk_size : int, optional The amount of rows to write to the SQLite table at once. This defaults to the default number of bind params in sqlite. If you have compiled sqlite3 with more bind or less params you may want to pass that value here. 
See Also -------- zipline.assets.asset_finder """ if exchanges is None: exchange_names = [ df['exchange'] for df in (equities, futures, root_symbols) if df is not None ] if exchange_names: exchanges = pd.DataFrame({ 'exchange': pd.concat(exchange_names).unique(), }) data = self._load_data( equities if equities is not None else pd.DataFrame(), futures if futures is not None else pd.DataFrame(), exchanges if exchanges is not None else pd.DataFrame(), root_symbols if root_symbols is not None else pd.DataFrame(), ( equity_supplementary_mappings if equity_supplementary_mappings is not None else pd.DataFrame() ), ) self._real_write( equities=data.equities, equity_symbol_mappings=data.equities_mappings, equity_supplementary_mappings=data.equity_supplementary_mappings, futures=data.futures, root_symbols=data.root_symbols, exchanges=data.exchanges, chunk_size=chunk_size, ) def _write_df_to_table(self, tbl, df, txn, chunk_size): df = df.copy() for column, dtype in df.dtypes.iteritems(): if dtype.kind == 'M': df[column] = _dt_to_epoch_ns(df[column]) try: df.to_sql( tbl.name, txn.connection, index=True, index_label=first(tbl.primary_key.columns).name, if_exists='append', chunksize=chunk_size, ) except: df.reset_index(inplace=True) for i, row in df.iterrows(): values = {} for column in list(df.columns): # skip raw index, get set by backend if column == 'index': continue values[column] = row[column] try: ins = tbl.insert().values(values) txn.execute(ins) except IntegrityError: pkey_column = first(tbl.primary_key.columns) upd = tbl.update().where(pkey_column == values[pkey_column.name]).values(values) txn.execute(upd) # print(f'Skipping duplicate for table {tbl.name}: {values}') def _write_assets(self, asset_type, assets, txn, chunk_size, mapping_data=None): if asset_type == 'future': tbl = futures_contracts_table if mapping_data is not None: raise TypeError('no mapping data expected for futures') elif asset_type == 'equity': tbl = equities_table if mapping_data is None: raise TypeError('mapping data required for equities') else: raise ValueError( "asset_type must be in {'future', 'equity'}, got: %s" % asset_type, ) self._write_df_to_table(tbl, assets, txn, chunk_size) # if repeated but we need to write data to equities-table first, # otherwise we'll fail because of non-matched constraints if asset_type == 'equity': # write the symbol mapping data. self._write_df_to_table( equity_symbol_mappings, mapping_data, txn, chunk_size, ) router_df = pd.DataFrame({ asset_router.c.sid.name: assets.index.values, asset_router.c.asset_type.name: asset_type, }) self._write_df_to_table(asset_router, router_df, txn, chunk_size) def _all_tables_present(self, txn): """ Checks if any tables are present in the current assets database. Parameters ---------- txn : Transaction The open transaction to check in. Returns ------- has_tables : bool True if any tables are present, otherwise False. """ conn = txn.connect() for table_name in asset_db_table_names: if txn.dialect.has_table(conn, table_name): return True return False def init_db(self, txn=None): """Connect to database and create tables. Parameters ---------- txn : sa.engine.Connection, optional The transaction to execute in. If this is not provided, a new transaction will be started with the engine provided. Returns ------- metadata : sa.MetaData The metadata that describes the new assets db. 
""" with ExitStack() as stack: if txn is None: txn = stack.enter_context(self.engine.connect()) tables_already_exist = self._all_tables_present(txn) # Create the SQL tables if they do not already exist. metadata.create_all(txn, checkfirst=True) if tables_already_exist: check_version_info(txn, version_info, ASSET_DB_VERSION) else: write_version_info(txn, version_info, ASSET_DB_VERSION) def _normalize_equities(self, equities, exchanges): # HACK: If 'company_name' is provided, map it to asset_name if ('company_name' in equities.columns and 'asset_name' not in equities.columns): equities['asset_name'] = equities['company_name'] # remap 'file_name' to 'symbol' if provided if 'file_name' in equities.columns: equities['symbol'] = equities['file_name'] equities_output = _generate_output_dataframe( data_subset=equities, defaults=_equities_defaults, ) # Split symbols to company_symbols and share_class_symbols tuple_series = equities_output['symbol'].apply(split_delimited_symbol) split_symbols = pd.DataFrame( tuple_series.tolist(), columns=['company_symbol', 'share_class_symbol'], index=tuple_series.index ) equities_output = pd.concat((equities_output, split_symbols), axis=1) # Upper-case all symbol data for col in symbol_columns: equities_output[col] = equities_output[col].str.upper() # Convert date columns to UNIX Epoch integers (nanoseconds) for col in ('start_date', 'end_date', 'first_traded', 'auto_close_date'): equities_output[col] = _dt_to_epoch_ns(equities_output[col]) equities_output.index.rename('sid', inplace=True) return _split_symbol_mappings(equities_output, exchanges) def _normalize_futures(self, futures): futures_output = _generate_output_dataframe( data_subset=futures, defaults=_futures_defaults, ) for col in ('symbol', 'root_symbol'): futures_output[col] = futures_output[col].str.upper() for col in ('start_date', 'end_date', 'first_traded', 'notice_date', 'expiration_date', 'auto_close_date'): futures_output[col] = _dt_to_epoch_ns(futures_output[col]) return futures_output def _normalize_equity_supplementary_mappings(self, mappings): mappings_output = _generate_output_dataframe( data_subset=mappings, defaults=_equity_supplementary_mappings_defaults, ) for col in ('start_date', 'end_date'): mappings_output[col] = _dt_to_epoch_ns(mappings_output[col]) mappings_output.index.rename('sid', inplace=True) return mappings_output def _load_data(self, equities, futures, exchanges, root_symbols, equity_supplementary_mappings): """ Returns a standard set of pandas.DataFrames: equities, futures, exchanges, root_symbols """ # Set named identifier columns as indices, if provided. _normalize_index_columns_in_place( equities=equities, equity_supplementary_mappings=equity_supplementary_mappings, futures=futures, exchanges=exchanges, root_symbols=root_symbols, ) futures_output = self._normalize_futures(futures) equity_supplementary_mappings_output = ( self._normalize_equity_supplementary_mappings( equity_supplementary_mappings, ) ) exchanges_output = _generate_output_dataframe( data_subset=exchanges, defaults=_exchanges_defaults, ) equities_output, equities_mappings = self._normalize_equities( equities, exchanges_output, ) root_symbols_output = _generate_output_dataframe( data_subset=root_symbols, defaults=_root_symbols_defaults, ) return AssetData( equities=equities_output, equities_mappings=equities_mappings, futures=futures_output, exchanges=exchanges_output, root_symbols=root_symbols_output, equity_supplementary_mappings=equity_supplementary_mappings_output, )
zipline-trader
/zipline_trader-1.6.1-cp36-cp36m-win32.whl/zipline/assets/asset_writer.py
asset_writer.py
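A minimal sketch of how the AssetDBWriter above might be driven from a script, assuming only what the write() docstring states. The sids, tickers, dates, and on-disk database path are illustrative and not part of the package.

# Sketch only: column names follow the write() docstring above; the
# tickers, sids, dates, and the 'assets.db' path are made-up examples.
import pandas as pd
import sqlalchemy as sa
from zipline.assets.asset_writer import AssetDBWriter

equities = pd.DataFrame(
    {
        'symbol': ['AAPL', 'BRK.A'],
        'asset_name': ['Apple Inc.', 'Berkshire Hathaway Class A'],
        'start_date': [pd.Timestamp('2010-01-04')] * 2,
        'end_date': [pd.Timestamp('2020-12-31')] * 2,
        'exchange': ['NASDAQ', 'NYSE'],
    },
    index=pd.Index([1, 2], name='sid'),
)
exchanges = pd.DataFrame({
    'exchange': ['NASDAQ', 'NYSE'],
    'country_code': ['US', 'US'],
})

writer = AssetDBWriter(sa.create_engine('sqlite:///assets.db'))
writer.write(equities=equities, exchanges=exchanges)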
from alembic.migration import MigrationContext from alembic.operations import Operations import sqlalchemy as sa from toolz.curried import do, operator from zipline.assets.asset_writer import write_version_info from zipline.utils.compat import wraps from zipline.errors import AssetDBImpossibleDowngrade from zipline.utils.preprocess import preprocess from zipline.utils.db_utils import coerce_string_to_eng def alter_columns(op, name, *columns, **kwargs): """Alter columns from a table. Parameters ---------- name : str The name of the table. *columns The new columns to have. selection_string : str, optional The string to use in the selection. If not provided, it will select all of the new columns from the old table. Notes ----- The columns are passed explicitly because this should only be used in a downgrade where ``zipline.assets.asset_db_schema`` could change. """ selection_string = kwargs.pop('selection_string', None) if kwargs: raise TypeError( 'alter_columns received extra arguments: %r' % sorted(kwargs), ) if selection_string is None: selection_string = ', '.join(column.name for column in columns) tmp_name = '_alter_columns_' + name op.rename_table(name, tmp_name) for column in columns: # Clear any indices that already exist on this table, otherwise we will # fail to create the table because the indices will already be present. # When we create the table below, the indices that we want to preserve # will just get recreated. for table in name, tmp_name: try: op.drop_index('ix_%s_%s' % (table, column.name)) except sa.exc.OperationalError: pass op.create_table(name, *columns) op.execute( 'insert into %s select %s from %s' % ( name, selection_string, tmp_name, ), ) op.drop_table(tmp_name) @preprocess(engine=coerce_string_to_eng(require_exists=True)) def downgrade(engine, desired_version): """Downgrades the assets db at the given engine to the desired version. Parameters ---------- engine : Engine An SQLAlchemy engine to the assets database. desired_version : int The desired resulting version for the assets database. """ # Check the version of the db at the engine with engine.begin() as conn: metadata = sa.MetaData(conn) metadata.reflect() version_info_table = metadata.tables['version_info'] starting_version = sa.select((version_info_table.c.version,)).scalar() # Check for accidental upgrade if starting_version < desired_version: raise AssetDBImpossibleDowngrade(db_version=starting_version, desired_version=desired_version) # Check if the desired version is already the db version if starting_version == desired_version: # No downgrade needed return # Create alembic context ctx = MigrationContext.configure(conn) op = Operations(ctx) # Integer keys of downgrades to run # E.g.: [5, 4, 3, 2] would downgrade v6 to v2 downgrade_keys = range(desired_version, starting_version)[::-1] # Disable foreign keys until all downgrades are complete _pragma_foreign_keys(conn, False) # Execute the downgrades in order for downgrade_key in downgrade_keys: _downgrade_methods[downgrade_key](op, conn, version_info_table) # Re-enable foreign keys _pragma_foreign_keys(conn, True) def _pragma_foreign_keys(connection, on): """Sets the PRAGMA foreign_keys state of the SQLite database. Disabling the pragma allows for batch modification of tables with foreign keys. Parameters ---------- connection : Connection A SQLAlchemy connection to the db on : bool If true, PRAGMA foreign_keys will be set to ON. Otherwise, the PRAGMA foreign_keys will be set to OFF. 
""" connection.execute("PRAGMA foreign_keys=%s" % ("ON" if on else "OFF")) # This dict contains references to downgrade methods that can be applied to an # assets db. The resulting db's version is the key. # e.g. The method at key '0' is the downgrade method from v1 to v0 _downgrade_methods = {} def downgrades(src): """Decorator for marking that a method is a downgrade to a version to the previous version. Parameters ---------- src : int The version this downgrades from. Returns ------- decorator : callable[(callable) -> callable] The decorator to apply. """ def _(f): destination = src - 1 @do(operator.setitem(_downgrade_methods, destination)) @wraps(f) def wrapper(op, conn, version_info_table): conn.execute(version_info_table.delete()) # clear the version f(op) write_version_info(conn, version_info_table, destination) return wrapper return _ @downgrades(1) def _downgrade_v1(op): """ Downgrade assets db by removing the 'tick_size' column and renaming the 'multiplier' column. """ # Drop indices before batch # This is to prevent index collision when creating the temp table op.drop_index('ix_futures_contracts_root_symbol') op.drop_index('ix_futures_contracts_symbol') # Execute batch op to allow column modification in SQLite with op.batch_alter_table('futures_contracts') as batch_op: # Rename 'multiplier' batch_op.alter_column(column_name='multiplier', new_column_name='contract_multiplier') # Delete 'tick_size' batch_op.drop_column('tick_size') # Recreate indices after batch op.create_index('ix_futures_contracts_root_symbol', table_name='futures_contracts', columns=['root_symbol']) op.create_index('ix_futures_contracts_symbol', table_name='futures_contracts', columns=['symbol'], unique=True) @downgrades(2) def _downgrade_v2(op): """ Downgrade assets db by removing the 'auto_close_date' column. 
""" # Drop indices before batch # This is to prevent index collision when creating the temp table op.drop_index('ix_equities_fuzzy_symbol') op.drop_index('ix_equities_company_symbol') # Execute batch op to allow column modification in SQLite with op.batch_alter_table('equities') as batch_op: batch_op.drop_column('auto_close_date') # Recreate indices after batch op.create_index('ix_equities_fuzzy_symbol', table_name='equities', columns=['fuzzy_symbol']) op.create_index('ix_equities_company_symbol', table_name='equities', columns=['company_symbol']) @downgrades(3) def _downgrade_v3(op): """ Downgrade assets db by adding a not null constraint on ``equities.first_traded`` """ op.create_table( '_new_equities', sa.Column( 'sid', sa.Integer, unique=True, nullable=False, primary_key=True, ), sa.Column('symbol', sa.Text), sa.Column('company_symbol', sa.Text), sa.Column('share_class_symbol', sa.Text), sa.Column('fuzzy_symbol', sa.Text), sa.Column('asset_name', sa.Text), sa.Column('start_date', sa.Integer, default=0, nullable=False), sa.Column('end_date', sa.Integer, nullable=False), sa.Column('first_traded', sa.Integer, nullable=False), sa.Column('auto_close_date', sa.Integer), sa.Column('exchange', sa.Text), ) op.execute( """ insert into _new_equities select * from equities where equities.first_traded is not null """, ) op.drop_table('equities') op.rename_table('_new_equities', 'equities') # we need to make sure the indices have the proper names after the rename op.create_index( 'ix_equities_company_symbol', 'equities', ['company_symbol'], ) op.create_index( 'ix_equities_fuzzy_symbol', 'equities', ['fuzzy_symbol'], ) @downgrades(4) def _downgrade_v4(op): """ Downgrades assets db by copying the `exchange_full` column to `exchange`, then dropping the `exchange_full` column. """ op.drop_index('ix_equities_fuzzy_symbol') op.drop_index('ix_equities_company_symbol') op.execute("UPDATE equities SET exchange = exchange_full") with op.batch_alter_table('equities') as batch_op: batch_op.drop_column('exchange_full') op.create_index('ix_equities_fuzzy_symbol', table_name='equities', columns=['fuzzy_symbol']) op.create_index('ix_equities_company_symbol', table_name='equities', columns=['company_symbol']) @downgrades(5) def _downgrade_v5(op): op.create_table( '_new_equities', sa.Column( 'sid', sa.Integer, unique=True, nullable=False, primary_key=True, ), sa.Column('symbol', sa.Text), sa.Column('company_symbol', sa.Text), sa.Column('share_class_symbol', sa.Text), sa.Column('fuzzy_symbol', sa.Text), sa.Column('asset_name', sa.Text), sa.Column('start_date', sa.Integer, default=0, nullable=False), sa.Column('end_date', sa.Integer, nullable=False), sa.Column('first_traded', sa.Integer), sa.Column('auto_close_date', sa.Integer), sa.Column('exchange', sa.Text), sa.Column('exchange_full', sa.Text) ) op.execute( """ insert into _new_equities select equities.sid as sid, sym.symbol as symbol, sym.company_symbol as company_symbol, sym.share_class_symbol as share_class_symbol, sym.company_symbol || sym.share_class_symbol as fuzzy_symbol, equities.asset_name as asset_name, equities.start_date as start_date, equities.end_date as end_date, equities.first_traded as first_traded, equities.auto_close_date as auto_close_date, equities.exchange as exchange, equities.exchange_full as exchange_full from equities inner join -- Select the last held symbol for each equity sid from the -- symbol_mappings table. Selecting max(end_date) causes -- SQLite to take the other values from the same row that contained -- the max end_date. 
See https://www.sqlite.org/lang_select.html#resultset. # noqa (select sid, symbol, company_symbol, share_class_symbol, max(end_date) from equity_symbol_mappings group by sid) as 'sym' on equities.sid == sym.sid """, ) op.drop_table('equity_symbol_mappings') op.drop_table('equities') op.rename_table('_new_equities', 'equities') # we need to make sure the indicies have the proper names after the rename op.create_index( 'ix_equities_company_symbol', 'equities', ['company_symbol'], ) op.create_index( 'ix_equities_fuzzy_symbol', 'equities', ['fuzzy_symbol'], ) @downgrades(6) def _downgrade_v6(op): op.drop_table('equity_supplementary_mappings') @downgrades(7) def _downgrade_v7(op): tmp_name = '_new_equities' op.create_table( tmp_name, sa.Column( 'sid', sa.Integer, unique=True, nullable=False, primary_key=True, ), sa.Column('asset_name', sa.Text), sa.Column('start_date', sa.Integer, default=0, nullable=False), sa.Column('end_date', sa.Integer, nullable=False), sa.Column('first_traded', sa.Integer), sa.Column('auto_close_date', sa.Integer), # remove foreign key to exchange sa.Column('exchange', sa.Text), # add back exchange full column sa.Column('exchange_full', sa.Text), ) op.execute( """ insert into _new_equities select eq.sid, eq.asset_name, eq.start_date, eq.end_date, eq.first_traded, eq.auto_close_date, ex.canonical_name, ex.exchange from equities eq inner join exchanges ex on eq.exchange == ex.exchange where ex.country_code in ('US', '??') """, ) op.drop_table('equities') op.rename_table(tmp_name, 'equities') # rebuild all tables without a foreign key to ``exchanges`` alter_columns( op, 'futures_root_symbols', sa.Column( 'root_symbol', sa.Text, unique=True, nullable=False, primary_key=True, ), sa.Column('root_symbol_id', sa.Integer), sa.Column('sector', sa.Text), sa.Column('description', sa.Text), sa.Column('exchange', sa.Text), ) alter_columns( op, 'futures_contracts', sa.Column( 'sid', sa.Integer, unique=True, nullable=False, primary_key=True, ), sa.Column('symbol', sa.Text, unique=True, index=True), sa.Column('root_symbol', sa.Text, index=True), sa.Column('asset_name', sa.Text), sa.Column('start_date', sa.Integer, default=0, nullable=False), sa.Column('end_date', sa.Integer, nullable=False), sa.Column('first_traded', sa.Integer), sa.Column('exchange', sa.Text), sa.Column('notice_date', sa.Integer, nullable=False), sa.Column('expiration_date', sa.Integer, nullable=False), sa.Column('auto_close_date', sa.Integer, nullable=False), sa.Column('multiplier', sa.Float), sa.Column('tick_size', sa.Float), ) # drop the ``country_code`` and ``canonical_name`` columns alter_columns( op, 'exchanges', sa.Column( 'exchange', sa.Text, unique=True, nullable=False, primary_key=True, ), sa.Column('timezone', sa.Text), # Set the timezone to NULL because we don't know what it was before. # Nothing in zipline reads the timezone so it doesn't matter. 
selection_string="exchange, NULL", ) op.rename_table('exchanges', 'futures_exchanges') # add back the foreign keys that previously existed alter_columns( op, 'futures_root_symbols', sa.Column( 'root_symbol', sa.Text, unique=True, nullable=False, primary_key=True, ), sa.Column('root_symbol_id', sa.Integer), sa.Column('sector', sa.Text), sa.Column('description', sa.Text), sa.Column( 'exchange', sa.Text, sa.ForeignKey('futures_exchanges.exchange'), ), ) alter_columns( op, 'futures_contracts', sa.Column( 'sid', sa.Integer, unique=True, nullable=False, primary_key=True, ), sa.Column('symbol', sa.Text, unique=True, index=True), sa.Column( 'root_symbol', sa.Text, sa.ForeignKey('futures_root_symbols.root_symbol'), index=True ), sa.Column('asset_name', sa.Text), sa.Column('start_date', sa.Integer, default=0, nullable=False), sa.Column('end_date', sa.Integer, nullable=False), sa.Column('first_traded', sa.Integer), sa.Column( 'exchange', sa.Text, sa.ForeignKey('futures_exchanges.exchange'), ), sa.Column('notice_date', sa.Integer, nullable=False), sa.Column('expiration_date', sa.Integer, nullable=False), sa.Column('auto_close_date', sa.Integer, nullable=False), sa.Column('multiplier', sa.Float), sa.Column('tick_size', sa.Float), ) # Delete equity_symbol_mappings records that no longer refer to valid sids. op.execute( """ DELETE FROM equity_symbol_mappings WHERE sid NOT IN (SELECT sid FROM equities); """ ) # Delete asset_router records that no longer refer to valid sids. op.execute( """ DELETE FROM asset_router WHERE sid NOT IN ( SELECT sid FROM equities UNION SELECT sid FROM futures_contracts ); """ )
zipline-trader
/zipline_trader-1.6.1-cp36-cp36m-win32.whl/zipline/assets/asset_db_migrations.py
asset_db_migrations.py
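For orientation, a sketch of how an additional downgrade step would plug into the registry above. The source version (8) and the dropped column name are hypothetical; a real step has to mirror an actual schema change.

# Hypothetical example: the version number and 'listing_currency' column
# are invented purely to illustrate the @downgrades registration pattern.
@downgrades(8)
def _downgrade_v8(op):
    """Downgrade assets db by removing a hypothetical 'listing_currency'
    column from the equities table.
    """
    with op.batch_alter_table('equities') as batch_op:
        batch_op.drop_column('listing_currency')

Because @downgrades(8) stores the wrapper under destination key 7, a later call to downgrade(engine, 7) against a v8 database would pick this step up from _downgrade_methods[7].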
from abc import ABCMeta, abstractmethod from six import with_metaclass # Number of days over which to compute rolls when finding the current contract # for a volume-rolling contract chain. For more details on why this is needed, # see `VolumeRollFinder.get_contract_center`. ROLL_DAYS_FOR_CURRENT_CONTRACT = 90 class RollFinder(with_metaclass(ABCMeta, object)): """ Abstract base class for calculating when futures contracts are the active contract. """ @abstractmethod def _active_contract(self, oc, front, back, dt): raise NotImplementedError def _get_active_contract_at_offset(self, root_symbol, dt, offset): """ For the given root symbol, find the contract that is considered active on a specific date at a specific offset. """ oc = self.asset_finder.get_ordered_contracts(root_symbol) session = self.trading_calendar.minute_to_session_label(dt) front = oc.contract_before_auto_close(session.value) back = oc.contract_at_offset(front, 1, dt.value) if back is None: return front primary = self._active_contract(oc, front, back, session) return oc.contract_at_offset(primary, offset, session.value) def get_contract_center(self, root_symbol, dt, offset): """ Parameters ---------- root_symbol : str The root symbol for the contract chain. dt : Timestamp The datetime for which to retrieve the current contract. offset : int The offset from the primary contract. 0 is the primary, 1 is the secondary, etc. Returns ------- Future The active future contract at the given dt. """ return self._get_active_contract_at_offset(root_symbol, dt, offset) def get_rolls(self, root_symbol, start, end, offset): """ Get the rolls, i.e. the session at which to hop from contract to contract in the chain. Parameters ---------- root_symbol : str The root symbol for which to calculate rolls. start : Timestamp Start of the date range. end : Timestamp End of the date range. offset : int Offset from the primary. Returns ------- rolls - list[tuple(sid, roll_date)] A list of rolls, where first value is the first active `sid`, and the `roll_date` on which to hop to the next contract. The last pair in the chain has a value of `None` since the roll is after the range. """ oc = self.asset_finder.get_ordered_contracts(root_symbol) front = self._get_active_contract_at_offset(root_symbol, end, 0) back = oc.contract_at_offset(front, 1, end.value) if back is not None: end_session = self.trading_calendar.minute_to_session_label(end) first = self._active_contract(oc, front, back, end_session) else: first = front first_contract = oc.sid_to_contract[first] rolls = [((first_contract >> offset).contract.sid, None)] tc = self.trading_calendar sessions = tc.sessions_in_range(tc.minute_to_session_label(start), tc.minute_to_session_label(end)) freq = sessions.freq if first == front: # This is a bit tricky to grasp. Once we have the active contract # on the given end date, we want to start walking backwards towards # the start date and checking for rolls. For this, we treat the # previous month's contract as the 'first' contract, and the # contract we just found to be active as the 'back'. As we walk # towards the start date, if the 'back' is no longer active, we add # that date as a roll. 
curr = first_contract << 1 else: curr = first_contract << 2 session = sessions[-1] while session > start and curr is not None: front = curr.contract.sid back = rolls[0][0] prev_c = curr.prev while session > start: prev = session - freq if prev_c is not None: if prev < prev_c.contract.auto_close_date: break if back != self._active_contract(oc, front, back, prev): # TODO: Instead of listing each contract with its roll date # as tuples, create a series which maps every day to the # active contract on that day. rolls.insert(0, ((curr >> offset).contract.sid, session)) break session = prev curr = curr.prev if curr is not None: session = min(session, curr.contract.auto_close_date + freq) return rolls class CalendarRollFinder(RollFinder): """ The CalendarRollFinder calculates contract rolls based purely on the contract's auto close date. """ def __init__(self, trading_calendar, asset_finder): self.trading_calendar = trading_calendar self.asset_finder = asset_finder def _active_contract(self, oc, front, back, dt): contract = oc.sid_to_contract[front].contract auto_close_date = contract.auto_close_date auto_closed = dt >= auto_close_date return back if auto_closed else front class VolumeRollFinder(RollFinder): """ The VolumeRollFinder calculates contract rolls based on when volume activity transfers from one contract to another. """ GRACE_DAYS = 7 def __init__(self, trading_calendar, asset_finder, session_reader): self.trading_calendar = trading_calendar self.asset_finder = asset_finder self.session_reader = session_reader def _active_contract(self, oc, front, back, dt): r""" Return the active contract based on the previous trading day's volume. In the rare case that a double volume switch occurs we treat the first switch as the roll. Take the following case for example: | +++++ _____ | + __ / <--- 'G' | ++/++\++++/++ | _/ \__/ + | / + | ____/ + <--- 'F' |_________|__|___|________ a b c <--- Switches We should treat 'a' as the roll date rather than 'c' because from the perspective of 'a', if a switch happens and we are pretty close to the auto-close date, we would probably assume it is time to roll. This means that for every date after 'a', `data.current(cf, 'contract')` should return the 'G' contract. """ front_contract = oc.sid_to_contract[front].contract back_contract = oc.sid_to_contract[back].contract tc = self.trading_calendar trading_day = tc.day prev = dt - trading_day get_value = self.session_reader.get_value # If the front contract is past its auto close date it cannot be the # active contract, so return the back contract. Similarly, if the back # contract has not even started yet, just return the front contract. # The reason for using 'prev' to see if the contracts are alive instead # of using 'dt' is because we need to get each contract's volume on the # previous day, so we need to make sure that each contract exists on # 'prev' in order to call 'get_value' below. 
if dt > min(front_contract.auto_close_date, front_contract.end_date): return back elif front_contract.start_date > prev: return back elif dt > min(back_contract.auto_close_date, back_contract.end_date): return front elif back_contract.start_date > prev: return front front_vol = get_value(front, prev, 'volume') back_vol = get_value(back, prev, 'volume') if back_vol > front_vol: return back gap_start = max( back_contract.start_date, front_contract.auto_close_date - (trading_day * self.GRACE_DAYS), ) gap_end = prev - trading_day if dt < gap_start: return front # If we are within `self.GRACE_DAYS` of the front contract's auto close # date, and a volume flip happened during that period, return the back # contract as the active one. sessions = tc.sessions_in_range( tc.minute_to_session_label(gap_start), tc.minute_to_session_label(gap_end), ) for session in sessions: front_vol = get_value(front, session, 'volume') back_vol = get_value(back, session, 'volume') if back_vol > front_vol: return back return front def get_contract_center(self, root_symbol, dt, offset): """ Parameters ---------- root_symbol : str The root symbol for the contract chain. dt : Timestamp The datetime for which to retrieve the current contract. offset : int The offset from the primary contract. 0 is the primary, 1 is the secondary, etc. Returns ------- Future The active future contract at the given dt. """ # When determining the center contract on a specific day using volume # rolls, simply picking the contract with the highest volume could # cause flip-flopping between active contracts each day if the front # and back contracts are close in volume. Therefore, information about # the surrounding rolls is required. The `get_rolls` logic prevents # contracts from being considered active once they have rolled, so # incorporating that logic here prevents flip-flopping. day = self.trading_calendar.day end_date = min( dt + (ROLL_DAYS_FOR_CURRENT_CONTRACT * day), self.session_reader.last_available_dt, ) rolls = self.get_rolls( root_symbol=root_symbol, start=dt, end=end_date, offset=offset, ) sid, acd = rolls[0] return self.asset_finder.retrieve_asset(sid)
zipline-trader
/zipline_trader-1.6.1-cp36-cp36m-win32.whl/zipline/assets/roll_finder.py
roll_finder.py
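The TODO inside get_rolls suggests mapping every session to its active contract rather than returning (sid, roll_date) pairs. A hedged sketch of that consumption pattern follows; the helper name is made up, and it assumes the convention that the hop takes effect on the roll date itself.

# Sketch only: expand the (sid, roll_date) pairs from get_rolls() into a
# per-session Series of the active sid. Assumes the new contract becomes
# active on its roll date; adjust if a different convention is wanted.
import pandas as pd

def rolls_to_active_sids(rolls, sessions):
    """Return a Series indexed by session whose values are the active sid."""
    active = pd.Series(index=sessions, dtype=object)
    segment_start = sessions[0]
    for sid, roll_date in rolls:
        segment_end = sessions[-1] if roll_date is None else roll_date
        active.loc[segment_start:segment_end] = sid
        if roll_date is not None:
            # Sessions from the roll date onward are overwritten by the
            # next contract in the chain on the following iteration.
            segment_start = roll_date
    return active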
import sys import logbook import numpy as np from zipline.finance import commission, slippage zipline_logging = logbook.NestedSetup([ logbook.NullHandler(), logbook.StreamHandler(sys.stdout, level=logbook.INFO), logbook.StreamHandler(sys.stderr, level=logbook.ERROR), ]) zipline_logging.push_application() STOCKS = ['AMD', 'CERN', 'COST', 'DELL', 'GPS', 'INTC', 'MMM'] # On-Line Portfolio Moving Average Reversion # More info can be found in the corresponding paper: # http://icml.cc/2012/papers/168.pdf def initialize(algo, eps=1, window_length=5): algo.stocks = STOCKS algo.sids = [algo.symbol(symbol) for symbol in algo.stocks] algo.m = len(algo.stocks) algo.price = {} algo.b_t = np.ones(algo.m) / algo.m algo.last_desired_port = np.ones(algo.m) / algo.m algo.eps = eps algo.init = True algo.days = 0 algo.window_length = window_length algo.set_commission(commission.PerShare(cost=0, min_trade_cost=1.0)) algo.set_slippage(slippage.VolumeShareSlippage()) def handle_data(algo, data): algo.days += 1 if algo.days < algo.window_length: return if algo.init: rebalance_portfolio(algo, data, algo.b_t) algo.init = False return m = algo.m x_tilde = np.zeros(m) # find relative moving average price for each asset mavgs = data.history(algo.sids, 'price', algo.window_length, '1d').mean() for i, sid in enumerate(algo.sids): price = data.current(sid, "price") # Relative mean deviation x_tilde[i] = mavgs[sid] / price ########################### # Inside of OLMAR (algo 2) x_bar = x_tilde.mean() # market relative deviation mark_rel_dev = x_tilde - x_bar # Expected return with current portfolio exp_return = np.dot(algo.b_t, x_tilde) weight = algo.eps - exp_return variability = (np.linalg.norm(mark_rel_dev)) ** 2 # test for divide-by-zero case if variability == 0.0: step_size = 0 else: step_size = max(0, weight / variability) b = algo.b_t + step_size * mark_rel_dev b_norm = simplex_projection(b) np.testing.assert_almost_equal(b_norm.sum(), 1) rebalance_portfolio(algo, data, b_norm) # update portfolio algo.b_t = b_norm def rebalance_portfolio(algo, data, desired_port): # rebalance portfolio desired_amount = np.zeros_like(desired_port) current_amount = np.zeros_like(desired_port) prices = np.zeros_like(desired_port) if algo.init: positions_value = algo.portfolio.starting_cash else: positions_value = algo.portfolio.positions_value + \ algo.portfolio.cash for i, sid in enumerate(algo.sids): current_amount[i] = algo.portfolio.positions[sid].amount prices[i] = data.current(sid, "price") desired_amount = np.round(desired_port * positions_value / prices) algo.last_desired_port = desired_port diff_amount = desired_amount - current_amount for i, sid in enumerate(algo.sids): algo.order(sid, diff_amount[i]) def simplex_projection(v, b=1): r"""Projection vectors to the simplex domain Implemented according to the paper: Efficient projections onto the l1-ball for learning in high dimensions, John Duchi, et al. ICML 2008. Implementation Time: 2011 June 17 by Bin@libin AT pmail.ntu.edu.sg Optimization Problem: min_{w}\| w - v \|_{2}^{2} s.t. sum_{i=1}^{m}=z, w_{i}\geq 0 Input: A vector v \in R^{m}, and a scalar z > 0 (default=1) Output: Projection vector w :Example: >>> proj = simplex_projection([.4 ,.3, -.4, .5]) >>> proj # doctest: +NORMALIZE_WHITESPACE array([ 0.33333333, 0.23333333, 0. , 0.43333333]) >>> print(proj.sum()) 1.0 Original matlab implementation: John Duchi ([email protected]) Python-port: Copyright 2013 by Thomas Wiecki ([email protected]). 
""" v = np.asarray(v) p = len(v) # Sort v into u in descending order v = (v > 0) * v u = np.sort(v)[::-1] sv = np.cumsum(u) rho = np.where(u > (sv - b) / np.arange(1, p + 1))[0][-1] theta = np.max([0, (sv[rho] - b) / (rho + 1)]) w = (v - theta) w[w < 0] = 0 return w # Note: this function can be removed if running # this algorithm on quantopian.com def analyze(context=None, results=None): import matplotlib.pyplot as plt fig = plt.figure() ax = fig.add_subplot(111) results.portfolio_value.plot(ax=ax) ax.set_ylabel('Portfolio value (USD)') plt.show() def _test_args(): """Extra arguments to use when zipline's automated tests run this example. """ import pandas as pd return { 'start': pd.Timestamp('2004', tz='utc'), 'end': pd.Timestamp('2008', tz='utc'), }
zipline-trader
/zipline_trader-1.6.1-cp36-cp36m-win32.whl/zipline/examples/olmar.py
olmar.py
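To make the update step in handle_data concrete, here is a small numeric walk-through with made-up moving-average/price ratios. simplex_projection is the function defined in olmar.py above, and eps matches the default used in initialize.

# Sketch only: the x_tilde values are synthetic; the algebra follows the
# OLMAR step in handle_data above.
import numpy as np
from zipline.examples.olmar import simplex_projection

b_t = np.ones(3) / 3.0                      # current (equal) weights
x_tilde = np.array([1.03, 0.95, 0.99])      # made-up mavg/price ratios
eps = 1

mark_rel_dev = x_tilde - x_tilde.mean()
exp_return = np.dot(b_t, x_tilde)           # 0.99 with these numbers
variability = np.linalg.norm(mark_rel_dev) ** 2
step_size = max(0.0, (eps - exp_return) / variability) if variability else 0.0

b_new = simplex_projection(b_t + step_size * mark_rel_dev)
assert abs(b_new.sum() - 1.0) < 1e-8        # weights stay on the simplex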
from zipline.api import order, record, symbol from zipline.finance import commission, slippage # Import exponential moving average from talib wrapper try: from talib import EMA except ImportError: msg = "Unable to import module TA-lib. Use `pip install TA-lib` to "\ "install. Note: if installation fails, you might need to install "\ "the underlying TA-lib library (more information can be found in "\ "the zipline installation documentation)." raise ImportError(msg) def initialize(context): context.asset = symbol('AAPL') # To keep track of whether we invested in the stock or not context.invested = False # Explicitly set the commission/slippage to the "old" value until we can # rebuild example data. # github.com/quantopian/zipline/blob/master/tests/resources/ # rebuild_example_data#L105 context.set_commission(commission.PerShare(cost=.0075, min_trade_cost=1.0)) context.set_slippage(slippage.VolumeShareSlippage()) def handle_data(context, data): trailing_window = data.history(context.asset, 'price', 40, '1d') if trailing_window.isnull().values.any(): return short_ema = EMA(trailing_window.values, timeperiod=20) long_ema = EMA(trailing_window.values, timeperiod=40) buy = False sell = False if (short_ema[-1] > long_ema[-1]) and not context.invested: order(context.asset, 100) context.invested = True buy = True elif (short_ema[-1] < long_ema[-1]) and context.invested: order(context.asset, -100) context.invested = False sell = True record(AAPL=data.current(context.asset, "price"), short_ema=short_ema[-1], long_ema=long_ema[-1], buy=buy, sell=sell) # Note: this function can be removed if running # this algorithm on quantopian.com def analyze(context=None, results=None): import matplotlib.pyplot as plt import logbook logbook.StderrHandler().push_application() log = logbook.Logger('Algorithm') fig = plt.figure() ax1 = fig.add_subplot(211) results.portfolio_value.plot(ax=ax1) ax1.set_ylabel('Portfolio value (USD)') ax2 = fig.add_subplot(212) ax2.set_ylabel('Price (USD)') # If data has been record()ed, then plot it. # Otherwise, log the fact that no data has been recorded. if 'AAPL' in results and 'short_ema' in results and 'long_ema' in results: results[['AAPL', 'short_ema', 'long_ema']].plot(ax=ax2) ax2.plot( results.index[results.buy], results.loc[results.buy, 'long_ema'], '^', markersize=10, color='m', ) ax2.plot( results.index[results.sell], results.loc[results.sell, 'short_ema'], 'v', markersize=10, color='k', ) plt.legend(loc=0) plt.gcf().set_size_inches(18, 8) else: msg = 'AAPL, short_ema and long_ema data not captured using record().' ax2.annotate(msg, xy=(0.1, 0.5)) log.info(msg) plt.show() def _test_args(): """Extra arguments to use when zipline's automated tests run this example. """ import pandas as pd return { 'start': pd.Timestamp('2014-01-01', tz='utc'), 'end': pd.Timestamp('2014-11-01', tz='utc'), }
zipline-trader
/zipline_trader-1.6.1-cp36-cp36m-win32.whl/zipline/examples/dual_ema_talib.py
dual_ema_talib.py
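The crossover test in handle_data reduces to comparing the last values of two TA-Lib EMAs. A standalone sketch with synthetic prices is below; TA-Lib must be installed and the numbers are made up.

# Sketch only: synthetic uptrending closes stand in for data.history().
import numpy as np
from talib import EMA

prices = np.linspace(100.0, 130.0, 60)        # made-up closing prices
short_ema = EMA(prices, timeperiod=20)
long_ema = EMA(prices, timeperiod=40)

should_buy = short_ema[-1] > long_ema[-1]     # same condition handle_data uses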
from six import viewkeys from zipline.api import ( attach_pipeline, date_rules, order_target_percent, pipeline_output, record, schedule_function, ) from zipline.finance import commission, slippage from zipline.pipeline import Pipeline from zipline.pipeline.factors import RSI def make_pipeline(): rsi = RSI() return Pipeline( columns={ 'longs': rsi.top(3), 'shorts': rsi.bottom(3), }, ) def rebalance(context, data): # Pipeline data will be a dataframe with boolean columns named 'longs' and # 'shorts'. pipeline_data = context.pipeline_data all_assets = pipeline_data.index longs = all_assets[pipeline_data.longs] shorts = all_assets[pipeline_data.shorts] record(universe_size=len(all_assets)) # Build a 2x-leveraged, equal-weight, long-short portfolio. one_third = 1.0 / 3.0 for asset in longs: order_target_percent(asset, one_third) for asset in shorts: order_target_percent(asset, -one_third) # Remove any assets that should no longer be in our portfolio. portfolio_assets = longs | shorts positions = context.portfolio.positions for asset in viewkeys(positions) - set(portfolio_assets): # This will fail if the asset was removed from our portfolio because it # was delisted. if data.can_trade(asset): order_target_percent(asset, 0) def initialize(context): attach_pipeline(make_pipeline(), 'my_pipeline') # Rebalance each day. In daily mode, this is equivalent to putting # `rebalance` in our handle_data, but in minute mode, it's equivalent to # running at the start of the day each day. schedule_function(rebalance, date_rules.every_day()) # Explicitly set the commission/slippage to the "old" value until we can # rebuild example data. # github.com/quantopian/zipline/blob/master/tests/resources/ # rebuild_example_data#L105 context.set_commission(commission.PerShare(cost=.0075, min_trade_cost=1.0)) context.set_slippage(slippage.VolumeShareSlippage()) def before_trading_start(context, data): context.pipeline_data = pipeline_output('my_pipeline') def _test_args(): """ Extra arguments to use when zipline's automated tests run this example. Notes for testers: Gross leverage should be roughly 2.0 on every day except the first. Net leverage should be roughly 2.0 on every day except the first. Longs Count should always be 3 after the first day. Shorts Count should be 3 after the first day, except on 2013-10-30, when it dips to 2 for a day because DELL is delisted. """ import pandas as pd return { # We run through october of 2013 because DELL is in the test data and # it went private on 2013-10-29. 'start': pd.Timestamp('2013-10-07', tz='utc'), 'end': pd.Timestamp('2013-11-30', tz='utc'), 'capital_base': 100000, }
zipline-trader
/zipline_trader-1.6.1-cp36-cp36m-win32.whl/zipline/examples/momentum_pipeline.py
momentum_pipeline.py
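As a quick sanity check on the book rebalance() builds: three longs at +1/3 of capital and three shorts at -1/3 sum to a gross exposure of 2x, which is what the "2x-leveraged" comment and the tester notes about gross leverage refer to.

# Back-of-envelope arithmetic only; no zipline objects involved.
one_third = 1.0 / 3.0
target_weights = [one_third] * 3 + [-one_third] * 3   # 3 longs, 3 shorts
gross_leverage = sum(abs(w) for w in target_weights)  # 2.0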
from importlib import import_module import os from toolz import merge from trading_calendars import register_calendar, get_calendar from zipline import run_algorithm # These are used by test_examples.py to discover the examples to run. def load_example_modules(): example_modules = {} for f in os.listdir(os.path.dirname(__file__)): if not f.endswith('.py') or f == '__init__.py': continue modname = f[:-len('.py')] mod = import_module('.' + modname, package=__name__) example_modules[modname] = mod globals()[modname] = mod # Remove noise from loop variables. del f, modname, mod return example_modules # Columns that we expect to be able to reliably deterministic # Doesn't include fields that have UUIDS. _cols_to_check = [ 'algo_volatility', 'algorithm_period_return', 'alpha', 'benchmark_period_return', 'benchmark_volatility', 'beta', 'capital_used', 'ending_cash', 'ending_exposure', 'ending_value', 'excess_return', 'gross_leverage', 'long_exposure', 'long_value', 'longs_count', 'max_drawdown', 'max_leverage', 'net_leverage', 'period_close', 'period_label', 'period_open', 'pnl', 'portfolio_value', 'positions', 'returns', 'short_exposure', 'short_value', 'shorts_count', 'sortino', 'starting_cash', 'starting_exposure', 'starting_value', 'trading_days', 'treasury_period_return', ] def run_example(example_modules, example_name, environ, benchmark_returns=None): """ Run an example module from zipline.examples. """ mod = example_modules[example_name] register_calendar("YAHOO", get_calendar("NYSE"), force=True) return run_algorithm( initialize=getattr(mod, 'initialize', None), handle_data=getattr(mod, 'handle_data', None), before_trading_start=getattr(mod, 'before_trading_start', None), analyze=getattr(mod, 'analyze', None), bundle='test', environ=environ, benchmark_returns=benchmark_returns, # Provide a default capital base, but allow the test to override. **merge({'capital_base': 1e7}, mod._test_args()) )
zipline-trader
/zipline_trader-1.6.1-cp36-cp36m-win32.whl/zipline/examples/__init__.py
__init__.py
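A sketch of driving run_example() directly from a script. It assumes the 'test' data bundle referenced above has already been ingested and that the named example exists in this package; neither is guaranteed in an arbitrary environment.

# Sketch only: the example name and the printed columns are choices made
# for illustration, not requirements of run_example().
import os
from zipline.examples import load_example_modules, run_example

modules = load_example_modules()
perf = run_example(
    example_modules=modules,
    example_name='dual_moving_average',
    environ=os.environ,
)
print(perf[['portfolio_value', 'returns']].tail())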
from zipline.api import order_target, record, symbol from zipline.finance import commission, slippage def initialize(context): context.sym = symbol('AAPL') context.i = 0 # Explicitly set the commission/slippage to the "old" value until we can # rebuild example data. # github.com/quantopian/zipline/blob/master/tests/resources/ # rebuild_example_data#L105 context.set_commission(commission.PerShare(cost=.0075, min_trade_cost=1.0)) context.set_slippage(slippage.VolumeShareSlippage()) def handle_data(context, data): # Skip first 300 days to get full windows context.i += 1 if context.i < 300: return # Compute averages # history() has to be called with the same params # from above and returns a pandas dataframe. short_mavg = data.history(context.sym, 'price', 100, '1d').mean() long_mavg = data.history(context.sym, 'price', 300, '1d').mean() # Trading logic if short_mavg > long_mavg: # order_target orders as many shares as needed to # achieve the desired number of shares. order_target(context.sym, 100) elif short_mavg < long_mavg: order_target(context.sym, 0) # Save values for later inspection record(AAPL=data.current(context.sym, "price"), short_mavg=short_mavg, long_mavg=long_mavg) # Note: this function can be removed if running # this algorithm on quantopian.com def analyze(context=None, results=None): import matplotlib.pyplot as plt import logbook logbook.StderrHandler().push_application() log = logbook.Logger('Algorithm') fig = plt.figure() ax1 = fig.add_subplot(211) results.portfolio_value.plot(ax=ax1) ax1.set_ylabel('Portfolio value (USD)') ax2 = fig.add_subplot(212) ax2.set_ylabel('Price (USD)') # If data has been record()ed, then plot it. # Otherwise, log the fact that no data has been recorded. if ('AAPL' in results and 'short_mavg' in results and 'long_mavg' in results): results['AAPL'].plot(ax=ax2) results[['short_mavg', 'long_mavg']].plot(ax=ax2) trans = results.loc[[t != [] for t in results.transactions]] buys = trans.loc[[t[0]['amount'] > 0 for t in trans.transactions]] sells = trans.loc[ [t[0]['amount'] < 0 for t in trans.transactions]] ax2.plot(buys.index, results.short_mavg.loc[buys.index], '^', markersize=10, color='m') ax2.plot(sells.index, results.short_mavg.loc[sells.index], 'v', markersize=10, color='k') plt.legend(loc=0) else: msg = 'AAPL, short_mavg & long_mavg data not captured using record().' ax2.annotate(msg, xy=(0.1, 0.5)) log.info(msg) plt.show() def _test_args(): """Extra arguments to use when zipline's automated tests run this example. """ import pandas as pd return { 'start': pd.Timestamp('2011', tz='utc'), 'end': pd.Timestamp('2013', tz='utc'), }
zipline-trader
/zipline_trader-1.6.1-cp36-cp36m-win32.whl/zipline/examples/dual_moving_average.py
dual_moving_average.py
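# --- Usage sketch (illustrative, not part of the original source) --------
# Running the dual moving average example directly through run_algorithm,
# mirroring what run_example() in zipline/examples/__init__.py does for the
# test suite.  The bundle name, capital base, and flat benchmark series are
# assumptions for illustration; any ingested daily bundle containing AAPL
# would do, and benchmark handling may differ in a given zipline-trader
# setup.
import pandas as pd

from zipline import run_algorithm
from zipline.examples import dual_moving_average as dma

perf = run_algorithm(
    start=pd.Timestamp('2011', tz='utc'),
    end=pd.Timestamp('2013', tz='utc'),
    initialize=dma.initialize,
    handle_data=dma.handle_data,
    analyze=dma.analyze,
    capital_base=100000,
    data_frequency='daily',
    bundle='quantopian-quandl',
    # A flat benchmark avoids any external benchmark data dependency.
    benchmark_returns=pd.Series(
        0.0, index=pd.date_range('2011', '2013', tz='utc')
    ),
)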
import pandas as pd from zipline.errors import ( InvalidBenchmarkAsset, BenchmarkAssetNotAvailableTooEarly, BenchmarkAssetNotAvailableTooLate ) class BenchmarkSource(object): def __init__(self, benchmark_asset, trading_calendar, sessions, data_portal, emission_rate="daily", benchmark_returns=None): self.benchmark_asset = benchmark_asset self.sessions = sessions self.emission_rate = emission_rate self.data_portal = data_portal if len(sessions) == 0: self._precalculated_series = pd.Series() elif benchmark_asset is not None: self._validate_benchmark(benchmark_asset) (self._precalculated_series, self._daily_returns) = self._initialize_precalculated_series( benchmark_asset, trading_calendar, sessions, data_portal ) elif benchmark_returns is not None: self._daily_returns = daily_series = benchmark_returns.reindex( sessions, ).fillna(0) if self.emission_rate == "minute": # we need to take the env's benchmark returns, which are daily, # and resample them to minute minutes = trading_calendar.minutes_for_sessions_in_range( sessions[0], sessions[-1] ) minute_series = daily_series.reindex( index=minutes, method="ffill" ) self._precalculated_series = minute_series else: self._precalculated_series = daily_series else: raise Exception("Must provide either benchmark_asset or " "benchmark_returns.") def get_value(self, dt): """Look up the returns for a given dt. Parameters ---------- dt : datetime The label to look up. Returns ------- returns : float The returns at the given dt or session. See Also -------- :class:`zipline.sources.benchmark_source.BenchmarkSource.daily_returns` .. warning:: This method expects minute inputs if ``emission_rate == 'minute'`` and session labels when ``emission_rate == 'daily``. """ return self._precalculated_series.loc[dt] def get_range(self, start_dt, end_dt): """Look up the returns for a given period. Parameters ---------- start_dt : datetime The inclusive start label. end_dt : datetime The inclusive end label. Returns ------- returns : pd.Series The series of returns. See Also -------- :class:`zipline.sources.benchmark_source.BenchmarkSource.daily_returns` .. warning:: This method expects minute inputs if ``emission_rate == 'minute'`` and session labels when ``emission_rate == 'daily``. """ return self._precalculated_series.loc[start_dt:end_dt] def daily_returns(self, start, end=None): """Returns the daily returns for the given period. Parameters ---------- start : datetime The inclusive starting session label. end : datetime, optional The inclusive ending session label. If not provided, treat ``start`` as a scalar key. Returns ------- returns : pd.Series or float The returns in the given period. The index will be the trading calendar in the range [start, end]. If just ``start`` is provided, return the scalar value on that day. """ if end is None: return self._daily_returns[start] return self._daily_returns[start:end] def _validate_benchmark(self, benchmark_asset): # check if this security has a stock dividend. if so, raise an # error suggesting that the user pick a different asset to use # as benchmark. 
stock_dividends = \ self.data_portal.get_stock_dividends(self.benchmark_asset, self.sessions) if len(stock_dividends) > 0: raise InvalidBenchmarkAsset( sid=str(self.benchmark_asset), dt=stock_dividends[0]["ex_date"] ) if benchmark_asset.start_date > self.sessions[0]: # the asset started trading after the first simulation day raise BenchmarkAssetNotAvailableTooEarly( sid=str(self.benchmark_asset), dt=self.sessions[0], start_dt=benchmark_asset.start_date ) if benchmark_asset.end_date < self.sessions[-1]: # the asset stopped trading before the last simulation day raise BenchmarkAssetNotAvailableTooLate( sid=str(self.benchmark_asset), dt=self.sessions[-1], end_dt=benchmark_asset.end_date ) @staticmethod def _compute_daily_returns(g): return (g[-1] - g[0]) / g[0] @classmethod def downsample_minute_return_series(cls, trading_calendar, minutely_returns): sessions = trading_calendar.minute_index_to_session_labels( minutely_returns.index, ) closes = trading_calendar.session_closes_in_range( sessions[0], sessions[-1], ) daily_returns = minutely_returns[closes].pct_change() daily_returns.index = closes.index return daily_returns.iloc[1:] def _initialize_precalculated_series(self, asset, trading_calendar, trading_days, data_portal): """ Internal method that pre-calculates the benchmark return series for use in the simulation. Parameters ---------- asset: Asset to use trading_calendar: TradingCalendar trading_days: pd.DateTimeIndex data_portal: DataPortal Notes ----- If the benchmark asset started trading after the simulation start, or finished trading before the simulation end, exceptions are raised. If the benchmark asset started trading the same day as the simulation start, the first available minute price on that day is used instead of the previous close. We use history to get an adjusted price history for each day's close, as of the look-back date (the last day of the simulation). Prices are fully adjusted for dividends, splits, and mergers. Returns ------- returns : pd.Series indexed by trading day, whose values represent the % change from close to close. daily_returns : pd.Series the partial daily returns for each minute """ if self.emission_rate == "minute": minutes = trading_calendar.minutes_for_sessions_in_range( self.sessions[0], self.sessions[-1] ) benchmark_series = data_portal.get_history_window( [asset], minutes[-1], bar_count=len(minutes) + 1, frequency="1m", field="price", data_frequency=self.emission_rate, ffill=True )[asset] return ( benchmark_series.pct_change()[1:], self.downsample_minute_return_series( trading_calendar, benchmark_series, ), ) start_date = asset.start_date if start_date < trading_days[0]: # get the window of close prices for benchmark_asset from the # last trading day of the simulation, going up to one day # before the simulation start day (so that we can get the % # change on day 1) benchmark_series = data_portal.get_history_window( [asset], trading_days[-1], bar_count=len(trading_days) + 1, frequency="1d", field="price", data_frequency=self.emission_rate, ffill=True )[asset] returns = benchmark_series.pct_change()[1:] return returns, returns elif start_date == trading_days[0]: # Attempt to handle case where stock data starts on first # day, in this case use the open to close return. 
            benchmark_series = data_portal.get_history_window(
                [asset],
                trading_days[-1],
                bar_count=len(trading_days),
                frequency="1d",
                field="price",
                data_frequency=self.emission_rate,
                ffill=True
            )[asset]

            # Use the daily open and close of the first session to compute
            # the day-one return; there is no prior close to difference
            # against.
            first_open = data_portal.get_spot_value(
                asset,
                'open',
                trading_days[0],
                'daily',
            )
            first_close = data_portal.get_spot_value(
                asset,
                'close',
                trading_days[0],
                'daily',
            )

            first_day_return = (first_close - first_open) / first_open

            returns = benchmark_series.pct_change()[:]
            returns[0] = first_day_return
            return returns, returns
        else:
            raise ValueError(
                'cannot set benchmark to asset that does not exist during'
                ' the simulation period (asset start date=%r)' % start_date
            )
zipline-trader
/zipline_trader-1.6.1-cp36-cp36m-win32.whl/zipline/sources/benchmark_source.py
benchmark_source.py
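# --- Usage sketch (illustrative, not part of the original source) --------
# Building a BenchmarkSource from a precomputed daily returns series, which
# is the branch taken when benchmark_asset is None.  On that branch the
# data_portal is stored but never consulted, so None is assumed to be an
# acceptable placeholder here; in a real simulation the algorithm's
# DataPortal would be passed instead.  The constant 0.1% daily return is
# made up for illustration.
import pandas as pd
from trading_calendars import get_calendar

from zipline.sources.benchmark_source import BenchmarkSource

cal = get_calendar('NYSE')
sessions = cal.sessions_in_range(
    pd.Timestamp('2014-01-02', tz='UTC'),
    pd.Timestamp('2014-06-30', tz='UTC'),
)
bm_returns = pd.Series(0.001, index=sessions)

source = BenchmarkSource(
    benchmark_asset=None,
    trading_calendar=cal,
    sessions=sessions,
    data_portal=None,
    emission_rate='daily',
    benchmark_returns=bm_returns,
)

print(source.get_value(sessions[10]))               # one session's return
print(source.get_range(sessions[0], sessions[4]))   # first trading week
print(source.daily_returns(sessions[0], sessions[-1]).head())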
from abc import ABCMeta, abstractmethod from collections import namedtuple import hashlib from textwrap import dedent import warnings from logbook import Logger import numpy import pandas as pd from pandas import read_csv import pytz import requests from six import StringIO, iteritems, with_metaclass from zipline.errors import ( MultipleSymbolsFound, SymbolNotFound, ZiplineError ) from zipline.protocol import ( DATASOURCE_TYPE, Event ) from zipline.assets import Equity logger = Logger('Requests Source Logger') def roll_dts_to_midnight(dts, trading_day): if len(dts) == 0: return dts return pd.DatetimeIndex( (dts.tz_convert('US/Eastern') - pd.Timedelta(hours=16)).date, tz='UTC', ) + trading_day class FetcherEvent(Event): pass class FetcherCSVRedirectError(ZiplineError): msg = dedent( """\ Attempt to fetch_csv from a redirected url. {url} must be changed to {new_url} """ ) def __init__(self, *args, **kwargs): self.url = kwargs["url"] self.new_url = kwargs["new_url"] self.extra = kwargs["extra"] super(FetcherCSVRedirectError, self).__init__(*args, **kwargs) # The following optional arguments are supported for # requests backed data sources. # see https://requests.readthedocs.io/en/latest/api/#main-interface # for a full list. ALLOWED_REQUESTS_KWARGS = { 'params', 'headers', 'auth', 'cert' } # The following optional arguments are supported for pandas' read_csv # function, and may be passed as kwargs to the datasource below. # see https://pandas.pydata.org/ # pandas-docs/stable/generated/pandas.io.parsers.read_csv.html ALLOWED_READ_CSV_KWARGS = { 'sep', 'dialect', 'doublequote', 'escapechar', 'quotechar', 'quoting', 'skipinitialspace', 'lineterminator', 'header', 'index_col', 'names', 'prefix', 'skiprows', 'skipfooter', 'skip_footer', 'na_values', 'true_values', 'false_values', 'delimiter', 'converters', 'dtype', 'delim_whitespace', 'as_recarray', 'na_filter', 'compact_ints', 'use_unsigned', 'buffer_lines', 'warn_bad_lines', 'error_bad_lines', 'keep_default_na', 'thousands', 'comment', 'decimal', 'keep_date_col', 'nrows', 'chunksize', 'encoding', 'usecols' } SHARED_REQUESTS_KWARGS = { 'stream': True, 'allow_redirects': False, } def mask_requests_args(url, validating=False, params_checker=None, **kwargs): requests_kwargs = {key: val for (key, val) in iteritems(kwargs) if key in ALLOWED_REQUESTS_KWARGS} if params_checker is not None: url, s_params = params_checker(url) if s_params: if 'params' in requests_kwargs: requests_kwargs['params'].update(s_params) else: requests_kwargs['params'] = s_params # Giving the connection 30 seconds. This timeout does not # apply to the download of the response body. 
# (Note that Quandl links can take >10 seconds to return their # first byte on occasion) requests_kwargs['timeout'] = 1.0 if validating else 30.0 requests_kwargs.update(SHARED_REQUESTS_KWARGS) request_pair = namedtuple("RequestPair", ("requests_kwargs", "url")) return request_pair(requests_kwargs, url) class PandasCSV(with_metaclass(ABCMeta, object)): def __init__(self, pre_func, post_func, asset_finder, trading_day, start_date, end_date, date_column, date_format, timezone, symbol, mask, symbol_column, data_frequency, country_code, **kwargs): self.start_date = start_date self.end_date = end_date self.date_column = date_column self.date_format = date_format self.timezone = timezone self.mask = mask self.symbol_column = symbol_column or "symbol" self.data_frequency = data_frequency self.country_code = country_code invalid_kwargs = set(kwargs) - ALLOWED_READ_CSV_KWARGS if invalid_kwargs: raise TypeError( "Unexpected keyword arguments: %s" % invalid_kwargs, ) self.pandas_kwargs = self.mask_pandas_args(kwargs) self.symbol = symbol self.finder = asset_finder self.trading_day = trading_day self.pre_func = pre_func self.post_func = post_func @property def fields(self): return self.df.columns.tolist() def get_hash(self): return self.namestring @abstractmethod def fetch_data(self): return @staticmethod def parse_date_str_series(format_str, tz, date_str_series, data_frequency, trading_day): """ Efficient parsing for a 1d Pandas/numpy object containing string representations of dates. Note: pd.to_datetime is significantly faster when no format string is passed, and in pandas 0.12.0 the %p strptime directive is not correctly handled if a format string is explicitly passed, but AM/PM is handled properly if format=None. Moreover, we were previously ignoring this parameter unintentionally because we were incorrectly passing it as a positional. For all these reasons, we ignore the format_str parameter when parsing datetimes. """ # Explicitly ignoring this parameter. See note above. if format_str is not None: logger.warn( "The 'format_str' parameter to fetch_csv is deprecated. " "Ignoring and defaulting to pandas default date parsing." ) format_str = None tz_str = str(tz) if tz_str == pytz.utc.zone: parsed = pd.to_datetime( date_str_series.values, format=format_str, utc=True, errors='coerce', ) else: parsed = pd.to_datetime( date_str_series.values, format=format_str, errors='coerce', ).tz_localize(tz_str).tz_convert('UTC') if data_frequency == 'daily': parsed = roll_dts_to_midnight(parsed, trading_day) return parsed def mask_pandas_args(self, kwargs): pandas_kwargs = {key: val for (key, val) in iteritems(kwargs) if key in ALLOWED_READ_CSV_KWARGS} if 'usecols' in pandas_kwargs: usecols = pandas_kwargs['usecols'] if usecols and self.date_column not in usecols: # make a new list so we don't modify user's, # and to ensure it is mutable with_date = list(usecols) with_date.append(self.date_column) pandas_kwargs['usecols'] = with_date # No strings in the 'symbol' column should be interpreted as NaNs pandas_kwargs.setdefault('keep_default_na', False) pandas_kwargs.setdefault('na_values', {'symbol': []}) return pandas_kwargs def _lookup_unconflicted_symbol(self, symbol): """ Attempt to find a unique asset whose symbol is the given string. If multiple assets have held the given symbol, return a 0. If no asset has held the given symbol, return a NaN. 
""" try: uppered = symbol.upper() except AttributeError: # The mapping fails because symbol was a non-string return numpy.nan try: return self.finder.lookup_symbol( uppered, as_of_date=None, country_code=self.country_code, ) except MultipleSymbolsFound: # Fill conflicted entries with zeros to mark that they need to be # resolved by date. return 0 except SymbolNotFound: # Fill not found entries with nans. return numpy.nan def load_df(self): df = self.fetch_data() if self.pre_func: df = self.pre_func(df) # Batch-convert the user-specifed date column into timestamps. df['dt'] = self.parse_date_str_series( self.date_format, self.timezone, df[self.date_column], self.data_frequency, self.trading_day, ).values # ignore rows whose dates we couldn't parse df = df[df['dt'].notnull()] if self.symbol is not None: df['sid'] = self.symbol elif self.finder: df.sort_values(by=self.symbol_column, inplace=True) # Pop the 'sid' column off of the DataFrame, just in case the user # has assigned it, and throw a warning try: df.pop('sid') warnings.warn( "Assignment of the 'sid' column of a DataFrame is " "not supported by Fetcher. The 'sid' column has been " "overwritten.", category=UserWarning, stacklevel=2, ) except KeyError: # There was no 'sid' column, so no warning is necessary pass # Fill entries for any symbols that don't require a date to # uniquely identify. Entries for which multiple securities exist # are replaced with zeroes, while entries for which no asset # exists are replaced with NaNs. unique_symbols = df[self.symbol_column].unique() sid_series = pd.Series( data=map(self._lookup_unconflicted_symbol, unique_symbols), index=unique_symbols, name='sid', ) df = df.join(sid_series, on=self.symbol_column) # Fill any zero entries left in our sid column by doing a lookup # using both symbol and the row date. conflict_rows = df[df['sid'] == 0] for row_idx, row in conflict_rows.iterrows(): try: asset = self.finder.lookup_symbol( row[self.symbol_column], # Replacing tzinfo here is necessary because of the # timezone metadata bug described below. row['dt'].replace(tzinfo=pytz.utc), country_code=self.country_code, # It's possible that no asset comes back here if our # lookup date is from before any asset held the # requested symbol. Mark such cases as NaN so that # they get dropped in the next step. ) or numpy.nan except SymbolNotFound: asset = numpy.nan # Assign the resolved asset to the cell df.loc[row_idx, 'sid'] = asset # Filter out rows containing symbols that we failed to find. length_before_drop = len(df) df = df[df['sid'].notnull()] no_sid_count = length_before_drop - len(df) if no_sid_count: logger.warn( "Dropped {} rows from fetched csv.".format(no_sid_count), no_sid_count, extra={'syslog': True}, ) else: df['sid'] = df['symbol'] # Dates are localized to UTC when they come out of # parse_date_str_series, but we need to re-localize them here because # of a bug that wasn't fixed until # https://github.com/pydata/pandas/pull/7092. # We should be able to remove the call to tz_localize once we're on # pandas 0.14.0 # We don't set 'dt' as the index until here because the Symbol parsing # operations above depend on having a unique index for the dataframe, # and the 'dt' column can contain multiple dates for the same entry. 
df.drop_duplicates(["sid", "dt"]) df.set_index(['dt'], inplace=True) df = df.tz_localize('UTC') df.sort_index(inplace=True) cols_to_drop = [self.date_column] if self.symbol is None: cols_to_drop.append(self.symbol_column) df = df[df.columns.drop(cols_to_drop)] if self.post_func: df = self.post_func(df) return df def __iter__(self): asset_cache = {} for dt, series in self.df.iterrows(): if dt < self.start_date: continue if dt > self.end_date: return event = FetcherEvent() # when dt column is converted to be the dataframe's index # the dt column is dropped. So, we need to manually copy # dt into the event. event.dt = dt for k, v in series.iteritems(): # convert numpy integer types to # int. This assumes we are on a 64bit # platform that will not lose information # by casting. # TODO: this is only necessary on the # amazon qexec instances. would be good # to figure out how to use the numpy dtypes # without this check and casting. if isinstance(v, numpy.integer): v = int(v) setattr(event, k, v) # If it has start_date, then it's already an Asset # object from asset_for_symbol, and we don't have to # transform it any further. Checking for start_date is # faster than isinstance. if event.sid in asset_cache: event.sid = asset_cache[event.sid] elif hasattr(event.sid, 'start_date'): # Clone for user algo code, if we haven't already. asset_cache[event.sid] = event.sid elif self.finder and isinstance(event.sid, int): asset = self.finder.retrieve_asset(event.sid, default_none=True) if asset: # Clone for user algo code. event.sid = asset_cache[asset] = asset elif self.mask: # When masking drop all non-mappable values. continue elif self.symbol is None: # If the event's sid property is an int we coerce # it into an Equity. event.sid = asset_cache[event.sid] = Equity(event.sid) event.type = DATASOURCE_TYPE.CUSTOM event.source_id = self.namestring yield event class PandasRequestsCSV(PandasCSV): # maximum 100 megs to prevent DDoS MAX_DOCUMENT_SIZE = (1024 * 1024) * 100 # maximum number of bytes to read in at a time CONTENT_CHUNK_SIZE = 4096 def __init__(self, url, pre_func, post_func, asset_finder, trading_day, start_date, end_date, date_column, date_format, timezone, symbol, mask, symbol_column, data_frequency, country_code, special_params_checker=None, **kwargs): # Peel off extra requests kwargs, forwarding the remaining kwargs to # the superclass. # Also returns possible https updated url if sent to http quandl ds # If url hasn't changed, will just return the original. 
self._requests_kwargs, self.url =\ mask_requests_args(url, params_checker=special_params_checker, **kwargs) remaining_kwargs = { k: v for k, v in iteritems(kwargs) if k not in self.requests_kwargs } self.namestring = type(self).__name__ super(PandasRequestsCSV, self).__init__( pre_func, post_func, asset_finder, trading_day, start_date, end_date, date_column, date_format, timezone, symbol, mask, symbol_column, data_frequency, country_code=country_code, **remaining_kwargs ) self.fetch_size = None self.fetch_hash = None self.df = self.load_df() self.special_params_checker = special_params_checker @property def requests_kwargs(self): return self._requests_kwargs def fetch_url(self, url): info = "checking {url} with {params}" logger.info(info.format(url=url, params=self.requests_kwargs)) # setting decode_unicode=True sometimes results in a # UnicodeEncodeError exception, so instead we'll use # pandas logic for decoding content try: response = requests.get(url, **self.requests_kwargs) except requests.exceptions.ConnectionError: raise Exception('Could not connect to %s' % url) if not response.ok: raise Exception('Problem reaching %s' % url) elif response.is_redirect: # On the offchance we don't catch a redirect URL # in validation, this will catch it. new_url = response.headers['location'] raise FetcherCSVRedirectError( url=url, new_url=new_url, extra={ 'old_url': url, 'new_url': new_url } ) content_length = 0 logger.info('{} connection established in {:.1f} seconds'.format( url, response.elapsed.total_seconds())) # use the decode_unicode flag to ensure that the output of this is # a string, and not bytes. for chunk in response.iter_content(self.CONTENT_CHUNK_SIZE, decode_unicode=True): if content_length > self.MAX_DOCUMENT_SIZE: raise Exception('Document size too big.') if chunk: content_length += len(chunk) yield chunk return def fetch_data(self): # create a data frame directly from the full text of # the response from the returned file-descriptor. data = self.fetch_url(self.url) fd = StringIO() if isinstance(data, str): fd.write(data) else: for chunk in data: fd.write(chunk) self.fetch_size = fd.tell() fd.seek(0) try: # see if pandas can parse csv data frames = read_csv(fd, **self.pandas_kwargs) frames_hash = hashlib.md5(str(fd.getvalue()).encode('utf-8')) self.fetch_hash = frames_hash.hexdigest() except pd.parser.CParserError: # could not parse the data, raise exception raise Exception('Error parsing remote CSV data.') finally: fd.close() return frames
zipline-trader
/zipline_trader-1.6.1-cp36-cp36m-win32.whl/zipline/sources/requests_csv.py
requests_csv.py
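# --- Usage sketch (illustrative, not part of the original source) --------
# What mask_requests_args() does with a mixed bag of keyword arguments: it
# keeps only the whitelisted requests kwargs, attaches the shared
# stream/redirect settings and the connection timeout, and returns the
# (requests_kwargs, url) pair.  The URL and keyword values are made up.
from zipline.sources.requests_csv import mask_requests_args

req = mask_requests_args(
    'https://example.com/signals.csv',
    params={'api_key': 'XYZ'},         # kept: in ALLOWED_REQUESTS_KWARGS
    headers={'Accept': 'text/csv'},    # kept
    index_col='date',                  # ignored here: a read_csv kwarg
)

print(req.url)
print(req.requests_kwargs)
# {'params': {'api_key': 'XYZ'}, 'headers': {'Accept': 'text/csv'},
#  'timeout': 30.0, 'stream': True, 'allow_redirects': False}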
from abc import ( ABCMeta, abstractmethod, abstractproperty, ) from numpy import concatenate from lru import LRU from pandas import isnull from toolz import sliding_window from six import with_metaclass from zipline.assets import Equity, Future from zipline.assets.continuous_futures import ContinuousFuture from zipline.lib._int64window import AdjustedArrayWindow as Int64Window from zipline.lib._float64window import AdjustedArrayWindow as Float64Window from zipline.lib.adjustment import Float64Multiply, Float64Add from zipline.utils.cache import ExpiringCache from zipline.utils.math_utils import number_of_decimal_places from zipline.utils.memoize import lazyval from zipline.utils.numpy_utils import float64_dtype from zipline.utils.pandas_utils import find_in_sorted_index, normalize_date # Default number of decimal places used for rounding asset prices. DEFAULT_ASSET_PRICE_DECIMALS = 3 class HistoryCompatibleUSEquityAdjustmentReader(object): def __init__(self, adjustment_reader): self._adjustments_reader = adjustment_reader def load_pricing_adjustments(self, columns, dts, assets): """ Returns ------- adjustments : list[dict[int -> Adjustment]] A list, where each element corresponds to the `columns`, of mappings from index to adjustment objects to apply at that index. """ out = [None] * len(columns) for i, column in enumerate(columns): adjs = {} for asset in assets: adjs.update(self._get_adjustments_in_range( asset, dts, column)) out[i] = adjs return out def _get_adjustments_in_range(self, asset, dts, field): """ Get the Float64Multiply objects to pass to an AdjustedArrayWindow. For the use of AdjustedArrayWindow in the loader, which looks back from current simulation time back to a window of data the dictionary is structured with: - the key into the dictionary for adjustments is the location of the day from which the window is being viewed. - the start of all multiply objects is always 0 (in each window all adjustments are overlapping) - the end of the multiply object is the location before the calendar location of the adjustment action, making all days before the event adjusted. Parameters ---------- asset : Asset The assets for which to get adjustments. dts : iterable of datetime64-like The dts for which adjustment data is needed. field : str OHLCV field for which to get the adjustments. 
Returns ------- out : dict[loc -> Float64Multiply] The adjustments as a dict of loc -> Float64Multiply """ sid = int(asset) start = normalize_date(dts[0]) end = normalize_date(dts[-1]) adjs = {} if field != 'volume': mergers = self._adjustments_reader.get_adjustments_for_sid( 'mergers', sid) for m in mergers: dt = m[0] if start < dt <= end: end_loc = dts.searchsorted(dt) adj_loc = end_loc mult = Float64Multiply(0, end_loc - 1, 0, 0, m[1]) try: adjs[adj_loc].append(mult) except KeyError: adjs[adj_loc] = [mult] divs = self._adjustments_reader.get_adjustments_for_sid( 'dividends', sid) for d in divs: dt = d[0] if start < dt <= end: end_loc = dts.searchsorted(dt) adj_loc = end_loc mult = Float64Multiply(0, end_loc - 1, 0, 0, d[1]) try: adjs[adj_loc].append(mult) except KeyError: adjs[adj_loc] = [mult] splits = self._adjustments_reader.get_adjustments_for_sid( 'splits', sid) for s in splits: dt = s[0] if start < dt <= end: if field == 'volume': ratio = 1.0 / s[1] else: ratio = s[1] end_loc = dts.searchsorted(dt) adj_loc = end_loc mult = Float64Multiply(0, end_loc - 1, 0, 0, ratio) try: adjs[adj_loc].append(mult) except KeyError: adjs[adj_loc] = [mult] return adjs class ContinuousFutureAdjustmentReader(object): """ Calculates adjustments for continuous futures, based on the close and open of the contracts on the either side of each roll. """ def __init__(self, trading_calendar, asset_finder, bar_reader, roll_finders, frequency): self._trading_calendar = trading_calendar self._asset_finder = asset_finder self._bar_reader = bar_reader self._roll_finders = roll_finders self._frequency = frequency def load_pricing_adjustments(self, columns, dts, assets): """ Returns ------- adjustments : list[dict[int -> Adjustment]] A list, where each element corresponds to the `columns`, of mappings from index to adjustment objects to apply at that index. 
""" out = [None] * len(columns) for i, column in enumerate(columns): adjs = {} for asset in assets: adjs.update(self._get_adjustments_in_range( asset, dts, column)) out[i] = adjs return out def _make_adjustment(self, adjustment_type, front_close, back_close, end_loc): adj_base = back_close - front_close if adjustment_type == 'mul': adj_value = 1.0 + adj_base / front_close adj_class = Float64Multiply elif adjustment_type == 'add': adj_value = adj_base adj_class = Float64Add return adj_class(0, end_loc, 0, 0, adj_value) def _get_adjustments_in_range(self, cf, dts, field): if field == 'volume' or field == 'sid': return {} if cf.adjustment is None: return {} rf = self._roll_finders[cf.roll_style] partitions = [] rolls = rf.get_rolls(cf.root_symbol, dts[0], dts[-1], cf.offset) tc = self._trading_calendar adjs = {} for front, back in sliding_window(2, rolls): front_sid, roll_dt = front back_sid = back[0] dt = tc.previous_session_label(roll_dt) if self._frequency == 'minute': dt = tc.open_and_close_for_session(dt)[1] roll_dt = tc.open_and_close_for_session(roll_dt)[0] partitions.append((front_sid, back_sid, dt, roll_dt)) for partition in partitions: front_sid, back_sid, dt, roll_dt = partition last_front_dt = self._bar_reader.get_last_traded_dt( self._asset_finder.retrieve_asset(front_sid), dt) last_back_dt = self._bar_reader.get_last_traded_dt( self._asset_finder.retrieve_asset(back_sid), dt) if isnull(last_front_dt) or isnull(last_back_dt): continue front_close = self._bar_reader.get_value( front_sid, last_front_dt, 'close') back_close = self._bar_reader.get_value( back_sid, last_back_dt, 'close') adj_loc = dts.searchsorted(roll_dt) end_loc = adj_loc - 1 adj = self._make_adjustment(cf.adjustment, front_close, back_close, end_loc) try: adjs[adj_loc].append(adj) except KeyError: adjs[adj_loc] = [adj] return adjs class SlidingWindow(object): """ Wrapper around an AdjustedArrayWindow which supports monotonically increasing (by datetime) requests for a sized window of data. Parameters ---------- window : AdjustedArrayWindow Window of pricing data with prefetched values beyond the current simulation dt. cal_start : int Index in the overall calendar at which the window starts. """ def __init__(self, window, size, cal_start, offset): self.window = window self.cal_start = cal_start self.current = next(window) self.offset = offset self.most_recent_ix = self.cal_start + size def get(self, end_ix): """ Returns ------- out : A np.ndarray of the equity pricing up to end_ix after adjustments and rounding have been applied. """ if self.most_recent_ix == end_ix: return self.current target = end_ix - self.cal_start - self.offset + 1 self.current = self.window.seek(target) self.most_recent_ix = end_ix return self.current class HistoryLoader(with_metaclass(ABCMeta)): """ Loader for sliding history windows, with support for adjustments. Parameters ---------- trading_calendar: TradingCalendar Contains the grouping logic needed to assign minutes to periods. reader : DailyBarReader, MinuteBarReader Reader for pricing bars. adjustment_reader : SQLiteAdjustmentReader Reader for adjustment data. 
""" FIELDS = ('open', 'high', 'low', 'close', 'volume', 'sid') def __init__(self, trading_calendar, reader, equity_adjustment_reader, asset_finder, roll_finders=None, sid_cache_size=1000, prefetch_length=0): self.trading_calendar = trading_calendar self._asset_finder = asset_finder self._reader = reader self._adjustment_readers = {} if equity_adjustment_reader is not None: self._adjustment_readers[Equity] = \ HistoryCompatibleUSEquityAdjustmentReader( equity_adjustment_reader) if roll_finders: self._adjustment_readers[ContinuousFuture] =\ ContinuousFutureAdjustmentReader(trading_calendar, asset_finder, reader, roll_finders, self._frequency) self._window_blocks = { field: ExpiringCache(LRU(sid_cache_size)) for field in self.FIELDS } self._prefetch_length = prefetch_length @abstractproperty def _frequency(self): pass @abstractproperty def _calendar(self): pass @abstractmethod def _array(self, start, end, assets, field): pass def _decimal_places_for_asset(self, asset, reference_date): if isinstance(asset, Future) and asset.tick_size: return number_of_decimal_places(asset.tick_size) elif isinstance(asset, ContinuousFuture): # Tick size should be the same for all contracts of a continuous # future, so arbitrarily get the contract with next upcoming auto # close date. oc = self._asset_finder.get_ordered_contracts(asset.root_symbol) contract_sid = oc.contract_before_auto_close(reference_date.value) if contract_sid is not None: contract = self._asset_finder.retrieve_asset(contract_sid) if contract.tick_size: return number_of_decimal_places(contract.tick_size) return DEFAULT_ASSET_PRICE_DECIMALS def _ensure_sliding_windows(self, assets, dts, field, is_perspective_after): """ Ensure that there is a Float64Multiply window for each asset that can provide data for the given parameters. If the corresponding window for the (assets, len(dts), field) does not exist, then create a new one. If a corresponding window does exist for (assets, len(dts), field), but can not provide data for the current dts range, then create a new one and replace the expired window. Parameters ---------- assets : iterable of Assets The assets in the window dts : iterable of datetime64-like The datetimes for which to fetch data. Makes an assumption that all dts are present and contiguous, in the calendar. field : str The OHLCV field for which to retrieve data. is_perspective_after : bool see: `PricingHistoryLoader.history` Returns ------- out : list of Float64Window with sufficient data so that each asset's window can provide `get` for the index corresponding with the last value in `dts` """ end = dts[-1] size = len(dts) asset_windows = {} needed_assets = [] cal = self._calendar assets = self._asset_finder.retrieve_all(assets) end_ix = find_in_sorted_index(cal, end) for asset in assets: try: window = self._window_blocks[field].get( (asset, size, is_perspective_after), end) except KeyError: needed_assets.append(asset) else: if end_ix < window.most_recent_ix: # Window needs reset. Requested end index occurs before the # end index from the previous history call for this window. # Grab new window instead of rewinding adjustments. 
needed_assets.append(asset) else: asset_windows[asset] = window if needed_assets: offset = 0 start_ix = find_in_sorted_index(cal, dts[0]) prefetch_end_ix = min(end_ix + self._prefetch_length, len(cal) - 1) prefetch_end = cal[prefetch_end_ix] prefetch_dts = cal[start_ix:prefetch_end_ix + 1] if is_perspective_after: adj_end_ix = min(prefetch_end_ix + 1, len(cal) - 1) adj_dts = cal[start_ix:adj_end_ix + 1] else: adj_dts = prefetch_dts prefetch_len = len(prefetch_dts) array = self._array(prefetch_dts, needed_assets, field) if field == 'sid': window_type = Int64Window else: window_type = Float64Window view_kwargs = {} if field == 'volume': array = array.astype(float64_dtype) for i, asset in enumerate(needed_assets): adj_reader = None try: adj_reader = self._adjustment_readers[type(asset)] except KeyError: adj_reader = None if adj_reader is not None: adjs = adj_reader.load_pricing_adjustments( [field], adj_dts, [asset])[0] else: adjs = {} window = window_type( array[:, i].reshape(prefetch_len, 1), view_kwargs, adjs, offset, size, int(is_perspective_after), self._decimal_places_for_asset(asset, dts[-1]), ) sliding_window = SlidingWindow(window, size, start_ix, offset) asset_windows[asset] = sliding_window self._window_blocks[field].set( (asset, size, is_perspective_after), sliding_window, prefetch_end) return [asset_windows[asset] for asset in assets] def history(self, assets, dts, field, is_perspective_after): """ A window of pricing data with adjustments applied assuming that the end of the window is the day before the current simulation time. Parameters ---------- assets : iterable of Assets The assets in the window. dts : iterable of datetime64-like The datetimes for which to fetch data. Makes an assumption that all dts are present and contiguous, in the calendar. field : str The OHLCV field for which to retrieve data. is_perspective_after : bool True, if the window is being viewed immediately after the last dt in the sliding window. False, if the window is viewed on the last dt. This flag is used for handling the case where the last dt in the requested window immediately precedes a corporate action, e.g.: - is_perspective_after is True When the viewpoint is after the last dt in the window, as when a daily history window is accessed from a simulation that uses a minute data frequency, the history call to this loader will not include the current simulation dt. At that point in time, the raw data for the last day in the window will require adjustment, so the most recent adjustment with respect to the simulation time is applied to the last dt in the requested window. An example equity which has a 0.5 split ratio dated for 05-27, with the dts for a history call of 5 bars with a '1d' frequency at 05-27 9:31. Simulation frequency is 'minute'. (In this case this function is called with 4 daily dts, and the calling function is responsible for stitching back on the 'current' dt) | | | | | last dt | <-- viewer is here | | | 05-23 | 05-24 | 05-25 | 05-26 | 05-27 9:31 | | raw | 10.10 | 10.20 | 10.30 | 10.40 | | | adj | 5.05 | 5.10 | 5.15 | 5.25 | | The adjustment is applied to the last dt, 05-26, and all previous dts. - is_perspective_after is False, daily When the viewpoint is the same point in time as the last dt in the window, as when a daily history window is accessed from a simulation that uses a daily data frequency, the history call will include the current dt. At that point in time, the raw data for the last day in the window will be post-adjustment, so no adjustment is applied to the last dt. 
An example equity which has a 0.5 split ratio dated for 05-27, with the dts for a history call of 5 bars with a '1d' frequency at 05-27 0:00. Simulation frequency is 'daily'. | | | | | | <-- viewer is here | | | | | | | last dt | | | 05-23 | 05-24 | 05-25 | 05-26 | 05-27 | | raw | 10.10 | 10.20 | 10.30 | 10.40 | 5.25 | | adj | 5.05 | 5.10 | 5.15 | 5.20 | 5.25 | Adjustments are applied 05-23 through 05-26 but not to the last dt, 05-27 Returns ------- out : np.ndarray with shape(len(days between start, end), len(assets)) """ block = self._ensure_sliding_windows(assets, dts, field, is_perspective_after) end_ix = self._calendar.searchsorted(dts[-1]) return concatenate( [window.get(end_ix) for window in block], axis=1, ) class DailyHistoryLoader(HistoryLoader): @property def _frequency(self): return 'daily' @property def _calendar(self): return self._reader.sessions def _array(self, dts, assets, field): return self._reader.load_raw_arrays( [field], dts[0], dts[-1], assets, )[0] class MinuteHistoryLoader(HistoryLoader): @property def _frequency(self): return 'minute' @lazyval def _calendar(self): mm = self.trading_calendar.all_minutes start = mm.searchsorted(self._reader.first_trading_day) end = mm.searchsorted(self._reader.last_available_dt, side='right') return mm[start:end] def _array(self, dts, assets, field): return self._reader.load_raw_arrays( [field], dts[0], dts[-1], assets, )[0]
zipline-trader
/zipline_trader-1.6.1-cp36-cp36m-win32.whl/zipline/data/history_loader.py
history_loader.py
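# --- Usage sketch (illustrative, not part of the original source) --------
# Wiring a DailyHistoryLoader together by hand.  The bundle name and the
# choice of the first five sids over the last thirty sessions are
# assumptions for illustration; any source of a daily bar reader,
# adjustment reader, and asset finder would work the same way.
from trading_calendars import get_calendar

from zipline.data.bundles import load as load_bundle
from zipline.data.history_loader import DailyHistoryLoader

bundle = load_bundle('quantopian-quandl')
reader = bundle.equity_daily_bar_reader

loader = DailyHistoryLoader(
    trading_calendar=get_calendar('NYSE'),
    reader=reader,
    equity_adjustment_reader=bundle.adjustment_reader,
    asset_finder=bundle.asset_finder,
)

dts = reader.sessions[-30:]                 # a contiguous 30-session window
sids = bundle.asset_finder.sids[:5]

# Split/merger/dividend-adjusted closes, viewed as of the last dt.
closes = loader.history(sids, dts, 'close', is_perspective_after=False)
print(closes.shape)                         # (30, 5)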
from functools import partial import warnings from bcolz import carray, ctable import logbook import numpy as np from numpy import ( array, full, iinfo, nan, ) from pandas import ( DatetimeIndex, NaT, read_csv, to_datetime, Timestamp, ) from six import iteritems, viewkeys from toolz import compose from trading_calendars import get_calendar from zipline.data.session_bars import CurrencyAwareSessionBarReader from zipline.data.bar_reader import ( NoDataAfterDate, NoDataBeforeDate, NoDataOnDate, ) from zipline.utils.functional import apply from zipline.utils.input_validation import expect_element from zipline.utils.numpy_utils import iNaT, float64_dtype, uint32_dtype from zipline.utils.memoize import lazyval from zipline.utils.cli import maybe_show_progress from ._equities import _compute_row_slices, _read_bcolz_data logger = logbook.Logger('UsEquityPricing') OHLC = frozenset(['open', 'high', 'low', 'close']) US_EQUITY_PRICING_BCOLZ_COLUMNS = ( 'open', 'high', 'low', 'close', 'volume', 'day', 'id' ) UINT32_MAX = iinfo(np.uint32).max def check_uint32_safe(value, colname): if value >= UINT32_MAX: raise ValueError( "Value %s from column '%s' is too large" % (value, colname) ) @expect_element(invalid_data_behavior={'warn', 'raise', 'ignore'}) def winsorise_uint32(df, invalid_data_behavior, column, *columns): """Drops any record where a value would not fit into a uint32. Parameters ---------- df : pd.DataFrame The dataframe to winsorise. invalid_data_behavior : {'warn', 'raise', 'ignore'} What to do when data is outside the bounds of a uint32. *columns : iterable[str] The names of the columns to check. Returns ------- truncated : pd.DataFrame ``df`` with values that do not fit into a uint32 zeroed out. """ columns = list((column,) + columns) mask = df[columns] > UINT32_MAX if invalid_data_behavior != 'ignore': mask |= df[columns].isnull() else: # we are not going to generate a warning or error for this so just use # nan_to_num df[columns] = np.nan_to_num(df[columns]) mv = mask.values if mv.any(): if invalid_data_behavior == 'raise': raise ValueError( '%d values out of bounds for uint32: %r' % ( mv.sum(), df[mask.any(axis=1)], ), ) if invalid_data_behavior == 'warn': warnings.warn( 'Ignoring %d values because they are out of bounds for' ' uint32: %r' % ( mv.sum(), df[mask.any(axis=1)], ), stacklevel=3, # one extra frame for `expect_element` ) df[mask] = 0 return df class BcolzDailyBarWriter(object): """ Class capable of writing daily OHLCV data to disk in a format that can be read efficiently by BcolzDailyOHLCVReader. Parameters ---------- filename : str The location at which we should write our output. calendar : zipline.utils.calendar.trading_calendar Calendar to use to compute asset calendar offsets. start_session: pd.Timestamp Midnight UTC session label. end_session: pd.Timestamp Midnight UTC session label. See Also -------- zipline.data.bcolz_daily_bars.BcolzDailyBarReader """ _csv_dtypes = { 'open': float64_dtype, 'high': float64_dtype, 'low': float64_dtype, 'close': float64_dtype, 'volume': float64_dtype, } def __init__(self, filename, calendar, start_session, end_session): self._filename = filename if start_session != end_session: if not calendar.is_session(start_session): raise ValueError( "Start session %s is invalid!" % start_session ) if not calendar.is_session(end_session): raise ValueError( "End session %s is invalid!" 
% end_session ) self._start_session = start_session self._end_session = end_session self._calendar = calendar @property def progress_bar_message(self): return "Merging daily equity files:" def progress_bar_item_show_func(self, value): return value if value is None else str(value[0]) def write(self, data, assets=None, show_progress=False, invalid_data_behavior='warn'): """ Parameters ---------- data : iterable[tuple[int, pandas.DataFrame or bcolz.ctable]] The data chunks to write. Each chunk should be a tuple of sid and the data for that asset. assets : set[int], optional The assets that should be in ``data``. If this is provided we will check ``data`` against the assets and provide better progress information. show_progress : bool, optional Whether or not to show a progress bar while writing. invalid_data_behavior : {'warn', 'raise', 'ignore'}, optional What to do when data is encountered that is outside the range of a uint32. Returns ------- table : bcolz.ctable The newly-written table. """ ctx = maybe_show_progress( ( (sid, self.to_ctable(df, invalid_data_behavior)) for sid, df in data ), show_progress=show_progress, item_show_func=self.progress_bar_item_show_func, label=self.progress_bar_message, length=len(assets) if assets is not None else None, ) with ctx as it: return self._write_internal(it, assets) def write_csvs(self, asset_map, show_progress=False, invalid_data_behavior='warn'): """Read CSVs as DataFrames from our asset map. Parameters ---------- asset_map : dict[int -> str] A mapping from asset id to file path with the CSV data for that asset show_progress : bool Whether or not to show a progress bar while writing. invalid_data_behavior : {'warn', 'raise', 'ignore'} What to do when data is encountered that is outside the range of a uint32. """ read = partial( read_csv, parse_dates=['day'], index_col='day', dtype=self._csv_dtypes, ) return self.write( ((asset, read(path)) for asset, path in iteritems(asset_map)), assets=viewkeys(asset_map), show_progress=show_progress, invalid_data_behavior=invalid_data_behavior, ) def _write_internal(self, iterator, assets): """ Internal implementation of write. `iterator` should be an iterator yielding pairs of (asset, ctable). """ total_rows = 0 first_row = {} last_row = {} calendar_offset = {} # Maps column name -> output carray. columns = { k: carray(array([], dtype=uint32_dtype)) for k in US_EQUITY_PRICING_BCOLZ_COLUMNS } earliest_date = None sessions = self._calendar.sessions_in_range( self._start_session, self._end_session ) if assets is not None: @apply def iterator(iterator=iterator, assets=set(assets)): for asset_id, table in iterator: if asset_id not in assets: raise ValueError('unknown asset id %r' % asset_id) yield asset_id, table for asset_id, table in iterator: nrows = len(table) for column_name in columns: if column_name == 'id': # We know what the content of this column is, so don't # bother reading it. columns['id'].append( full((nrows,), asset_id, dtype='uint32'), ) continue columns[column_name].append(table[column_name]) if earliest_date is None: earliest_date = table["day"][0] else: earliest_date = min(earliest_date, table["day"][0]) # Bcolz doesn't support ints as keys in `attrs`, so convert # assets to strings for use as attr keys. asset_key = str(asset_id) # Calculate the index into the array of the first and last row # for this asset. This allows us to efficiently load single # assets when querying the data back out of the table. 
first_row[asset_key] = total_rows last_row[asset_key] = total_rows + nrows - 1 total_rows += nrows table_day_to_session = compose( self._calendar.minute_to_session_label, partial(Timestamp, unit='s', tz='UTC'), ) asset_first_day = table_day_to_session(table['day'][0]) asset_last_day = table_day_to_session(table['day'][-1]) asset_sessions = sessions[ sessions.slice_indexer(asset_first_day, asset_last_day) ] if len(table) != len(asset_sessions): msg = ( 'Asset id: {}, Got {} rows for daily bars table with first day={}, last ' 'day={}, expected {} rows.\n' 'Missing sessions: {}\n' 'Extra sessions: {}. Skipping it'.format( asset_id, len(table), asset_first_day.date(), asset_last_day.date(), len(asset_sessions), asset_sessions.difference( to_datetime( np.array(table['day']), unit='s', utc=True, ) ).tolist(), to_datetime( np.array(table['day']), unit='s', utc=True, ).difference(asset_sessions).tolist(), ) ) logger.warning(msg) continue # Calculate the number of trading days between the first date # in the stored data and the first date of **this** asset. This # offset used for output alignment by the reader. calendar_offset[asset_key] = sessions.get_loc(asset_first_day) # This writes the table to disk. full_table = ctable( columns=[ columns[colname] for colname in US_EQUITY_PRICING_BCOLZ_COLUMNS ], names=US_EQUITY_PRICING_BCOLZ_COLUMNS, rootdir=self._filename, mode='w', ) full_table.attrs['first_trading_day'] = ( earliest_date if earliest_date is not None else iNaT ) full_table.attrs['first_row'] = first_row full_table.attrs['last_row'] = last_row full_table.attrs['calendar_offset'] = calendar_offset full_table.attrs['calendar_name'] = self._calendar.name full_table.attrs['start_session_ns'] = self._start_session.value full_table.attrs['end_session_ns'] = self._end_session.value full_table.flush() return full_table @expect_element(invalid_data_behavior={'warn', 'raise', 'ignore'}) def to_ctable(self, raw_data, invalid_data_behavior): if isinstance(raw_data, ctable): # we already have a ctable so do nothing return raw_data winsorise_uint32(raw_data, invalid_data_behavior, 'volume', *OHLC) processed = (raw_data[list(OHLC)] * 1000).round().astype('uint32') dates = raw_data.index.values.astype('datetime64[s]') check_uint32_safe(dates.max().view(np.int64), 'day') processed['day'] = dates.astype('uint32') processed['volume'] = raw_data.volume.astype('uint32') return ctable.fromdataframe(processed) class BcolzDailyBarReader(CurrencyAwareSessionBarReader): """ Reader for raw pricing data written by BcolzDailyOHLCVWriter. Parameters ---------- table : bcolz.ctable The ctable contaning the pricing data, with attrs corresponding to the Attributes list below. read_all_threshold : int The number of equities at which; below, the data is read by reading a slice from the carray per asset. above, the data is read by pulling all of the data for all assets into memory and then indexing into that array for each day and asset pair. Used to tune performance of reads when using a small or large number of equities. Attributes ---------- The table with which this loader interacts contains the following attributes: first_row : dict Map from asset_id -> index of first row in the dataset with that id. last_row : dict Map from asset_id -> index of last row in the dataset with that id. calendar_offset : dict Map from asset_id -> calendar index of first row. start_session_ns: int Epoch ns of the first session used in this dataset. end_session_ns: int Epoch ns of the last session used in this dataset. 
calendar_name: str String identifier of trading calendar used (ie, "NYSE"). We use first_row and last_row together to quickly find ranges of rows to load when reading an asset's data into memory. We use calendar_offset and calendar to orient loaded blocks within a range of queried dates. Notes ------ A Bcolz CTable is comprised of Columns and Attributes. The table with which this loader interacts contains the following columns: ['open', 'high', 'low', 'close', 'volume', 'day', 'id']. The data in these columns is interpreted as follows: - Price columns ('open', 'high', 'low', 'close') are interpreted as 1000 * as-traded dollar value. - Volume is interpreted as as-traded volume. - Day is interpreted as seconds since midnight UTC, Jan 1, 1970. - Id is the asset id of the row. The data in each column is grouped by asset and then sorted by day within each asset block. The table is built to represent a long time range of data, e.g. ten years of equity data, so the lengths of each asset block is not equal to each other. The blocks are clipped to the known start and end date of each asset to cut down on the number of empty values that would need to be included to make a regular/cubic dataset. When read across the open, high, low, close, and volume with the same index should represent the same asset and day. See Also -------- zipline.data.bcolz_daily_bars.BcolzDailyBarWriter """ def __init__(self, table, read_all_threshold=3000): self._maybe_table_rootdir = table # Cache of fully read np.array for the carrays in the daily bar table. # raw_array does not use the same cache, but it could. # Need to test keeping the entire array in memory for the course of a # process first. self._spot_cols = {} self.PRICE_ADJUSTMENT_FACTOR = 0.001 self._read_all_threshold = read_all_threshold @lazyval def _table(self): maybe_table_rootdir = self._maybe_table_rootdir if isinstance(maybe_table_rootdir, ctable): return maybe_table_rootdir return ctable(rootdir=maybe_table_rootdir, mode='r') @lazyval def sessions(self): if 'calendar' in self._table.attrs.attrs: # backwards compatibility with old formats, will remove return DatetimeIndex(self._table.attrs['calendar'], tz='UTC') else: cal = get_calendar(self._table.attrs['calendar_name']) start_session_ns = self._table.attrs['start_session_ns'] start_session = Timestamp(start_session_ns, tz='UTC') end_session_ns = self._table.attrs['end_session_ns'] end_session = Timestamp(end_session_ns, tz='UTC') sessions = cal.sessions_in_range(start_session, end_session) return sessions @lazyval def _first_rows(self): return { int(asset_id): start_index for asset_id, start_index in iteritems( self._table.attrs['first_row'], ) } @lazyval def _last_rows(self): return { int(asset_id): end_index for asset_id, end_index in iteritems( self._table.attrs['last_row'], ) } @lazyval def _calendar_offsets(self): return { int(id_): offset for id_, offset in iteritems( self._table.attrs['calendar_offset'], ) } @lazyval def first_trading_day(self): try: return Timestamp( self._table.attrs['first_trading_day'], unit='s', tz='UTC' ) except KeyError: return None @lazyval def trading_calendar(self): if 'calendar_name' in self._table.attrs.attrs: return get_calendar(self._table.attrs['calendar_name']) else: return None @property def last_available_dt(self): return self.sessions[-1] def _compute_slices(self, start_idx, end_idx, assets): """ Compute the raw row indices to load for each asset on a query for the given dates after applying a shift. 
Parameters ---------- start_idx : int Index of first date for which we want data. end_idx : int Index of last date for which we want data. assets : pandas.Int64Index Assets for which we want to compute row indices Returns ------- A 3-tuple of (first_rows, last_rows, offsets): first_rows : np.array[intp] Array with length == len(assets) containing the index of the first row to load for each asset in `assets`. last_rows : np.array[intp] Array with length == len(assets) containing the index of the last row to load for each asset in `assets`. offset : np.array[intp] Array with length == (len(asset) containing the index in a buffer of length `dates` corresponding to the first row of each asset. The value of offset[i] will be 0 if asset[i] existed at the start of a query. Otherwise, offset[i] will be equal to the number of entries in `dates` for which the asset did not yet exist. """ # The core implementation of the logic here is implemented in Cython # for efficiency. return _compute_row_slices( self._first_rows, self._last_rows, self._calendar_offsets, start_idx, end_idx, assets, ) def load_raw_arrays(self, columns, start_date, end_date, assets): start_idx = self._load_raw_arrays_date_to_index(start_date) end_idx = self._load_raw_arrays_date_to_index(end_date) first_rows, last_rows, offsets = self._compute_slices( start_idx, end_idx, assets, ) read_all = len(assets) > self._read_all_threshold return _read_bcolz_data( self._table, (end_idx - start_idx + 1, len(assets)), list(columns), first_rows, last_rows, offsets, read_all, ) def _load_raw_arrays_date_to_index(self, date): try: return self.sessions.get_loc(date) except KeyError: raise NoDataOnDate(date) def _spot_col(self, colname): """ Get the colname from daily_bar_table and read all of it into memory, caching the result. Parameters ---------- colname : string A name of a OHLCV carray in the daily_bar_table Returns ------- array (uint32) Full read array of the carray in the daily_bar_table with the given colname. """ try: col = self._spot_cols[colname] except KeyError: col = self._spot_cols[colname] = self._table[colname] return col def get_last_traded_dt(self, asset, day): volumes = self._spot_col('volume') search_day = day while True: try: ix = self.sid_day_index(asset, search_day) except NoDataBeforeDate: return NaT except NoDataAfterDate: prev_day_ix = self.sessions.get_loc(search_day) - 1 if prev_day_ix > -1: search_day = self.sessions[prev_day_ix] continue except NoDataOnDate: return NaT if volumes[ix] != 0: return search_day prev_day_ix = self.sessions.get_loc(search_day) - 1 if prev_day_ix > -1: search_day = self.sessions[prev_day_ix] else: return NaT def sid_day_index(self, sid, day): """ Parameters ---------- sid : int The asset identifier. day : datetime64-like Midnight of the day for which data is requested. Returns ------- int Index into the data tape for the given sid and day. Raises a NoDataOnDate exception if the given day and sid is before or after the date range of the equity. """ try: day_loc = self.sessions.get_loc(day) except Exception: raise NoDataOnDate("day={0} is outside of calendar={1}".format( day, self.sessions)) offset = day_loc - self._calendar_offsets[sid] if offset < 0: raise NoDataBeforeDate( "No data on or before day={0} for sid={1}".format( day, sid)) ix = self._first_rows[sid] + offset if ix > self._last_rows[sid]: raise NoDataAfterDate( "No data on or after day={0} for sid={1}".format( day, sid)) return ix def get_value(self, sid, dt, field): """ Parameters ---------- sid : int The asset identifier. 
        dt : datetime64-like
            Midnight of the day for which data is requested.
        field : string
            The price field. e.g. ('open', 'high', 'low', 'close', 'volume')

        Returns
        -------
        float
            The spot price for ``field`` of the given sid on the given day.
            Raises a NoDataOnDate exception if the given day and sid is before
            or after the date range of the equity.
            Returns ``nan`` if the day is within the date range but the stored
            price is 0 (volume is returned as-is).
        """
        ix = self.sid_day_index(sid, dt)
        price = self._spot_col(field)[ix]
        if field != 'volume':
            if price == 0:
                return nan
            else:
                return price * 0.001
        else:
            return price

    def currency_codes(self, sids):
        # XXX: This is pretty inefficient. This reader doesn't really support
        # country codes, so we always either return USD or None if we don't
        # know about the sid at all.
        first_rows = self._first_rows
        out = []
        for sid in sids:
            if sid in first_rows:
                out.append('USD')
            else:
                out.append(None)
        return np.array(out, dtype=object)
zipline-trader
/zipline_trader-1.6.1-cp36-cp36m-win32.whl/zipline/data/bcolz_daily_bars.py
bcolz_daily_bars.py
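# --- Usage sketch (illustrative, not part of the original source) --------
# Writing a tiny one-asset daily bar table and reading a value back.  The
# output directory, the sid, and the flat synthetic OHLCV values are made
# up for illustration; real tables are normally produced by the bundle
# ingestion machinery rather than by hand.
import pandas as pd
from trading_calendars import get_calendar

from zipline.data.bcolz_daily_bars import (
    BcolzDailyBarReader,
    BcolzDailyBarWriter,
)

cal = get_calendar('NYSE')
start = pd.Timestamp('2015-01-05', tz='UTC')
end = pd.Timestamp('2015-01-30', tz='UTC')
sessions = cal.sessions_in_range(start, end)

frame = pd.DataFrame(
    {'open': 10.0, 'high': 10.5, 'low': 9.5, 'close': 10.25,
     'volume': 100000.0},
    index=sessions,
)

writer = BcolzDailyBarWriter('/tmp/example_daily_bars.bcolz', cal, start, end)
writer.write([(1, frame)], assets=[1])

reader = BcolzDailyBarReader('/tmp/example_daily_bars.bcolz')
print(reader.get_value(1, sessions[3], 'close'))       # ~10.25
print(reader.get_last_traded_dt(1, sessions[-1]))      # the last session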
from six import iteritems
import numpy as np
import pandas as pd
from pandas import NaT
from trading_calendars import TradingCalendar

from zipline.data.bar_reader import OHLCV, NoDataOnDate, NoDataForSid
from zipline.data.session_bars import CurrencyAwareSessionBarReader
from zipline.utils.input_validation import expect_types, validate_keys
from zipline.utils.pandas_utils import check_indexes_all_same


class InMemoryDailyBarReader(CurrencyAwareSessionBarReader):
    """
    A SessionBarReader backed by a dictionary of in-memory DataFrames.

    Parameters
    ----------
    frames : dict[str -> pd.DataFrame]
        Dictionary from field name ("open", "high", "low", "close", or
        "volume") to DataFrame containing data for that field.
    calendar : trading_calendars.TradingCalendar
        Calendar to which data is aligned.
    currency_codes : pd.Series
        Map from sid -> listing currency for that sid.
    verify_indices : bool, optional
        Whether or not to verify that input data is correctly aligned to the
        given calendar. Default is True.
    """
    @expect_types(
        frames=dict,
        calendar=TradingCalendar,
        verify_indices=bool,
        currency_codes=pd.Series,
    )
    def __init__(self, frames, calendar, currency_codes, verify_indices=True):
        self._frames = frames
        self._values = {key: frame.values for key, frame in iteritems(frames)}
        self._calendar = calendar
        self._currency_codes = currency_codes

        validate_keys(frames, set(OHLCV), type(self).__name__)
        if verify_indices:
            verify_frames_aligned(list(frames.values()), calendar)

        self._sessions = frames['close'].index
        self._sids = frames['close'].columns

    @classmethod
    def from_panel(cls, panel, calendar, currency_codes):
        """Helper for construction from a pandas.Panel.
        """
        return cls(dict(panel), calendar, currency_codes)

    @property
    def last_available_dt(self):
        return self._sessions[-1]

    @property
    def trading_calendar(self):
        return self._calendar

    @property
    def sessions(self):
        return self._sessions

    def load_raw_arrays(self, columns, start_dt, end_dt, assets):
        if start_dt not in self._sessions:
            raise NoDataOnDate(start_dt)
        if end_dt not in self._sessions:
            raise NoDataOnDate(end_dt)

        asset_indexer = self._sids.get_indexer(assets)
        if -1 in asset_indexer:
            bad_assets = assets[asset_indexer == -1]
            raise NoDataForSid(bad_assets)

        date_indexer = self._sessions.slice_indexer(start_dt, end_dt)

        out = []
        for c in columns:
            out.append(self._values[c][date_indexer, asset_indexer])

        return out

    def get_value(self, sid, dt, field):
        """
        Parameters
        ----------
        sid : int
            The asset identifier.
        dt : datetime64-like
            Midnight of the day for which data is requested.
        field : string
            The price field. e.g. ('open', 'high', 'low', 'close', 'volume')

        Returns
        -------
        float
            The spot price for ``field`` of the given sid on the given day.
            Raises a KeyError if the given day or sid is not present in the
            underlying frames.
        """
        return self._frames[field].loc[dt, sid]

    def get_last_traded_dt(self, asset, dt):
        """
        Parameters
        ----------
        asset : zipline.assets.Asset
            The asset identifier.
        dt : datetime64-like
            Midnight of the day for which data is requested.

        Returns
        -------
        pd.Timestamp : The last known dt for the asset and dt;
                       NaT if no trade is found before the given dt.
        """
        try:
            return self._frames['close'].loc[:, asset.sid].last_valid_index()
        except IndexError:
            return NaT

    @property
    def first_trading_day(self):
        return self._sessions[0]

    def currency_codes(self, sids):
        codes = self._currency_codes
        return np.array([codes[sid] for sid in sids])


def verify_frames_aligned(frames, calendar):
    """
    Verify that DataFrames in ``frames`` have the same indexing scheme and are
    aligned to ``calendar``.

    Parameters
    ----------
    frames : list[pd.DataFrame]
    calendar : trading_calendars.TradingCalendar

    Raises
    ------
    ValueError
        If frames have different indexes/columns, or if frame indexes do not
        match a contiguous region of ``calendar``.
    """
    indexes = [f.index for f in frames]
    check_indexes_all_same(indexes, message="DataFrame indexes don't match:")

    columns = [f.columns for f in frames]
    check_indexes_all_same(columns, message="DataFrame columns don't match:")

    start, end = indexes[0][[0, -1]]
    cal_sessions = calendar.sessions_in_range(start, end)
    check_indexes_all_same(
        [indexes[0], cal_sessions],
        "DataFrame index doesn't match {} calendar:".format(calendar.name),
    )
zipline-trader
/zipline_trader-1.6.1-cp36-cp36m-win32.whl/zipline/data/in_memory_daily_bars.py
in_memory_daily_bars.py
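
# --- usage sketch (illustrative; not part of the original module) ----------
# A hedged, self-contained example of building the InMemoryDailyBarReader
# from in_memory_daily_bars.py above out of a dict of aligned OHLCV
# DataFrames. The sids, sessions and prices are made up for illustration.
def _example_in_memory_reader():
    import numpy as np
    import pandas as pd
    from trading_calendars import get_calendar

    from zipline.data.in_memory_daily_bars import InMemoryDailyBarReader

    calendar = get_calendar('XNYS')
    sessions = calendar.sessions_in_range(
        pd.Timestamp('2016-01-04', tz='UTC'),
        pd.Timestamp('2016-01-08', tz='UTC'),
    )
    sids = pd.Index([1, 2])

    def frame(base):
        # One row per session, one column per sid.
        values = np.full((len(sessions), len(sids)), base, dtype=np.float64)
        return pd.DataFrame(values, index=sessions, columns=sids)

    frames = {
        'open': frame(10.0),
        'high': frame(10.5),
        'low': frame(9.5),
        'close': frame(10.25),
        'volume': frame(1000.0),
    }
    currency_codes = pd.Series(['USD', 'USD'], index=sids)

    reader = InMemoryDailyBarReader(frames, calendar, currency_codes)
    closes, volumes = reader.load_raw_arrays(
        ['close', 'volume'], sessions[0], sessions[-1], sids,
    )
    return reader, closes, volumes
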
from abc import ABCMeta, abstractmethod from numpy import ( full, nan, int64, zeros ) from six import iteritems, with_metaclass from zipline.utils.memoize import lazyval class AssetDispatchBarReader(with_metaclass(ABCMeta)): """ Parameters ---------- - trading_calendar : zipline.utils.trading_calendar.TradingCalendar - asset_finder : zipline.assets.AssetFinder - readers : dict A dict mapping Asset type to the corresponding [Minute|Session]BarReader - last_available_dt : pd.Timestamp or None, optional If not provided, infers it by using the min of the last_available_dt values of the underlying readers. """ def __init__( self, trading_calendar, asset_finder, readers, last_available_dt=None, ): self._trading_calendar = trading_calendar self._asset_finder = asset_finder self._readers = readers # if no timezone given, assume utf if last_available_dt and not last_available_dt.tzinfo: last_available_dt = last_available_dt.tz_localize('utc') self._last_available_dt = last_available_dt for t, r in iteritems(self._readers): assert trading_calendar == r.trading_calendar, \ "All readers must share target trading_calendar. " \ "Reader={0} for type={1} uses calendar={2} which does not " \ "match the desired shared calendar={3} ".format( r, t, r.trading_calendar, trading_calendar) @abstractmethod def _dt_window_size(self, start_dt, end_dt): pass @property def _asset_types(self): return self._readers.keys() def _make_raw_array_shape(self, start_dt, end_dt, num_sids): return self._dt_window_size(start_dt, end_dt), num_sids def _make_raw_array_out(self, field, shape): if field != 'volume' and field != 'sid': out = full(shape, nan) else: out = zeros(shape, dtype=int64) return out @property def trading_calendar(self): return self._trading_calendar @lazyval def last_available_dt(self): if self._last_available_dt is not None: return self._last_available_dt else: return max(r.last_available_dt for r in self._readers.values()) @lazyval def first_trading_day(self): return min(r.first_trading_day for r in self._readers.values()) def get_value(self, sid, dt, field): asset = self._asset_finder.retrieve_asset(sid) r = self._readers[type(asset)] return r.get_value(asset, dt, field) def get_last_traded_dt(self, asset, dt): r = self._readers[type(asset)] return r.get_last_traded_dt(asset, dt) def load_raw_arrays(self, fields, start_dt, end_dt, sids): asset_types = self._asset_types sid_groups = {t: [] for t in asset_types} out_pos = {t: [] for t in asset_types} assets = self._asset_finder.retrieve_all(sids) for i, asset in enumerate(assets): t = type(asset) sid_groups[t].append(asset) out_pos[t].append(i) batched_arrays = { t: self._readers[t].load_raw_arrays(fields, start_dt, end_dt, sid_groups[t]) for t in asset_types if sid_groups[t]} results = [] shape = self._make_raw_array_shape(start_dt, end_dt, len(sids)) for i, field in enumerate(fields): out = self._make_raw_array_out(field, shape) for t, arrays in iteritems(batched_arrays): out[:, out_pos[t]] = arrays[i] results.append(out) return results class AssetDispatchMinuteBarReader(AssetDispatchBarReader): def _dt_window_size(self, start_dt, end_dt): return len(self.trading_calendar.minutes_in_range(start_dt, end_dt)) class AssetDispatchSessionBarReader(AssetDispatchBarReader): def _dt_window_size(self, start_dt, end_dt): return len(self.trading_calendar.sessions_in_range(start_dt, end_dt)) @lazyval def sessions(self): return self.trading_calendar.sessions_in_range( self.first_trading_day, self.last_available_dt)
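

# --- usage sketch (illustrative; not part of the original module) ----------
# A hedged illustration of how the dispatch readers above are wired together:
# one concrete session bar reader per asset type, all sharing one trading
# calendar (the constructor asserts this). ``trading_calendar``,
# ``asset_finder``, ``equity_reader`` and ``future_reader`` are assumed to be
# built elsewhere; nothing here is prescribed zipline setup code.
from zipline.assets import Equity, Future


def build_session_dispatcher(trading_calendar, asset_finder,
                             equity_reader, future_reader):
    # Readers are keyed by Asset subclass. load_raw_arrays() groups the
    # requested sids by type, queries each underlying reader once, and
    # scatters the results back into one (dates x sids) array per field.
    return AssetDispatchSessionBarReader(
        trading_calendar,
        asset_finder,
        readers={Equity: equity_reader, Future: future_reader},
    )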
zipline-trader
/zipline_trader-1.6.1-cp36-cp36m-win32.whl/zipline/data/dispatch_bar_reader.py
dispatch_bar_reader.py
from abc import ABCMeta, abstractmethod import json import os from glob import glob from os.path import join from textwrap import dedent from lru import LRU import bcolz from bcolz import ctable import h5py from intervaltree import IntervalTree import logbook import numpy as np import pandas as pd from pandas import HDFStore import tables from six import with_metaclass from toolz import keymap, valmap from trading_calendars import get_calendar from zipline.data._minute_bar_internal import ( minute_value, find_position_of_minute, find_last_traded_position_internal ) from zipline.gens.sim_engine import NANOS_IN_MINUTE from zipline.data.bar_reader import BarReader, NoDataForSid, NoDataOnDate from zipline.data.bcolz_daily_bars import check_uint32_safe from zipline.utils.cli import maybe_show_progress from zipline.utils.compat import mappingproxy from zipline.utils.memoize import lazyval logger = logbook.Logger('MinuteBars') US_EQUITIES_MINUTES_PER_DAY = 390 FUTURES_MINUTES_PER_DAY = 1440 DEFAULT_EXPECTEDLEN = US_EQUITIES_MINUTES_PER_DAY * 252 * 15 OHLC_RATIO = 1000 class BcolzMinuteOverlappingData(Exception): pass class BcolzMinuteWriterColumnMismatch(Exception): pass class MinuteBarReader(BarReader): @property def data_frequency(self): return "minute" def _calc_minute_index(market_opens, minutes_per_day): minutes = np.zeros(len(market_opens) * minutes_per_day, dtype='datetime64[ns]') deltas = np.arange(0, minutes_per_day, dtype='timedelta64[m]') for i, market_open in enumerate(market_opens): start = market_open.asm8 minute_values = start + deltas start_ix = minutes_per_day * i end_ix = start_ix + minutes_per_day minutes[start_ix:end_ix] = minute_values return pd.to_datetime(minutes, utc=True) def _sid_subdir_path(sid): """ Format subdir path to limit the number directories in any given subdirectory to 100. The number in each directory is designed to support at least 100000 equities. Parameters ---------- sid : int Asset identifier. Returns ------- out : string A path for the bcolz rootdir, including subdirectory prefixes based on the padded string representation of the given sid. e.g. 1 is formatted as 00/00/000001.bcolz """ padded_sid = format(sid, '06') return os.path.join( # subdir 1 00/XX padded_sid[0:2], # subdir 2 XX/00 padded_sid[2:4], "{0}.bcolz".format(str(padded_sid)) ) def convert_cols(cols, scale_factor, sid, invalid_data_behavior): """Adapt OHLCV columns into uint32 columns. Parameters ---------- cols : dict A dict mapping each column name (open, high, low, close, volume) to a float column to convert to uint32. scale_factor : int Factor to use to scale float values before converting to uint32. sid : int Sid of the relevant asset, for logging. invalid_data_behavior : str Specifies behavior when data cannot be converted to uint32. If 'raise', raises an exception. If 'warn', logs a warning and filters out incompatible values. If 'ignore', silently filters out incompatible values. 
""" scaled_opens = (np.nan_to_num(cols['open']) * scale_factor).round() scaled_highs = (np.nan_to_num(cols['high']) * scale_factor).round() scaled_lows = (np.nan_to_num(cols['low']) * scale_factor).round() scaled_closes = (np.nan_to_num(cols['close']) * scale_factor).round() exclude_mask = np.zeros_like(scaled_opens, dtype=bool) for col_name, scaled_col in [ ('open', scaled_opens), ('high', scaled_highs), ('low', scaled_lows), ('close', scaled_closes), ]: max_val = scaled_col.max() try: check_uint32_safe(max_val, col_name) except ValueError: if invalid_data_behavior == 'raise': raise if invalid_data_behavior == 'warn': logger.warn( 'Values for sid={}, col={} contain some too large for ' 'uint32 (max={}), filtering them out', sid, col_name, max_val, ) # We want to exclude all rows that have an unsafe value in # this column. exclude_mask &= (scaled_col >= np.iinfo(np.uint32).max) # Convert all cols to uint32. opens = scaled_opens.astype(np.uint32) highs = scaled_highs.astype(np.uint32) lows = scaled_lows.astype(np.uint32) closes = scaled_closes.astype(np.uint32) volumes = cols['volume'].astype(np.uint32) # Exclude rows with unsafe values by setting to zero. opens[exclude_mask] = 0 highs[exclude_mask] = 0 lows[exclude_mask] = 0 closes[exclude_mask] = 0 volumes[exclude_mask] = 0 return opens, highs, lows, closes, volumes class BcolzMinuteBarMetadata(object): """ Parameters ---------- ohlc_ratio : int The factor by which the pricing data is multiplied so that the float data can be stored as an integer. calendar : trading_calendars.trading_calendar.TradingCalendar The TradingCalendar on which the minute bars are based. start_session : datetime The first trading session in the data set. end_session : datetime The last trading session in the data set. minutes_per_day : int The number of minutes per each period. """ FORMAT_VERSION = 3 METADATA_FILENAME = 'metadata.json' @classmethod def metadata_path(cls, rootdir): return os.path.join(rootdir, cls.METADATA_FILENAME) @classmethod def read(cls, rootdir): path = cls.metadata_path(rootdir) with open(path) as fp: raw_data = json.load(fp) try: version = raw_data['version'] except KeyError: # Version was first written with version 1, assume 0, # if version does not match. version = 0 default_ohlc_ratio = raw_data['ohlc_ratio'] if version >= 1: minutes_per_day = raw_data['minutes_per_day'] else: # version 0 always assumed US equities. minutes_per_day = US_EQUITIES_MINUTES_PER_DAY if version >= 2: calendar = get_calendar(raw_data['calendar_name']) start_session = pd.Timestamp( raw_data['start_session'], tz='UTC') end_session = pd.Timestamp(raw_data['end_session'], tz='UTC') else: # No calendar info included in older versions, so # default to NYSE. 
calendar = get_calendar('XNYS') start_session = pd.Timestamp( raw_data['first_trading_day'], tz='UTC') end_session = calendar.minute_to_session_label( pd.Timestamp( raw_data['market_closes'][-1], unit='m', tz='UTC') ) if version >= 3: ohlc_ratios_per_sid = raw_data['ohlc_ratios_per_sid'] if ohlc_ratios_per_sid is not None: ohlc_ratios_per_sid = keymap(int, ohlc_ratios_per_sid) else: ohlc_ratios_per_sid = None return cls( default_ohlc_ratio, ohlc_ratios_per_sid, calendar, start_session, end_session, minutes_per_day, version=version, ) def __init__( self, default_ohlc_ratio, ohlc_ratios_per_sid, calendar, start_session, end_session, minutes_per_day, version=FORMAT_VERSION, ): self.calendar = calendar self.start_session = start_session self.end_session = end_session self.default_ohlc_ratio = default_ohlc_ratio self.ohlc_ratios_per_sid = ohlc_ratios_per_sid self.minutes_per_day = minutes_per_day self.version = version def write(self, rootdir): """ Write the metadata to a JSON file in the rootdir. Values contained in the metadata are: version : int The value of FORMAT_VERSION of this class. ohlc_ratio : int The default ratio by which to multiply the pricing data to convert the floats from floats to an integer to fit within the np.uint32. If ohlc_ratios_per_sid is None or does not contain a mapping for a given sid, this ratio is used. ohlc_ratios_per_sid : dict A dict mapping each sid in the output to the factor by which the pricing data is multiplied so that the float data can be stored as an integer. minutes_per_day : int The number of minutes per each period. calendar_name : str The name of the TradingCalendar on which the minute bars are based. start_session : datetime 'YYYY-MM-DD' formatted representation of the first trading session in the data set. end_session : datetime 'YYYY-MM-DD' formatted representation of the last trading session in the data set. Deprecated, but included for backwards compatibility: first_trading_day : string 'YYYY-MM-DD' formatted representation of the first trading day available in the dataset. market_opens : list List of int64 values representing UTC market opens as minutes since epoch. market_closes : list List of int64 values representing UTC market closes as minutes since epoch. """ calendar = self.calendar slicer = calendar.schedule.index.slice_indexer( self.start_session, self.end_session, ) schedule = calendar.schedule[slicer] market_opens = schedule.market_open market_closes = schedule.market_close metadata = { 'version': self.version, 'ohlc_ratio': self.default_ohlc_ratio, 'ohlc_ratios_per_sid': self.ohlc_ratios_per_sid, 'minutes_per_day': self.minutes_per_day, 'calendar_name': self.calendar.name, 'start_session': str(self.start_session.date()), 'end_session': str(self.end_session.date()), # Write these values for backwards compatibility 'first_trading_day': str(self.start_session.date()), 'market_opens': ( market_opens.values.astype('datetime64[m]'). astype(np.int64).tolist()), 'market_closes': ( market_closes.values.astype('datetime64[m]'). astype(np.int64).tolist()), } with open(self.metadata_path(rootdir), 'w+') as fp: json.dump(metadata, fp) class BcolzMinuteBarWriter(object): """ Class capable of writing minute OHLCV data to disk into bcolz format. Parameters ---------- rootdir : string Path to the root directory into which to write the metadata and bcolz subdirectories. calendar : trading_calendars.trading_calendar.TradingCalendar The trading calendar on which to base the minute bars. 
Used to get the market opens used as a starting point for each periodic span of minutes in the index, and the market closes that correspond with the market opens. minutes_per_day : int The number of minutes per each period. Defaults to 390, the mode of minutes in NYSE trading days. start_session : datetime The first trading session in the data set. end_session : datetime The last trading session in the data set. default_ohlc_ratio : int, optional The default ratio by which to multiply the pricing data to convert from floats to integers that fit within np.uint32. If ohlc_ratios_per_sid is None or does not contain a mapping for a given sid, this ratio is used. Default is OHLC_RATIO (1000). ohlc_ratios_per_sid : dict, optional A dict mapping each sid in the output to the ratio by which to multiply the pricing data to convert the floats from floats to an integer to fit within the np.uint32. expectedlen : int, optional The expected length of the dataset, used when creating the initial bcolz ctable. If the expectedlen is not used, the chunksize and corresponding compression ratios are not ideal. Defaults to supporting 15 years of NYSE equity market data. see: http://bcolz.blosc.org/opt-tips.html#informing-about-the-length-of-your-carrays # noqa write_metadata : bool, optional If True, writes the minute bar metadata (on init of the writer). If False, no metadata is written (existing metadata is retained). Default is True. Notes ----- Writes a bcolz directory for each individual sid, all contained within a root directory which also contains metadata about the entire dataset. Each individual asset's data is stored as a bcolz table with a column for each pricing field: (open, high, low, close, volume) The open, high, low, and close columns are integers which are 1000 times the quoted price, so that the data can represented and stored as an np.uint32, supporting market prices quoted up to the thousands place. volume is a np.uint32 with no mutation of the tens place. The 'index' for each individual asset are a repeating period of minutes of length `minutes_per_day` starting from each market open. The file format does not account for half-days. e.g.: 2016-01-19 14:31 2016-01-19 14:32 ... 2016-01-19 20:59 2016-01-19 21:00 2016-01-20 14:31 2016-01-20 14:32 ... 2016-01-20 20:59 2016-01-20 21:00 All assets are written with a common 'index', sharing a common first trading day. Assets that do not begin trading until after the first trading day will have zeros for all pricing data up and until data is traded. 'index' is in quotations, because bcolz does not provide an index. The format allows index-like behavior by writing each minute's data into the corresponding position of the enumeration of the aforementioned datetime index. The datetimes which correspond to each position are written in the metadata as integer nanoseconds since the epoch into the `minute_index` key. 
See Also -------- zipline.data.minute_bars.BcolzMinuteBarReader """ COL_NAMES = ('open', 'high', 'low', 'close', 'volume') def __init__(self, rootdir, calendar, start_session, end_session, minutes_per_day, default_ohlc_ratio=OHLC_RATIO, ohlc_ratios_per_sid=None, expectedlen=DEFAULT_EXPECTEDLEN, write_metadata=True): self._rootdir = rootdir self._start_session = start_session self._end_session = end_session self._calendar = calendar slicer = ( calendar.schedule.index.slice_indexer(start_session, end_session)) self._schedule = calendar.schedule[slicer] self._session_labels = self._schedule.index self._minutes_per_day = minutes_per_day self._expectedlen = expectedlen self._default_ohlc_ratio = default_ohlc_ratio self._ohlc_ratios_per_sid = ohlc_ratios_per_sid self._minute_index = _calc_minute_index( self._schedule.market_open, self._minutes_per_day) if write_metadata: metadata = BcolzMinuteBarMetadata( self._default_ohlc_ratio, self._ohlc_ratios_per_sid, self._calendar, self._start_session, self._end_session, self._minutes_per_day, ) metadata.write(self._rootdir) @classmethod def open(cls, rootdir, end_session=None): """ Open an existing ``rootdir`` for writing. Parameters ---------- end_session : Timestamp (optional) When appending, the intended new ``end_session``. """ metadata = BcolzMinuteBarMetadata.read(rootdir) return BcolzMinuteBarWriter( rootdir, metadata.calendar, metadata.start_session, end_session if end_session is not None else metadata.end_session, metadata.minutes_per_day, metadata.default_ohlc_ratio, metadata.ohlc_ratios_per_sid, write_metadata=end_session is not None ) @property def first_trading_day(self): return self._start_session def ohlc_ratio_for_sid(self, sid): if self._ohlc_ratios_per_sid is not None: try: return self._ohlc_ratios_per_sid[sid] except KeyError: pass # If no ohlc_ratios_per_sid dict is passed, or if the specified # sid is not in the dict, fallback to the general ohlc_ratio. return self._default_ohlc_ratio def sidpath(self, sid): """ Parameters ---------- sid : int Asset identifier. Returns ------- out : string Full path to the bcolz rootdir for the given sid. """ sid_subdir = _sid_subdir_path(sid) return join(self._rootdir, sid_subdir) def last_date_in_output_for_sid(self, sid): """ Parameters ---------- sid : int Asset identifier. Returns ------- out : pd.Timestamp The midnight of the last date written in to the output for the given sid. """ sizes_path = "{0}/close/meta/sizes".format(self.sidpath(sid)) if not os.path.exists(sizes_path): return pd.NaT with open(sizes_path, mode='r') as f: sizes = f.read() data = json.loads(sizes) # use integer division so that the result is an int # for pandas index later https://github.com/pandas-dev/pandas/blob/master/pandas/tseries/base.py#L247 # noqa num_days = data['shape'][0] // self._minutes_per_day if num_days == 0: # empty container return pd.NaT return self._session_labels[num_days - 1] def _init_ctable(self, path): """ Create empty ctable for given path. Parameters ---------- path : string The path to rootdir of the new ctable. """ # Only create the containing subdir on creation. # This is not to be confused with the `.bcolz` directory, but is the # directory up one level from the `.bcolz` directories. sid_containing_dirname = os.path.dirname(path) if not os.path.exists(sid_containing_dirname): # Other sids may have already created the containing directory. 
os.makedirs(sid_containing_dirname) initial_array = np.empty(0, np.uint32) table = ctable( rootdir=path, columns=[ initial_array, initial_array, initial_array, initial_array, initial_array, ], names=[ 'open', 'high', 'low', 'close', 'volume' ], expectedlen=self._expectedlen, mode='w', ) table.flush() return table def _ensure_ctable(self, sid): """Ensure that a ctable exists for ``sid``, then return it.""" sidpath = self.sidpath(sid) if not os.path.exists(sidpath): return self._init_ctable(sidpath) return bcolz.ctable(rootdir=sidpath, mode='a') def _zerofill(self, table, numdays): # Compute the number of minutes to be filled, accounting for the # possibility of a partial day's worth of minutes existing for # the previous day. minute_offset = len(table) % self._minutes_per_day num_to_prepend = numdays * self._minutes_per_day - minute_offset prepend_array = np.zeros(num_to_prepend, np.uint32) # Fill all OHLCV with zeros. table.append([prepend_array] * 5) table.flush() def pad(self, sid, date): """ Fill sid container with empty data through the specified date. If the last recorded trade is not at the close, then that day will be padded with zeros until its close. Any day after that (up to and including the specified date) will be padded with `minute_per_day` worth of zeros Parameters ---------- sid : int The asset identifier for the data being written. date : datetime-like The date used to calculate how many slots to be pad. The padding is done through the date, i.e. after the padding is done the `last_date_in_output_for_sid` will be equal to `date` """ table = self._ensure_ctable(sid) last_date = self.last_date_in_output_for_sid(sid) tds = self._session_labels if date <= last_date or date < tds[0]: # No need to pad. return if pd.isnull(last_date): # If there is no data, determine how many days to add so that # desired days are written to the correct slots. days_to_zerofill = tds[tds.slice_indexer(end=date)] else: days_to_zerofill = tds[tds.slice_indexer( start=last_date + tds.freq, end=date)] self._zerofill(table, len(days_to_zerofill)) new_last_date = self.last_date_in_output_for_sid(sid) assert new_last_date == date, "new_last_date={0} != date={1}".format( new_last_date, date) def set_sid_attrs(self, sid, **kwargs): """Write all the supplied kwargs as attributes of the sid's file. """ table = self._ensure_ctable(sid) for k, v in kwargs.items(): table.attrs[k] = v def write(self, data, show_progress=False, invalid_data_behavior='warn'): """Write a stream of minute data. Parameters ---------- data : iterable[(int, pd.DataFrame)] The data to write. Each element should be a tuple of sid, data where data has the following format: columns : ('open', 'high', 'low', 'close', 'volume') open : float64 high : float64 low : float64 close : float64 volume : float64|int64 index : DatetimeIndex of market minutes. A given sid may appear more than once in ``data``; however, the dates must be strictly increasing. show_progress : bool, optional Whether or not to show a progress bar while writing. """ ctx = maybe_show_progress( data, show_progress=show_progress, item_show_func=lambda e: e if e is None else str(e[0]), label="Merging minute equity files:", ) write_sid = self.write_sid with ctx as it: for e in it: write_sid(*e, invalid_data_behavior=invalid_data_behavior) def write_sid(self, sid, df, invalid_data_behavior='warn'): """ Write the OHLCV data for the given sid. If there is no bcolz ctable yet created for the sid, create it. 
If the length of the bcolz ctable is not exactly to the date before the first day provided, fill the ctable with 0s up to that date. Parameters ---------- sid : int The asset identifer for the data being written. df : pd.DataFrame DataFrame of market data with the following characteristics. columns : ('open', 'high', 'low', 'close', 'volume') open : float64 high : float64 low : float64 close : float64 volume : float64|int64 index : DatetimeIndex of market minutes. """ cols = { 'open': df.open.values, 'high': df.high.values, 'low': df.low.values, 'close': df.close.values, 'volume': df.volume.values, } dts = df.index.values # Call internal method, since DataFrame has already ensured matching # index and value lengths. self._write_cols(sid, dts, cols, invalid_data_behavior) def write_cols(self, sid, dts, cols, invalid_data_behavior='warn'): """ Write the OHLCV data for the given sid. If there is no bcolz ctable yet created for the sid, create it. If the length of the bcolz ctable is not exactly to the date before the first day provided, fill the ctable with 0s up to that date. Parameters ---------- sid : int The asset identifier for the data being written. dts : datetime64 array The dts corresponding to values in cols. cols : dict of str -> np.array dict of market data with the following characteristics. keys are ('open', 'high', 'low', 'close', 'volume') open : float64 high : float64 low : float64 close : float64 volume : float64|int64 """ if not all(len(dts) == len(cols[name]) for name in self.COL_NAMES): raise BcolzMinuteWriterColumnMismatch( "Length of dts={0} should match cols: {1}".format( len(dts), " ".join("{0}={1}".format(name, len(cols[name])) for name in self.COL_NAMES))) self._write_cols(sid, dts, cols, invalid_data_behavior) def _write_cols(self, sid, dts, cols, invalid_data_behavior): """ Internal method for `write_cols` and `write`. Parameters ---------- sid : int The asset identifier for the data being written. dts : datetime64 array The dts corresponding to values in cols. cols : dict of str -> np.array dict of market data with the following characteristics. keys are ('open', 'high', 'low', 'close', 'volume') open : float64 high : float64 low : float64 close : float64 volume : float64|int64 """ table = self._ensure_ctable(sid) tds = self._session_labels input_first_day = self._calendar.minute_to_session_label( pd.Timestamp(dts[0]), direction='previous') last_date = self.last_date_in_output_for_sid(sid) day_before_input = input_first_day - tds.freq self.pad(sid, day_before_input) table = self._ensure_ctable(sid) # Get the number of minutes already recorded in this sid's ctable num_rec_mins = table.size all_minutes = self._minute_index # Get the latest minute we wish to write to the ctable last_minute_to_write = pd.Timestamp(dts[-1]) if not last_minute_to_write.tzname(): last_minute_to_write = last_minute_to_write.tz_localize('utc') # In the event that we've already written some minutely data to the # ctable, guard against overwriting that data. 
if num_rec_mins > 0: last_recorded_minute = all_minutes[num_rec_mins - 1] if last_minute_to_write <= last_recorded_minute: raise BcolzMinuteOverlappingData(dedent(""" Data with last_date={0} already includes input start={1} for sid={2}""".strip()).format(last_date, input_first_day, sid)) latest_min_count = all_minutes.get_loc(last_minute_to_write) # Get all the minutes we wish to write (all market minutes after the # latest currently written, up to and including last_minute_to_write) all_minutes_in_window = all_minutes[num_rec_mins:latest_min_count + 1] minutes_count = all_minutes_in_window.size open_col = np.zeros(minutes_count, dtype=np.uint32) high_col = np.zeros(minutes_count, dtype=np.uint32) low_col = np.zeros(minutes_count, dtype=np.uint32) close_col = np.zeros(minutes_count, dtype=np.uint32) vol_col = np.zeros(minutes_count, dtype=np.uint32) dt_ixs = np.searchsorted(all_minutes_in_window.values, dts.astype('datetime64[ns]')) ohlc_ratio = self.ohlc_ratio_for_sid(sid) ( open_col[dt_ixs], high_col[dt_ixs], low_col[dt_ixs], close_col[dt_ixs], vol_col[dt_ixs], ) = convert_cols(cols, ohlc_ratio, sid, invalid_data_behavior) table.append([ open_col, high_col, low_col, close_col, vol_col ]) table.flush() def data_len_for_day(self, day): """ Return the number of data points up to and including the provided day. """ day_ix = self._session_labels.get_loc(day) # Add one to the 0-indexed day_ix to get the number of days. num_days = day_ix + 1 return num_days * self._minutes_per_day def truncate(self, date): """Truncate data beyond this date in all ctables.""" truncate_slice_end = self.data_len_for_day(date) glob_path = os.path.join(self._rootdir, "*", "*", "*.bcolz") sid_paths = sorted(glob(glob_path)) for sid_path in sid_paths: file_name = os.path.basename(sid_path) try: table = bcolz.open(rootdir=sid_path) except IOError: continue if table.len <= truncate_slice_end: logger.info("{0} not past truncate date={1}.", file_name, date) continue logger.info( "Truncating {0} at end_date={1}", file_name, date.date() ) table.resize(truncate_slice_end) # Update end session in metadata. metadata = BcolzMinuteBarMetadata.read(self._rootdir) metadata.end_session = date metadata.write(self._rootdir) class BcolzMinuteBarReader(MinuteBarReader): """ Reader for data written by BcolzMinuteBarWriter Parameters ---------- rootdir : string The root directory containing the metadata and asset bcolz directories. See Also -------- zipline.data.minute_bars.BcolzMinuteBarWriter """ FIELDS = ('open', 'high', 'low', 'close', 'volume') DEFAULT_MINUTELY_SID_CACHE_SIZES = { 'close': 3000, 'open': 1550, 'high': 1550, 'low': 1550, 'volume': 1550, } assert set(FIELDS) == set(DEFAULT_MINUTELY_SID_CACHE_SIZES), \ "FIELDS should match DEFAULT_MINUTELY_SID_CACHE_SIZES keys" # Wrap the defaults in proxy so that we don't accidentally mutate them in # place in the constructor. If a user wants to change the defaults, they # can do so by mutating DEFAULT_MINUTELY_SID_CACHE_SIZES. 
_default_proxy = mappingproxy(DEFAULT_MINUTELY_SID_CACHE_SIZES) def __init__(self, rootdir, sid_cache_sizes=_default_proxy): self._rootdir = rootdir metadata = self._get_metadata() self._start_session = metadata.start_session self._end_session = metadata.end_session self.calendar = metadata.calendar slicer = self.calendar.schedule.index.slice_indexer( self._start_session, self._end_session, ) self._schedule = self.calendar.schedule[slicer] self._market_opens = self._schedule.market_open self._market_open_values = self._market_opens.values.\ astype('datetime64[m]').astype(np.int64) self._market_closes = self._schedule.market_close self._market_close_values = self._market_closes.values.\ astype('datetime64[m]').astype(np.int64) self._default_ohlc_inverse = 1.0 / metadata.default_ohlc_ratio ohlc_ratios = metadata.ohlc_ratios_per_sid if ohlc_ratios: self._ohlc_inverses_per_sid = ( valmap(lambda x: 1.0 / x, ohlc_ratios)) else: self._ohlc_inverses_per_sid = None self._minutes_per_day = metadata.minutes_per_day self._carrays = { field: LRU(sid_cache_sizes[field]) for field in self.FIELDS } self._last_get_value_dt_position = None self._last_get_value_dt_value = None # This is to avoid any bad data or other performance-killing situation # where there a consecutive streak of 0 (no volume) starting at an # asset's start date. # if asset 1 started on 2015-01-03 but its first trade is 2015-01-06 # 10:31 AM US/Eastern, this dict would store {1: 23675971}, # which is the minute epoch of that date. self._known_zero_volume_dict = {} def _get_metadata(self): return BcolzMinuteBarMetadata.read(self._rootdir) @property def trading_calendar(self): return self.calendar @lazyval def last_available_dt(self): _, close = self.calendar.open_and_close_for_session(self._end_session) return close @property def first_trading_day(self): return self._start_session def _ohlc_ratio_inverse_for_sid(self, sid): if self._ohlc_inverses_per_sid is not None: try: return self._ohlc_inverses_per_sid[sid] except KeyError: pass # If we can not get a sid-specific OHLC inverse for this sid, # fallback to the default. return self._default_ohlc_inverse def _minutes_to_exclude(self): """ Calculate the minutes which should be excluded when a window occurs on days which had an early close, i.e. days where the close based on the regular period of minutes per day and the market close do not match. Returns ------- List of DatetimeIndex representing the minutes to exclude because of early closes. """ market_opens = self._market_opens.values.astype('datetime64[m]') market_closes = self._market_closes.values.astype('datetime64[m]') minutes_per_day = (market_closes - market_opens).astype(np.int64) early_indices = np.where( minutes_per_day != self._minutes_per_day - 1)[0] early_opens = self._market_opens[early_indices] early_closes = self._market_closes[early_indices] minutes = [(market_open, early_close) for market_open, early_close in zip(early_opens, early_closes)] return minutes @lazyval def _minute_exclusion_tree(self): """ Build an interval tree keyed by the start and end of each range of positions should be dropped from windows. (These are the minutes between an early close and the minute which would be the close based on the regular period if there were no early close.) The value of each node is the same start and end position stored as a tuple. The data is stored as such in support of a fast answer to the question, does a given start and end position overlap any of the exclusion spans? 
Returns ------- IntervalTree containing nodes which represent the minutes to exclude because of early closes. """ itree = IntervalTree() for market_open, early_close in self._minutes_to_exclude(): start_pos = self._find_position_of_minute(early_close) + 1 end_pos = ( self._find_position_of_minute(market_open) + self._minutes_per_day - 1 ) data = (start_pos, end_pos) itree[start_pos:end_pos + 1] = data return itree def _exclusion_indices_for_range(self, start_idx, end_idx): """ Returns ------- List of tuples of (start, stop) which represent the ranges of minutes which should be excluded when a market minute window is requested. """ itree = self._minute_exclusion_tree if itree.overlaps(start_idx, end_idx): ranges = [] intervals = itree[start_idx:end_idx] for interval in intervals: ranges.append(interval.data) return sorted(ranges) else: return None def _get_carray_path(self, sid, field): sid_subdir = _sid_subdir_path(sid) # carrays are subdirectories of the sid's rootdir return os.path.join(self._rootdir, sid_subdir, field) def _open_minute_file(self, field, sid): sid = int(sid) try: carray = self._carrays[field][sid] except KeyError: try: carray = self._carrays[field][sid] = bcolz.carray( rootdir=self._get_carray_path(sid, field), mode='r', ) except IOError: raise NoDataForSid('No minute data for sid {}.'.format(sid)) return carray def table_len(self, sid): """Returns the length of the underlying table for this sid.""" return len(self._open_minute_file('close', sid)) def get_sid_attr(self, sid, name): sid_subdir = _sid_subdir_path(sid) sid_path = os.path.join(self._rootdir, sid_subdir) attrs = bcolz.attrs.attrs(sid_path, 'r') try: return attrs[name] except KeyError: return None def get_value(self, sid, dt, field): """ Retrieve the pricing info for the given sid, dt, and field. Parameters ---------- sid : int Asset identifier. dt : datetime-like The datetime at which the trade occurred. field : string The type of pricing data to retrieve. ('open', 'high', 'low', 'close', 'volume') Returns ------- out : float|int The market data for the given sid, dt, and field coordinates. For OHLC: Returns a float if a trade occurred at the given dt. If no trade occurred, a np.nan is returned. For volume: Returns the integer value of the volume. (A volume of 0 signifies no trades for the given dt.) 
""" if self._last_get_value_dt_value == dt.value: minute_pos = self._last_get_value_dt_position else: try: minute_pos = self._find_position_of_minute(dt) except ValueError: raise NoDataOnDate() self._last_get_value_dt_value = dt.value self._last_get_value_dt_position = minute_pos try: value = self._open_minute_file(field, sid)[minute_pos] except IndexError: value = 0 if value == 0: if field == 'volume': return 0 else: return np.nan if field != 'volume': value *= self._ohlc_ratio_inverse_for_sid(sid) return value def get_last_traded_dt(self, asset, dt): minute_pos = self._find_last_traded_position(asset, dt) if minute_pos == -1: return pd.NaT return self._pos_to_minute(minute_pos) def _find_last_traded_position(self, asset, dt): volumes = self._open_minute_file('volume', asset) start_date_minute = asset.start_date.value / NANOS_IN_MINUTE dt_minute = dt.value / NANOS_IN_MINUTE try: # if we know of a dt before which this asset has no volume, # don't look before that dt earliest_dt_to_search = self._known_zero_volume_dict[asset.sid] except KeyError: earliest_dt_to_search = start_date_minute if dt_minute < earliest_dt_to_search: return -1 pos = find_last_traded_position_internal( self._market_open_values, self._market_close_values, dt_minute, earliest_dt_to_search, volumes, self._minutes_per_day, ) if pos == -1: # if we didn't find any volume before this dt, save it to avoid # work in the future. try: self._known_zero_volume_dict[asset.sid] = max( dt_minute, self._known_zero_volume_dict[asset.sid] ) except KeyError: self._known_zero_volume_dict[asset.sid] = dt_minute return pos def _pos_to_minute(self, pos): minute_epoch = minute_value( self._market_open_values, pos, self._minutes_per_day ) return pd.Timestamp(minute_epoch, tz='UTC', unit="m") def _find_position_of_minute(self, minute_dt): """ Internal method that returns the position of the given minute in the list of every trading minute since market open of the first trading day. Adjusts non market minutes to the last close. ex. this method would return 1 for 2002-01-02 9:32 AM Eastern, if 2002-01-02 is the first trading day of the dataset. Parameters ---------- minute_dt: pd.Timestamp The minute whose position should be calculated. Returns ------- int: The position of the given minute in the list of all trading minutes since market open on the first trading day. """ return find_position_of_minute( self._market_open_values, self._market_close_values, minute_dt.value / NANOS_IN_MINUTE, self._minutes_per_day, False, ) def load_raw_arrays(self, fields, start_dt, end_dt, sids): """ Parameters ---------- fields : list of str 'open', 'high', 'low', 'close', or 'volume' start_dt: Timestamp Beginning of the window range. end_dt: Timestamp End of the window range. sids : list of int The asset identifiers in the window. Returns ------- list of np.ndarray A list with an entry per field of ndarrays with shape (minutes in range, sids) with a dtype of float64, containing the values for the respective field over start and end dt range. 
""" start_idx = self._find_position_of_minute(start_dt) end_idx = self._find_position_of_minute(end_dt) num_minutes = (end_idx - start_idx + 1) results = [] indices_to_exclude = self._exclusion_indices_for_range( start_idx, end_idx) if indices_to_exclude is not None: for excl_start, excl_stop in indices_to_exclude: length = excl_stop - excl_start + 1 num_minutes -= length shape = num_minutes, len(sids) for field in fields: if field != 'volume': out = np.full(shape, np.nan) else: out = np.zeros(shape, dtype=np.uint32) for i, sid in enumerate(sids): carray = self._open_minute_file(field, sid) values = carray[start_idx:end_idx + 1] if indices_to_exclude is not None: for excl_start, excl_stop in indices_to_exclude[::-1]: excl_slice = np.s_[ excl_start - start_idx:excl_stop - start_idx + 1] values = np.delete(values, excl_slice) where = values != 0 # first slice down to len(where) because we might not have # written data for all the minutes requested if field != 'volume': out[:len(where), i][where] = ( values[where] * self._ohlc_ratio_inverse_for_sid(sid)) else: out[:len(where), i][where] = values[where] results.append(out) return results class MinuteBarUpdateReader(with_metaclass(ABCMeta, object)): """ Abstract base class for minute update readers. """ @abstractmethod def read(self, dts, sids): """ Read and return pricing update data. Parameters ---------- dts : DatetimeIndex The minutes for which to read the pricing updates. sids : iter[int] The sids for which to read the pricing updates. Returns ------- data : iter[(int, DataFrame)] Returns an iterable of ``sid`` to the corresponding OHLCV data. """ raise NotImplementedError() class H5MinuteBarUpdateWriter(object): """ Writer for files containing minute bar updates for consumption by a writer for a ``MinuteBarReader`` format. Parameters ---------- path : str The destination path. complevel : int, optional The HDF5 complevel, defaults to ``5``. complib : str, optional The HDF5 complib, defaults to ``zlib``. """ FORMAT_VERSION = 0 _COMPLEVEL = 5 _COMPLIB = 'zlib' def __init__(self, path, complevel=None, complib=None): self._complevel = complevel if complevel \ is not None else self._COMPLEVEL self._complib = complib if complib \ is not None else self._COMPLIB self._path = path def write(self, frames): """ Write the frames to the target HDF5 file, using the format used by ``pd.Panel.to_hdf`` Parameters ---------- frames : iter[(int, DataFrame)] or dict[int -> DataFrame] An iterable or other mapping of sid to the corresponding OHLCV pricing data. """ with HDFStore(self._path, 'w', complevel=self._complevel, complib=self._complib) \ as store: panel = pd.concat(dict(frames), axis=1) panel.to_hdf(store, 'updates') with tables.open_file(self._path, mode='r+') as h5file: h5file.set_node_attr('/', 'version', 0) class H5MinuteBarUpdateReader(MinuteBarUpdateReader): """ Reader for minute bar updates stored in HDF5 files. Parameters ---------- path : str The path of the HDF5 file from which to source data. """ def __init__(self, path): try: self._panel = pd.read_hdf(path) return except TypeError: pass # There is a bug in `pandas.read_hdf` whereby in Python 3 it fails to # read the timezone attr of an h5 file if that file was written in # Python 2. Until zipline has dropped Python 2 entirely we are at risk # of hitting this issue. For now, use h5py to read the file instead. 
# The downside of using h5py directly is that we need to interpret the # attrs manually when creating our panel (specifically the tz attr), # but since we know exactly how the file was written this should be # pretty straightforward. with h5py.File(path, 'r') as f: updates = f['updates'] values = updates['block0_values'] items = updates['axis0'] major = updates['axis1'] minor = updates['axis2'] # Our current version of h5py is unable to read the tz attr in the # tests as it was written by HDFStore. This is fixed in version # 2.10.0 of h5py, but that requires >=Python3.7 on conda, so until # then we should be safe to assume UTC. try: tz = major.attrs['tz'].decode() except OSError: tz = 'UTC' self._panel = pd.Panel( data=np.array(values).T, items=np.array(items), major_axis=pd.DatetimeIndex(major, tz=tz, freq='T'), minor_axis=np.array(minor).astype('U'), ) def read(self, dts, sids): result = [] for sid in sids: result.append((sid, self._panel[sid].loc[dts])) return iter(result)
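

# --- usage sketch (illustrative; not part of the original module) ----------
# A hedged end-to-end example of the bcolz minute-bar round trip described
# above: write one synthetic NYSE session of OHLCV for a single sid with the
# BcolzMinuteBarWriter defined in this module, then read a spot value back
# with BcolzMinuteBarReader. The sid, prices and temp directory are purely
# illustrative.
def _example_minute_bar_round_trip():
    import tempfile

    import numpy as np
    import pandas as pd
    from trading_calendars import get_calendar

    calendar = get_calendar('XNYS')
    session = pd.Timestamp('2016-01-05', tz='UTC')
    minutes = calendar.minutes_for_session(session)  # 390 market minutes

    rootdir = tempfile.mkdtemp()
    writer = BcolzMinuteBarWriter(
        rootdir,
        calendar,
        start_session=session,
        end_session=session,
        minutes_per_day=US_EQUITIES_MINUTES_PER_DAY,
    )

    frame = pd.DataFrame(
        {
            'open': np.full(len(minutes), 10.0),
            'high': np.full(len(minutes), 10.5),
            'low': np.full(len(minutes), 9.5),
            'close': np.full(len(minutes), 10.25),
            'volume': np.full(len(minutes), 100.0),
        },
        index=minutes,
    )
    writer.write_sid(1, frame)

    # Prices are stored as uint32 (price * ohlc_ratio) and scaled back on
    # read; a stored OHLC value of 0 comes back as NaN.
    reader = BcolzMinuteBarReader(rootdir)
    return reader.get_value(1, minutes[0], 'close')  # expected: 10.25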
zipline-trader
/zipline_trader-1.6.1-cp36-cp36m-win32.whl/zipline/data/minute_bars.py
minute_bars.py
from operator import mul from logbook import Logger import numpy as np from numpy import float64, int64, nan import pandas as pd from pandas import isnull from six import iteritems from six.moves import reduce from zipline.assets import ( Asset, AssetConvertible, Equity, Future, PricingDataAssociable, ) from zipline.assets.continuous_futures import ContinuousFuture from zipline.data.continuous_future_reader import ( ContinuousFutureSessionBarReader, ContinuousFutureMinuteBarReader ) from zipline.assets.roll_finder import ( CalendarRollFinder, VolumeRollFinder ) from zipline.data.dispatch_bar_reader import ( AssetDispatchMinuteBarReader, AssetDispatchSessionBarReader ) from zipline.data.resample import ( DailyHistoryAggregator, ReindexMinuteBarReader, ReindexSessionBarReader, ) from zipline.data.history_loader import ( DailyHistoryLoader, MinuteHistoryLoader, ) from zipline.data.bar_reader import NoDataOnDate from zipline.utils.math_utils import ( nansum, nanmean, nanstd ) from zipline.utils.memoize import remember_last, weak_lru_cache from zipline.utils.pandas_utils import ( normalize_date, timedelta_to_integral_minutes, ) from zipline.errors import HistoryWindowStartsBeforeData log = Logger('DataPortal') BASE_FIELDS = frozenset([ "open", "high", "low", "close", "volume", "price", "contract", "sid", "last_traded", ]) OHLCV_FIELDS = frozenset([ "open", "high", "low", "close", "volume" ]) OHLCVP_FIELDS = frozenset([ "open", "high", "low", "close", "volume", "price" ]) HISTORY_FREQUENCIES = set(["1m", "1d"]) DEFAULT_MINUTE_HISTORY_PREFETCH = 1560 DEFAULT_DAILY_HISTORY_PREFETCH = 40 _DEF_M_HIST_PREFETCH = DEFAULT_MINUTE_HISTORY_PREFETCH _DEF_D_HIST_PREFETCH = DEFAULT_DAILY_HISTORY_PREFETCH class DataPortal(object): """Interface to all of the data that a zipline simulation needs. This is used by the simulation runner to answer questions about the data, like getting the prices of assets on a given day or to service history calls. Parameters ---------- asset_finder : zipline.assets.assets.AssetFinder The AssetFinder instance used to resolve assets. trading_calendar: zipline.utils.calendar.exchange_calendar.TradingCalendar The calendar instance used to provide minute->session information. first_trading_day : pd.Timestamp The first trading day for the simulation. equity_daily_reader : BcolzDailyBarReader, optional The daily bar reader for equities. This will be used to service daily data backtests or daily history calls in a minute backetest. If a daily bar reader is not provided but a minute bar reader is, the minutes will be rolled up to serve the daily requests. equity_minute_reader : BcolzMinuteBarReader, optional The minute bar reader for equities. This will be used to service minute data backtests or minute history calls. This can be used to serve daily calls if no daily bar reader is provided. future_daily_reader : BcolzDailyBarReader, optional The daily bar ready for futures. This will be used to service daily data backtests or daily history calls in a minute backetest. If a daily bar reader is not provided but a minute bar reader is, the minutes will be rolled up to serve the daily requests. future_minute_reader : BcolzFutureMinuteBarReader, optional The minute bar reader for futures. This will be used to service minute data backtests or minute history calls. This can be used to serve daily calls if no daily bar reader is provided. adjustment_reader : SQLiteAdjustmentWriter, optional The adjustment reader. 
This is used to apply splits, dividends, and other adjustment data to the raw data from the readers. last_available_session : pd.Timestamp, optional The last session to make available in session-level data. last_available_minute : pd.Timestamp, optional The last minute to make available in minute-level data. """ def __init__(self, asset_finder, trading_calendar, first_trading_day, equity_daily_reader=None, equity_minute_reader=None, future_daily_reader=None, future_minute_reader=None, adjustment_reader=None, last_available_session=None, last_available_minute=None, minute_history_prefetch_length=_DEF_M_HIST_PREFETCH, daily_history_prefetch_length=_DEF_D_HIST_PREFETCH): self.trading_calendar = trading_calendar self.asset_finder = asset_finder self._adjustment_reader = adjustment_reader # caches of sid -> adjustment list self._splits_dict = {} self._mergers_dict = {} self._dividends_dict = {} # Handle extra sources, like Fetcher. self._augmented_sources_map = {} self._extra_source_df = None self._first_available_session = first_trading_day if last_available_session: self._last_available_session = last_available_session else: # Infer the last session from the provided readers. last_sessions = [ reader.last_available_dt for reader in [equity_daily_reader, future_daily_reader] if reader is not None ] if last_sessions: self._last_available_session = min(last_sessions) else: self._last_available_session = None if last_available_minute: self._last_available_minute = last_available_minute else: # Infer the last minute from the provided readers. last_minutes = [ reader.last_available_dt for reader in [equity_minute_reader, future_minute_reader] if reader is not None ] if last_minutes: self._last_available_minute = max(last_minutes) else: self._last_available_minute = None aligned_equity_minute_reader = self._ensure_reader_aligned( equity_minute_reader) aligned_equity_session_reader = self._ensure_reader_aligned( equity_daily_reader) aligned_future_minute_reader = self._ensure_reader_aligned( future_minute_reader) aligned_future_session_reader = self._ensure_reader_aligned( future_daily_reader) self._roll_finders = { 'calendar': CalendarRollFinder(self.trading_calendar, self.asset_finder), } aligned_minute_readers = {} aligned_session_readers = {} if aligned_equity_minute_reader is not None: aligned_minute_readers[Equity] = aligned_equity_minute_reader if aligned_equity_session_reader is not None: aligned_session_readers[Equity] = aligned_equity_session_reader if aligned_future_minute_reader is not None: aligned_minute_readers[Future] = aligned_future_minute_reader aligned_minute_readers[ContinuousFuture] = \ ContinuousFutureMinuteBarReader( aligned_future_minute_reader, self._roll_finders, ) if aligned_future_session_reader is not None: aligned_session_readers[Future] = aligned_future_session_reader self._roll_finders['volume'] = VolumeRollFinder( self.trading_calendar, self.asset_finder, aligned_future_session_reader, ) aligned_session_readers[ContinuousFuture] = \ ContinuousFutureSessionBarReader( aligned_future_session_reader, self._roll_finders, ) _dispatch_minute_reader = AssetDispatchMinuteBarReader( self.trading_calendar, self.asset_finder, aligned_minute_readers, self._last_available_minute, ) _dispatch_session_reader = AssetDispatchSessionBarReader( self.trading_calendar, self.asset_finder, aligned_session_readers, self._last_available_session, ) self._pricing_readers = { 'minute': _dispatch_minute_reader, 'daily': _dispatch_session_reader, } self._daily_aggregator = DailyHistoryAggregator( 
self.trading_calendar.schedule.market_open, _dispatch_minute_reader, self.trading_calendar ) self._history_loader = DailyHistoryLoader( self.trading_calendar, _dispatch_session_reader, self._adjustment_reader, self.asset_finder, self._roll_finders, prefetch_length=daily_history_prefetch_length, ) self._minute_history_loader = MinuteHistoryLoader( self.trading_calendar, _dispatch_minute_reader, self._adjustment_reader, self.asset_finder, self._roll_finders, prefetch_length=minute_history_prefetch_length, ) self._first_trading_day = str(first_trading_day.date()) # Get the first trading minute self._first_trading_minute, _ = ( self.trading_calendar.open_and_close_for_session( self._first_trading_day ) if self._first_trading_day is not None else (None, None) ) # Store the locs of the first day and first minute self._first_trading_day_loc = ( self.trading_calendar.all_sessions.get_loc(self._first_trading_day) if self._first_trading_day is not None else None ) def _ensure_reader_aligned(self, reader): if reader is None: return if reader.trading_calendar.name == self.trading_calendar.name: return reader elif reader.data_frequency == 'minute': return ReindexMinuteBarReader( self.trading_calendar, reader, self._first_available_session, self._last_available_session ) elif reader.data_frequency == 'session': return ReindexSessionBarReader( self.trading_calendar, reader, self._first_available_session, self._last_available_session ) def _reindex_extra_source(self, df, source_date_index): return df.reindex(index=source_date_index, method='ffill') def handle_extra_source(self, source_df, sim_params): """ Extra sources always have a sid column. We expand the given data (by forward filling) to the full range of the simulation dates, so that lookup is fast during simulation. """ if source_df is None: return # Normalize all the dates in the df source_df.index = source_df.index.normalize() # source_df's sid column can either consist of assets we know about # (such as sid(24)) or of assets we don't know about (such as # palladium). # # In both cases, we break up the dataframe into individual dfs # that only contain a single asset's information. ie, if source_df # has data for PALLADIUM and GOLD, we split source_df into two # dataframes, one for each. (same applies if source_df has data for # AAPL and IBM). # # We then take each child df and reindex it to the simulation's date # range by forward-filling missing values. this makes reads simpler. # # Finally, we store the data. For each column, we store a mapping in # self.augmented_sources_map from the column to a dictionary of # asset -> df. In other words, # self.augmented_sources_map['days_to_cover']['AAPL'] gives us the df # holding that data. source_date_index = self.trading_calendar.sessions_in_range( sim_params.start_session, sim_params.end_session ) # Break the source_df up into one dataframe per sid. This lets # us (more easily) calculate accurate start/end dates for each sid, # de-dup data, and expand the data to fit the backtest start/end date. grouped_by_sid = source_df.groupby(["sid"]) group_names = grouped_by_sid.groups.keys() group_dict = {} for group_name in group_names: group_dict[group_name] = grouped_by_sid.get_group(group_name) # This will be the dataframe which we query to get fetcher assets at # any given time. Get's overwritten every time there's a new fetcher # call extra_source_df = pd.DataFrame() for identifier, df in iteritems(group_dict): # Since we know this df only contains a single sid, we can safely # de-dupe by the index (dt). 
If minute granularity, will take the # last data point on any given day df = df.groupby(level=0).last() # Reindex the dataframe based on the backtest start/end date. # This makes reads easier during the backtest. df = self._reindex_extra_source(df, source_date_index) for col_name in df.columns.difference(['sid']): if col_name not in self._augmented_sources_map: self._augmented_sources_map[col_name] = {} self._augmented_sources_map[col_name][identifier] = df # Append to extra_source_df the reindexed dataframe for the single # sid extra_source_df = extra_source_df.append(df) self._extra_source_df = extra_source_df def _get_pricing_reader(self, data_frequency): return self._pricing_readers[data_frequency] def get_last_traded_dt(self, asset, dt, data_frequency): """ Given an asset and dt, returns the last traded dt from the viewpoint of the given dt. If there is a trade on the dt, the answer is dt provided. """ return self._get_pricing_reader(data_frequency).get_last_traded_dt( asset, dt) @staticmethod def _is_extra_source(asset, field, map): """ Internal method that determines if this asset/field combination represents a fetcher value or a regular OHLCVP lookup. """ # If we have an extra source with a column called "price", only look # at it if it's on something like palladium and not AAPL (since our # own price data always wins when dealing with assets). return not (field in BASE_FIELDS and (isinstance(asset, (Asset, ContinuousFuture)))) def _get_fetcher_value(self, asset, field, dt): day = normalize_date(dt) try: return \ self._augmented_sources_map[field][asset].loc[day, field] except KeyError: return np.NaN def _get_single_asset_value(self, session_label, asset, field, dt, data_frequency): if self._is_extra_source( asset, field, self._augmented_sources_map): return self._get_fetcher_value(asset, field, dt) if field not in BASE_FIELDS: raise KeyError("Invalid column: " + str(field)) if dt < asset.start_date or \ (data_frequency == "daily" and session_label > asset.end_date) or \ (data_frequency == "minute" and session_label > asset.end_date): if field == "volume": return 0 elif field == "contract": return None elif field != "last_traded": return np.NaN if data_frequency == "daily": if field == "contract": return self._get_current_contract(asset, session_label) else: return self._get_daily_spot_value( asset, field, session_label, ) else: if field == "last_traded": return self.get_last_traded_dt(asset, dt, 'minute') elif field == "price": return self._get_minute_spot_value( asset, "close", dt, ffill=True, ) elif field == "contract": return self._get_current_contract(asset, dt) else: return self._get_minute_spot_value(asset, field, dt) def get_spot_value(self, assets, field, dt, data_frequency): """ Public API method that returns a scalar value representing the value of the desired asset's field at either the given dt. Parameters ---------- assets : Asset, ContinuousFuture, or iterable of same. The asset or assets whose data is desired. field : {'open', 'high', 'low', 'close', 'volume', 'price', 'last_traded'} The desired field of the asset. dt : pd.Timestamp The timestamp for the desired value. data_frequency : str The frequency of the data to query; i.e. whether the data is 'daily' or 'minute' bars Returns ------- value : float, int, or pd.Timestamp The spot value of ``field`` for ``asset`` The return type is based on the ``field`` requested. If the field is one of 'open', 'high', 'low', 'close', or 'price', the value will be a float. If the ``field`` is 'volume' the value will be a int. 
If the ``field`` is 'last_traded' the value will be a Timestamp. """ assets_is_scalar = False if isinstance(assets, (AssetConvertible, PricingDataAssociable)): assets_is_scalar = True else: # If 'assets' was not one of the expected types then it should be # an iterable. try: iter(assets) except TypeError: raise TypeError( "Unexpected 'assets' value of type {}." .format(type(assets)) ) session_label = self.trading_calendar.minute_to_session_label(dt) if assets_is_scalar: return self._get_single_asset_value( session_label, assets, field, dt, data_frequency, ) else: get_single_asset_value = self._get_single_asset_value return [ get_single_asset_value( session_label, asset, field, dt, data_frequency, ) for asset in assets ] def get_scalar_asset_spot_value(self, asset, field, dt, data_frequency): """ Public API method that returns a scalar value representing the value of the desired asset's field at either the given dt. Parameters ---------- assets : Asset The asset or assets whose data is desired. This cannot be an arbitrary AssetConvertible. field : {'open', 'high', 'low', 'close', 'volume', 'price', 'last_traded'} The desired field of the asset. dt : pd.Timestamp The timestamp for the desired value. data_frequency : str The frequency of the data to query; i.e. whether the data is 'daily' or 'minute' bars Returns ------- value : float, int, or pd.Timestamp The spot value of ``field`` for ``asset`` The return type is based on the ``field`` requested. If the field is one of 'open', 'high', 'low', 'close', or 'price', the value will be a float. If the ``field`` is 'volume' the value will be a int. If the ``field`` is 'last_traded' the value will be a Timestamp. """ return self._get_single_asset_value( self.trading_calendar.minute_to_session_label(dt), asset, field, dt, data_frequency, ) def get_adjustments(self, assets, field, dt, perspective_dt): """ Returns a list of adjustments between the dt and perspective_dt for the given field and list of assets Parameters ---------- assets : list of type Asset, or Asset The asset, or assets whose adjustments are desired. field : {'open', 'high', 'low', 'close', 'volume', \ 'price', 'last_traded'} The desired field of the asset. dt : pd.Timestamp The timestamp for the desired value. perspective_dt : pd.Timestamp The timestamp from which the data is being viewed back from. Returns ------- adjustments : list[Adjustment] The adjustments to that field. 
""" if isinstance(assets, Asset): assets = [assets] adjustment_ratios_per_asset = [] def split_adj_factor(x): return x if field != 'volume' else 1.0 / x for asset in assets: adjustments_for_asset = [] split_adjustments = self._get_adjustment_list( asset, self._splits_dict, "SPLITS" ) for adj_dt, adj in split_adjustments: if dt < adj_dt <= perspective_dt: adjustments_for_asset.append(split_adj_factor(adj)) elif adj_dt > perspective_dt: break if field != 'volume': merger_adjustments = self._get_adjustment_list( asset, self._mergers_dict, "MERGERS" ) for adj_dt, adj in merger_adjustments: if dt < adj_dt <= perspective_dt: adjustments_for_asset.append(adj) elif adj_dt > perspective_dt: break dividend_adjustments = self._get_adjustment_list( asset, self._dividends_dict, "DIVIDENDS", ) for adj_dt, adj in dividend_adjustments: if dt < adj_dt <= perspective_dt: adjustments_for_asset.append(adj) elif adj_dt > perspective_dt: break ratio = reduce(mul, adjustments_for_asset, 1.0) adjustment_ratios_per_asset.append(ratio) return adjustment_ratios_per_asset def get_adjusted_value(self, asset, field, dt, perspective_dt, data_frequency, spot_value=None): """ Returns a scalar value representing the value of the desired asset's field at the given dt with adjustments applied. Parameters ---------- asset : Asset The asset whose data is desired. field : {'open', 'high', 'low', 'close', 'volume', \ 'price', 'last_traded'} The desired field of the asset. dt : pd.Timestamp The timestamp for the desired value. perspective_dt : pd.Timestamp The timestamp from which the data is being viewed back from. data_frequency : str The frequency of the data to query; i.e. whether the data is 'daily' or 'minute' bars Returns ------- value : float, int, or pd.Timestamp The value of the given ``field`` for ``asset`` at ``dt`` with any adjustments known by ``perspective_dt`` applied. The return type is based on the ``field`` requested. If the field is one of 'open', 'high', 'low', 'close', or 'price', the value will be a float. If the ``field`` is 'volume' the value will be a int. If the ``field`` is 'last_traded' the value will be a Timestamp. """ if spot_value is None: # if this a fetcher field, we want to use perspective_dt (not dt) # because we want the new value as of midnight (fetcher only works # on a daily basis, all timestamps are on midnight) if self._is_extra_source(asset, field, self._augmented_sources_map): spot_value = self.get_spot_value(asset, field, perspective_dt, data_frequency) else: spot_value = self.get_spot_value(asset, field, dt, data_frequency) if isinstance(asset, Equity): ratio = self.get_adjustments(asset, field, dt, perspective_dt)[0] spot_value *= ratio return spot_value def _get_minute_spot_value(self, asset, column, dt, ffill=False): reader = self._get_pricing_reader('minute') if not ffill: try: return reader.get_value(asset.sid, dt, column) except NoDataOnDate: if column != 'volume': return np.nan else: return 0 # At this point the pairing of column='close' and ffill=True is # assumed. try: # Optimize the best case scenario of a liquid asset # returning a valid price. result = reader.get_value(asset.sid, dt, column) if not pd.isnull(result): return result except NoDataOnDate: # Handling of no data for the desired date is done by the # forward filling logic. # The last trade may occur on a previous day. pass # If forward filling, we want the last minute with values (up to # and including dt). 
query_dt = reader.get_last_traded_dt(asset, dt) if pd.isnull(query_dt): # no last traded dt, bail return np.nan result = reader.get_value(asset.sid, query_dt, column) if (dt == query_dt) or (dt.date() == query_dt.date()): return result # the value we found came from a different day, so we have to # adjust the data if there are any adjustments on that day barrier return self.get_adjusted_value( asset, column, query_dt, dt, "minute", spot_value=result ) def _get_daily_spot_value(self, asset, column, dt): reader = self._get_pricing_reader('daily') if column == "last_traded": last_traded_dt = reader.get_last_traded_dt(asset, dt) if isnull(last_traded_dt): return pd.NaT else: return last_traded_dt elif column in OHLCV_FIELDS: # don't forward fill try: return reader.get_value(asset, dt, column) except NoDataOnDate: return np.nan elif column == "price": found_dt = dt while True: try: value = reader.get_value( asset, found_dt, "close" ) if not isnull(value): if dt == found_dt: return value else: # adjust if needed return self.get_adjusted_value( asset, column, found_dt, dt, "minute", spot_value=value ) else: found_dt -= self.trading_calendar.day except NoDataOnDate: return np.nan @remember_last def _get_days_for_window(self, end_date, bar_count): tds = self.trading_calendar.all_sessions end_loc = tds.get_loc(end_date) start_loc = end_loc - bar_count + 1 if start_loc < self._first_trading_day_loc: raise HistoryWindowStartsBeforeData( first_trading_day=self._first_trading_day, bar_count=bar_count, suggested_start_day=tds[ self._first_trading_day_loc + bar_count ].date(), ) return tds[start_loc:end_loc + 1] def _get_history_daily_window(self, assets, end_dt, bar_count, field_to_use, data_frequency): """ Internal method that returns a dataframe containing history bars of daily frequency for the given sids. """ session = self.trading_calendar.minute_to_session_label(end_dt) days_for_window = self._get_days_for_window(session, bar_count) if len(assets) == 0: return pd.DataFrame(None, index=days_for_window, columns=None) data = self._get_history_daily_window_data( assets, days_for_window, end_dt, field_to_use, data_frequency ) return pd.DataFrame( data, index=days_for_window, columns=assets ) def _get_history_daily_window_data(self, assets, days_for_window, end_dt, field_to_use, data_frequency): if data_frequency == 'daily': # two cases where we use daily data for the whole range: # 1) the history window ends at midnight utc. # 2) the last desired day of the window is after the # last trading day, use daily data for the whole range. return self._get_daily_window_data( assets, field_to_use, days_for_window, extra_slot=False ) else: # minute mode, requesting '1d' daily_data = self._get_daily_window_data( assets, field_to_use, days_for_window[0:-1] ) if field_to_use == 'open': minute_value = self._daily_aggregator.opens( assets, end_dt) elif field_to_use == 'high': minute_value = self._daily_aggregator.highs( assets, end_dt) elif field_to_use == 'low': minute_value = self._daily_aggregator.lows( assets, end_dt) elif field_to_use == 'close': minute_value = self._daily_aggregator.closes( assets, end_dt) elif field_to_use == 'volume': minute_value = self._daily_aggregator.volumes( assets, end_dt) elif field_to_use == 'sid': minute_value = [ int(self._get_current_contract(asset, end_dt)) for asset in assets] # append the partial day. 
daily_data[-1] = minute_value return daily_data def _handle_minute_history_out_of_bounds(self, bar_count): cal = self.trading_calendar first_trading_minute_loc = ( cal.all_minutes.get_loc( self._first_trading_minute ) if self._first_trading_minute is not None else None ) suggested_start_day = cal.minute_to_session_label( cal.all_minutes[ first_trading_minute_loc + bar_count ] + cal.day ) raise HistoryWindowStartsBeforeData( first_trading_day=self._first_trading_day, bar_count=bar_count, suggested_start_day=suggested_start_day.date(), ) def _get_history_minute_window(self, assets, end_dt, bar_count, field_to_use): """ Internal method that returns a dataframe containing history bars of minute frequency for the given sids. """ # get all the minutes for this window try: minutes_for_window = self.trading_calendar.minutes_window( end_dt, -bar_count ) except KeyError: self._handle_minute_history_out_of_bounds(bar_count) if minutes_for_window[0] < self._first_trading_minute: self._handle_minute_history_out_of_bounds(bar_count) asset_minute_data = self._get_minute_window_data( assets, field_to_use, minutes_for_window, ) return pd.DataFrame( asset_minute_data, index=minutes_for_window, columns=assets ) def get_history_window(self, assets, end_dt, bar_count, frequency, field, data_frequency, ffill=True): """ Public API method that returns a dataframe containing the requested history window. Data is fully adjusted. Parameters ---------- assets : list of zipline.data.Asset objects The assets whose data is desired. bar_count: int The number of bars desired. frequency: string "1d" or "1m" field: string The desired field of the asset. data_frequency: string The frequency of the data to query; i.e. whether the data is 'daily' or 'minute' bars. ffill: boolean Forward-fill missing values. Only has effect if field is 'price'. Returns ------- A dataframe containing the requested data. """ if field not in OHLCVP_FIELDS and field != 'sid': raise ValueError("Invalid field: {0}".format(field)) if bar_count < 1: raise ValueError( "bar_count must be >= 1, but got {}".format(bar_count) ) if frequency == "1d": if field == "price": df = self._get_history_daily_window(assets, end_dt, bar_count, "close", data_frequency) else: df = self._get_history_daily_window(assets, end_dt, bar_count, field, data_frequency) elif frequency == "1m": if field == "price": df = self._get_history_minute_window(assets, end_dt, bar_count, "close") else: df = self._get_history_minute_window(assets, end_dt, bar_count, field) else: raise ValueError("Invalid frequency: {0}".format(frequency)) # forward-fill price if field == "price": if frequency == "1m": ffill_data_frequency = 'minute' elif frequency == "1d": ffill_data_frequency = 'daily' else: raise Exception( "Only 1d and 1m are supported for forward-filling.") assets_with_leading_nan = np.where(isnull(df.iloc[0]))[0] history_start, history_end = df.index[[0, -1]] if ffill_data_frequency == 'daily' and data_frequency == 'minute': # When we're looking for a daily value, but we haven't seen any # volume in today's minute bars yet, we need to use the # previous day's ffilled daily price. Using today's daily price # could yield a value from later today. 
history_start -= self.trading_calendar.day initial_values = [] for asset in df.columns[assets_with_leading_nan]: last_traded = self.get_last_traded_dt( asset, history_start, ffill_data_frequency, ) if isnull(last_traded): initial_values.append(nan) else: initial_values.append( self.get_adjusted_value( asset, field, dt=last_traded, perspective_dt=history_end, data_frequency=ffill_data_frequency, ) ) # Set leading values for assets that were missing data, then ffill. df.iloc[0, assets_with_leading_nan] = np.array( initial_values, dtype=np.float64 ) df.fillna(method='ffill', inplace=True) # forward-filling will incorrectly produce values after the end of # an asset's lifetime, so write NaNs back over the asset's # end_date. normed_index = df.index.normalize() for asset in df.columns: if history_end >= asset.end_date: # if the window extends past the asset's end date, set # all post-end-date values to NaN in that asset's series df.loc[normed_index > asset.end_date, asset] = nan return df def _get_minute_window_data(self, assets, field, minutes_for_window): """ Internal method that gets a window of adjusted minute data for an asset and specified date range. Used to support the history API method for minute bars. Missing bars are filled with NaN. Parameters ---------- assets : iterable[Asset] The assets whose data is desired. field: string The specific field to return. "open", "high", "close_price", etc. minutes_for_window: pd.DateTimeIndex The list of minutes representing the desired window. Each minute is a pd.Timestamp. Returns ------- A numpy array with requested values. """ return self._minute_history_loader.history(assets, minutes_for_window, field, False) def _get_daily_window_data(self, assets, field, days_in_window, extra_slot=True): """ Internal method that gets a window of adjusted daily data for a sid and specified date range. Used to support the history API method for daily bars. Parameters ---------- asset : Asset The asset whose data is desired. start_dt: pandas.Timestamp The start of the desired window of data. bar_count: int The number of days of data to return. field: string The specific field to return. "open", "high", "close_price", etc. extra_slot: boolean Whether to allocate an extra slot in the returned numpy array. This extra slot will hold the data for the last partial day. It's much better to create it here than to create a copy of the array later just to add a slot. Returns ------- A numpy array with requested values. Any missing slots filled with nan. """ bar_count = len(days_in_window) # create an np.array of size bar_count dtype = float64 if field != 'sid' else int64 if extra_slot: return_array = np.zeros((bar_count + 1, len(assets)), dtype=dtype) else: return_array = np.zeros((bar_count, len(assets)), dtype=dtype) if field != "volume": # volumes default to 0, so we don't need to put NaNs in the array return_array[:] = np.NAN if bar_count != 0: data = self._history_loader.history(assets, days_in_window, field, extra_slot) if extra_slot: return_array[:len(return_array) - 1, :] = data else: return_array[:len(data)] = data return return_array def _get_adjustment_list(self, asset, adjustments_dict, table_name): """ Internal method that returns a list of adjustments for the given sid. Parameters ---------- asset : Asset The asset for which to return adjustments. adjustments_dict: dict A dictionary of sid -> list that is used as a cache. table_name: string The table that contains this data in the adjustments db. 
Returns ------- adjustments: list A list of [multiplier, pd.Timestamp], earliest first """ if self._adjustment_reader is None: return [] sid = int(asset) try: adjustments = adjustments_dict[sid] except KeyError: adjustments = adjustments_dict[sid] = self._adjustment_reader.\ get_adjustments_for_sid(table_name, sid) return adjustments def get_splits(self, assets, dt): """ Returns any splits for the given sids and the given dt. Parameters ---------- assets : container Assets for which we want splits. dt : pd.Timestamp The date for which we are checking for splits. Note: this is expected to be midnight UTC. Returns ------- splits : list[(asset, float)] List of splits, where each split is a (asset, ratio) tuple. """ if self._adjustment_reader is None or not assets: return [] # convert dt to # of seconds since epoch, because that's what we use # in the adjustments db seconds = int(dt.value / 1e9) c = self._adjustment_reader.conn.cursor() c.execute(f"SELECT sid, ratio FROM SPLITS WHERE effective_date = {seconds}") splits = c.fetchall() splits = [split for split in splits if split[0] in assets] splits = [(self.asset_finder.retrieve_asset(split[0]), split[1]) for split in splits] return splits def get_stock_dividends(self, sid, trading_days): """ Returns all the stock dividends for a specific sid that occur in the given trading range. Parameters ---------- sid: int The asset whose stock dividends should be returned. trading_days: pd.DatetimeIndex The trading range. Returns ------- list: A list of objects with all relevant attributes populated. All timestamp fields are converted to pd.Timestamps. """ if self._adjustment_reader is None: return [] if len(trading_days) == 0: return [] start_dt = trading_days[0].value / 1e9 end_dt = trading_days[-1].value / 1e9 dividends = self._adjustment_reader.conn.execute( "SELECT * " "FROM stock_dividend_payouts WHERE sid = ? AND " "ex_date > ? AND pay_date < ?", (int(sid), start_dt, end_dt,)).\ fetchall() dividend_info = [] for dividend_tuple in dividends: dividend_info.append({ "sid": dividend_tuple[1], "payment_sid": dividend_tuple[2], "ratio": dividend_tuple[3], "declared_date": dividend_tuple[4], "ex_date": pd.Timestamp(dividend_tuple[5], unit="s"), "record_date": pd.Timestamp(dividend_tuple[6], unit="s"), "pay_date": pd.Timestamp(dividend_tuple[7], unit="s"), }) return dividend_info def contains(self, asset, field): return field in BASE_FIELDS or \ (field in self._augmented_sources_map and asset in self._augmented_sources_map[field]) def get_fetcher_assets(self, dt): """ Returns a list of assets for the current date, as defined by the fetcher data. Returns ------- list: a list of Asset objects. """ # return a list of assets for the current date, as defined by the # fetcher source if self._extra_source_df is None: return [] day = normalize_date(dt) if day in self._extra_source_df.index: assets = self._extra_source_df.loc[day]['sid'] else: return [] if isinstance(assets, pd.Series): return [x for x in assets if isinstance(x, Asset)] else: return [assets] if isinstance(assets, Asset) else [] # cache size picked somewhat loosely. this code exists purely to # handle deprecated API. @weak_lru_cache(20) def _get_minute_count_for_transform(self, ending_minute, days_count): # This function works in three steps. # Step 1. Count the minutes from ``ending_minute`` to the start of its # session. # Step 2. Count the minutes from the prior ``days_count - 1`` sessions. # Step 3. Return the sum of the results from steps (1) and (2). 
# Example (NYSE Calendar) # ending_minute = 2016-12-28 9:40 AM US/Eastern # days_count = 3 # Step 1. Calculate that there are 10 minutes in the ending session. # Step 2. Calculate that there are 390 + 210 = 600 minutes in the prior # two sessions. (Prior sessions are 2015-12-23 and 2015-12-24.) # 2015-12-24 is a half day. # Step 3. Return 600 + 10 = 610. cal = self.trading_calendar ending_session = cal.minute_to_session_label( ending_minute, direction="none", # It's an error to pass a non-trading minute. ) # Assume that calendar days are always full of contiguous minutes, # which means we can just take 1 + (number of minutes between the last # minute and the start of the session). We add one so that we include # the ending minute in the total. ending_session_minute_count = timedelta_to_integral_minutes( ending_minute - cal.open_and_close_for_session(ending_session)[0] ) + 1 if days_count == 1: # We just need sessions for the active day. return ending_session_minute_count # XXX: We're subtracting 2 here to account for two offsets: # 1. We only want ``days_count - 1`` sessions, since we've already # accounted for the ending session above. # 2. The API of ``sessions_window`` is to return one more session than # the requested number. I don't think any consumers actually want # that behavior, but it's the tested and documented behavior right # now, so we have to request one less session than we actually want. completed_sessions = cal.sessions_window( cal.previous_session_label(ending_session), 2 - days_count, ) completed_sessions_minute_count = ( self.trading_calendar.minutes_count_for_sessions_in_range( completed_sessions[0], completed_sessions[-1] ) ) return ending_session_minute_count + completed_sessions_minute_count def get_simple_transform(self, asset, transform_name, dt, data_frequency, bars=None): if transform_name == "returns": # returns is always calculated over the last 2 days, regardless # of the simulation's data frequency. hst = self.get_history_window( [asset], dt, 2, "1d", "price", data_frequency, ffill=True, )[asset] return (hst.iloc[-1] - hst.iloc[0]) / hst.iloc[0] if bars is None: raise ValueError("bars cannot be None!") if data_frequency == "minute": freq_str = "1m" calculated_bar_count = int(self._get_minute_count_for_transform( dt, bars )) else: freq_str = "1d" calculated_bar_count = bars price_arr = self.get_history_window( [asset], dt, calculated_bar_count, freq_str, "price", data_frequency, ffill=True, )[asset] if transform_name == "mavg": return nanmean(price_arr) elif transform_name == "stddev": return nanstd(price_arr, ddof=1) elif transform_name == "vwap": volume_arr = self.get_history_window( [asset], dt, calculated_bar_count, freq_str, "volume", data_frequency, ffill=True, )[asset] vol_sum = nansum(volume_arr) try: ret = nansum(price_arr * volume_arr) / vol_sum except ZeroDivisionError: ret = np.nan return ret def get_current_future_chain(self, continuous_future, dt): """ Retrieves the future chain for the contract at the given `dt` according the `continuous_future` specification. Returns ------- future_chain : list[Future] A list of active futures, where the first index is the current contract specified by the continuous future definition, the second is the next upcoming contract and so on. 
""" rf = self._roll_finders[continuous_future.roll_style] session = self.trading_calendar.minute_to_session_label(dt) contract_center = rf.get_contract_center( continuous_future.root_symbol, session, continuous_future.offset) oc = self.asset_finder.get_ordered_contracts( continuous_future.root_symbol) chain = oc.active_chain(contract_center, session.value) return self.asset_finder.retrieve_all(chain) def _get_current_contract(self, continuous_future, dt): rf = self._roll_finders[continuous_future.roll_style] contract_sid = rf.get_contract_center(continuous_future.root_symbol, dt, continuous_future.offset) if contract_sid is None: return None return self.asset_finder.retrieve_asset(contract_sid) @property def adjustment_reader(self): return self._adjustment_reader
# ---- end of file: /zipline_trader-1.6.1-cp36-cp36m-win32.whl/zipline/data/data_portal.py (package: zipline-trader) ----
from collections import namedtuple from errno import ENOENT from os import remove import sqlalchemy as sa from sqlalchemy.engine.reflection import Inspector from sqlalchemy.exc import IntegrityError from toolz import first from logbook import Logger import numpy as np from numpy import integer as any_integer import pandas as pd from pandas import Timestamp import six import sqlite3 from zipline.utils.functional import keysorted from zipline.utils.input_validation import preprocess from zipline.utils.numpy_utils import ( datetime64ns_dtype, float64_dtype, int64_dtype, uint32_dtype, uint64_dtype, ) from zipline.utils.pandas_utils import empty_dataframe from zipline.utils.db_utils import group_into_chunks, coerce_string_to_conn from ._adjustments import load_adjustments_from_sqlite log = Logger(__name__) SQLITE_ADJUSTMENT_TABLENAMES = frozenset(['splits', 'dividends', 'mergers']) UNPAID_ALL_QUERY_TEMPLATE = """ SELECT sid, amount, pay_date, ex_date from dividend_payouts WHERE sid IN ({0}) """ UNPAID_QUERY_TEMPLATE = """ SELECT sid, amount, pay_date from dividend_payouts WHERE ex_date={0} AND sid IN ({1}) """ Dividend = namedtuple('Dividend', ['asset', 'amount', 'pay_date']) UNPAID_ALL_STOCK_DIVIDEND_QUERY_TEMPLATE = """ SELECT sid, payment_sid, ratio, pay_date, ex_date from stock_dividend_payouts WHERE sid IN ({0}) """ UNPAID_STOCK_DIVIDEND_QUERY_TEMPLATE = """ SELECT sid, payment_sid, ratio, pay_date from stock_dividend_payouts WHERE ex_date={0} AND sid IN ({1}) """ StockDividend = namedtuple( 'StockDividend', ['asset', 'payment_asset', 'ratio', 'pay_date'], ) SQLITE_ADJUSTMENT_COLUMN_DTYPES = { 'effective_date': any_integer, 'ratio': float64_dtype, 'sid': any_integer, } SQLITE_DIVIDEND_PAYOUT_COLUMN_DTYPES = { 'sid': any_integer, 'ex_date': any_integer, 'declared_date': any_integer, 'record_date': any_integer, 'pay_date': any_integer, 'amount': float, } SQLITE_STOCK_DIVIDEND_PAYOUT_COLUMN_DTYPES = { 'sid': any_integer, 'ex_date': any_integer, 'declared_date': any_integer, 'record_date': any_integer, 'pay_date': any_integer, 'payment_sid': any_integer, 'ratio': float, } def specialize_any_integer(d): out = {} for k, v in six.iteritems(d): if v is any_integer: out[k] = int64_dtype else: out[k] = v return out class SQLiteAdjustmentReader(object): """ Loads adjustments based on corporate actions from a SQLite database. Expects data written in the format output by `SQLiteAdjustmentWriter`. Parameters ---------- conn : str or sqlite3.Connection Connection from which to load data. See Also -------- :class:`zipline.data.adjustments.SQLiteAdjustmentWriter` """ _datetime_int_cols = { 'splits': ('effective_date',), 'mergers': ('effective_date',), 'dividends': ('effective_date',), 'dividend_payouts': ( 'declared_date', 'ex_date', 'pay_date', 'record_date', ), 'stock_dividend_payouts': ( 'declared_date', 'ex_date', 'pay_date', 'record_date', ) } _raw_table_dtypes = { # We use any_integer above to be lenient in accepting different dtypes # from users. For our outputs, however, we always want to return the # same types, and any_integer turns into int32 on some numpy windows # builds, so specify int64 explicitly here. 
'splits': specialize_any_integer(SQLITE_ADJUSTMENT_COLUMN_DTYPES), 'mergers': specialize_any_integer(SQLITE_ADJUSTMENT_COLUMN_DTYPES), 'dividends': specialize_any_integer(SQLITE_ADJUSTMENT_COLUMN_DTYPES), 'dividend_payouts': specialize_any_integer( SQLITE_DIVIDEND_PAYOUT_COLUMN_DTYPES, ), 'stock_dividend_payouts': specialize_any_integer( SQLITE_STOCK_DIVIDEND_PAYOUT_COLUMN_DTYPES, ), } @preprocess(conn=coerce_string_to_conn(require_exists=True)) def __init__(self, conn): self.conn = conn self._dividend_cache = {} self._stock_dividend_cache = {} def __enter__(self): return self def __exit__(self, *exc_info): self.close() def close(self): return self.conn.close() def load_adjustments(self, dates, assets, should_include_splits, should_include_mergers, should_include_dividends, adjustment_type): """ Load collection of Adjustment objects from underlying adjustments db. Parameters ---------- dates : pd.DatetimeIndex Dates for which adjustments are needed. assets : pd.Int64Index Assets for which adjustments are needed. should_include_splits : bool Whether split adjustments should be included. should_include_mergers : bool Whether merger adjustments should be included. should_include_dividends : bool Whether dividend adjustments should be included. adjustment_type : str Whether price adjustments, volume adjustments, or both, should be included in the output. Returns ------- adjustments : dict[str -> dict[int -> Adjustment]] A dictionary containing price and/or volume adjustment mappings from index to adjustment objects to apply at that index. """ return load_adjustments_from_sqlite( self.conn, dates, assets, should_include_splits, should_include_mergers, should_include_dividends, adjustment_type, ) def load_pricing_adjustments(self, columns, dates, assets): if 'volume' not in set(columns): adjustment_type = 'price' elif len(set(columns)) == 1: adjustment_type = 'volume' else: adjustment_type = 'all' adjustments = self.load_adjustments( dates, assets, should_include_splits=True, should_include_mergers=True, should_include_dividends=True, adjustment_type=adjustment_type, ) price_adjustments = adjustments.get('price') volume_adjustments = adjustments.get('volume') return [ volume_adjustments if column == 'volume' else price_adjustments for column in columns ] def get_adjustments_for_sid(self, table_name, sid): t = (sid,) c = self.conn.cursor() c.execute(f'SELECT effective_date, ratio FROM {table_name} WHERE sid = {sid}') adjustments_for_sid = c.fetchall() c.close() return [[Timestamp(adjustment[0], unit='s', tz='UTC'), adjustment[1]] for adjustment in adjustments_for_sid] def get_dividends_with_ex_date(self, assets, date, asset_finder): divs = [] seconds = date.value / int(1e9) for asset in assets: sid = int(asset) if not sid in self._dividend_cache: c = self.conn.cursor() self._dividend_cache[sid] = pd.read_sql( UNPAID_ALL_QUERY_TEMPLATE.format(sid), self.conn, index_col='ex_date') try: cached_div = self._dividend_cache[sid].loc[seconds] div = Dividend( asset, cached_div['amount'], Timestamp(cached_div['pay_date'], unit='s', tz='UTC')) divs.append(div) except KeyError: pass return divs def get_stock_dividends_with_ex_date(self, assets, date, asset_finder): stock_divs = [] seconds = date.value / int(1e9) for asset in assets: sid = int(asset) if not sid in self._stock_dividend_cache: c = self.conn.cursor() self._stock_dividend_cache[sid] = pd.read_sql( UNPAID_ALL_STOCK_DIVIDEND_QUERY_TEMPLATE.format(sid), self.conn, index_col='ex_date') try: cached_stock_div = 
self._stock_dividend_cache[sid].loc[seconds] div = StockDividend( asset, asset_finder.retrieve_asset(cached_stock_div['payment_sid']), cached_stock_div['ratio'], Timestamp(cached_stock_div['pay_date'], unit='s', tz='UTC')) stock_divs.append(div) except KeyError: pass return stock_divs def unpack_db_to_component_dfs(self, convert_dates=False): """Returns the set of known tables in the adjustments file in DataFrame form. Parameters ---------- convert_dates : bool, optional By default, dates are returned in seconds since EPOCH. If convert_dates is True, all ints in date columns will be converted to datetimes. Returns ------- dfs : dict{str->DataFrame} Dictionary which maps table name to the corresponding DataFrame version of the table, where all date columns have been coerced back from int to datetime. """ return { t_name: self.get_df_from_table(t_name, convert_dates) for t_name in self._datetime_int_cols } def get_df_from_table(self, table_name, convert_dates=False): try: date_cols = self._datetime_int_cols[table_name] except KeyError: raise ValueError( "Requested table %s not found.\n" "Available tables: %s\n" % ( table_name, self._datetime_int_cols.keys(), ) ) # Dates are stored in second resolution as ints in adj.db tables. # Need to specifically convert them as UTC, not local time. kwargs = ( {'parse_dates': {col: {'unit': 's', 'utc': True} for col in date_cols} } if convert_dates else {} ) result = pd.read_sql( 'select * from "{}"'.format(table_name), self.conn, index_col='index', **kwargs ).rename_axis(None) if not len(result): dtypes = self._df_dtypes(table_name, convert_dates) return empty_dataframe(*keysorted(dtypes)) return result def _df_dtypes(self, table_name, convert_dates): """Get dtypes to use when unpacking sqlite tables as dataframes. """ out = self._raw_table_dtypes[table_name] if convert_dates: out = out.copy() for date_column in self._datetime_int_cols[table_name]: out[date_column] = datetime64ns_dtype return out class SQLiteAdjustmentWriter(object): """ Writer for data to be read by SQLiteAdjustmentReader Parameters ---------- conn_or_path : str or sqlite3.Connection A handle to the target sqlite database. equity_daily_bar_reader : SessionBarReader Daily bar reader to use for dividend writes. overwrite : bool, optional, default=False If True and conn_or_path is a string, remove any existing files at the given path before connecting. 
See Also -------- zipline.data.adjustments.SQLiteAdjustmentReader """ def __init__(self, conn_or_path, equity_daily_bar_reader, overwrite=False): if isinstance(conn_or_path, sqlite3.Connection): self.conn = conn_or_path self.engine = False elif isinstance(conn_or_path, six.string_types): if not conn_or_path.startswith('postgresql://'): if overwrite: try: remove(conn_or_path) except OSError as e: if e.errno != ENOENT: raise # switch to regex if we want to support other engines if conn_or_path.startswith('postgresql://'): self.engine = sa.create_engine(conn_or_path) self.conn = self.engine.connect() # not needed for sqlite self._tables = self.ensure_tables() else: self.engine = False self.conn = sqlite3.connect(conn_or_path) self.uri = conn_or_path else: raise TypeError("Unknown connection type %s" % type(conn_or_path)) self._equity_daily_bar_reader = equity_daily_bar_reader def __enter__(self): return self def __exit__(self, *exc_info): self.close() def close(self): self.conn.close() def _write(self, tablename, expected_dtypes, frame): if frame is None or frame.empty: # keeping the dtypes correct for empty frames is not easy frame = pd.DataFrame( np.array([], dtype=list(expected_dtypes.items())), ) else: if frozenset(frame.columns) != frozenset(expected_dtypes): raise ValueError( "Unexpected frame columns:\n" "Expected Columns: %s\n" "Received Columns: %s" % ( set(expected_dtypes), frame.columns.tolist(), ) ) actual_dtypes = frame.dtypes for colname, expected in six.iteritems(expected_dtypes): actual = actual_dtypes[colname] if not np.issubdtype(actual, expected): raise TypeError( "Expected data of type {expected} for column" " '{colname}', but got '{actual}'.".format( expected=expected, colname=colname, actual=actual, ), ) # in case of sqlite, use naive way of writing if not self.engine: frame.to_sql( tablename, self.conn, if_exists='append', chunksize=50000, ) else: frame.reset_index(inplace=True) frame.drop(columns='index', inplace=True) table = self._tables[tablename] constr_table = table # sqlite needs a table-string, postgres needs a table-object if 'sqlite:///' in str(self.engine): constr_table = str(table) insp = Inspector.from_engine(self.engine) constrs = insp.get_unique_constraints(constr_table) uq_cols = set() for constr in constrs: for col in constr['column_names']: uq_cols.add(col) for i, row in frame.iterrows(): values = {} for column in list(frame.columns): values[column] = row[column] try: ins = table.insert().values(values) self.engine.execute(ins) except IntegrityError: uq_col_objs = [col for col in table.columns if col.name in uq_cols] where_cond = False for col_obj in uq_col_objs: if where_cond == False: where_cond = col_obj == values[col_obj.name] else: where_cond = sa.and_(where_cond, col_obj == values[col_obj.name]) upd = table.update().where(where_cond).values(values) self.engine.execute(upd) def write_frame(self, tablename, frame): if tablename not in SQLITE_ADJUSTMENT_TABLENAMES: raise ValueError( "Adjustment table %s not in %s" % ( tablename, SQLITE_ADJUSTMENT_TABLENAMES, ) ) if not (frame is None or frame.empty): frame = frame.copy() frame['effective_date'] = frame['effective_date'].values.astype( 'datetime64[s]', ).astype('int64') return self._write( tablename, SQLITE_ADJUSTMENT_COLUMN_DTYPES, frame, ) def write_dividend_payouts(self, frame): """ Write dividend payout data to SQLite table `dividend_payouts`. 
""" return self._write( 'dividend_payouts', SQLITE_DIVIDEND_PAYOUT_COLUMN_DTYPES, frame, ) def write_stock_dividend_payouts(self, frame): return self._write( 'stock_dividend_payouts', SQLITE_STOCK_DIVIDEND_PAYOUT_COLUMN_DTYPES, frame, ) def calc_dividend_ratios(self, dividends): """ Calculate the ratios to apply to equities when looking back at pricing history so that the price is smoothed over the ex_date, when the market adjusts to the change in equity value due to upcoming dividend. Returns ------- DataFrame A frame in the same format as splits and mergers, with keys - sid, the id of the equity - effective_date, the date in seconds on which to apply the ratio. - ratio, the ratio to apply to backwards looking pricing data. """ if dividends is None or dividends.empty: return pd.DataFrame(np.array( [], dtype=[ ('sid', uint64_dtype), ('effective_date', uint32_dtype), ('ratio', float64_dtype), ], )) pricing_reader = self._equity_daily_bar_reader input_sids = dividends.sid.values unique_sids, sids_ix = np.unique(input_sids, return_inverse=True) dates = pricing_reader.sessions.values close, = pricing_reader.load_raw_arrays( ['close'], pd.Timestamp(dates[0], tz='UTC'), pd.Timestamp(dates[-1], tz='UTC'), unique_sids, ) date_ix = np.searchsorted(dates, dividends.ex_date.values) mask = date_ix > 0 date_ix = date_ix[mask] sids_ix = sids_ix[mask] input_dates = dividends.ex_date.values[mask] # subtract one day to get the close on the day prior to the merger previous_close = close[date_ix - 1, sids_ix] input_sids = input_sids[mask] amount = dividends.amount.values[mask] ratio = 1.0 - amount / previous_close non_nan_ratio_mask = ~np.isnan(ratio) for ix in np.flatnonzero(~non_nan_ratio_mask): log.warn( "Couldn't compute ratio for dividend" " sid={sid}, ex_date={ex_date:%Y-%m-%d}, amount={amount:.3f}", sid=input_sids[ix], ex_date=pd.Timestamp(input_dates[ix]), amount=amount[ix], ) positive_ratio_mask = ratio > 0 for ix in np.flatnonzero(~positive_ratio_mask & non_nan_ratio_mask): log.warn( "Dividend ratio <= 0 for dividend" " sid={sid}, ex_date={ex_date:%Y-%m-%d}, amount={amount:.3f}", sid=input_sids[ix], ex_date=pd.Timestamp(input_dates[ix]), amount=amount[ix], ) valid_ratio_mask = non_nan_ratio_mask & positive_ratio_mask return pd.DataFrame({ 'sid': input_sids[valid_ratio_mask], 'effective_date': input_dates[valid_ratio_mask], 'ratio': ratio[valid_ratio_mask], }) def _write_dividends(self, dividends): if dividends is None: dividend_payouts = None else: dividend_payouts = dividends.copy() dividend_payouts['ex_date'] = dividend_payouts['ex_date'].values.\ astype('datetime64[s]').astype(int64_dtype) dividend_payouts['record_date'] = \ dividend_payouts['record_date'].values.\ astype('datetime64[s]').astype(int64_dtype) dividend_payouts['declared_date'] = \ dividend_payouts['declared_date'].values.\ astype('datetime64[s]').astype(int64_dtype) dividend_payouts['pay_date'] = \ dividend_payouts['pay_date'].values.astype('datetime64[s]').\ astype(int64_dtype) self.write_dividend_payouts(dividend_payouts) def _write_stock_dividends(self, stock_dividends): if stock_dividends is None: stock_dividend_payouts = None else: stock_dividend_payouts = stock_dividends.copy() stock_dividend_payouts['ex_date'] = \ stock_dividend_payouts['ex_date'].values.\ astype('datetime64[s]').astype(int64_dtype) stock_dividend_payouts['record_date'] = \ stock_dividend_payouts['record_date'].values.\ astype('datetime64[s]').astype(int64_dtype) stock_dividend_payouts['declared_date'] = \ stock_dividend_payouts['declared_date'].\ 
values.astype('datetime64[s]').astype(int64_dtype) stock_dividend_payouts['pay_date'] = \ stock_dividend_payouts['pay_date'].\ values.astype('datetime64[s]').astype(int64_dtype) self.write_stock_dividend_payouts(stock_dividend_payouts) def write_dividend_data(self, dividends, stock_dividends=None): """ Write both dividend payouts and the derived price adjustment ratios. """ # First write the dividend payouts. self._write_dividends(dividends) self._write_stock_dividends(stock_dividends) # Second from the dividend payouts, calculate ratios. dividend_ratios = self.calc_dividend_ratios(dividends) self.write_frame('dividends', dividend_ratios) def ensure_tables(self): metadata = sa.MetaData() tables = {} tables['dividend_payouts'] = sa.Table( 'dividend_payouts', metadata, sa.Column('id', sa.BigInteger(), unique=True, nullable=False, primary_key=True, autoincrement=True), sa.Column('sid', sa.BigInteger() ), sa.Column('ex_date', sa.BigInteger() ), sa.Column('declared_date', sa.BigInteger() ), sa.Column('record_date', sa.BigInteger() ), sa.Column('pay_date', sa.BigInteger() ), sa.Column('amount', sa.Float() ), sa.UniqueConstraint('sid', 'ex_date', name='div_payouts_uq') ) tables['stock_dividend_payouts'] = sa.Table( 'stock_dividend_payouts', metadata, sa.Column('index', sa.BigInteger(), unique=True, nullable=False, primary_key=True, autoincrement=True), sa.Column('sid', sa.BigInteger() ), sa.Column('ex_date', sa.BigInteger() ), sa.Column('declared_date', sa.BigInteger() ), sa.Column('record_date', sa.BigInteger() ), sa.Column('pay_date', sa.BigInteger() ), sa.Column('payment_sid', sa.BigInteger() ), sa.Column('ratio', sa.Float() ), sa.UniqueConstraint('sid', 'ex_date', name='stk_div_payouts_uq') ) tables['dividends'] = sa.Table( 'dividends', metadata, sa.Column('index', sa.BigInteger(), unique=True, nullable=False, primary_key=True, autoincrement=True), sa.Column('sid', sa.BigInteger() ), sa.Column('effective_date', sa.BigInteger() ), sa.Column('ratio', sa.Float() ), sa.UniqueConstraint('sid', 'effective_date', name='div_uq') ) tables['mergers'] = sa.Table( 'mergers', metadata, sa.Column('index', sa.BigInteger(), unique=True, nullable=False, primary_key=True, autoincrement=True), sa.Column('sid', sa.BigInteger() ), sa.Column('effective_date', sa.BigInteger() ), sa.Column('ratio', sa.Float() ), sa.UniqueConstraint('sid', 'effective_date', name='mergers_uq') ) tables['splits'] = sa.Table( 'splits', metadata, sa.Column('index', sa.BigInteger(), unique=True, primary_key=True, autoincrement=True), sa.Column('sid', sa.BigInteger() ), sa.Column('effective_date', sa.BigInteger() ), sa.Column('ratio', sa.Float() ), sa.UniqueConstraint('sid', 'effective_date', name='splits_uq') ) metadata.create_all(self.engine) return tables def write(self, splits=None, mergers=None, dividends=None, stock_dividends=None): """ Writes data to a SQLite file to be read by SQLiteAdjustmentReader. Parameters ---------- splits : pandas.DataFrame, optional Dataframe containing split data. The format of this dataframe is: effective_date : int The date, represented as seconds since Unix epoch, on which the adjustment should be applied. ratio : float A value to apply to all data earlier than the effective date. For open, high, low, and close those values are multiplied by the ratio. Volume is divided by this value. sid : int The asset id associated with this adjustment. mergers : pandas.DataFrame, optional DataFrame containing merger data. 
The format of this dataframe is: effective_date : int The date, represented as seconds since Unix epoch, on which the adjustment should be applied. ratio : float A value to apply to all data earlier than the effective date. For open, high, low, and close those values are multiplied by the ratio. Volume is unaffected. sid : int The asset id associated with this adjustment. dividends : pandas.DataFrame, optional DataFrame containing dividend data. The format of the dataframe is: sid : int The asset id associated with this adjustment. ex_date : datetime64 The date on which an equity must be held to be eligible to receive payment. declared_date : datetime64 The date on which the dividend is announced to the public. pay_date : datetime64 The date on which the dividend is distributed. record_date : datetime64 The date on which the stock ownership is checked to determine distribution of dividends. amount : float The cash amount paid for each share. Dividend ratios are calculated as: ``1.0 - (dividend_value / "close on day prior to ex_date")`` stock_dividends : pandas.DataFrame, optional DataFrame containing stock dividend data. The format of the dataframe is: sid : int The asset id associated with this adjustment. ex_date : datetime64 The date on which an equity must be held to be eligible to receive payment. declared_date : datetime64 The date on which the dividend is announced to the public. pay_date : datetime64 The date on which the dividend is distributed. record_date : datetime64 The date on which the stock ownership is checked to determine distribution of dividends. payment_sid : int The asset id of the shares that should be paid instead of cash. ratio : float The ratio of currently held shares in the held sid that should be paid with new shares of the payment_sid. See Also -------- zipline.data.adjustments.SQLiteAdjustmentReader """ self.write_frame('splits', splits) self.write_frame('mergers', mergers) self.write_dividend_data(dividends, stock_dividends) # Use IF NOT EXISTS here to allow multiple writes if desired. self.conn.execute( "CREATE INDEX IF NOT EXISTS splits_sids " "ON splits(sid)" ) self.conn.execute( "CREATE INDEX IF NOT EXISTS splits_effective_date " "ON splits(effective_date)" ) self.conn.execute( "CREATE INDEX IF NOT EXISTS mergers_sids " "ON mergers(sid)" ) self.conn.execute( "CREATE INDEX IF NOT EXISTS mergers_effective_date " "ON mergers(effective_date)" ) self.conn.execute( "CREATE INDEX IF NOT EXISTS dividends_sid " "ON dividends(sid)" ) self.conn.execute( "CREATE INDEX IF NOT EXISTS dividends_effective_date " "ON dividends(effective_date)" ) self.conn.execute( "CREATE INDEX IF NOT EXISTS dividend_payouts_sid " "ON dividend_payouts(sid)" ) self.conn.execute( "CREATE INDEX IF NOT EXISTS dividends_payouts_ex_date " "ON dividend_payouts(ex_date)" ) self.conn.execute( "CREATE INDEX IF NOT EXISTS stock_dividend_payouts_sid " "ON stock_dividend_payouts(sid)" ) self.conn.execute( "CREATE INDEX IF NOT EXISTS stock_dividends_payouts_ex_date " "ON stock_dividend_payouts(ex_date)" )
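# ---------------------------------------------------------------------------
# Illustrative sketch: the price ratio that
# ``SQLiteAdjustmentWriter.calc_dividend_ratios`` derives for a cash dividend,
# i.e. ``1.0 - amount / close_on_session_before_ex_date``.  The closes and the
# dividend amount below are made-up example values.
# ---------------------------------------------------------------------------
def _example_dividend_ratio():
    import numpy as np

    closes = np.array([10.0, 10.2, 10.1, 10.4])   # daily closes, oldest first
    ex_date_ix = 2                                 # dividend goes ex on day 2
    amount = 0.25                                  # cash amount per share

    previous_close = closes[ex_date_ix - 1]        # close the session before
    ratio = 1.0 - amount / previous_close          # 1 - 0.25 / 10.2 ~ 0.9755

    # The writer keeps only finite, positive ratios; rows where the prior
    # close is missing or the dividend exceeds the share price are logged
    # and dropped.
    assert np.isfinite(ratio) and ratio > 0
    return ratio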
# ---- end of file: /zipline_trader-1.6.1-cp36-cp36m-win32.whl/zipline/data/adjustments.py (package: zipline-trader) ----
from collections import OrderedDict from abc import ABCMeta, abstractmethod import numpy as np import pandas as pd from six import with_metaclass from zipline.data._resample import ( _minute_to_session_open, _minute_to_session_high, _minute_to_session_low, _minute_to_session_close, _minute_to_session_volume, ) from zipline.data.bar_reader import NoDataOnDate from zipline.data.minute_bars import MinuteBarReader from zipline.data.session_bars import SessionBarReader from zipline.utils.memoize import lazyval _MINUTE_TO_SESSION_OHCLV_HOW = OrderedDict(( ('open', 'first'), ('high', 'max'), ('low', 'min'), ('close', 'last'), ('volume', 'sum'), )) def minute_frame_to_session_frame(minute_frame, calendar): """ Resample a DataFrame with minute data into the frame expected by a BcolzDailyBarWriter. Parameters ---------- minute_frame : pd.DataFrame A DataFrame with the columns `open`, `high`, `low`, `close`, `volume`, and `dt` (minute dts) calendar : trading_calendars.trading_calendar.TradingCalendar A TradingCalendar on which session labels to resample from minute to session. Return ------ session_frame : pd.DataFrame A DataFrame with the columns `open`, `high`, `low`, `close`, `volume`, and `day` (datetime-like). """ how = OrderedDict((c, _MINUTE_TO_SESSION_OHCLV_HOW[c]) for c in minute_frame.columns) labels = calendar.minute_index_to_session_labels(minute_frame.index) return minute_frame.groupby(labels).agg(how) def minute_to_session(column, close_locs, data, out): """ Resample an array with minute data into an array with session data. This function assumes that the minute data is the exact length of all minutes in the sessions in the output. Parameters ---------- column : str The `open`, `high`, `low`, `close`, or `volume` column. close_locs : array[intp] The locations in `data` which are the market close minutes. data : array[float64|uint32] The minute data to be sampled into session data. The first value should align with the market open of the first session, containing values for all minutes for all sessions. With the last value being the market close of the last session. out : array[float64|uint32] The output array into which to write the sampled sessions. """ if column == 'open': _minute_to_session_open(close_locs, data, out) elif column == 'high': _minute_to_session_high(close_locs, data, out) elif column == 'low': _minute_to_session_low(close_locs, data, out) elif column == 'close': _minute_to_session_close(close_locs, data, out) elif column == 'volume': _minute_to_session_volume(close_locs, data, out) return out class DailyHistoryAggregator(object): """ Converts minute pricing data into a daily summary, to be used for the last slot in a call to history with a frequency of `1d`. This summary is the same as a daily bar rollup of minute data, with the distinction that the summary is truncated to the `dt` requested. i.e. the aggregation slides forward during a the course of simulation day. Provides aggregation for `open`, `high`, `low`, `close`, and `volume`. 
The aggregation rules for each price type is documented in their respective """ def __init__(self, market_opens, minute_reader, trading_calendar): self._market_opens = market_opens self._minute_reader = minute_reader self._trading_calendar = trading_calendar # The caches are structured as (date, market_open, entries), where # entries is a dict of asset -> (last_visited_dt, value) # # Whenever an aggregation method determines the current value, # the entry for the respective asset should be overwritten with a new # entry for the current dt.value (int) and aggregation value. # # When the requested dt's date is different from date the cache is # flushed, so that the cache entries do not grow unbounded. # # Example cache: # cache = (date(2016, 3, 17), # pd.Timestamp('2016-03-17 13:31', tz='UTC'), # { # 1: (1458221460000000000, np.nan), # 2: (1458221460000000000, 42.0), # }) self._caches = { 'open': None, 'high': None, 'low': None, 'close': None, 'volume': None } # The int value is used for deltas to avoid extra computation from # creating new Timestamps. self._one_min = pd.Timedelta('1 min').value def _prelude(self, dt, field): session = self._trading_calendar.minute_to_session_label(dt) dt_value = dt.value cache = self._caches[field] if cache is None or cache[0] != session: market_open = self._market_opens.loc[session] cache = self._caches[field] = (session, market_open, {}) _, market_open, entries = cache market_open = market_open.tz_localize('UTC') if dt != market_open: prev_dt = dt_value - self._one_min else: prev_dt = None return market_open, prev_dt, dt_value, entries def opens(self, assets, dt): """ The open field's aggregation returns the first value that occurs for the day, if there has been no data on or before the `dt` the open is `nan`. Once the first non-nan open is seen, that value remains constant per asset for the remainder of the day. Returns ------- np.array with dtype=float64, in order of assets parameter. """ market_open, prev_dt, dt_value, entries = self._prelude(dt, 'open') opens = [] session_label = self._trading_calendar.minute_to_session_label(dt) for asset in assets: if not asset.is_alive_for_session(session_label): opens.append(np.NaN) continue if prev_dt is None: val = self._minute_reader.get_value(asset, dt, 'open') entries[asset] = (dt_value, val) opens.append(val) continue else: try: last_visited_dt, first_open = entries[asset] if last_visited_dt == dt_value: opens.append(first_open) continue elif not pd.isnull(first_open): opens.append(first_open) entries[asset] = (dt_value, first_open) continue else: after_last = pd.Timestamp( last_visited_dt + self._one_min, tz='UTC') window = self._minute_reader.load_raw_arrays( ['open'], after_last, dt, [asset], )[0] nonnan = window[~pd.isnull(window)] if len(nonnan): val = nonnan[0] else: val = np.nan entries[asset] = (dt_value, val) opens.append(val) continue except KeyError: window = self._minute_reader.load_raw_arrays( ['open'], market_open, dt, [asset], )[0] nonnan = window[~pd.isnull(window)] if len(nonnan): val = nonnan[0] else: val = np.nan entries[asset] = (dt_value, val) opens.append(val) continue return np.array(opens) def highs(self, assets, dt): """ The high field's aggregation returns the largest high seen between the market open and the current dt. If there has been no data on or before the `dt` the high is `nan`. Returns ------- np.array with dtype=float64, in order of assets parameter. 
""" market_open, prev_dt, dt_value, entries = self._prelude(dt, 'high') highs = [] session_label = self._trading_calendar.minute_to_session_label(dt) for asset in assets: if not asset.is_alive_for_session(session_label): highs.append(np.NaN) continue if prev_dt is None: val = self._minute_reader.get_value(asset, dt, 'high') entries[asset] = (dt_value, val) highs.append(val) continue else: try: last_visited_dt, last_max = entries[asset] if last_visited_dt == dt_value: highs.append(last_max) continue elif last_visited_dt == prev_dt: curr_val = self._minute_reader.get_value( asset, dt, 'high') if pd.isnull(curr_val): val = last_max elif pd.isnull(last_max): val = curr_val else: val = max(last_max, curr_val) entries[asset] = (dt_value, val) highs.append(val) continue else: after_last = pd.Timestamp( last_visited_dt + self._one_min, tz='UTC') window = self._minute_reader.load_raw_arrays( ['high'], after_last, dt, [asset], )[0].T val = np.nanmax(np.append(window, last_max)) entries[asset] = (dt_value, val) highs.append(val) continue except KeyError: window = self._minute_reader.load_raw_arrays( ['high'], market_open, dt, [asset], )[0].T val = np.nanmax(window) entries[asset] = (dt_value, val) highs.append(val) continue return np.array(highs) def lows(self, assets, dt): """ The low field's aggregation returns the smallest low seen between the market open and the current dt. If there has been no data on or before the `dt` the low is `nan`. Returns ------- np.array with dtype=float64, in order of assets parameter. """ market_open, prev_dt, dt_value, entries = self._prelude(dt, 'low') lows = [] session_label = self._trading_calendar.minute_to_session_label(dt) for asset in assets: if not asset.is_alive_for_session(session_label): lows.append(np.NaN) continue if prev_dt is None: val = self._minute_reader.get_value(asset, dt, 'low') entries[asset] = (dt_value, val) lows.append(val) continue else: try: last_visited_dt, last_min = entries[asset] if last_visited_dt == dt_value: lows.append(last_min) continue elif last_visited_dt == prev_dt: curr_val = self._minute_reader.get_value( asset, dt, 'low') val = np.nanmin([last_min, curr_val]) entries[asset] = (dt_value, val) lows.append(val) continue else: after_last = pd.Timestamp( last_visited_dt + self._one_min, tz='UTC') window = self._minute_reader.load_raw_arrays( ['low'], after_last, dt, [asset], )[0].T val = np.nanmin(np.append(window, last_min)) entries[asset] = (dt_value, val) lows.append(val) continue except KeyError: window = self._minute_reader.load_raw_arrays( ['low'], market_open, dt, [asset], )[0].T val = np.nanmin(window) entries[asset] = (dt_value, val) lows.append(val) continue return np.array(lows) def closes(self, assets, dt): """ The close field's aggregation returns the latest close at the given dt. If the close for the given dt is `nan`, the most recent non-nan `close` is used. If there has been no data on or before the `dt` the close is `nan`. Returns ------- np.array with dtype=float64, in order of assets parameter. """ market_open, prev_dt, dt_value, entries = self._prelude(dt, 'close') closes = [] session_label = self._trading_calendar.minute_to_session_label(dt) def _get_filled_close(asset): """ Returns the most recent non-nan close for the asset in this session. 
If there has been no data in this session on or before the `dt`, returns `nan` """ window = self._minute_reader.load_raw_arrays( ['close'], market_open, dt, [asset], )[0] try: return window[~np.isnan(window)][-1] except IndexError: return np.NaN for asset in assets: if not asset.is_alive_for_session(session_label): closes.append(np.NaN) continue if prev_dt is None: val = self._minute_reader.get_value(asset, dt, 'close') entries[asset] = (dt_value, val) closes.append(val) continue else: try: last_visited_dt, last_close = entries[asset] if last_visited_dt == dt_value: closes.append(last_close) continue elif last_visited_dt == prev_dt: val = self._minute_reader.get_value( asset, dt, 'close') if pd.isnull(val): val = last_close entries[asset] = (dt_value, val) closes.append(val) continue else: val = self._minute_reader.get_value( asset, dt, 'close') if pd.isnull(val): val = _get_filled_close(asset) entries[asset] = (dt_value, val) closes.append(val) continue except KeyError: val = self._minute_reader.get_value( asset, dt, 'close') if pd.isnull(val): val = _get_filled_close(asset) entries[asset] = (dt_value, val) closes.append(val) continue return np.array(closes) def volumes(self, assets, dt): """ The volume field's aggregation returns the sum of all volumes between the market open and the `dt` If there has been no data on or before the `dt` the volume is 0. Returns ------- np.array with dtype=int64, in order of assets parameter. """ market_open, prev_dt, dt_value, entries = self._prelude(dt, 'volume') volumes = [] session_label = self._trading_calendar.minute_to_session_label(dt) for asset in assets: if not asset.is_alive_for_session(session_label): volumes.append(0) continue if prev_dt is None: val = self._minute_reader.get_value(asset, dt, 'volume') entries[asset] = (dt_value, val) volumes.append(val) continue else: try: last_visited_dt, last_total = entries[asset] if last_visited_dt == dt_value: volumes.append(last_total) continue elif last_visited_dt == prev_dt: val = self._minute_reader.get_value( asset, dt, 'volume') val += last_total entries[asset] = (dt_value, val) volumes.append(val) continue else: after_last = pd.Timestamp( last_visited_dt + self._one_min, tz='UTC') window = self._minute_reader.load_raw_arrays( ['volume'], after_last, dt, [asset], )[0] val = np.nansum(window) + last_total entries[asset] = (dt_value, val) volumes.append(val) continue except KeyError: window = self._minute_reader.load_raw_arrays( ['volume'], market_open, dt, [asset], )[0] val = np.nansum(window) entries[asset] = (dt_value, val) volumes.append(val) continue return np.array(volumes) class MinuteResampleSessionBarReader(SessionBarReader): def __init__(self, calendar, minute_bar_reader): self._calendar = calendar self._minute_bar_reader = minute_bar_reader def _get_resampled(self, columns, start_session, end_session, assets): range_open = self._calendar.session_open(start_session) range_close = self._calendar.session_close(end_session) minute_data = self._minute_bar_reader.load_raw_arrays( columns, range_open, range_close, assets, ) # Get the index of the close minute for each session in the range. # If the range contains only one session, the only close in the range # is the last minute in the data. Otherwise, we need to get all the # session closes and find their indices in the range of minutes. 
if start_session == end_session: close_ilocs = np.array([len(minute_data[0]) - 1], dtype=np.int64) else: minutes = self._calendar.minutes_in_range( range_open, range_close, ) session_closes = self._calendar.session_closes_in_range( start_session, end_session, ) close_ilocs = minutes.searchsorted(pd.to_datetime(session_closes.values, utc=True)) results = [] shape = (len(close_ilocs), len(assets)) for col in columns: if col != 'volume': out = np.full(shape, np.nan) else: out = np.zeros(shape, dtype=np.uint32) results.append(out) for i in range(len(assets)): for j, column in enumerate(columns): data = minute_data[j][:, i] minute_to_session(column, close_ilocs, data, results[j][:, i]) return results @property def trading_calendar(self): return self._calendar def load_raw_arrays(self, columns, start_dt, end_dt, sids): return self._get_resampled(columns, start_dt, end_dt, sids) def get_value(self, sid, session, colname): # WARNING: This will need caching or other optimization if used in a # tight loop. # This was developed to complete interface, but has not been tuned # for real world use. return self._get_resampled([colname], session, session, [sid])[0][0][0] @lazyval def sessions(self): cal = self._calendar first = self._minute_bar_reader.first_trading_day last = cal.minute_to_session_label( self._minute_bar_reader.last_available_dt) return cal.sessions_in_range(first, last) @lazyval def last_available_dt(self): return self.trading_calendar.minute_to_session_label( self._minute_bar_reader.last_available_dt ) @property def first_trading_day(self): return self._minute_bar_reader.first_trading_day def get_last_traded_dt(self, asset, dt): return self.trading_calendar.minute_to_session_label( self._minute_bar_reader.get_last_traded_dt(asset, dt)) class ReindexBarReader(with_metaclass(ABCMeta)): """ A base class for readers which reindexes results, filling in the additional indices with empty data. Used to align the reading assets which trade on different calendars. Currently only supports a ``trading_calendar`` which is a superset of the ``reader``'s calendar. Parameters ---------- - trading_calendar : zipline.utils.trading_calendar.TradingCalendar The calendar to use when indexing results from the reader. - reader : MinuteBarReader|SessionBarReader The reader which has a calendar that is a subset of the desired ``trading_calendar``. - first_trading_session : pd.Timestamp The first trading session the reader should provide. Must be specified, since the ``reader``'s first session may not exactly align with the desired calendar. Specifically, in the case where the first session on the target calendar is a holiday on the ``reader``'s calendar. - last_trading_session : pd.Timestamp The last trading session the reader should provide. Must be specified, since the ``reader``'s last session may not exactly align with the desired calendar. Specifically, in the case where the last session on the target calendar is a holiday on the ``reader``'s calendar. 
""" def __init__(self, trading_calendar, reader, first_trading_session, last_trading_session): self._trading_calendar = trading_calendar self._reader = reader self._first_trading_session = first_trading_session self._last_trading_session = last_trading_session @property def last_available_dt(self): return self._reader.last_available_dt def get_last_traded_dt(self, sid, dt): return self._reader.get_last_traded_dt(sid, dt) @property def first_trading_day(self): return self._reader.first_trading_day def get_value(self, sid, dt, field): # Give an empty result if no data is present. try: return self._reader.get_value(sid, dt, field) except NoDataOnDate: if field == 'volume': return 0 else: return np.nan @abstractmethod def _outer_dts(self, start_dt, end_dt): raise NotImplementedError @abstractmethod def _inner_dts(self, start_dt, end_dt): raise NotImplementedError @property def trading_calendar(self): return self._trading_calendar @lazyval def sessions(self): return self.trading_calendar.sessions_in_range( self._first_trading_session, self._last_trading_session ) def load_raw_arrays(self, fields, start_dt, end_dt, sids): outer_dts = self._outer_dts(start_dt, end_dt) inner_dts = self._inner_dts(start_dt, end_dt) indices = outer_dts.searchsorted(inner_dts) shape = len(outer_dts), len(sids) outer_results = [] if len(inner_dts) > 0: inner_results = self._reader.load_raw_arrays( fields, inner_dts[0], inner_dts[-1], sids) else: inner_results = None for i, field in enumerate(fields): if field != 'volume': out = np.full(shape, np.nan) else: out = np.zeros(shape, dtype=np.uint32) if inner_results is not None: out[indices] = inner_results[i] outer_results.append(out) return outer_results class ReindexMinuteBarReader(ReindexBarReader, MinuteBarReader): """ See: ``ReindexBarReader`` """ def _outer_dts(self, start_dt, end_dt): return self._trading_calendar.minutes_in_range(start_dt, end_dt) def _inner_dts(self, start_dt, end_dt): return self._reader.calendar.minutes_in_range(start_dt, end_dt) class ReindexSessionBarReader(ReindexBarReader, SessionBarReader): """ See: ``ReindexBarReader`` """ def _outer_dts(self, start_dt, end_dt): return self.trading_calendar.sessions_in_range(start_dt, end_dt) def _inner_dts(self, start_dt, end_dt): return self._reader.trading_calendar.sessions_in_range( start_dt, end_dt)
zipline-trader
/zipline_trader-1.6.1-cp36-cp36m-win32.whl/zipline/data/resample.py
resample.py
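# ``MinuteResampleSessionBarReader`` above builds one session bar per trading day
# from minute data: first open, max high, min low, last close, summed volume.
# The equivalent aggregation for a single asset can be sketched with plain pandas;
# the DataFrame below is made up for illustration and does not use the reader's
# actual ``minute_to_session`` helper.
import numpy as np
import pandas as pd

minutes = pd.date_range('2021-01-04 14:31', periods=390, freq='T', tz='UTC')
bars = pd.DataFrame({
    'open': np.linspace(100.0, 101.0, 390),
    'high': np.linspace(100.5, 101.5, 390),
    'low': np.linspace(99.5, 100.5, 390),
    'close': np.linspace(100.2, 101.2, 390),
    'volume': np.full(390, 1000, dtype=np.int64),
}, index=minutes)

session_bar = bars.resample('1D').agg({
    'open': 'first',
    'high': 'max',
    'low': 'min',
    'close': 'last',
    'volume': 'sum',
})
print(session_bar)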
import numpy as np import pandas as pd from zipline.data.session_bars import SessionBarReader class ContinuousFutureSessionBarReader(SessionBarReader): def __init__(self, bar_reader, roll_finders): self._bar_reader = bar_reader self._roll_finders = roll_finders def load_raw_arrays(self, columns, start_date, end_date, assets): """ Parameters ---------- fields : list of str 'sid' start_dt: Timestamp Beginning of the window range. end_dt: Timestamp End of the window range. sids : list of int The asset identifiers in the window. Returns ------- list of np.ndarray A list with an entry per field of ndarrays with shape (minutes in range, sids) with a dtype of float64, containing the values for the respective field over start and end dt range. """ rolls_by_asset = {} for asset in assets: rf = self._roll_finders[asset.roll_style] rolls_by_asset[asset] = rf.get_rolls( asset.root_symbol, start_date, end_date, asset.offset ) num_sessions = len( self.trading_calendar.sessions_in_range(start_date, end_date) ) shape = num_sessions, len(assets) results = [] tc = self._bar_reader.trading_calendar sessions = tc.sessions_in_range(start_date, end_date) # Get partitions partitions_by_asset = {} for asset in assets: partitions = [] partitions_by_asset[asset] = partitions rolls = rolls_by_asset[asset] start = start_date for roll in rolls: sid, roll_date = roll start_loc = sessions.get_loc(start) if roll_date is not None: end = roll_date - sessions.freq end_loc = sessions.get_loc(end) else: end = end_date end_loc = len(sessions) - 1 partitions.append((sid, start, end, start_loc, end_loc)) if roll_date is not None: start = sessions[end_loc + 1] for column in columns: if column != 'volume' and column != 'sid': out = np.full(shape, np.nan) else: out = np.zeros(shape, dtype=np.int64) for i, asset in enumerate(assets): partitions = partitions_by_asset[asset] for sid, start, end, start_loc, end_loc in partitions: if column != 'sid': result = self._bar_reader.load_raw_arrays( [column], start, end, [sid])[0][:, 0] else: result = int(sid) out[start_loc:end_loc + 1, i] = result results.append(out) return results @property def last_available_dt(self): """ Returns ------- dt : pd.Timestamp The last session for which the reader can provide data. """ return self._bar_reader.last_available_dt @property def trading_calendar(self): """ Returns the zipline.utils.calendar.trading_calendar used to read the data. Can be None (if the writer didn't specify it). """ return self._bar_reader.trading_calendar @property def first_trading_day(self): """ Returns ------- dt : pd.Timestamp The first trading day (session) for which the reader can provide data. """ return self._bar_reader.first_trading_day def get_value(self, continuous_future, dt, field): """ Retrieve the value at the given coordinates. Parameters ---------- sid : int The asset identifier. dt : pd.Timestamp The timestamp for the desired data point. field : string The OHLVC name for the desired data point. Returns ------- value : float|int The value at the given coordinates, ``float`` for OHLC, ``int`` for 'volume'. Raises ------ NoDataOnDate If the given dt is not a valid market minute (in minute mode) or session (in daily mode) according to this reader's tradingcalendar. 
""" rf = self._roll_finders[continuous_future.roll_style] sid = (rf.get_contract_center(continuous_future.root_symbol, dt, continuous_future.offset)) return self._bar_reader.get_value(sid, dt, field) def get_last_traded_dt(self, asset, dt): """ Get the latest minute on or before ``dt`` in which ``asset`` traded. If there are no trades on or before ``dt``, returns ``pd.NaT``. Parameters ---------- asset : zipline.asset.Asset The asset for which to get the last traded minute. dt : pd.Timestamp The minute at which to start searching for the last traded minute. Returns ------- last_traded : pd.Timestamp The dt of the last trade for the given asset, using the input dt as a vantage point. """ rf = self._roll_finders[asset.roll_style] sid = (rf.get_contract_center(asset.root_symbol, dt, asset.offset)) if sid is None: return pd.NaT contract = rf.asset_finder.retrieve_asset(sid) return self._bar_reader.get_last_traded_dt(contract, dt) @property def sessions(self): """ Returns ------- sessions : DatetimeIndex All session labels (unioning the range for all assets) which the reader can provide. """ return self._bar_reader.sessions class ContinuousFutureMinuteBarReader(SessionBarReader): def __init__(self, bar_reader, roll_finders): self._bar_reader = bar_reader self._roll_finders = roll_finders def load_raw_arrays(self, columns, start_date, end_date, assets): """ Parameters ---------- fields : list of str 'open', 'high', 'low', 'close', or 'volume' start_dt: Timestamp Beginning of the window range. end_dt: Timestamp End of the window range. sids : list of int The asset identifiers in the window. Returns ------- list of np.ndarray A list with an entry per field of ndarrays with shape (minutes in range, sids) with a dtype of float64, containing the values for the respective field over start and end dt range. """ rolls_by_asset = {} tc = self.trading_calendar start_session = tc.minute_to_session_label(start_date) end_session = tc.minute_to_session_label(end_date) for asset in assets: rf = self._roll_finders[asset.roll_style] rolls_by_asset[asset] = rf.get_rolls( asset.root_symbol, start_session, end_session, asset.offset) sessions = tc.sessions_in_range(start_date, end_date) minutes = tc.minutes_in_range(start_date, end_date) num_minutes = len(minutes) shape = num_minutes, len(assets) results = [] # Get partitions partitions_by_asset = {} for asset in assets: partitions = [] partitions_by_asset[asset] = partitions rolls = rolls_by_asset[asset] start = start_date for roll in rolls: sid, roll_date = roll start_loc = minutes.searchsorted(start) if roll_date is not None: _, end = tc.open_and_close_for_session( roll_date - sessions.freq) end_loc = minutes.searchsorted(end) else: end = end_date end_loc = len(minutes) - 1 partitions.append((sid, start, end, start_loc, end_loc)) if roll[-1] is not None: start, _ = tc.open_and_close_for_session( tc.minute_to_session_label(minutes[end_loc + 1])) for column in columns: if column != 'volume': out = np.full(shape, np.nan) else: out = np.zeros(shape, dtype=np.uint32) for i, asset in enumerate(assets): partitions = partitions_by_asset[asset] for sid, start, end, start_loc, end_loc in partitions: if column != 'sid': result = self._bar_reader.load_raw_arrays( [column], start, end, [sid])[0][:, 0] else: result = int(sid) out[start_loc:end_loc + 1, i] = result results.append(out) return results @property def last_available_dt(self): """ Returns ------- dt : pd.Timestamp The last session for which the reader can provide data. 
""" return self._bar_reader.last_available_dt @property def trading_calendar(self): """ Returns the zipline.utils.calendar.trading_calendar used to read the data. Can be None (if the writer didn't specify it). """ return self._bar_reader.trading_calendar @property def first_trading_day(self): """ Returns ------- dt : pd.Timestamp The first trading day (session) for which the reader can provide data. """ return self._bar_reader.first_trading_day def get_value(self, continuous_future, dt, field): """ Retrieve the value at the given coordinates. Parameters ---------- sid : int The asset identifier. dt : pd.Timestamp The timestamp for the desired data point. field : string The OHLVC name for the desired data point. Returns ------- value : float|int The value at the given coordinates, ``float`` for OHLC, ``int`` for 'volume'. Raises ------ NoDataOnDate If the given dt is not a valid market minute (in minute mode) or session (in daily mode) according to this reader's tradingcalendar. """ rf = self._roll_finders[continuous_future.roll_style] sid = (rf.get_contract_center(continuous_future.root_symbol, dt, continuous_future.offset)) return self._bar_reader.get_value(sid, dt, field) def get_last_traded_dt(self, asset, dt): """ Get the latest minute on or before ``dt`` in which ``asset`` traded. If there are no trades on or before ``dt``, returns ``pd.NaT``. Parameters ---------- asset : zipline.asset.Asset The asset for which to get the last traded minute. dt : pd.Timestamp The minute at which to start searching for the last traded minute. Returns ------- last_traded : pd.Timestamp The dt of the last trade for the given asset, using the input dt as a vantage point. """ rf = self._roll_finders[asset.roll_style] sid = (rf.get_contract_center(asset.root_symbol, dt, asset.offset)) if sid is None: return pd.NaT contract = rf.asset_finder.retrieve_asset(sid) return self._bar_reader.get_last_traded_dt(contract, dt) @property def sessions(self): return self._bar_reader.sessions
zipline-trader
/zipline_trader-1.6.1-cp36-cp36m-win32.whl/zipline/data/continuous_future_reader.py
continuous_future_reader.py
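# ``load_raw_arrays`` in the readers above splits the requested range into
# partitions, one per contract, and copies each contract's bars into the matching
# slice of a single output column.  The slicing step, in isolation and with
# made-up per-contract data, looks roughly like this.
import numpy as np
import pandas as pd

sessions = pd.date_range('2021-01-04', periods=10, freq='B', tz='UTC')
# (sid, start_loc, end_loc) -- inclusive locations into ``sessions``
partitions = [(1001, 0, 4), (1002, 5, 9)]
per_contract_closes = {
    1001: np.linspace(50.0, 51.0, 5),
    1002: np.linspace(49.0, 50.0, 5),
}

out = np.full(len(sessions), np.nan)
for sid, start_loc, end_loc in partitions:
    out[start_loc:end_loc + 1] = per_contract_closes[sid]
print(out)   # contract 1001's closes for the first five sessions, then 1002's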
from functools import partial import h5py import logbook import numpy as np import pandas as pd from six import iteritems, raise_from, viewkeys from six.moves import reduce from zipline.data.bar_reader import ( NoDataAfterDate, NoDataBeforeDate, NoDataForSid, NoDataOnDate, ) from zipline.data.session_bars import CurrencyAwareSessionBarReader from zipline.utils.memoize import lazyval from zipline.utils.numpy_utils import bytes_array_to_native_str_object_array from zipline.utils.pandas_utils import check_indexes_all_same log = logbook.Logger('HDF5DailyBars') VERSION = 0 DATA = 'data' INDEX = 'index' LIFETIMES = 'lifetimes' CURRENCY = 'currency' CODE = 'code' SCALING_FACTOR = 'scaling_factor' OPEN = 'open' HIGH = 'high' LOW = 'low' CLOSE = 'close' VOLUME = 'volume' FIELDS = (OPEN, HIGH, LOW, CLOSE, VOLUME) DAY = 'day' SID = 'sid' START_DATE = 'start_date' END_DATE = 'end_date' # XXX is reserved for "transactions involving no currency". MISSING_CURRENCY = 'XXX' DEFAULT_SCALING_FACTORS = { # Retain 3 decimal places for prices. OPEN: 1000, HIGH: 1000, LOW: 1000, CLOSE: 1000, # Volume is expected to be a whole integer. VOLUME: 1, } def coerce_to_uint32(a, scaling_factor): """ Returns a copy of the array as uint32, applying a scaling factor to maintain precision if supplied. """ return (a * scaling_factor).round().astype('uint32') def days_and_sids_for_frames(frames): """ Returns the date index and sid columns shared by a list of dataframes, ensuring they all match. Parameters ---------- frames : list[pd.DataFrame] A list of dataframes indexed by day, with a column per sid. Returns ------- days : np.array[datetime64[ns]] The days in these dataframes. sids : np.array[int64] The sids in these dataframes. Raises ------ ValueError If the dataframes passed are not all indexed by the same days and sids. """ if not frames: days = np.array([], dtype='datetime64[ns]') sids = np.array([], dtype='int64') return days, sids # Ensure the indices and columns all match. check_indexes_all_same( [frame.index for frame in frames], message='Frames have mismatched days.', ) check_indexes_all_same( [frame.columns for frame in frames], message='Frames have mismatched sids.', ) return frames[0].index.values, frames[0].columns.values class HDF5DailyBarWriter(object): """ Class capable of writing daily OHLCV data to disk in a format that can be read efficiently by HDF5DailyBarReader. Parameters ---------- filename : str The location at which we should write our output. date_chunk_size : int The number of days per chunk in the HDF5 file. If this is greater than the number of days in the data, the chunksize will match the actual number of days. See Also -------- zipline.data.hdf5_daily_bars.HDF5DailyBarReader """ def __init__(self, filename, date_chunk_size): self._filename = filename self._date_chunk_size = date_chunk_size def h5_file(self, mode): return h5py.File(self._filename, mode) def write(self, country_code, frames, currency_codes=None, scaling_factors=None): """ Write the OHLCV data for one country to the HDF5 file. Parameters ---------- country_code : str The ISO 3166 alpha-2 country code for this country. frames : dict[str, pd.DataFrame] A dict mapping each OHLCV field to a dataframe with a row for each date and a column for each sid. The dataframes need to have the same index and columns. currency_codes : pd.Series, optional Series mapping sids to 3-digit currency code values for those sids' listing currencies. If not passed, missing currencies will be written. 
scaling_factors : dict[str, float], optional A dict mapping each OHLCV field to a scaling factor, which is applied (as a multiplier) to the values of field to efficiently store them as uint32, while maintaining desired precision. These factors are written to the file as metadata, which is consumed by the reader to adjust back to the original float values. Default is None, in which case DEFAULT_SCALING_FACTORS is used. """ if scaling_factors is None: scaling_factors = DEFAULT_SCALING_FACTORS # Note that this functions validates that all of the frames # share the same days and sids. days, sids = days_and_sids_for_frames(list(frames.values())) # XXX: We should make this required once we're using it everywhere. if currency_codes is None: currency_codes = pd.Series(index=sids, data=MISSING_CURRENCY) # Currency codes should match dataframe columns. check_sids_arrays_match( sids, currency_codes.index.values, message="currency_codes sids do not match data sids:", ) # Write start and end dates for each sid. start_date_ixs, end_date_ixs = compute_asset_lifetimes(frames) if len(sids): chunks = (len(sids), min(self._date_chunk_size, len(days))) else: # h5py crashes if we provide chunks for empty data. chunks = None with self.h5_file(mode='a') as h5_file: # ensure that the file version has been written h5_file.attrs['version'] = VERSION country_group = h5_file.create_group(country_code) self._write_index_group(country_group, days, sids) self._write_lifetimes_group( country_group, start_date_ixs, end_date_ixs, ) self._write_currency_group(country_group, currency_codes) self._write_data_group( country_group, frames, scaling_factors, chunks, ) def write_from_sid_df_pairs(self, country_code, data, currency_codes=None, scaling_factors=None): """ Parameters ---------- country_code : str The ISO 3166 alpha-2 country code for this country. data : iterable[tuple[int, pandas.DataFrame]] The data chunks to write. Each chunk should be a tuple of sid and the data for that asset. currency_codes : pd.Series, optional Series mapping sids to 3-digit currency code values for those sids' listing currencies. If not passed, missing currencies will be written. scaling_factors : dict[str, float], optional A dict mapping each OHLCV field to a scaling factor, which is applied (as a multiplier) to the values of field to efficiently store them as uint32, while maintaining desired precision. These factors are written to the file as metadata, which is consumed by the reader to adjust back to the original float values. Default is None, in which case DEFAULT_SCALING_FACTORS is used. """ data = list(data) if not data: empty_frame = pd.DataFrame( data=None, index=np.array([], dtype='datetime64[ns]'), columns=np.array([], dtype='int64'), ) return self.write( country_code, {f: empty_frame.copy() for f in FIELDS}, scaling_factors, ) sids, frames = zip(*data) ohlcv_frame = pd.concat(frames) # Repeat each sid for each row in its corresponding frame. sid_ix = np.repeat(sids, [len(f) for f in frames]) # Add id to the index, so the frame is indexed by (date, id). ohlcv_frame.set_index(sid_ix, append=True, inplace=True) frames = { field: ohlcv_frame[field].unstack() for field in FIELDS } return self.write( country_code=country_code, frames=frames, scaling_factors=scaling_factors, currency_codes=currency_codes ) def _write_index_group(self, country_group, days, sids): """Write /country/index. 
""" index_group = country_group.create_group(INDEX) self._log_writing_dataset(index_group) index_group.create_dataset(SID, data=sids) # h5py does not support datetimes, so they need to be stored # as integers. index_group.create_dataset(DAY, data=days.astype(np.int64)) def _write_lifetimes_group(self, country_group, start_date_ixs, end_date_ixs): """Write /country/lifetimes """ lifetimes_group = country_group.create_group(LIFETIMES) self._log_writing_dataset(lifetimes_group) lifetimes_group.create_dataset(START_DATE, data=start_date_ixs) lifetimes_group.create_dataset(END_DATE, data=end_date_ixs) def _write_currency_group(self, country_group, currencies): """Write /country/currency """ currency_group = country_group.create_group(CURRENCY) self._log_writing_dataset(currency_group) currency_group.create_dataset( CODE, data=currencies.values.astype(dtype='S3'), ) def _write_data_group(self, country_group, frames, scaling_factors, chunks): """Write /country/data """ data_group = country_group.create_group(DATA) self._log_writing_dataset(data_group) for field in FIELDS: frame = frames[field] # Sort rows by increasing sid, and columns by increasing date. frame.sort_index(inplace=True) frame.sort_index(axis='columns', inplace=True) data = coerce_to_uint32( frame.T.fillna(0).values, scaling_factors[field], ) dataset = data_group.create_dataset( field, compression='lzf', shuffle=True, data=data, chunks=chunks, ) self._log_writing_dataset(dataset) dataset.attrs[SCALING_FACTOR] = scaling_factors[field] log.debug( 'Writing dataset {} to file {}', dataset.name, self._filename ) def _log_writing_dataset(self, dataset): log.debug("Writing {} to file {}", dataset.name, self._filename) def compute_asset_lifetimes(frames): """ Parameters ---------- frames : dict[str, pd.DataFrame] A dict mapping each OHLCV field to a dataframe with a row for each date and a column for each sid, as passed to write(). Returns ------- start_date_ixs : np.array[int64] The index of the first date with non-nan values, for each sid. end_date_ixs : np.array[int64] The index of the last date with non-nan values, for each sid. """ # Build a 2D array (dates x sids), where an entry is True if all # fields are nan for the given day and sid. is_null_matrix = np.logical_and.reduce( [frames[field].isnull().values for field in FIELDS], ) if not is_null_matrix.size: empty = np.array([], dtype='int64') return empty, empty.copy() # Offset of the first null from the start of the input. start_date_ixs = is_null_matrix.argmin(axis=0) # Offset of the last null from the **end** of the input. end_offsets = is_null_matrix[::-1].argmin(axis=0) # Offset of the last null from the start of the input end_date_ixs = is_null_matrix.shape[0] - end_offsets - 1 return start_date_ixs, end_date_ixs def convert_price_with_scaling_factor(a, scaling_factor): conversion_factor = (1.0 / scaling_factor) zeroes = (a == 0) return np.where(zeroes, np.nan, a.astype('float64')) * conversion_factor class HDF5DailyBarReader(CurrencyAwareSessionBarReader): """ Parameters --------- country_group : h5py.Group The group for a single country in an HDF5 daily pricing file. 
""" def __init__(self, country_group): self._country_group = country_group self._postprocessors = { OPEN: partial(convert_price_with_scaling_factor, scaling_factor=self._read_scaling_factor(OPEN)), HIGH: partial(convert_price_with_scaling_factor, scaling_factor=self._read_scaling_factor(HIGH)), LOW: partial(convert_price_with_scaling_factor, scaling_factor=self._read_scaling_factor(LOW)), CLOSE: partial(convert_price_with_scaling_factor, scaling_factor=self._read_scaling_factor(CLOSE)), VOLUME: lambda a: a, } @classmethod def from_file(cls, h5_file, country_code): """ Construct from an h5py.File and a country code. Parameters ---------- h5_file : h5py.File An HDF5 daily pricing file. country_code : str The ISO 3166 alpha-2 country code for the country to read. """ if h5_file.attrs['version'] != VERSION: raise ValueError( 'mismatched version: file is of version %s, expected %s' % ( h5_file.attrs['version'], VERSION, ), ) return cls(h5_file[country_code]) @classmethod def from_path(cls, path, country_code): """ Construct from a file path and a country code. Parameters ---------- path : str The path to an HDF5 daily pricing file. country_code : str The ISO 3166 alpha-2 country code for the country to read. """ return cls.from_file(h5py.File(path, 'r'), country_code) def _read_scaling_factor(self, field): return self._country_group[DATA][field].attrs[SCALING_FACTOR] def load_raw_arrays(self, columns, start_date, end_date, assets): """ Parameters ---------- columns : list of str 'open', 'high', 'low', 'close', or 'volume' start_date: Timestamp Beginning of the window range. end_date: Timestamp End of the window range. assets : list of int The asset identifiers in the window. Returns ------- list of np.ndarray A list with an entry per field of ndarrays with shape (minutes in range, sids) with a dtype of float64, containing the values for the respective field over start and end dt range. """ self._validate_timestamp(start_date) self._validate_timestamp(end_date) start = start_date.asm8 end = end_date.asm8 date_slice = self._compute_date_range_slice(start, end) n_dates = date_slice.stop - date_slice.start # Create a buffer into which we'll read data from the h5 file. # Allocate an extra row of space that will always contain null values. # We'll use that space to provide "data" for entries in ``assets`` that # are unknown to us. full_buf = np.zeros((len(self.sids) + 1, n_dates), dtype=np.uint32) # We'll only read values into this portion of the read buf. mutable_buf = full_buf[:-1] # Indexer that converts an array aligned to self.sids (which is what we # pull from the h5 file) into an array aligned to ``assets``. # # Unknown assets will have an index of -1, which means they'll always # pull from the last row of the read buffer. We allocated an extra # empty row above so that these lookups will cause us to fill our # output buffer with "null" values. sid_selector = self._make_sid_selector(assets) out = [] for column in columns: # Zero the buffer to prepare to receive new data. mutable_buf.fill(0) dataset = self._country_group[DATA][column] # Fill the mutable portion of our buffer with data from the file. dataset.read_direct( mutable_buf, np.s_[:, date_slice], ) # Select data from the **full buffer**. Unknown assets will pull # from the last row, which is always empty. out.append(self._postprocessors[column](full_buf[sid_selector].T)) return out def _make_sid_selector(self, assets): """ Build an indexer mapping ``self.sids`` to ``assets``. 
Parameters ---------- assets : list[int] List of assets requested by a caller of ``load_raw_arrays``. Returns ------- index : np.array[int64] Index array containing the index in ``self.sids`` for each location in ``assets``. Entries in ``assets`` for which we don't have a sid will contain -1. It is caller's responsibility to handle these values correctly. """ assets = np.array(assets) sid_selector = self.sids.searchsorted(assets) unknown = np.in1d(assets, self.sids, invert=True) sid_selector[unknown] = -1 return sid_selector def _compute_date_range_slice(self, start_date, end_date): # Get the index of the start of dates for ``start_date``. start_ix = self.dates.searchsorted(start_date) # Get the index of the start of the first date **after** end_date. end_ix = self.dates.searchsorted(end_date, side='right') return slice(start_ix, end_ix) def _validate_assets(self, assets): """Validate that asset identifiers are contained in the daily bars. Parameters ---------- assets : array-like[int] The asset identifiers to validate. Raises ------ NoDataForSid If one or more of the provided asset identifiers are not contained in the daily bars. """ missing_sids = np.setdiff1d(assets, self.sids) if len(missing_sids): raise NoDataForSid( 'Assets not contained in daily pricing file: {}'.format( missing_sids ) ) def _validate_timestamp(self, ts): if ts.asm8 not in self.dates: raise NoDataOnDate(ts) @lazyval def dates(self): return self._country_group[INDEX][DAY][:].astype('datetime64[ns]') @lazyval def sids(self): return self._country_group[INDEX][SID][:].astype('int64', copy=False) @lazyval def asset_start_dates(self): return self.dates[self._country_group[LIFETIMES][START_DATE][:]] @lazyval def asset_end_dates(self): return self.dates[self._country_group[LIFETIMES][END_DATE][:]] @lazyval def _currency_codes(self): bytes_array = self._country_group[CURRENCY][CODE][:] return bytes_array_to_native_str_object_array(bytes_array) def currency_codes(self, sids): """Get currencies in which prices are quoted for the requested sids. Parameters ---------- sids : np.array[int64] Array of sids for which currencies are needed. Returns ------- currency_codes : np.array[object] Array of currency codes for listing currencies of ``sids``. """ # Find the index of requested sids in our stored sids. ixs = self.sids.searchsorted(sids, side='left') result = self._currency_codes[ixs] # searchsorted returns the index of the next lowest sid if the lookup # fails. Fill these sids with the special "missing" sentinel. not_found = (self.sids[ixs] != sids) result[not_found] = None return result @property def last_available_dt(self): """ Returns ------- dt : pd.Timestamp The last session for which the reader can provide data. """ return pd.Timestamp(self.dates[-1], tz='UTC') @property def trading_calendar(self): """ Returns the zipline.utils.calendar.trading_calendar used to read the data. Can be None (if the writer didn't specify it). """ raise NotImplementedError( 'HDF5 pricing does not yet support trading calendars.' ) @property def first_trading_day(self): """ Returns ------- dt : pd.Timestamp The first trading day (session) for which the reader can provide data. """ return pd.Timestamp(self.dates[0], tz='UTC') @lazyval def sessions(self): """ Returns ------- sessions : DatetimeIndex All session labels (unioning the range for all assets) which the reader can provide. """ return pd.to_datetime(self.dates, utc=True) def get_value(self, sid, dt, field): """ Retrieve the value at the given coordinates. 
Parameters ---------- sid : int The asset identifier. dt : pd.Timestamp The timestamp for the desired data point. field : string The OHLVC name for the desired data point. Returns ------- value : float|int The value at the given coordinates, ``float`` for OHLC, ``int`` for 'volume'. Raises ------ NoDataOnDate If the given dt is not a valid market minute (in minute mode) or session (in daily mode) according to this reader's tradingcalendar. """ self._validate_assets([sid]) self._validate_timestamp(dt) sid_ix = self.sids.searchsorted(sid) dt_ix = self.dates.searchsorted(dt.asm8) value = self._postprocessors[field]( self._country_group[DATA][field][sid_ix, dt_ix] ) # When the value is nan, this dt may be outside the asset's lifetime. # If that's the case, the proper NoDataOnDate exception is raised. # Otherwise (when there's just a hole in the middle of the data), the # nan is returned. if np.isnan(value): if dt.asm8 < self.asset_start_dates[sid_ix]: raise NoDataBeforeDate() if dt.asm8 > self.asset_end_dates[sid_ix]: raise NoDataAfterDate() return value def get_last_traded_dt(self, asset, dt): """ Get the latest day on or before ``dt`` in which ``asset`` traded. If there are no trades on or before ``dt``, returns ``pd.NaT``. Parameters ---------- asset : zipline.asset.Asset The asset for which to get the last traded day. dt : pd.Timestamp The dt at which to start searching for the last traded day. Returns ------- last_traded : pd.Timestamp The day of the last trade for the given asset, using the input dt as a vantage point. """ sid_ix = self.sids.searchsorted(asset.sid) # Used to get a slice of all dates up to and including ``dt``. dt_limit_ix = self.dates.searchsorted(dt.asm8, side='right') # Get the indices of all dates with nonzero volume. nonzero_volume_ixs = np.ravel( np.nonzero(self._country_group[DATA][VOLUME][sid_ix, :dt_limit_ix]) ) if len(nonzero_volume_ixs) == 0: return pd.NaT return pd.Timestamp(self.dates[nonzero_volume_ixs][-1], tz='UTC') class MultiCountryDailyBarReader(CurrencyAwareSessionBarReader): """ Parameters --------- readers : dict[str -> SessionBarReader] A dict mapping country codes to SessionBarReader instances to service each country. """ def __init__(self, readers): self._readers = readers self._country_map = pd.concat([ pd.Series(index=reader.sids, data=country_code) for country_code, reader in iteritems(readers) ]) @classmethod def from_file(cls, h5_file): """ Construct from an h5py.File. Parameters ---------- h5_file : h5py.File An HDF5 daily pricing file. """ return cls({ country: HDF5DailyBarReader.from_file(h5_file, country) for country in h5_file.keys() }) @classmethod def from_path(cls, path): """ Construct from a file path. Parameters ---------- path : str Path to an HDF5 daily pricing file. """ return cls.from_file(h5py.File(path, 'r')) @property def countries(self): """A set-like object of the country codes supplied by this reader. """ return viewkeys(self._readers) def _country_code_for_assets(self, assets): country_codes = self._country_map.get(assets) # In some versions of pandas (observed in 0.22), Series.get() # returns None if none of the labels are in the index. if country_codes is not None: unique_country_codes = country_codes.dropna().unique() num_countries = len(unique_country_codes) else: num_countries = 0 if num_countries == 0: raise ValueError('At least one valid asset id is required.') elif num_countries > 1: raise NotImplementedError( ( 'Assets were requested from multiple countries ({}),' ' but multi-country reads are not yet supported.' 
).format(list(unique_country_codes)) ) return np.asscalar(unique_country_codes) def load_raw_arrays(self, columns, start_date, end_date, assets): """ Parameters ---------- columns : list of str 'open', 'high', 'low', 'close', or 'volume' start_date: Timestamp Beginning of the window range. end_date: Timestamp End of the window range. assets : list of int The asset identifiers in the window. Returns ------- list of np.ndarray A list with an entry per field of ndarrays with shape (minutes in range, sids) with a dtype of float64, containing the values for the respective field over start and end dt range. """ country_code = self._country_code_for_assets(assets) return self._readers[country_code].load_raw_arrays( columns, start_date, end_date, assets, ) @property def last_available_dt(self): """ Returns ------- dt : pd.Timestamp The last session for which the reader can provide data. """ return max( reader.last_available_dt for reader in self._readers.values() ) @property def trading_calendar(self): """ Returns the zipline.utils.calendar.trading_calendar used to read the data. Can be None (if the writer didn't specify it). """ raise NotImplementedError( 'HDF5 pricing does not yet support trading calendars.' ) @property def first_trading_day(self): """ Returns ------- dt : pd.Timestamp The first trading day (session) for which the reader can provide data. """ return min( reader.first_trading_day for reader in self._readers.values() ) @property def sessions(self): """ Returns ------- sessions : DatetimeIndex All session labels (unioning the range for all assets) which the reader can provide. """ return pd.to_datetime( reduce( np.union1d, (reader.dates for reader in self._readers.values()), ), utc=True, ) def get_value(self, sid, dt, field): """ Retrieve the value at the given coordinates. Parameters ---------- sid : int The asset identifier. dt : pd.Timestamp The timestamp for the desired data point. field : string The OHLVC name for the desired data point. Returns ------- value : float|int The value at the given coordinates, ``float`` for OHLC, ``int`` for 'volume'. Raises ------ NoDataOnDate If the given dt is not a valid market minute (in minute mode) or session (in daily mode) according to this reader's tradingcalendar. NoDataForSid If the given sid is not valid. """ try: country_code = self._country_code_for_assets([sid]) except ValueError as exc: raise_from( NoDataForSid( 'Asset not contained in daily pricing file: {}'.format(sid) ), exc ) return self._readers[country_code].get_value(sid, dt, field) def get_last_traded_dt(self, asset, dt): """ Get the latest day on or before ``dt`` in which ``asset`` traded. If there are no trades on or before ``dt``, returns ``pd.NaT``. Parameters ---------- asset : zipline.asset.Asset The asset for which to get the last traded day. dt : pd.Timestamp The dt at which to start searching for the last traded day. Returns ------- last_traded : pd.Timestamp The day of the last trade for the given asset, using the input dt as a vantage point. """ country_code = self._country_code_for_assets([asset.sid]) return self._readers[country_code].get_last_traded_dt(asset, dt) def currency_codes(self, sids): """Get currencies in which prices are quoted for the requested sids. Assumes that a sid's prices are always quoted in a single currency. Parameters ---------- sids : np.array[int64] Array of sids for which currencies are needed. Returns ------- currency_codes : np.array[S3] Array of currency codes for listing currencies of ``sids``. 
""" country_code = self._country_code_for_assets(sids) return self._readers[country_code].currency_codes(sids) def check_sids_arrays_match(left, right, message): """Check that two 1d arrays of sids are equal """ if len(left) != len(right): raise ValueError( "{}:\nlen(left) ({}) != len(right) ({})".format( message, len(left), len(right) ) ) diff = (left != right) if diff.any(): (bad_locs,) = np.where(diff) raise ValueError( "{}:\n Indices with differences: {}".format(message, bad_locs) )
zipline-trader
/zipline_trader-1.6.1-cp36-cp36m-win32.whl/zipline/data/hdf5_daily_bars.py
hdf5_daily_bars.py
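# The HDF5 format above stores prices as uint32 via a per-field scaling factor,
# with zero marking missing values that are mapped back to NaN on read.  The
# round trip performed by ``coerce_to_uint32`` and
# ``convert_price_with_scaling_factor`` can be checked directly:
import numpy as np
from zipline.data.hdf5_daily_bars import (
    coerce_to_uint32,
    convert_price_with_scaling_factor,
)

prices = np.array([12.345, 0.0, 99.999])         # 0.0 stands in for "no data"
stored = coerce_to_uint32(prices, 1000)           # -> [12345, 0, 99999] as uint32
restored = convert_price_with_scaling_factor(stored, 1000)
print(stored, restored)                           # approx. [12.345, nan, 99.999]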
from datetime import timedelta import pandas as pd from zipline.data.data_portal import DataPortal from logbook import Logger log = Logger('DataPortalLive') class DataPortalLive(DataPortal): def __init__(self, broker, *args, **kwargs): self.broker = broker super(DataPortalLive, self).__init__(*args, **kwargs) def get_last_traded_dt(self, asset, dt, data_frequency): return self.broker.get_last_traded_dt(asset) def get_spot_value(self, assets, field, dt, data_frequency): return self.broker.get_spot_value(assets, field, dt, data_frequency) def get_history_window(self, assets, end_dt, bar_count, frequency, field, data_frequency, ffill=True): # This method is responsible for merging the ingested historical data # with the real-time collected data through the Broker. # DataPortal.get_history_window() is called with ffill=False to mark # the missing fields with NaNs. After merge on the historical and # real-time data the missing values (NaNs) are filled based on their # next available values in the requested time window. # # Warning: setting ffill=True in DataPortal.get_history_window() call # results a wrong behavior: The last available value reported by # get_spot_value() will be used to fill the missing data - which is # always representing the current spot price presented by Broker. if frequency == '1d': # if you want today's open price - get minute data and filter the open time historical_bars = super(DataPortalLive, self).get_history_window( assets, end_dt - timedelta(days=1), bar_count, frequency, field, data_frequency, ffill=True) return historical_bars realtime_bars = self.broker.get_realtime_bars(assets, frequency) # Broker.get_realtime_history() returns the asset as level 0 column, # open, high, low, close, volume returned as level 1 columns. # To filter for field the levels needs to be swapped realtime_bars = realtime_bars.swaplevel(0, 1, axis=1) ohlcv_field = 'close' if field == 'price' else field realtime_bars = realtime_bars[ohlcv_field] if ffill and field == 'price': # Simple forward fill is not enough here as the last ingested # value might be outside of the requested time window. That case # the time series starts with NaN and forward filling won't help. # To provide values for such cases we backward fill. # Backward fill as a second operation will have no effect if the # forward-fill was successful. realtime_bars.fillna(method='ffill', inplace=True) realtime_bars.fillna(method='bfill', inplace=True) realtime_bars.columns = assets return realtime_bars[-bar_count:] def get_scalar_asset_spot_value(self, asset, field, dt, data_frequency): """ Public API method that returns a scalar value representing the value of the desired asset's field at either the given dt. Parameters ---------- assets : Asset The asset or assets whose data is desired. This cannot be an arbitrary AssetConvertible. field : {'open', 'high', 'low', 'close', 'volume', 'price', 'last_traded'} The desired field of the asset. dt : pd.Timestamp The timestamp for the desired value. data_frequency : str The frequency of the data to query; i.e. whether the data is 'daily' or 'minute' bars Returns ------- value : float, int, or pd.Timestamp The spot value of ``field`` for ``asset`` The return type is based on the ``field`` requested. If the field is one of 'open', 'high', 'low', 'close', or 'price', the value will be a float. If the ``field`` is 'volume' the value will be a int. If the ``field`` is 'last_traded' the value will be a Timestamp. 
""" if data_frequency == 'minute': data_frequency = '1m' elif data_frequency == 'daily': data_frequency = '1d' prices = self.broker.get_realtime_bars([asset], data_frequency) if field == 'last_traded': return pd.Timestamp(prices[asset.symbol][-1:].index.get_values()[0]) elif field == 'volume': return prices[asset.symbol][field][-1] * 100 elif field == 'price': return prices[asset.symbol]['close'][-1] else: return prices[asset.symbol][field][-1]
zipline-trader
/zipline_trader-1.6.1-cp36-cp36m-win32.whl/zipline/data/data_portal_live.py
data_portal_live.py
from functools import partial import psycopg2 import sqlalchemy as sa import zipline.config.data_backend from zipline.utils.db_utils import check_and_create_engine import pandas as pd import logbook import numpy as np from numpy import ( iinfo, nan, ) from pandas import ( NaT, read_csv, to_datetime, Timestamp, ) from six import iteritems, viewkeys from trading_calendars import get_calendar from zipline.data.session_bars import CurrencyAwareSessionBarReader from zipline.data.bar_reader import ( NoDataAfterDate, NoDataBeforeDate, NoDataOnDate, ) from zipline.utils.functional import apply from zipline.utils.input_validation import expect_element from zipline.utils.numpy_utils import float64_dtype from zipline.utils.memoize import lazyval from zipline.utils.cli import maybe_show_progress from ._equities import _compute_row_slices, _read_tape_data logger = logbook.Logger('PSqlDailyBars') OHLC = frozenset(['open', 'high', 'low', 'close']) US_EQUITY_PRICING_COLUMNS = ( 'open', 'high', 'low', 'close', 'volume', 'day', 'id' ) UINT32_MAX = iinfo(np.uint32).max TABLE = 'ohlcv_daily' class PSQLDailyBarReader(CurrencyAwareSessionBarReader): """ Reader for raw pricing data written by PSQLDailyBarWriter. Parameters ---------- table : bcolz.ctable The ctable contaning the pricing data, with attrs corresponding to the Attributes list below. read_all_threshold : int The number of equities at which; below, the data is read by reading a slice from the carray per asset. above, the data is read by pulling all of the data for all assets into memory and then indexing into that array for each day and asset pair. Used to tune performance of reads when using a small or large number of equities. Attributes ---------- The table with which this loader interacts contains the following attributes: first_row : dict Map from asset_id -> index of first row in the dataset with that id. last_row : dict Map from asset_id -> index of last row in the dataset with that id. calendar_offset : dict Map from asset_id -> calendar index of first row. start_session_ns: int Epoch ns of the first session used in this dataset. end_session_ns: int Epoch ns of the last session used in this dataset. calendar_name: str String identifier of trading calendar used (ie, "NYSE"). We use first_row and last_row together to quickly find ranges of rows to load when reading an asset's data into memory. We use calendar_offset and calendar to orient loaded blocks within a range of queried dates. Notes ------ A Bcolz CTable is comprised of Columns and Attributes. The table with which this loader interacts contains the following columns: ['open', 'high', 'low', 'close', 'volume', 'day', 'id']. The data in these columns is interpreted as follows: - Price columns ('open', 'high', 'low', 'close') are interpreted as 1000 * as-traded dollar value. - Volume is interpreted as as-traded volume. - Day is interpreted as seconds since midnight UTC, Jan 1, 1970. - Id is the asset id of the row. The data in each column is grouped by asset and then sorted by day within each asset block. The table is built to represent a long time range of data, e.g. ten years of equity data, so the lengths of each asset block is not equal to each other. The blocks are clipped to the known start and end date of each asset to cut down on the number of empty values that would need to be included to make a regular/cubic dataset. When read across the open, high, low, close, and volume with the same index should represent the same asset and day. 
See Also -------- zipline.data.bcolz_daily_bars.BcolzDailyBarWriter """ def __init__(self, path, read_all_threshold=3000): self.conn = check_and_create_engine(path, False) # Cache of fully read np.array for the carrays in the daily bar table. # raw_array does not use the same cache, but it could. # Need to test keeping the entire array in memory for the course of a # process first. self._spot_cols = {} self._read_all_threshold = read_all_threshold # caching the calendar-sessions like this prevent problems during ingestion # where the reader is first initialized when there are still no bars self._sessions = pd.DatetimeIndex([], dtype='datetime64[ns, UTC]', freq='C') self._calendar_offsets_c = {} self._first_rows_c = {} self._last_rows_c = {} self._first_trading_day_c = {} @property def sessions(self): if self._sessions.empty: outer_dates = pd.read_sql('SELECT MIN(day) as min_day, MAX(day) as max_day FROM ohlcv_daily', self.conn) start_session = Timestamp(outer_dates['min_day'][0], tz='UTC') end_session = Timestamp(outer_dates['max_day'][0], tz='UTC') calendar_name = 'XNYS' # NYSE for POC only cal = get_calendar(calendar_name) self._sessions = cal.sessions_in_range(start_session, end_session) return self._sessions @lazyval def first_trading_day(self): return Timestamp( self._first_trading_day, unit='s', tz='UTC' ) @lazyval def trading_calendar(self): return get_calendar('XNYS') @property def last_available_dt(self): return self.sessions[-1] @property def _calendar_offsets(self): if not self._calendar_offsets_c: self._calendar_offsets_c = self._get_calendar_offsets() return self._calendar_offsets_c def _get_calendar_offsets(self): info = pd.read_sql('SELECT id, MIN(day) AS start FROM ohlcv_daily GROUP BY id ORDER BY id', self.conn) sessions = self.sessions if len(sessions) == 0: return {} offsets = {} for i in range(len(info['id'])): first_session = Timestamp(info['start'][i], tz='UTC') offsets[info['id'][i]] = sessions.get_loc(first_session) return offsets @property def _first_trading_day(self): if not self._first_trading_day_c: self._first_trading_day_c = self._get_first_trading_day() return self._first_trading_day_c @property def _last_rows(self): if not self._last_rows_c: self._first_rows_c, self._last_rows_c = self._get_first_and_last_rows() return self._last_rows_c @property def _first_rows(self): if not self._first_rows_c: self._first_rows_c, self._last_rows_c = self._get_first_and_last_rows() return self._first_rows_c def _get_first_and_last_rows(self): info = pd.read_sql('SELECT id, COUNT(day) AS ct FROM ohlcv_daily GROUP BY id ORDER BY id', self.conn) first_rows = {} last_rows = {} total = 0 length = len(info['id']) for i in range(length): total = total + info['ct'][i] if i == 0: first_rows[info['id'][i]] = 0 last_rows[info['id'][i]] = total - 1 if i > 0: first_rows[info['id'][i]] = last_rows[last_id] + 1 last_id = info['id'][i] return first_rows, last_rows def _get_first_trading_day(self): result = pd.read_sql('SELECT MIN(day) AS first_day FROM ohlcv_daily', self.conn) return result.first_day.iloc[0] def _compute_slices(self, start_idx, end_idx, assets): """ Compute the raw row indices to load for each asset on a query for the given dates after applying a shift. Parameters ---------- start_idx : int Index of first date for which we want data. end_idx : int Index of last date for which we want data. 
assets : pandas.Int64Index Assets for which we want to compute row indices Returns ------- A 3-tuple of (first_rows, last_rows, offsets): first_rows : np.array[intp] Array with length == len(assets) containing the index of the first row to load for each asset in `assets`. last_rows : np.array[intp] Array with length == len(assets) containing the index of the last row to load for each asset in `assets`. offset : np.array[intp] Array with length == (len(asset) containing the index in a buffer of length `dates` corresponding to the first row of each asset. The value of offset[i] will be 0 if asset[i] existed at the start of a query. Otherwise, offset[i] will be equal to the number of entries in `dates` for which the asset did not yet exist. """ # The core implementation of the logic here is implemented in Cython # for efficiency. return _compute_row_slices( self._first_rows, self._last_rows, self._calendar_offsets, start_idx, end_idx, assets, ) def _load_raw_arrays_date_to_index(self, date): try: return self.sessions.get_loc(date) except KeyError: raise NoDataOnDate(date) def load_raw_arrays(self, columns, start_date, end_date, assets): for col in columns: self._spot_col(col) start_idx = self._load_raw_arrays_date_to_index(start_date) end_idx = self._load_raw_arrays_date_to_index(end_date) first_rows, last_rows, offsets = self._compute_slices( start_idx, end_idx, assets, ) read_all = len(assets) > self._read_all_threshold tape = _read_tape_data( self._spot_cols, (end_idx - start_idx + 1, len(assets)), list(columns), first_rows, last_rows, offsets, read_all, ) return tape def load_raw_arrays_slow(self, columns, start_date, end_date, assets): result = [] sessions = self.sessions[self.sessions.get_loc(start_date): self.sessions.get_loc(end_date) + 1] for column in columns: column_vals = [] for session in sessions: row_vals = [] for asset in assets: try: row_vals.append(self.get_value(int(asset), session, column)) except NoDataBeforeDate: row_vals.append(np.nan) column_vals.append(row_vals) result.append(np.array(column_vals)) return result def _spot_col(self, colname): """ Get the colname from daily_bar_table and read all of it into memory, caching the result. Parameters ---------- colname : string A name of a OHLCV carray in the daily_bar_table Returns ------- array (uint32) Full read array of the carray in the daily_bar_table with the given colname. """ try: col = self._spot_cols[colname] except KeyError: result = pd.read_sql(f'SELECT {colname} FROM ohlcv_daily ORDER BY id, day', self.conn)[colname].values col = self._spot_cols[colname] = np.array(result) return col def get_last_traded_dt(self, asset, day): volumes = self._spot_col('volume') search_day = day while True: try: ix = self.sid_day_index(asset, search_day) except NoDataBeforeDate: return NaT except NoDataAfterDate: prev_day_ix = self.sessions.get_loc(search_day) - 1 if prev_day_ix > -1: search_day = self.sessions[prev_day_ix] continue except NoDataOnDate: return NaT if volumes[ix] != 0: return search_day prev_day_ix = self.sessions.get_loc(search_day) - 1 if prev_day_ix > -1: search_day = self.sessions[prev_day_ix] else: return NaT def sid_day_index(self, sid, day): """ all data for all assets is stored sequentially. to get the right values we must find the index for this sid and this day. so we calculate the offset in this long array. Parameters ---------- sid : int The asset identifier. day : datetime64-like Midnight of the day for which data is requested. Returns ------- int Index into the data tape for the given sid and day. 
Raises a NoDataOnDate exception if the given day and sid is before or after the date range of the equity. """ try: day_loc = self.sessions.get_loc(day) except Exception: raise NoDataOnDate("day={0} is outside of calendar={1}".format( day, self.sessions)) offset = day_loc - self._calendar_offsets[sid] if offset < 0: raise NoDataBeforeDate( "No data on or before day={0} for sid={1}".format( day, sid)) ix = self._first_rows[sid] + offset if ix > self._last_rows[sid]: raise NoDataAfterDate( "No data on or after day={0} for sid={1}".format( day, sid)) return ix def get_value(self, sid, dt, field): """ Parameters ---------- sid : int The asset identifier. day : datetime64-like Midnight of the day for which data is requested. colname : string The price field. e.g. ('open', 'high', 'low', 'close', 'volume') Returns ------- float The spot price for colname of the given sid on the given day. Raises a NoDataOnDate exception if the given day and sid is before or after the date range of the equity. Returns -1 if the day is within the date range, but the price is 0. """ ix = self.sid_day_index(sid, dt) price = self._spot_col(field)[ix] if field != 'volume': if price == 0: return nan else: return price else: return price def currency_codes(self, sids): # XXX: This is pretty inefficient. This reader doesn't really support # country codes, so we always either return USD or None if we don't # know about the sid at all. first_rows = self._first_rows out = [] for sid in sids: if sid in first_rows: out.append('USD') else: out.append(None) return np.array(out, dtype=object) class PSQLDailyBarWriter(object): """ Class capable of writing daily OHLCV data to disk in a format that can be read efficiently by PSQLDailyOHLCVReader. Parameters ---------- filename : str The location at which we should write our output. calendar : zipline.utils.calendar.trading_calendar Calendar to use to compute asset calendar offsets. start_session: pd.Timestamp Midnight UTC session label. end_session: pd.Timestamp Midnight UTC session label. See Also -------- zipline.data.bcolz_daily_bars.BcolzDailyBarReader """ _csv_dtypes = { 'open': float64_dtype, 'high': float64_dtype, 'low': float64_dtype, 'close': float64_dtype, 'volume': float64_dtype, } def __init__(self, db_path, calendar, start_session, end_session): self.conn = check_and_create_engine(db_path, False) if start_session != end_session: if not calendar.is_session(start_session): raise ValueError( "Start session %s is invalid!" % start_session ) if not calendar.is_session(end_session): raise ValueError( "End session %s is invalid!" % end_session ) self._start_session = start_session self._end_session = end_session self._calendar = calendar try: self.conn.connect() except sa.exc.OperationalError: # can't connect to db. might mean that the database is not created yey. # let's create it. (happens in first time usage) self.ensure_database(db_path) self.ensure_table() def ensure_database(self, db_path): """ create the bundle database. it will have the name of the bundle :param db_path: expected db path (table). used to get the bundle name. 
""" db_config = zipline.config.data_backend.PostgresDB() host = db_config.host port = db_config.port user = db_config.user password = db_config.password conn = psycopg2.connect( database="", user=user, password=password, host=host, port=port ) conn.autocommit = True # Creating a cursor object using the cursor() method cursor = conn.cursor() bundle_name = db_path.split("/")[-1] sql = f'CREATE database {bundle_name}' # Creating a database cursor.execute(sql) print(f"Database {bundle_name} created successfully........") def ensure_table(self): metadata = sa.MetaData() ohlcv_daily = sa.Table( 'ohlcv_daily', metadata, sa.Column('id', sa.Integer()), sa.Column('day', sa.Date()), sa.Column('open', sa.Float()), sa.Column('high', sa.Float()), sa.Column('low', sa.Float()), sa.Column('close', sa.Float()), sa.Column('volume', sa.BigInteger()), ) sa.Index('id_day', ohlcv_daily.c.id, ohlcv_daily.c.day) metadata.create_all(self.conn) @property def progress_bar_message(self): return "Merging daily equity files:" def progress_bar_item_show_func(self, value): return value if value is None else str(value[0]) def write(self, data, assets=None, show_progress=False, invalid_data_behavior='warn'): """ Parameters ---------- data : iterable[tuple[int, pandas.DataFrame or bcolz.ctable]] The data chunks to write. Each chunk should be a tuple of sid and the data for that asset. assets : set[int], optional The assets that should be in ``data``. If this is provided we will check ``data`` against the assets and provide better progress information. show_progress : bool, optional Whether or not to show a progress bar while writing. invalid_data_behavior : {'warn', 'raise', 'ignore'}, optional What to do when data is encountered that is outside the range of a uint32. Returns ------- table : bcolz.ctable The newly-written table. """ ctx = maybe_show_progress( ( (sid, self._write_to_postgres(sid, df, invalid_data_behavior)) for sid, df in data ), show_progress=show_progress, item_show_func=self.progress_bar_item_show_func, label=self.progress_bar_message, length=len(assets) if assets is not None else None, ) with ctx as it: return self._write_internal(it, assets) def write_csvs(self, asset_map, show_progress=False, invalid_data_behavior='warn'): """Read CSVs as DataFrames from our asset map. Parameters ---------- asset_map : dict[int -> str] A mapping from asset id to file path with the CSV data for that asset show_progress : bool Whether or not to show a progress bar while writing. invalid_data_behavior : {'warn', 'raise', 'ignore'} What to do when data is encountered that is outside the range of a uint32. """ read = partial( read_csv, parse_dates=['day'], index_col='day', dtype=self._csv_dtypes, ) return self.write( ((asset, read(path)) for asset, path in iteritems(asset_map)), assets=viewkeys(asset_map), show_progress=show_progress, invalid_data_behavior=invalid_data_behavior, ) def _write_internal(self, iterator, assets): """ Internal implementation of write. `iterator` should be an iterator yielding pairs of (asset, dataframe). """ if assets is not None: @apply def iterator(iterator=iterator, assets=set(assets)): for asset_id, table in iterator: if asset_id not in assets: logger.warning(f"unknown asset id {asset_id}. 
skipping.") continue yield asset_id, table for asset_id, table in iterator: # when writing to db, drop timezone, will crash otherwise if not table.empty: table.index = table.index.tz_localize(None) table.to_sql('ohlcv_daily', self.conn, if_exists='append') def _ensure_sessions_consistency(self, data_slice, invalid_data_behavior): """ check that we have exactly the amount of days we expect by checking the start and end dates counting the active days in between using the trading calendar data """ val = True if not data_slice.empty: first_day = data_slice.index[0] last_day = data_slice.index[-1] asset_sessions = self._calendar.sessions_in_range(first_day, last_day) if len(data_slice) != len(asset_sessions): err_msg = ( 'Got {} rows for daily bars table with first day={}, last ' 'day={}, expected {} rows.\n' 'Missing sessions: {}\n' 'Extra sessions: {}'.format( len(data_slice), first_day, last_day, len(asset_sessions), asset_sessions.difference( to_datetime( np.array(data_slice.index), unit='s', utc=True, ) ).tolist(), to_datetime( np.array(data_slice.index), unit='s', utc=True, ).difference(asset_sessions).tolist(), ) ) val = False logger.warning(err_msg) return val @expect_element(invalid_data_behavior={'warn', 'raise', 'ignore'}) def _write_to_postgres(self, sid, data: pd.DataFrame, invalid_data_behavior): result = self._format_df_columns_and_index(data, sid) if not result.empty: # set proper id data['id'] = sid edge_days = self._get_exisiting_data_dates_from_db(sid) if not self._data_for_sid_already_exist_in_db(edge_days): # this asset is still not in the DB. we write everything we got if self._ensure_sessions_consistency(data, invalid_data_behavior): # data is not consistent. we will not write anything to db result = data else: result = pd.DataFrame(columns=data.columns) else: result = self._validate_data_consistency_on_edges(sid, data, edge_days, invalid_data_behavior) return result def _validate_data_consistency_on_edges(self, sid, data, edge_days, invalid_data_behavior): """ there's already data in the db for this sid. we may append data at the beginning and/or end. before we do that, we must make sure that both segments are consistent. note: we could make a better effort by loosing up restriction and if one segment is corrupted still accept the other one. """ first_day = edge_days['first_day'][0] last_day = edge_days['last_day'][0] before_slice = data[data.index.tz_convert(None) < first_day] after_slice = data[data.index.tz_convert(None) > last_day] # check if before-slice and after-slice are aligned with data in db # e.g. don't allow gaps in terms of sessions. should be exactly two # sessions (sessions on the edge of the data and the slice) consistent_data = True if not before_slice.empty: backward_gap = len(self._calendar.sessions_in_range(before_slice.index[-1], first_day)) if backward_gap != 2: # max allowed gap for consistent data is 2 logger.warning(f"data for {sid} contains backward gaps {backward_gap} " f"and not consistent. will not be written to db.") consistent_data = False if not after_slice.empty: forward_gap = len(self._calendar.sessions_in_range(last_day, after_slice.index[-1])) if forward_gap != 2: logger.warning(f"data for {sid} contains forward gaps {forward_gap} " f"and not consistent. 
will not be written to db.") consistent_data = False if not self._ensure_sessions_consistency(before_slice, invalid_data_behavior) or not \ self._ensure_sessions_consistency(after_slice, invalid_data_behavior): consistent_data = False if consistent_data: result = before_slice.append(after_slice) else: result = pd.DataFrame(columns=data.columns) return result def _data_for_sid_already_exist_in_db(self, edges: pd.DataFrame) -> bool: """ edges is a query performed for sid in db. if it's empty it means the db doesn't contain data for this sid yet. :return: bool """ return not pd.isnull(edges['first_day'].iloc[0]) def _get_exisiting_data_dates_from_db(self, sid): """ using the sid- query the db and get the dates (start and end) for data stored in db :param sid: :return: """ edge_days = pd.read_sql( f'SELECT MAX(day) as last_day, MIN(day) as first_day ' f'FROM ohlcv_daily WHERE id = {sid}', self.conn, parse_dates=['last_day', 'first_day'] ) return edge_days def _format_df_columns_and_index(self, data: pd.DataFrame, sid): """ make sure that the data received is in the structure we expect columns and index wise. :param data: data from data bundle :param sid: sid as it should be stored in db :return: formatted data or empty df if the data is corrupted """ result = pd.DataFrame(columns=data.columns) # rename index-column to day and convert it to datetime and utc if data.index[0].tzname() != 'UTC': data.index = [x.tz_convert('utc') for x in data.index] data.index.rename("day", inplace=True) # drop time-information, it will confuse the aligning-logic data.index = data.index.normalize() # check if we have all necessary columns corrupted_data = False for column in US_EQUITY_PRICING_COLUMNS: # id not necessary if column == 'id': continue if column not in list(data.columns) + [data.index.name]: msg = f"corrupted data for :{sid}. columns must contain day, open, high, low, close, volume" logger.warning(msg) corrupted_data = True break if not corrupted_data: # drop columns not of interest cols_to_drop = [column for column in data.columns if column not in US_EQUITY_PRICING_COLUMNS] data.drop(columns=cols_to_drop, inplace=True) result = data return result
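# --- Hedged usage sketch, not part of the original module ---
# Illustrates the intended write path for PSQLDailyBarWriter defined above:
# construct it with a postgresql:// URL whose database name is the bundle
# name, then feed write() an iterable of (sid, DataFrame) pairs.  The
# connection URL, the session range, sid 1 and the synthetic OHLCV frame are
# all assumptions made purely for illustration.
def _example_write_daily_bars():
    import pandas as pd
    from trading_calendars import get_calendar

    calendar = get_calendar('NYSE')
    start = pd.Timestamp('2021-01-04', tz='utc')
    end = pd.Timestamp('2021-01-08', tz='utc')

    # Hypothetical bundle database named "demo_bundle" (see external_db_path
    # in zipline.data.bundles.core for how such URLs are normally built).
    writer = PSQLDailyBarWriter(
        'postgresql://user:password@localhost:5432/demo_bundle',
        calendar, start, end,
    )

    sessions = calendar.sessions_in_range(start, end)
    frame = pd.DataFrame(
        {'open': 10.0, 'high': 11.0, 'low': 9.5, 'close': 10.5, 'volume': 1000},
        index=sessions,
    )

    # write() expects (sid, DataFrame) pairs; the tz-aware session index is
    # renamed to 'day' and validated by _format_df_columns_and_index.
    writer.write([(1, frame)], assets={1}, show_progress=True)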
zipline-trader
/zipline_trader-1.6.1-cp36-cp36m-win32.whl/zipline/data/psql_daily_bars.py
psql_daily_bars.py
from abc import ABCMeta, abstractmethod, abstractproperty from six import with_metaclass class NoDataOnDate(Exception): """ Raised when a spot price cannot be found for the sid and date. """ pass class NoDataBeforeDate(NoDataOnDate): pass class NoDataAfterDate(NoDataOnDate): pass class NoDataForSid(Exception): """ Raised when the requested sid is missing from the pricing data. """ pass OHLCV = ('open', 'high', 'low', 'close', 'volume') class BarReader(with_metaclass(ABCMeta, object)): @abstractproperty def data_frequency(self): pass @abstractmethod def load_raw_arrays(self, columns, start_date, end_date, assets): """ Parameters ---------- columns : list of str 'open', 'high', 'low', 'close', or 'volume' start_date: Timestamp Beginning of the window range. end_date: Timestamp End of the window range. assets : list of int The asset identifiers in the window. Returns ------- list of np.ndarray A list with an entry per field of ndarrays with shape (minutes in range, sids) with a dtype of float64, containing the values for the respective field over start and end dt range. """ pass @abstractproperty def last_available_dt(self): """ Returns ------- dt : pd.Timestamp The last session for which the reader can provide data. """ pass @abstractproperty def trading_calendar(self): """ Returns the zipline.utils.calendar.trading_calendar used to read the data. Can be None (if the writer didn't specify it). """ pass @abstractproperty def first_trading_day(self): """ Returns ------- dt : pd.Timestamp The first trading day (session) for which the reader can provide data. """ pass @abstractmethod def get_value(self, sid, dt, field): """ Retrieve the value at the given coordinates. Parameters ---------- sid : int The asset identifier. dt : pd.Timestamp The timestamp for the desired data point. field : string The OHLVC name for the desired data point. Returns ------- value : float|int The value at the given coordinates, ``float`` for OHLC, ``int`` for 'volume'. Raises ------ NoDataOnDate If the given dt is not a valid market minute (in minute mode) or session (in daily mode) according to this reader's tradingcalendar. """ pass @abstractmethod def get_last_traded_dt(self, asset, dt): """ Get the latest minute on or before ``dt`` in which ``asset`` traded. If there are no trades on or before ``dt``, returns ``pd.NaT``. Parameters ---------- asset : zipline.asset.Asset The asset for which to get the last traded minute. dt : pd.Timestamp The minute at which to start searching for the last traded minute. Returns ------- last_traded : pd.Timestamp The dt of the last trade for the given asset, using the input dt as a vantage point. """ pass
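# --- Hedged sketch, not part of the original module ---
# Minimal in-memory implementation of the BarReader interface above, meant
# only to make the required abstract members concrete.  The dict-of-DataFrames
# storage, the fixed 'daily' frequency and the missing trading calendar are
# illustrative assumptions; real readers (bcolz, PostgreSQL) are far richer.
import numpy as np
import pandas as pd


class InMemoryDailyBarReader(BarReader):
    def __init__(self, frames):
        # frames: dict mapping sid -> DataFrame indexed by session, with
        # 'open', 'high', 'low', 'close', 'volume' columns.  All frames are
        # assumed to share the same session index.
        self._frames = frames

    @property
    def data_frequency(self):
        return 'daily'

    @property
    def trading_calendar(self):
        return None  # allowed: the writer may not have specified one

    @property
    def first_trading_day(self):
        return min(df.index[0] for df in self._frames.values())

    @property
    def last_available_dt(self):
        return max(df.index[-1] for df in self._frames.values())

    def load_raw_arrays(self, columns, start_date, end_date, assets):
        out = []
        for column in columns:
            per_sid = [
                self._frames[sid].loc[start_date:end_date, column]
                    .to_numpy(dtype='float64')
                for sid in assets
            ]
            out.append(np.column_stack(per_sid))
        return out

    def get_value(self, sid, dt, field):
        try:
            return self._frames[sid].loc[dt, field]
        except KeyError:
            raise NoDataOnDate('no data for sid=%s on %s' % (sid, dt))

    def get_last_traded_dt(self, asset, dt):
        # ``asset`` is used directly as the sid key in this toy reader.
        idx = self._frames[asset].index
        earlier = idx[idx <= dt]
        return earlier[-1] if len(earlier) else pd.NaT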
zipline-trader
/zipline_trader-1.6.1-cp36-cp36m-win32.whl/zipline/data/bar_reader.py
bar_reader.py
import alpaca_trade_api as tradeapi from datetime import timedelta import numpy as np from os.path import isfile, join from pathlib import Path import pandas as pd import pickle from alpaca_trade_api.common import URL from dateutil import tz from trading_calendars import TradingCalendar import yaml from zipline.data.bundles import core as bundles from dateutil.parser import parse as date_parse user_home = str(Path.home()) custom_data_path = join(user_home, '.zipline/custom_data') CLIENT: tradeapi.REST = None NY = "America/New_York" def initialize_client(): global CLIENT with open("polygon.yaml", mode='r') as f: o = yaml.safe_load(f) key = o["key_id"] secret = o["secret"] base_url = o["base_url"] CLIENT = tradeapi.REST(key_id=key, secret_key=secret, base_url=URL(base_url)) ASSETS = None def list_assets(): global ASSETS if not ASSETS: ASSETS = [_.symbol for _ in CLIENT.list_assets()] # ASSETS = [_.ticker for _ in CLIENT.polygon.all_tickers()] return ASSETS # return ['AAPL', 'AA', 'TSLA', 'GOOG', 'MSFT'] def tickers_generator(): """ Return a tuple (sid, ticker_pair) """ tickers_file = join(custom_data_path, 'alpaca_ticker_pairs.pickle') if not isfile(tickers_file): ticker_pairs = list_assets() else: with open(tickers_file, 'rb') as f: ticker_pairs = pickle.load(f)[:] return (tuple((sid, ticker)) for sid, ticker in enumerate(ticker_pairs)) def iso_date(date_str): """ this method will make sure that dates are formatted properly as with isoformat :param date_str: :return: YYYY-MM-DD date formatted """ return date_parse(date_str).date().isoformat() def get_aggs_from_polygon(dataname, dtbegin, dtend, granularity, compression): """ so polygon has a much more convenient api for this than alpaca because we could insert the compression in to the api call and we don't need to resample it. but, at this point in time, something is not working properly and data is returned in segments. meaning, we have patches of missing data. e.g we request data from 2020-03-01 to 2020-07-01 and we get something like this: 2020-03-01:2020-03-15, 2020-06-25:2020-07-01 so that makes life difficult.. there's no way to know which patch will be returned and which one we should try to get again. so the solution must be, ask data in segments. 
I select an arbitrary time window of 2 weeks, and split the calls until we get all required data """ def _clear_out_of_market_hours(df): """ only interested in samples between 9:30, 16:00 NY time """ return df.between_time("09:30", "16:00") def _fillna(df, granularity, start, end): if granularity != 'day': return df if df.empty: return df calendar: TradingCalendar = trading_calendars.get_calendar("NYSE") last_val = df.iloc[0] current = start while current <= end: if calendar.is_session(current): if current.replace(tzinfo=tz.gettz(NY)) in df.index: last_val = df.loc[current.replace(tzinfo=tz.gettz(NY))] else: # df.loc[pytz.timezone(NY).localize(current)] = last_val df.loc[current.replace(tzinfo=tz.gettz(NY))] = last_val current += timedelta(days=1) return df if granularity == 'day': cdl = CLIENT.polygon.historic_agg_v2( dataname, compression, granularity, _from=iso_date(dtbegin.isoformat()), to=iso_date(dtend.isoformat())).df cdl = _fillna(cdl, granularity, dtbegin, dtend) else: cdl = pd.DataFrame() segment_start = dtbegin segment_end = segment_start + timedelta(weeks=2) if \ dtend - dtbegin >= timedelta(weeks=2) else dtend while segment_end <= dtend and dtend not in cdl.index: response = CLIENT.polygon.historic_agg_v2( dataname, compression, granularity, _from=iso_date(segment_start.isoformat()), to=iso_date(segment_end.isoformat())) # No result from the server, most likely error if response.df.shape[0] == 0 and cdl.shape[0] == 0: raise Exception("received empty response") temp = response.df cdl = pd.concat([cdl, temp]) cdl = cdl[~cdl.index.duplicated()] segment_start = segment_end segment_end = segment_start + timedelta(weeks=2) if \ dtend - dtbegin >= timedelta(weeks=2) else dtend cdl = _clear_out_of_market_hours(cdl) return cdl def df_generator(interval, start, end): exchange = 'NYSE' for sid, symbol in enumerate(list_assets()): try: df = get_aggs_from_polygon(symbol, start, end, 'day' if interval == '1d' else 'minute', 1) if df.empty: continue start_date = df.index[0] end_date = df.index[-1] first_traded = start auto_close_date = end + pd.Timedelta(days=1) # # Check if there is any missing session; skip the ticker pair otherwise # if interval == '1d' and len(df.index) - 1 != pd.Timedelta(end_date - start_date).days: # # print('Missing sessions found in {}. Skip importing'.format(ticker_pair)) # continue # elif interval == '1m' and timedelta(minutes=(len(df.index) + 60)) != end_date - start_date: # # print('Missing sessions found in {}. 
Skip importing'.format(ticker_pair)) # continue yield (sid, df.sort_index()), symbol, start, end, first_traded, auto_close_date, exchange except Exception as e: import traceback traceback.print_exc() print(f"error while processig {(sid, symbol)}: {e}") def metadata_df(): metadata_dtype = [ ('symbol', 'object'), # ('asset_name', 'object'), ('start_date', 'datetime64[ns]'), ('end_date', 'datetime64[ns]'), ('first_traded', 'datetime64[ns]'), ('auto_close_date', 'datetime64[ns]'), ('exchange', 'object'), ] metadata_df = pd.DataFrame( np.empty(len(list_assets()), dtype=metadata_dtype)) return metadata_df @bundles.register('polygon_api', calendar_name="NYSE", minutes_per_day=390) def api_to_bundle(interval=['1m']): def ingest(environ, asset_db_writer, minute_bar_writer, daily_bar_writer, adjustment_writer, calendar, start_session, end_session, cache, show_progress, output_dir ): def minute_data_generator(): return (sid_df for (sid_df, *metadata.iloc[sid_df[0]]) in df_generator(interval='1m', start=start_session, end=end_session)) def daily_data_generator(): return (sid_df for (sid_df, *metadata.iloc[sid_df[0]]) in df_generator(interval='1d', start=start_session, end=end_session)) for _interval in interval: metadata = metadata_df() if _interval == '1d': daily_bar_writer.write(daily_data_generator(), show_progress=True) elif _interval == '1m': minute_bar_writer.write( minute_data_generator(), show_progress=True) # Drop the ticker rows which have missing sessions in their data sets metadata.dropna(inplace=True) asset_db_writer.write(equities=metadata) print(metadata) adjustment_writer.write() return ingest if __name__ == '__main__': from zipline.data.bundles import register from zipline.data import bundles as bundles_module import trading_calendars import os cal: TradingCalendar = trading_calendars.get_calendar('NYSE') start_date = pd.Timestamp('2019-08-03 0:00', tz='utc') while not cal.is_session(start_date): start_date += timedelta(days=1) end_date = pd.Timestamp('now', tz='utc').date() - timedelta(days=1) while not cal.is_session(end_date): end_date -= timedelta(days=1) end_date = pd.Timestamp(end_date, tz='utc') initialize_client() register( 'polygon_api', # api_to_bundle(interval=['1d', '1m']), # api_to_bundle(interval=['1m']), api_to_bundle(interval=['1d']), calendar_name='NYSE', start_session=start_date, end_session=end_date ) assets_version = ((),)[0] # just a weird way to create an empty tuple bundles_module.ingest( "polygon_api", os.environ, assets_versions=assets_version, show_progress=True, )
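# --- Hedged sketch, not part of the original module ---
# The two-week segmentation that get_aggs_from_polygon describes in its
# docstring, factored out as a generic helper.  ``fetch`` is a hypothetical
# callable (segment_start, segment_end) -> DataFrame standing in for the
# polygon historic_agg_v2 request; the two-week window size mirrors the
# arbitrary choice made above.
from datetime import timedelta as _timedelta

import pandas as pd


def fetch_in_segments(fetch, dtbegin, dtend, window=_timedelta(weeks=2)):
    """Call ``fetch`` over window-sized slices of [dtbegin, dtend] and
    concatenate the pieces, dropping duplicated index entries."""
    out = pd.DataFrame()
    segment_start = dtbegin
    while segment_start < dtend:
        segment_end = min(segment_start + window, dtend)
        chunk = fetch(segment_start, segment_end)
        out = pd.concat([out, chunk])
        out = out[~out.index.duplicated()]
        segment_start = segment_end
    return out.sort_index()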
zipline-trader
/zipline_trader-1.6.1-cp36-cp36m-win32.whl/zipline/data/bundles/___polygon_api.py
___polygon_api.py
import os import bs4 as bs from binance.client import Client import csv from datetime import datetime as dt from datetime import timedelta import numpy as np from os import listdir, mkdir, remove from os.path import exists, isfile, join from pathlib import Path import pandas as pd import pickle import requests from trading_calendars import register_calendar # from trading_calendars.exchange_calendar_binance import BinanceExchangeCalendar import yaml from zipline.data.bundles import core as bundles # Set up the directories where we are going to save those csv files user_home = str(Path.home()) csv_data_path = join(os.environ["ZIPLINE_ROOT"], 'custom_data/binance/csv') custom_data_path = join(user_home, 'custom_data/binance') CLIENT: Client = None def initialize_client(): global CLIENT with open("binance.yaml", mode='r') as f: o = yaml.safe_load(f) key = o["key_id"] secret = o["secret"] CLIENT = Client(key, secret) def get_binance_pairs(**kwargs): base_currencies = kwargs.get('base_currencies', '') quote_currencies = kwargs.get('quote_currencies', '') binance_pairs = list() all_tickers = CLIENT.get_all_tickers() # if not self.futures: # all_tickers = CLIENT.get_all_tickers() # else: # all_tickers = CLIENT.futures_ticker() if base_currencies and quote_currencies: input_pairs = [x + y for x in quote_currencies for y in base_currencies] for x, currency_pair in enumerate(all_tickers): if base_currencies and quote_currencies: for pair in input_pairs: if currency_pair['symbol'] == pair.upper(): binance_pairs.append(currency_pair['symbol']) break elif base_currencies: for base_currency in base_currencies: if currency_pair['symbol'][-len(base_currency):] == base_currency.upper(): binance_pairs.append(currency_pair['symbol']) break elif quote_currencies: for quote_currency in quote_currencies: if currency_pair['symbol'][:len(quote_currency)] == quote_currency.upper(): binance_pairs.append(currency_pair['symbol']) break else: binance_pairs.append(currency_pair['symbol']) if binance_pairs: return binance_pairs else: raise ValueError('Invalid Input: Binance returned no matching currency pairs.') def tickers(): """ Save Binance trading pair tickers to a pickle file Return a pickle """ cmc_binance_url = 'https://coinmarketcap.com/exchanges/binance/' response = requests.get(cmc_binance_url) if response.ok: soup = bs.BeautifulSoup(response.text, 'html.parser') table = soup.find('table', {'id': 'exchange-markets'}) ticker_pairs = [] for row in table.findAll('tr')[1:]: ticker_pair = row.findAll('td')[2].text ticker_pairs.append(ticker_pair.strip().replace('/', '')) if not exists(custom_data_path): mkdir(custom_data_path) with open(join(custom_data_path, 'binance_ticker_pairs.pickle'), 'wb') as f: pickle.dump(ticker_pairs, f) return ticker_pairs def save_csv(reload_tickers=True, interval='1m'): """ Save Zipline bundle ready csv for Binance trading ticker pair :param reload_tickers: True or False :type reload_tickers: boolean :param interval: Default 1m. 
1m, 3m, 5m, 15m, 30m, 1h, 2h, 4h, 6h, 8h, 12h, 1d, 3d, 1w, 1M :type interval: str """ if not exists(csv_data_path): mkdir(csv_data_path) if reload_tickers: ticker_pairs = get_binance_pairs() else: ticker_pickle = join( custom_data_path, 'binance_ticker_pairs.pickle') with open(ticker_pickle, 'rb') as f: ticker_pairs = pickle.load(f) start = '2017-7-14' # Binance launch date end = dt.utcnow().strftime('%Y-%m-%d') # Current day csv_filenames = [csv_filename for csv_filename in listdir( csv_data_path) if isfile(join(csv_data_path, csv_filename))] for _interval in interval: for ticker_pair in ticker_pairs: filename = "Binance_{}_{}.csv".format(ticker_pair, _interval) if csv_filenames != [] and filename in csv_filenames: remove(join(csv_data_path, filename)) output = join(csv_data_path, filename) klines = CLIENT.get_historical_klines_generator( ticker_pair, _interval, start, end) for index, kline in enumerate(klines): with open(output, 'a+') as f: writer = csv.writer(f) if index == 0: writer.writerow( ['date', 'open', 'high', 'low', 'close', 'volume']) # Make a real copy of kline # Binance API forbids the change of open time line = kline[:] del line[6:] line[0] = np.datetime64(line[0], 'ms') line[0] = pd.Timestamp(line[0], 'ms') writer.writerow(line) print('{} saved.'.format(filename)) return [file for file in listdir(csv_data_path) if isfile(join(csv_data_path, file))] @bundles.register('binance_CSV', calendar_name="24/7", minutes_per_day=1440) def csv_to_bundle(reload_tickers=True, reload_csv=True, interval='1m'): def ingest(environ, asset_db_writer, minute_bar_writer, daily_bar_writer, adjustment_writer, calendar, start_session, end_session, cache, show_progress, output_dir ): if reload_csv: csv_filenames = save_csv( reload_tickers=reload_tickers, interval=interval) else: csv_filenames = [file for file in listdir( csv_data_path) if isfile(join(csv_data_path, file))] ticker_pairs = [{'exchange': pair.split('_')[0], 'symbol': pair.split('_')[1], 'interval':pair.split('_')[2].split('.')[0], 'file_path':join(csv_data_path, pair)} for pair in csv_filenames] metadata_dtype = [ ('symbol', 'object'), ('asset_name', 'object'), ('start_date', 'datetime64[ns]'), ('end_date', 'datetime64[ns]'), ('first_traded', 'datetime64[ns]'), ('auto_close_date', 'datetime64[ns]'), ('exchange', 'object'), ] metadata = pd.DataFrame( np.empty(len(ticker_pairs), dtype=metadata_dtype)) minute_data_sets = [] daily_data_sets = [] for sid, ticker_pair in enumerate(ticker_pairs): df = pd.read_csv(ticker_pair['file_path'], index_col=['date'], parse_dates=['date']) symbol = ticker_pair['symbol'] asset_name = ticker_pair['symbol'] start_date = df.index[0] end_date = df.index[-1] first_traded = start_date auto_close_date = end_date + pd.Timedelta(days=1) exchange = ticker_pair['exchange'] # Update metadata metadata.iloc[sid] = symbol, asset_name, start_date, end_date, first_traded, auto_close_date, exchange if ticker_pair['interval'] == '1m': minute_data_sets.append((sid, df)) if ticker_pair['interval'] == '1d': daily_data_sets.append((sid, df)) if minute_data_sets != []: # Dealing with missing sessions in some data sets for daily_data_set in daily_data_sets: try: minute_bar_writer.write( [daily_data_set], show_progress=True) except Exception as e: print(e) if daily_data_sets != []: # Dealing with missing sessions in some data sets for daily_data_set in daily_data_sets: try: daily_bar_writer.write( [daily_data_set], show_progress=True) except Exception as e: print(e) metadata['exchange'] = "Binance" 
asset_db_writer.write(equities=metadata) print(metadata) adjustment_writer.write() return ingest if __name__ == '__main__': from zipline.data.bundles import register from zipline.data import bundles as bundles_module import os initialize_client() register( 'binance_csv', # csv_to_bundle(interval=['1d', '1m']), csv_to_bundle(interval=['1m']), # csv_to_bundle(interval=['1d']), calendar_name='24/7', ) assets_version = ((),)[0] # just a weird way to create an empty tuple bundles_module.ingest( "binance_csv", os.environ, pd.Timestamp.utcnow(), assets_version, True, )
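# --- Hedged sketch, not part of the original module ---
# save_csv() above round-trips Binance klines through CSV files.  This helper
# shows the same column handling done directly in memory: keep the first six
# kline fields, turn the millisecond open-time into the index and coerce the
# string values to floats.  The function name and the in-memory approach are
# illustrative assumptions, not part of the bundle itself.
import numpy as np
import pandas as pd


def klines_to_dataframe(klines):
    """Convert raw Binance klines (lists of values) into an OHLCV DataFrame."""
    rows = []
    for kline in klines:
        line = list(kline[:6])  # open time, open, high, low, close, volume
        line[0] = pd.Timestamp(np.datetime64(int(line[0]), 'ms'))
        rows.append(line)
    df = pd.DataFrame(rows, columns=['date', 'open', 'high', 'low', 'close', 'volume'])
    return df.set_index('date').astype('float64').sort_index()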
zipline-trader
/zipline_trader-1.6.1-cp36-cp36m-win32.whl/zipline/data/bundles/binance_csv.py
binance_csv.py
import collections import alpaca_trade_api as tradeapi from datetime import timedelta, time as dtime import numpy as np from pathlib import Path import pandas as pd import pytz from alpaca_trade_api.common import URL from dateutil import tz from trading_calendars import TradingCalendar import zipline.config from zipline.data.bundles import core as bundles from zipline.data.bundles.common import asset_to_sid_map from zipline.data.bundles.universe import Universe, all_alpaca_assets, get_sp500, get_sp100, get_nasdaq100 from dateutil.parser import parse as date_parse user_home = str(Path.home()) CLIENT: tradeapi.REST = None NY = "America/New_York" def initialize_client(): global CLIENT conf = zipline.config.bundle.AlpacaConfig() key = conf.key secret = conf.secret base_url = conf.base_url CLIENT = tradeapi.REST(key_id=key, secret_key=secret, base_url=URL(base_url)) ASSETS = None def list_assets(): global ASSETS if not ASSETS: conf = zipline.config.bundle.AlpacaConfig() custom_asset_list = conf.custom_asset_list if custom_asset_list: custom_asset_list = custom_asset_list.strip().replace(" ", "").split(",") ASSETS = list(set(custom_asset_list)) else: try: universe = Universe[conf.universe] except: universe = Universe.ALL if universe == Universe.ALL: ASSETS = all_alpaca_assets(CLIENT) elif universe == Universe.SP100: ASSETS = get_sp100() elif universe == Universe.SP500: ASSETS = get_sp500() elif universe == Universe.NASDAQ100: ASSETS = get_nasdaq100() ASSETS = list(set(ASSETS)) return ASSETS def iso_date(date_str): """ this method will make sure that dates are formatted properly as with isoformat :param date_str: :return: YYYY-MM-DD date formatted """ return date_parse(date_str).date().isoformat() def get_aggs_from_alpaca(symbols, start, end, granularity, compression=1): """ https://alpaca.markets/docs/api-documentation/api-v2/market-data/bars/ Alpaca API as a limit of 1000 records per api call. meaning, we need to do multiple calls to get all the required data if the date range is large. also, the alpaca api does not support compression (or, you can't get 5 minute bars e.g) so we need to resample the received bars. also, we need to drop out of market records. this function does all of that. note: this was the old way of getting the data response = CLIENT.get_aggs(dataname, compression, granularity, self.iso_date(start_dt), self.iso_date(end_dt)) the thing is get_aggs work nicely for days but not for minutes, and it is not a documented API. barset on the other hand does but we need to manipulate it to be able to work with it smoothly and return data the same way polygon does """ def _iterate_api_calls(): """ you could get max 1000 samples from the server. if we need more than that we need to do several api calls. 
currently the alpaca api supports also 5Min and 15Min so we could optimize server communication time by addressing timeframes """ got_all = False curr = end response: pd.DataFrame = pd.DataFrame([]) while not got_all: if granularity == 'minute' and compression == 5: timeframe = "5Min" elif granularity == 'minute' and compression == 15: timeframe = "15Min" else: timeframe = granularity r = CLIENT.get_barset(symbols, timeframe, limit=1000, end=curr.isoformat() ) if r: response = r.df if response.empty else pd.concat([r.df, response]) response.sort_index(inplace=True) if response.index[0] <= (pytz.timezone(NY).localize( start) if not start.tzname() else start): got_all = True else: delta = timedelta(days=1) if granularity == "day" \ else timedelta(minutes=1) curr = response.index[0] - delta else: # no more data is available, let's return what we have break return response def _fillna(df, granularity, start, end): if granularity != 'day': return df if df.empty: return df calendar: TradingCalendar = trading_calendars.get_calendar("NYSE") last_val = df.iloc[0] current = start while current <= end: if calendar.is_session(current): if current.replace(tzinfo=tz.gettz(NY)) in df.index: last_val = df.loc[current.replace(tzinfo=tz.gettz(NY))] else: # df.loc[pytz.timezone(NY).localize(current)] = last_val df.loc[current.replace(tzinfo=tz.gettz(NY))] = last_val current += timedelta(days=1) return df def _clear_out_of_market_hours(df): """ only interested in samples between 9:30, 16:00 NY time """ return df.between_time("09:30", "16:00") def _drop_early_samples(df): """ samples from server don't start at 9:30 NY time let's drop earliest samples """ for i, b in df.iterrows(): if i.time() >= dtime(9, 30): return df[i:] def _resample(df): """ samples returned with certain window size (1 day, 1 minute) user may want to work with different window size (5min) """ if granularity == 'minute': sample_size = f"{compression}Min" else: sample_size = f"{compression}D" df = df.resample(sample_size).agg( collections.OrderedDict([ ('open', 'first'), ('high', 'max'), ('low', 'min'), ('close', 'last'), ('volume', 'sum'), ]) ) if granularity == 'minute': return df.between_time("09:30", "16:00") else: return df if not start: response = CLIENT.get_barset(symbols, granularity, limit=1000, end=end).df else: response = _iterate_api_calls() cdl = response if granularity == 'minute': cdl = _clear_out_of_market_hours(cdl) cdl = _drop_early_samples(cdl) if compression != 1: response = _resample(cdl) # response = _back_to_aggs(cdl) else: response = cdl if granularity == 'day': response = response[start:end] # we only want data between dates processed = pd.DataFrame([], columns=response.columns) for sym in response.columns.levels[0]: df: pd.DataFrame = response[sym] df = df.dropna() df = _fillna(df, granularity, start, end) if processed.empty and not df.empty: processed = processed.reindex(df.index.values) if not df.empty: processed[sym] = df return processed MAX_PER_REQUEST_AMOUNT = 200 # Alpaca max symbols per 1 http request def df_generator(interval, start, end, assets_to_sids): exchange = 'NYSE' asset_list = list_assets() base_sid = 0 # some symbols from alpaca are duplicated, which causes an issue with zipline # ingest process. 
for now, we make sure we serve one of them (for now the first one) already_ingested = {} for i in range(len(asset_list[::MAX_PER_REQUEST_AMOUNT])): partial = asset_list[MAX_PER_REQUEST_AMOUNT*i:MAX_PER_REQUEST_AMOUNT*(i+1)] df: pd.DataFrame = get_aggs_from_alpaca(partial, start, end, 'day' if interval == '1d' else 'minute', 1) for _, symbol in enumerate(df.columns.levels[0]): try: sid = assets_to_sids[symbol] # doing this makes sure not all data in df is null # isnull returns 0 and 1 matrix. # doing sum twice, makes sure there isn't even one NaN value # and since we do ffill of the data, that should not happen # if df[symbol].isnull().sum().sum() == 0: if not df[symbol].isnull().all().all(): if symbol not in already_ingested: first_traded = start auto_close_date = end + pd.Timedelta(days=1) yield (sid, df[symbol].sort_index()), symbol, start, end, first_traded, auto_close_date, exchange already_ingested[symbol] = True except Exception as e: import traceback traceback.print_exc() print(f"error while processig {(sid + base_sid, symbol)}: {e}") def metadata_df(): metadata_dtype = [ ('symbol', 'object'), # ('asset_name', 'object'), ('start_date', 'datetime64[ns]'), ('end_date', 'datetime64[ns]'), ('first_traded', 'datetime64[ns]'), ('auto_close_date', 'datetime64[ns]'), ('exchange', 'object'), ] metadata_df = pd.DataFrame( np.empty(len(list_assets()), dtype=metadata_dtype)) return metadata_df @bundles.register('alpaca_api', calendar_name="NYSE", minutes_per_day=390) def api_to_bundle(interval=['1m']): def ingest(environ, asset_db_writer, minute_bar_writer, daily_bar_writer, adjustment_writer, calendar, start_session, end_session, cache, show_progress, output_dir ): assets_to_sids = asset_to_sid_map(asset_db_writer.asset_finder, list_assets()) def minute_data_generator(): return (sid_df for (sid_df, *metadata.iloc[sid_df[0]]) in df_generator(interval='1m', start=start_session, end=end_session, assets_to_sids=assets_to_sids)) def daily_data_generator(): return (sid_df for (sid_df, *metadata.iloc[sid_df[0]]) in df_generator(interval='1d', start=start_session, end=end_session, assets_to_sids=assets_to_sids)) for _interval in interval: metadata = metadata_df() if _interval == '1d': daily_bar_writer.write(daily_data_generator(), assets=assets_to_sids.values(), show_progress=True) elif _interval == '1m': minute_bar_writer.write( minute_data_generator(), assets=assets_to_sids.values(), show_progress=True) # Drop the ticker rows which have missing sessions in their data sets metadata.dropna(inplace=True) asset_db_writer.write(equities=metadata) print(metadata) adjustment_writer.write() return ingest if __name__ == '__main__': from zipline.data.bundles import register from zipline.data import bundles as bundles_module import trading_calendars import os cal: TradingCalendar = trading_calendars.get_calendar('NYSE') end_date = pd.Timestamp('now', tz='utc').date() - timedelta(days=1) while not cal.is_session(str(end_date)): end_date -= timedelta(days=1) end_date = pd.Timestamp(end_date, tz='utc') # start_date = pd.Timestamp('2020-10-03 0:00', tz='utc') # while not cal.is_session(start_date): # start_date += timedelta(days=1) start_date = end_date - timedelta(days=365) while not cal.is_session(start_date): start_date -= timedelta(days=1) initialize_client() import time start_time = time.time() register( 'alpaca_api', # api_to_bundle(interval=['1d', '1m']), # api_to_bundle(interval=['1m']), api_to_bundle(interval=['1d']), calendar_name='NYSE', start_session=start_date, end_session=end_date ) assets_version = 
((),)[0] # just a weird way to create an empty tuple bundles_module.ingest( "alpaca_api", os.environ, assets_versions=assets_version, show_progress=True, ) print(f"--- It took {timedelta(seconds=time.time() - start_time)} ---")
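# --- Hedged sketch, not part of the original module ---
# The _resample/_clear_out_of_market_hours pair inside get_aggs_from_alpaca is
# the heart of the compression logic.  The standalone helper below applies the
# same OHLCV aggregation to a single symbol's 1-minute frame; the 5-minute
# default and the assumption that the index is already New-York-localized (as
# the Alpaca barset frames above are) are illustrative choices.
import collections

import pandas as pd


def resample_minutes(df, compression=5):
    """Resample a 1-minute OHLCV frame to ``compression``-minute bars and keep
    only regular market hours (09:30-16:00 in the frame's own timezone)."""
    agg = collections.OrderedDict([
        ('open', 'first'),
        ('high', 'max'),
        ('low', 'min'),
        ('close', 'last'),
        ('volume', 'sum'),
    ])
    bars = df.resample('%dMin' % compression).agg(agg)
    return bars.between_time('09:30', '16:00').dropna(how='all')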
zipline-trader
/zipline_trader-1.6.1-cp36-cp36m-win32.whl/zipline/data/bundles/alpaca_api.py
alpaca_api.py
import os import sys from logbook import Logger, StreamHandler from numpy import empty from pandas import DataFrame, read_csv, Index, Timedelta, NaT from trading_calendars import register_calendar_alias from zipline.utils.cli import maybe_show_progress from zipline.data.bundles import core as bundles from zipline.data.bundles.common import asset_to_sid_map handler = StreamHandler(sys.stdout, format_string=" | {record.message}") logger = Logger(__name__) logger.handlers.append(handler) def csvdir_equities(tframes=None, csvdir=None): """ Generate an ingest function for custom data bundle This function can be used in ~/.zipline/extension.py to register bundle with custom parameters, e.g. with a custom trading calendar. Parameters ---------- tframes: tuple, optional The data time frames, supported timeframes: 'daily' and 'minute' csvdir : string, optional, default: CSVDIR environment variable The path to the directory of this structure: <directory>/<timeframe1>/<symbol1>.csv <directory>/<timeframe1>/<symbol2>.csv <directory>/<timeframe1>/<symbol3>.csv <directory>/<timeframe2>/<symbol1>.csv <directory>/<timeframe2>/<symbol2>.csv <directory>/<timeframe2>/<symbol3>.csv Returns ------- ingest : callable The bundle ingest function Examples -------- This code should be added to ~/.zipline/extension.py .. code-block:: python from zipline.data.bundles import csvdir_equities, register register('custom-csvdir-bundle', csvdir_equities(["daily", "minute"], '/full/path/to/the/csvdir/directory')) """ return CSVDIRBundle(tframes, csvdir).ingest class CSVDIRBundle: """ Wrapper class to call csvdir_bundle with provided list of time frames and a path to the csvdir directory """ def __init__(self, tframes=None, csvdir=None): self.tframes = tframes self.csvdir = csvdir def ingest(self, environ, asset_db_writer, minute_bar_writer, daily_bar_writer, adjustment_writer, calendar, start_session, end_session, cache, show_progress, output_dir): csvdir_bundle(environ, asset_db_writer, minute_bar_writer, daily_bar_writer, adjustment_writer, calendar, start_session, end_session, cache, show_progress, output_dir, self.tframes, self.csvdir) @bundles.register("csvdir") def csvdir_bundle(environ, asset_db_writer, minute_bar_writer, daily_bar_writer, adjustment_writer, calendar, start_session, end_session, cache, show_progress, output_dir, tframes=None, csvdir=None): """ Build a zipline data bundle from the directory with csv files. 
""" if not csvdir: csvdir = environ.get('CSVDIR') if not csvdir: raise ValueError("CSVDIR environment variable is not set") if not os.path.isdir(csvdir): raise ValueError("%s is not a directory" % csvdir) if not tframes: tframes = set(["daily", "minute"]).intersection(os.listdir(csvdir)) if not tframes: raise ValueError("'daily' and 'minute' directories " "not found in '%s'" % csvdir) divs_splits = {'divs': DataFrame(columns=['sid', 'amount', 'ex_date', 'record_date', 'declared_date', 'pay_date']), 'splits': DataFrame(columns=['sid', 'ratio', 'effective_date'])} for tframe in tframes: ddir = os.path.join(csvdir, tframe) symbols = sorted(item.split('.csv')[0] for item in os.listdir(ddir) if '.csv' in item) if not symbols: raise ValueError("no <symbol>.csv* files found in %s" % ddir) dtype = [('start_date', 'datetime64[ns]'), ('end_date', 'datetime64[ns]'), ('auto_close_date', 'datetime64[ns]'), ('symbol', 'object')] metadata = DataFrame(empty(len(symbols), dtype=dtype)) if tframe == 'minute': writer = minute_bar_writer else: writer = daily_bar_writer assets_to_sids = asset_to_sid_map(asset_db_writer.asset_finder, symbols) writer.write(_pricing_iter(ddir, symbols, metadata, divs_splits, show_progress, assets_to_sids = assets_to_sids), show_progress=show_progress) # Hardcode the exchange to "CSVDIR" for all assets and (elsewhere) # register "CSVDIR" to resolve to the NYSE calendar, because these # are all equities and thus can use the NYSE calendar. metadata['exchange'] = "CSVDIR" asset_db_writer.write(equities=metadata) divs_splits['divs']['sid'] = divs_splits['divs']['sid'].astype(int) divs_splits['splits']['sid'] = divs_splits['splits']['sid'].astype(int) adjustment_writer.write(splits=divs_splits['splits'], dividends=divs_splits['divs']) def _pricing_iter(csvdir, symbols, metadata, divs_splits, show_progress, assets_to_sids={}): with maybe_show_progress(symbols, show_progress, label='Loading custom pricing data: ') as it: files = os.listdir(csvdir) for symbol in it: sid = assets_to_sids[symbol] logger.debug('%s: sid %s' % (symbol, sid)) try: fname = [fname for fname in files if '%s.csv' % symbol in fname][0] except IndexError: raise ValueError("%s.csv file is not in %s" % (symbol, csvdir)) dfr = read_csv(os.path.join(csvdir, fname), parse_dates=[0], infer_datetime_format=True, index_col=0).sort_index() start_date = dfr.index[0] end_date = dfr.index[-1] #print(dfr) #exit() # The auto_close date is the day after the last trade. ac_date = end_date + Timedelta(days=1) metadata.loc[sid] = start_date, end_date, ac_date, symbol if 'split' in dfr.columns: tmp = 1. / dfr[dfr['split'] != 1.0]['split'] split = DataFrame(data=tmp.index.tolist(), columns=['effective_date']) split['ratio'] = tmp.tolist() split['sid'] = sid splits = divs_splits['splits'] index = Index(range(splits.shape[0], splits.shape[0] + split.shape[0])) split.set_index(index, inplace=True) divs_splits['splits'] = splits.append(split) if 'dividend' in dfr.columns: # ex_date amount sid record_date declared_date pay_date tmp = dfr[dfr['dividend'] != 0.0]['dividend'] div = DataFrame(data=tmp.index.tolist(), columns=['ex_date']) div['record_date'] = NaT div['declared_date'] = NaT div['pay_date'] = NaT div['amount'] = tmp.tolist() div['sid'] = sid divs = divs_splits['divs'] ind = Index(range(divs.shape[0], divs.shape[0] + div.shape[0])) div.set_index(ind, inplace=True) divs_splits['divs'] = divs.append(div) yield sid, dfr register_calendar_alias("CSVDIR", "NYSE")
zipline-trader
/zipline_trader-1.6.1-cp36-cp36m-win32.whl/zipline/data/bundles/csvdir.py
csvdir.py
from collections import namedtuple import errno import os import shutil import warnings import click from logbook import Logger import pandas as pd from trading_calendars import get_calendar from toolz import curry, complement, take from ..adjustments import SQLiteAdjustmentReader, SQLiteAdjustmentWriter from ..bcolz_daily_bars import BcolzDailyBarReader, BcolzDailyBarWriter from ..minute_bars import ( BcolzMinuteBarReader, BcolzMinuteBarWriter, ) from ..psql_daily_bars import PSQLDailyBarReader, PSQLDailyBarWriter from zipline.assets import ( AssetDBWriter, AssetFinder, ASSET_DB_VERSION, ) from zipline.assets.asset_db_migrations import downgrade from zipline.utils.cache import ( dataframe_cache, working_dir, working_file, ) from zipline.utils.compat import ExitStack, mappingproxy from zipline.utils.input_validation import ensure_timestamp, optionally import zipline.utils.paths as pth from zipline.utils.preprocess import preprocess from sqlalchemy.exc import InvalidRequestError log = Logger(__name__) def asset_db_path(bundle_name, timestr, environ=None, db_version=None): return pth.data_path( asset_db_relative(bundle_name, timestr, db_version), environ=environ, ) def minute_equity_path(bundle_name, timestr, environ=None): return pth.data_path( minute_equity_relative(bundle_name, timestr), environ=environ, ) def daily_equity_path(bundle_name, timestr, environ=None): return pth.data_path( daily_equity_relative(bundle_name, timestr), environ=environ, ) def adjustment_db_path(bundle_name, timestr, environ=None): return pth.data_path( adjustment_db_relative(bundle_name, timestr), environ=environ, ) def cache_path(bundle_name, environ=None): return pth.data_path( cache_relative(bundle_name), environ=environ, ) def adjustment_db_relative(bundle_name, timestr): return bundle_name, timestr, 'adjustments.sqlite' def cache_relative(bundle_name): return bundle_name, '.cache' def daily_equity_relative(bundle_name, timestr): return bundle_name, timestr, 'daily_equities.bcolz' def minute_equity_relative(bundle_name, timestr): return bundle_name, timestr, 'minute_equities.bcolz' def asset_db_relative(bundle_name, timestr, db_version=None): db_version = ASSET_DB_VERSION if db_version is None else db_version return bundle_name, timestr, 'assets-%d.sqlite' % db_version def external_db_path(bundle_name, environ): import zipline.config.data_backend path = None if zipline.config.data_backend.db_backend_configured(): if zipline.config.data_backend.db_backend_configured() == 'postgres': db = zipline.config.data_backend.PostgresDB() host = db.host port = db.port user = db.user password = db.password user_pwd_str = f'{user}:{password}@' if user != '' else '' host_port_str = f'{host}:{port}' if port != '' else f'{host}' # we assume bundle-name as database-name path = f'postgresql://{user_pwd_str}{host_port_str}/{bundle_name}' else: backend = environ['ZIPLINE_DATA_BACKEND'] raise Exception(f'Backend {backend} currently not supported') return path def to_bundle_ingest_dirname(ts): """Convert a pandas Timestamp into the name of the directory for the ingestion. Parameters ---------- ts : pandas.Timestamp The time of the ingestions Returns ------- name : str The name of the directory for this ingestion. """ return ts.isoformat().replace(':', ';') def from_bundle_ingest_dirname(cs): """Read a bundle ingestion directory name into a pandas Timestamp. Parameters ---------- cs : str The name of the directory. Returns ------- ts : pandas.Timestamp The time when this ingestion happened. 
""" return pd.Timestamp(cs.replace(';', ':')) def ingestions_for_bundle(bundle, environ=None): print(os.listdir(pth.data_path([bundle], environ))) return sorted( (from_bundle_ingest_dirname(ing) for ing in os.listdir(pth.data_path([bundle], environ)) if not pth.hidden(ing)), reverse=True, ) RegisteredBundle = namedtuple( 'RegisteredBundle', ['calendar_name', 'start_session', 'end_session', 'minutes_per_day', 'ingest', 'create_writers'] ) BundleData = namedtuple( 'BundleData', 'asset_finder equity_minute_bar_reader equity_daily_bar_reader ' 'adjustment_reader', ) BundleCore = namedtuple( 'BundleCore', 'bundles register unregister ingest load clean', ) class UnknownBundle(click.ClickException, LookupError): """Raised if no bundle with the given name was registered. """ exit_code = 1 def __init__(self, name): super(UnknownBundle, self).__init__( 'No bundle registered with the name %r' % name, ) self.name = name def __str__(self): return self.message class BadClean(click.ClickException, ValueError): """Exception indicating that an invalid argument set was passed to ``clean``. Parameters ---------- before, after, keep_last : any The bad arguments to ``clean``. See Also -------- clean """ def __init__(self, before, after, keep_last): super(BadClean, self).__init__( 'Cannot pass a combination of `before` and `after` with ' '`keep_last`. Must pass one. ' 'Got: before=%r, after=%r, keep_last=%r\n' % ( before, after, keep_last, ), ) def __str__(self): return self.message def _make_bundle_core(): """Create a family of data bundle functions that read from the same bundle mapping. Returns ------- bundles : mappingproxy The mapping of bundles to bundle payloads. register : callable The function which registers new bundles in the ``bundles`` mapping. unregister : callable The function which deregisters bundles from the ``bundles`` mapping. ingest : callable The function which downloads and write data for a given data bundle. load : callable The function which loads the ingested bundles back into memory. clean : callable The function which cleans up data written with ``ingest``. """ _bundles = {} # the registered bundles # Expose _bundles through a proxy so that users cannot mutate this # accidentally. Users may go through `register` to update this which will # warn when trampling another bundle. bundles = mappingproxy(_bundles) @curry def register(name, f, calendar_name='NYSE', start_session=None, end_session=None, minutes_per_day=390, create_writers=True): """Register a data bundle ingest function. Parameters ---------- name : str The name of the bundle. f : callable The ingest function. This function will be passed: environ : mapping The environment this is being run with. asset_db_writer : AssetDBWriter The asset db writer to write into. minute_bar_writer : BcolzMinuteBarWriter The minute bar writer to write into. daily_bar_writer : BcolzDailyBarWriter The daily bar writer to write into. adjustment_writer : SQLiteAdjustmentWriter The adjustment db writer to write into. calendar : trading_calendars.TradingCalendar The trading calendar to ingest for. start_session : pd.Timestamp The first session of data to ingest. end_session : pd.Timestamp The last session of data to ingest. cache : DataFrameCache A mapping object to temporarily store dataframes. This should be used to cache intermediates in case the load fails. This will be automatically cleaned up after a successful load. show_progress : bool Show the progress for the current load where possible. 
calendar_name : str, optional The name of a calendar used to align bundle data. Default is 'NYSE'. start_session : pd.Timestamp, optional The first session for which we want data. If not provided, or if the date lies outside the range supported by the calendar, the first_session of the calendar is used. end_session : pd.Timestamp, optional The last session for which we want data. If not provided, or if the date lies outside the range supported by the calendar, the last_session of the calendar is used. minutes_per_day : int, optional The number of minutes in each normal trading day. create_writers : bool, optional Should the ingest machinery create the writers for the ingest function. This can be disabled as an optimization for cases where they are not needed, like the ``quantopian-quandl`` bundle. Notes ----- This function my be used as a decorator, for example: .. code-block:: python @register('quandl') def quandl_ingest_function(...): ... See Also -------- zipline.data.bundles.bundles """ if name in bundles: warnings.warn( 'Overwriting bundle with name %r' % name, stacklevel=3, ) # NOTE: We don't eagerly compute calendar values here because # `register` is called at module scope in zipline, and creating a # calendar currently takes between 0.5 and 1 seconds, which causes a # noticeable delay on the zipline CLI. _bundles[name] = RegisteredBundle( calendar_name=calendar_name, start_session=start_session, end_session=end_session, minutes_per_day=minutes_per_day, ingest=f, create_writers=create_writers, ) return f def unregister(name): """Unregister a bundle. Parameters ---------- name : str The name of the bundle to unregister. Raises ------ UnknownBundle Raised when no bundle has been registered with the given name. See Also -------- zipline.data.bundles.bundles """ try: del _bundles[name] except KeyError: raise UnknownBundle(name) def ingest(name, environ=os.environ, timestamp=None, assets_versions=(), show_progress=False): """Ingest data for a given bundle. Parameters ---------- name : str The name of the bundle. environ : mapping, optional The environment variables. By default this is os.environ. timestamp : datetime, optional The timestamp to use for the load. By default this is the current time. assets_versions : Iterable[int], optional Versions of the assets db to which to downgrade. show_progress : bool, optional Tell the ingest function to display the progress where possible. 
""" try: bundle = bundles[name] except KeyError: raise UnknownBundle(name) calendar = get_calendar(bundle.calendar_name) start_session = bundle.start_session end_session = bundle.end_session if start_session is None or start_session < calendar.first_session: start_session = calendar.first_session if end_session is None or end_session > calendar.last_session: end_session = calendar.last_session if timestamp is None: timestamp = pd.Timestamp.utcnow() timestamp = timestamp.tz_convert('utc').tz_localize(None) timestr = to_bundle_ingest_dirname(timestamp) cachepath = cache_path(name, environ=environ) pth.ensure_directory(cachepath) # depending on the environment we might want to get a path to # an external postgres-db instead of one to a local sqlite-db # also, we need an asset-finder in case we have an external db # to make it possible to get ids for asset-symbols db_path_external = external_db_path(name, environ) # needs to be checkout outside of 'with' in case create_writers is false # only 'sqlite-bcolz'-backend needs to ensure local folders if not db_path_external: pth.ensure_directory(pth.data_path([name, timestr], environ=environ)) with dataframe_cache(cachepath, clean_on_failure=False) as cache, \ ExitStack() as stack: # we use `cleanup_on_failure=False` so that we don't purge the # cache directory if the load fails in the middle if bundle.create_writers: wd = stack.enter_context(working_dir( pth.data_path([], environ=environ)) ) asset_finder = None if db_path_external: assets_db_path = adjustments_db_path = daily_bar_writer = db_path_external daily_bar_writer = PSQLDailyBarWriter( db_path_external, calendar, start_session, end_session, ) daily_bar_reader = PSQLDailyBarReader(db_path_external) minute_bar_writer = None try: asset_finder = AssetFinder(db_path_external) except InvalidRequestError: asset_finder = None else: pth.ensure_directory(pth.data_path([name, timestr], environ=environ)) assets_db_path = wd.getpath(*asset_db_relative(name, timestr)) adjustments_db_path = adjustment_db_path(name, timestr) adjustments_db_path = wd.getpath(*adjustment_db_relative(name, timestr)) daily_bars_path = wd.ensure_dir( *daily_equity_relative(name, timestr) ) daily_bar_writer = BcolzDailyBarWriter( daily_bars_path, calendar, start_session, end_session, ) daily_bar_reader = BcolzDailyBarReader(daily_bars_path) minute_bar_writer = BcolzMinuteBarWriter( wd.ensure_dir(*minute_equity_relative(name, timestr)), calendar, start_session, end_session, minutes_per_day=bundle.minutes_per_day, ) # Do an empty write to ensure that the daily ctables exist # when we create the SQLiteAdjustmentWriter below. The # SQLiteAdjustmentWriter needs to open the daily ctables so # that it can compute the adjustment ratios for the dividends. 
daily_bar_writer.write(()) asset_db_writer = AssetDBWriter(assets_db_path, asset_finder) adjustment_db_writer = stack.enter_context( SQLiteAdjustmentWriter( adjustments_db_path, daily_bar_reader, overwrite=True, ) ) else: daily_bar_writer = None minute_bar_writer = None asset_db_writer = None adjustment_db_writer = None if assets_versions: raise ValueError('Need to ingest a bundle that creates ' 'writers in order to downgrade the assets' ' db.') log.info("Ingesting {}.", name) bundle.ingest( environ, asset_db_writer, minute_bar_writer, daily_bar_writer, adjustment_db_writer, calendar, start_session, end_session, cache, show_progress, pth.data_path([name, timestr], environ=environ), ) for version in sorted(set(assets_versions), reverse=True): version_path = wd.getpath(*asset_db_relative( name, timestr, db_version=version, )) with working_file(version_path) as wf: shutil.copy2(assets_db_path, wf.path) downgrade(wf.path, version) def most_recent_data(bundle_name, timestamp, environ=None): """Get the path to the most recent data after ``date``for the given bundle. Parameters ---------- bundle_name : str The name of the bundle to lookup. timestamp : datetime The timestamp to begin searching on or before. environ : dict, optional An environment dict to forward to zipline_root. """ if bundle_name not in bundles: raise UnknownBundle(bundle_name) try: candidates = os.listdir( pth.data_path([bundle_name], environ=environ), ) return pth.data_path( [bundle_name, max( filter(complement(pth.hidden), candidates), key=from_bundle_ingest_dirname, )], environ=environ, ) except (ValueError, OSError) as e: if getattr(e, 'errno', errno.ENOENT) != errno.ENOENT: raise raise ValueError( 'no data for bundle {bundle!r} on or before {timestamp}\n' 'maybe you need to run: $ zipline ingest -b {bundle}'.format( bundle=bundle_name, timestamp=timestamp, ), ) def load(name, environ=os.environ, timestamp=None): """Loads a previously ingested bundle. Parameters ---------- name : str The name of the bundle. environ : mapping, optional The environment variables. Defaults of os.environ. timestamp : datetime, optional The timestamp of the data to lookup. Defaults to the current time. Returns ------- bundle_data : BundleData The raw data readers for this bundle. 
""" if timestamp is None: timestamp = pd.Timestamp.utcnow() timestr = most_recent_data(name, timestamp, environ=environ) db_path_external = external_db_path(name, environ) if db_path_external: assets_db_path = db_path_external adjustments_db_path = db_path_external # assets_db_path = asset_db_path(name, timestr, environ=environ) # adjustments_db_path = adjustment_db_path(name, timestr, environ=environ) daily_bar_reader = PSQLDailyBarReader(db_path_external) minute_bar_reader = None else: assets_db_path = asset_db_path(name, timestr, environ=environ) adjustments_db_path = adjustment_db_path(name, timestr, environ=environ) daily_bar_reader = BcolzDailyBarReader(daily_equity_path(name, timestr, environ=environ)) minute_bar_reader = BcolzMinuteBarReader(minute_equity_path(name, timestr, environ=environ)) return BundleData( asset_finder=AssetFinder( assets_db_path ), equity_minute_bar_reader=minute_bar_reader, equity_daily_bar_reader=daily_bar_reader, adjustment_reader=SQLiteAdjustmentReader( adjustments_db_path ), ) @preprocess( before=optionally(ensure_timestamp), after=optionally(ensure_timestamp), ) def clean(name, before=None, after=None, keep_last=None, environ=os.environ): """Clean up data that was created with ``ingest`` or ``$ python -m zipline ingest`` Parameters ---------- name : str The name of the bundle to remove data for. before : datetime, optional Remove data ingested before this date. This argument is mutually exclusive with: keep_last after : datetime, optional Remove data ingested after this date. This argument is mutually exclusive with: keep_last keep_last : int, optional Remove all but the last ``keep_last`` ingestions. This argument is mutually exclusive with: before after environ : mapping, optional The environment variables. Defaults of os.environ. Returns ------- cleaned : set[str] The names of the runs that were removed. Raises ------ BadClean Raised when ``before`` and or ``after`` are passed with ``keep_last``. This is a subclass of ``ValueError``. """ try: all_runs = sorted( filter( complement(pth.hidden), os.listdir(pth.data_path([name], environ=environ)), ), key=from_bundle_ingest_dirname, ) except OSError as e: if e.errno != errno.ENOENT: raise raise UnknownBundle(name) if before is after is keep_last is None: raise BadClean(before, after, keep_last) if ((before is not None or after is not None) and keep_last is not None): raise BadClean(before, after, keep_last) if keep_last is None: def should_clean(name): dt = from_bundle_ingest_dirname(name) return ( (before is not None and dt < before) or (after is not None and dt > after) ) elif keep_last >= 0: last_n_dts = set(take(keep_last, reversed(all_runs))) def should_clean(name): return name not in last_n_dts else: raise BadClean(before, after, keep_last) cleaned = set() for run in all_runs: if should_clean(run): log.info("Cleaning {}.", run) path = pth.data_path([name, run], environ=environ) shutil.rmtree(path) cleaned.add(path) return cleaned return BundleCore(bundles, register, unregister, ingest, load, clean) bundles, register, unregister, ingest, load, clean = _make_bundle_core()
zipline-trader
/zipline_trader-1.6.1-cp36-cp36m-win32.whl/zipline/data/bundles/core.py
core.py
import numpy as np import pandas as pd from alpha_vantage.timeseries import TimeSeries from datetime import date, timedelta from trading_calendars import TradingCalendar from ratelimit import limits, sleep_and_retry import zipline.config from zipline.data.bundles import core as bundles from zipline.data.bundles.common import asset_to_sid_map from zipline.data.bundles.universe import Universe, get_sp500, get_sp100, get_nasdaq100, all_alpaca_assets from zipline.data import bundles as bundles_module import trading_calendars import os import time av_config = zipline.config.bundle.AlphaVantage() AV_FREQ_SEC = av_config.sample_frequency AV_CALLS_PER_FREQ = av_config.max_calls_per_freq AV_TOLERANCE_SEC = av_config.breathing_space os.environ["ALPHAVANTAGE_API_KEY"] = av_config.api_key # make sure it's set in env variable UNIVERSE = Universe.NASDAQ100 ASSETS = None def list_assets(): global ASSETS if not ASSETS: custom_asset_list = av_config.av.get("custom_asset_list") if custom_asset_list: custom_asset_list = custom_asset_list.strip().replace(" ", "").split(",") ASSETS = list(set(custom_asset_list)) else: try: universe = Universe[av_config.av["universe"]] except: universe = Universe.ALL if universe == Universe.ALL: # alpha vantage doesn't define a universe. we could try using alpaca's universe if the # user defined credentials. if not, we will raise an exception. try: import zipline.data.bundles.alpaca_api as alpaca alpaca.initialize_client() ASSETS = all_alpaca_assets(alpaca.CLIENT) except: raise Exception("You tried to use Universe.ALL but you didn't define the alpaca credentials.") elif universe == Universe.SP100: ASSETS = get_sp100() elif universe == Universe.SP500: ASSETS = get_sp500() elif universe == Universe.NASDAQ100: ASSETS = get_nasdaq100() ASSETS = list(set(ASSETS)) return ASSETS def fill_daily_gaps(df): """ filling missing data. logic: 1. get start date and end date from df. (caveat: if the missing dates are at the edges this will not work) 2. use trading calendars to get all session dates between start and end 3. use difference() to get only missing dates. 4. add those dates to the original df with NaN 5. dividends get 0 and split gets 1 (meaning no split happened) 6. all the rest get ffill of the close value. 7. volume get 0 :param df: :return: """ cal: TradingCalendar = trading_calendars.get_calendar('NYSE') sessions = cal.sessions_in_range(df.index[0], df.index[-1]) if len(df.index) == len(sessions): return df to_fill = sessions.difference(df.index) df = df.append(pd.DataFrame(index=to_fill)).sort_index() # forward-fill these values regularly df.close.fillna(method='ffill', inplace=True) df.dividend.fillna(0, inplace=True) df.split.fillna(1, inplace=True) df.volume.fillna(0, inplace=True) df.open.fillna(df.close, inplace=True) df.high.fillna(df.close, inplace=True) df.low.fillna(df.close, inplace=True) df.adj_close.fillna(df.close, inplace=True) filled = len(to_fill) print(f'\nWarning! Filled {filled} empty values!') return df # purpose of this function is to encapsulate both minute- and daily-requests in one # function to be able to properly do rate-limiting. 
@sleep_and_retry @limits(calls=AV_CALLS_PER_FREQ, period=AV_FREQ_SEC + AV_TOLERANCE_SEC) def av_api_wrapper(symbol, interval, _slice=None): if interval == '1m': ts = TimeSeries(output_format='csv') data_slice, meta_data = ts.get_intraday_extended(symbol, interval='1min', slice=_slice, adjusted='false') return data_slice else: ts = TimeSeries() data, meta_data = ts.get_daily_adjusted(symbol, outputsize='full') return data def av_get_data_for_symbol(symbol, start, end, interval): if interval == '1m': data = [] for i in range(1, 3): for j in range(1, 13): _slice = 'year' + str(i) + 'month' + str(j) # print('requesting slice ' + _slice + ' for ' + symbol) data_slice = av_api_wrapper(symbol, interval=interval, slice=_slice) # dont know better way to convert _csv.reader to list or DataFrame table = [] for line in data_slice: table.append(line) # strip header-row from csv table = table[1:] data = data + table df = pd.DataFrame(data, columns=['date', 'open', 'high', 'low', 'close', 'volume']) df.index = pd.to_datetime(df['date']) df.index = df.index.tz_localize('UTC') df.drop(columns=['date'], inplace=True) else: data = av_api_wrapper(symbol, interval) df = pd.DataFrame.from_dict(data, orient='index') df.index = pd.to_datetime(df.index).tz_localize('UTC') df.rename(columns={ '1. open': 'open', '2. high': 'high', '3. low': 'low', '4. close': 'close', '5. volume': 'volume', '5. adjusted close': 'adj_close', '6. volume': 'volume', '7. dividend amount': 'dividend', '8. split coefficient': 'split' }, inplace=True) # fill potential gaps in data df = fill_daily_gaps(df) df.sort_index(inplace=True) # data comes as strings df['open'] = pd.to_numeric(df['open'], downcast='float') df['high'] = pd.to_numeric(df['high'], downcast='float') df['low'] = pd.to_numeric(df['low'], downcast='float') df['close'] = pd.to_numeric(df['close'], downcast='float') df['volume'] = pd.to_numeric(df['volume'], downcast='unsigned') if 'adj_close' in df.columns: df['adj_close'] = pd.to_numeric(df['adj_close'], downcast='float') if 'dividend' in df.columns: df['dividend'] = pd.to_numeric(df['dividend'], downcast='float') if 'split' in df.columns: df['split'] = pd.to_numeric(df['split'], downcast='float') return df # collect all days where there were splits and calculate split-ratio # by 1 / split-factor. save them together with effective-date. def calc_split(sid, df): tmp = 1. 
/ df[df['split'] != 1.0]['split'] split = pd.DataFrame(data=tmp.index.tolist(), columns=['effective_date']) split['ratio'] = tmp.tolist() split['sid'] = np.int(sid) # split['effective_date'] = pd.to_datetime(split['effective_date'], utc=True) split['effective_date'] = split['effective_date'].apply(lambda x: x.timestamp()) return split # collect all dividends and the dates when they were issued, # fill stuff we don't know with empty-values def calc_dividend(sid, df, sessions): tmp = df[df['dividend'] != 0.0]['dividend'] div = pd.DataFrame(data=tmp.index.tolist(), columns=['ex_date']) # as we do not know these values, set something as done in csvdir # there it writes nats but in case of writing to postgres, # pd.NaT will exceed BigInt for some reason natValue = pd.to_datetime('1800-1-1') div['record_date'] = natValue div['declared_date'] = natValue # "guess" a dividend-pay-date 10 trading-days in the future div['pay_date'] = [sessions[sessions.get_loc(ex_date) + 10] for ex_date in div['ex_date']] div['amount'] = tmp.tolist() div['sid'] = np.int(sid) # convert to string and then back to datetime, otherwise pd.concat will fail div['ex_date'] = div['ex_date'].apply(lambda x: x.strftime('%Y-%m-%d 00:00:00')) div['pay_date'] = div['pay_date'].apply(lambda x: x.strftime('%Y-%m-%d 00:00:00')) return div def df_generator(interval, start, end, divs_splits, assets_to_sids={}): exchange = 'NYSE' # get calendar and extend it to 20 days to the future to be able # to set dividend-pay-date to a valid session cal: TradingCalendar = trading_calendars.get_calendar('NYSE') sessions = cal.sessions_in_range(start, end + timedelta(days=20)) asset_list = list_assets() for symbol in asset_list: try: df = av_get_data_for_symbol(symbol, start, end, interval) sid = assets_to_sids[symbol] first_traded = df.index[0] auto_close_date = df.index[-1] + pd.Timedelta(days=1) if 'split' in df.columns: split = calc_split(sid, df) divs_splits['splits'] = divs_splits['splits'].append(split) if 'dividend' in df.columns: div = calc_dividend(sid, df, sessions) divs_splits['divs'] = pd.concat([divs_splits['divs'], div]) yield (sid, df), symbol, symbol, start, end, first_traded, auto_close_date, exchange except KeyboardInterrupt: exit() except Exception as e: # somehow rate-limiting does not work with exceptions, throttle manually if 'Thank you for using Alpha Vantage! 
Our standard API call frequency is' in str(e): print(f'\nGot rate-limit on remote-side, retrying symbol {symbol} later') asset_list.append(symbol) else: print(f'\nException for symbol {symbol}') print(e) def metadata_df(assets_to_sids={}): metadata = [] sids = [sid for _, sid in assets_to_sids.items()] metadata_dtype = [ ('symbol', 'object'), ('asset_name', 'object'), ('start_date', 'datetime64[ns]'), ('end_date', 'datetime64[ns]'), ('first_traded', 'datetime64[ns]'), ('auto_close_date', 'datetime64[ns]'), ('exchange', 'object'), ] metadata_df = pd.DataFrame( np.empty(len(list_assets()), dtype=metadata_dtype)) metadata_df.index = sids return metadata_df @bundles.register('alpha_vantage', calendar_name="NYSE", minutes_per_day=390) def api_to_bundle(interval=['1m']): def ingest(environ, asset_db_writer, minute_bar_writer, daily_bar_writer, adjustment_writer, calendar, start_session, end_session, cache, show_progress, output_dir ): divs_splits = {'divs': pd.DataFrame(columns=['sid', 'amount', 'ex_date', 'record_date', 'declared_date', 'pay_date']), 'splits': pd.DataFrame(columns=['sid', 'ratio', 'effective_date'])} assets_to_sids = asset_to_sid_map(asset_db_writer.asset_finder, list_assets()) def minute_data_generator(): return (sid_df for (sid_df, *metadata.iloc[sid_df[0]]) in df_generator( interval='1m', start=start_session, end=end_session, assets_to_sids=assets_to_sids, divs_splits=divs_splits)) def daily_data_generator(): return (sid_df for (sid_df, *metadata.loc[sid_df[0]]) in df_generator( interval='1d', start=start_session, end=end_session, assets_to_sids=assets_to_sids, divs_splits=divs_splits)) metadata = metadata_df(assets_to_sids) assets = list_assets() for _interval in interval: if _interval == '1d': daily_bar_writer.write(daily_data_generator(), assets=assets_to_sids.values(), show_progress=True, invalid_data_behavior='raise') elif _interval == '1m': minute_bar_writer.write(minute_data_generator(), show_progress=True) metadata.dropna(inplace=True) asset_db_writer.write(equities=metadata) # convert back wrong datatypes after pd.concat divs_splits['splits']['sid'] = divs_splits['splits']['sid'].astype(np.int) divs_splits['divs']['sid'] = divs_splits['divs']['sid'].astype(np.int) divs_splits['divs']['ex_date'] = pd.to_datetime(divs_splits['divs']['ex_date'], utc=True) divs_splits['divs']['pay_date'] = pd.to_datetime(divs_splits['divs']['pay_date'], utc=True) adjustment_writer.write(splits=divs_splits['splits'], dividends=divs_splits['divs']) # Drop the ticker rows which have missing sessions in their data sets print(metadata) return ingest if __name__ == '__main__': from zipline.data.bundles import register cal: TradingCalendar = trading_calendars.get_calendar('NYSE') # alpha-vantage has a fixed time-window, no point in changing these start_date = pd.Timestamp('1999-11-1', tz='utc') end_date = pd.Timestamp(date.today() - timedelta(days=1), tz='utc') while not cal.is_session(end_date): end_date -= timedelta(days=1) print('ingesting alpha_vantage-data from: ' + str(start_date) + ' to: ' + str(end_date)) start_time = time.time() register( 'alpha_vantage', # api_to_bundle(interval=['1d', '1m']), # api_to_bundle(interval=['1m']), api_to_bundle(interval=['1d']), calendar_name='NYSE', start_session=start_date, end_session=end_date ) assets_version = ((),)[0] # just a weird way to create an empty tuple bundles_module.ingest( "alpha_vantage", os.environ, assets_versions=assets_version, show_progress=True, ) print("--- %s seconds ---" % (time.time() - start_time))
zipline-trader
/zipline_trader-1.6.1-cp36-cp36m-win32.whl/zipline/data/bundles/alpha_vantage_api.py
alpha_vantage_api.py
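A sketch of registering and ingesting the alpha_vantage bundle for daily bars, mirroring the module's own ``__main__`` block above; the end date is illustrative and, as in that block, should fall on an NYSE session.

# Hedged sketch mirroring alpha_vantage_api.py's __main__ block; dates are illustrative.
import os
import pandas as pd
from zipline.data import bundles as bundles_module
from zipline.data.bundles import register
from zipline.data.bundles.alpha_vantage_api import api_to_bundle

register(
    'alpha_vantage',
    api_to_bundle(interval=['1d']),
    calendar_name='NYSE',
    start_session=pd.Timestamp('1999-11-1', tz='utc'),
    end_session=pd.Timestamp('2020-12-31', tz='utc'),  # pick a valid NYSE session
)
bundles_module.ingest('alpha_vantage', os.environ, show_progress=True)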
import alpaca_trade_api as tradeapi from datetime import timedelta import numpy as np from os.path import isfile, join from pathlib import Path import pandas as pd import pickle from alpaca_trade_api.common import URL from dateutil import tz from trading_calendars import TradingCalendar import yaml from zipline.data.bundles import core as bundles from dateutil.parser import parse as date_parse user_home = str(Path.home()) custom_data_path = join(user_home, '.zipline/custom_data') CLIENT: tradeapi.REST = None NY = "America/New_York" def initialize_client(): global CLIENT with open("polygon.yaml", mode='r') as f: o = yaml.safe_load(f) key = o["key_id"] secret = o["secret"] base_url = o["base_url"] CLIENT = tradeapi.REST(key_id=key, secret_key=secret, base_url=URL(base_url)) ASSETS = None def list_assets(): global ASSETS if not ASSETS: ASSETS = [_.symbol for _ in CLIENT.list_assets()] # ASSETS = [_.ticker for _ in CLIENT.polygon.all_tickers()] return ASSETS[:20] def tickers_generator(): """ Return a tuple (sid, ticker_pair) """ tickers_file = join(custom_data_path, 'alpaca_ticker_pairs.pickle') if not isfile(tickers_file): ticker_pairs = list_assets() else: with open(tickers_file, 'rb') as f: ticker_pairs = pickle.load(f)[:] return (tuple((sid, ticker)) for sid, ticker in enumerate(ticker_pairs)) def iso_date(date_str): """ this method will make sure that dates are formatted properly as with isoformat :param date_str: :return: YYYY-MM-DD date formatted """ return date_parse(date_str).date().isoformat() def get_aggs_from_polygon(dataname, dtbegin, dtend, granularity, compression): """ so polygon has a much more convenient api for this than alpaca because we could insert the compression in to the api call and we don't need to resample it. but, at this point in time, something is not working properly and data is returned in segments. meaning, we have patches of missing data. e.g we request data from 2020-03-01 to 2020-07-01 and we get something like this: 2020-03-01:2020-03-15, 2020-06-25:2020-07-01 so that makes life difficult.. there's no way to know which patch will be returned and which one we should try to get again. so the solution must be, ask data in segments. 
I select an arbitrary time window of 2 weeks, and split the calls until we get all required data """ def _clear_out_of_market_hours(df): """ only interested in samples between 9:30, 16:00 NY time """ return df.between_time("09:30", "16:00") def _fillna(df, granularity, start, end): if granularity != 'day': return df if df.empty: return df calendar: TradingCalendar = trading_calendars.get_calendar("NYSE") last_val = df.iloc[0] current = start while current <= end: if calendar.is_session(current): if current.replace(tzinfo=tz.gettz(NY)) in df.index: last_val = df.loc[current.replace(tzinfo=tz.gettz(NY))] else: # df.loc[pytz.timezone(NY).localize(current)] = last_val df.loc[current.replace(tzinfo=tz.gettz(NY))] = last_val current += timedelta(days=1) return df if granularity == 'day': cdl = CLIENT.polygon.historic_agg_v2( dataname, compression, granularity, _from=iso_date(dtbegin.isoformat()), to=iso_date(dtend.isoformat())).df cdl = _fillna(cdl, granularity, dtbegin, dtend) else: cdl = pd.DataFrame() segment_start = dtbegin segment_end = segment_start + timedelta(weeks=2) if \ dtend - dtbegin >= timedelta(weeks=2) else dtend while segment_end <= dtend and dtend not in cdl.index: response = CLIENT.polygon.historic_agg_v2( dataname, compression, granularity, _from=iso_date(segment_start.isoformat()), to=iso_date(segment_end.isoformat())) # No result from the server, most likely error if response.df.shape[0] == 0 and cdl.shape[0] == 0: raise Exception("received empty response") temp = response.df cdl = pd.concat([cdl, temp]) cdl = cdl[~cdl.index.duplicated()] segment_start = segment_end segment_end = segment_start + timedelta(weeks=2) if \ dtend - dtbegin >= timedelta(weeks=2) else dtend cdl = _clear_out_of_market_hours(cdl) return cdl def df_generator(interval, start, end): exchange = 'NYSE' for sid, symbol in enumerate(list_assets()): try: df = get_aggs_from_polygon(symbol, start, end, 'day' if interval == '1d' else 'minute', 1) if df.empty: continue start_date = df.index[0] end_date = df.index[-1] first_traded = start auto_close_date = end + pd.Timedelta(days=1) # # Check if there is any missing session; skip the ticker pair otherwise # if interval == '1d' and len(df.index) - 1 != pd.Timedelta(end_date - start_date).days: # # print('Missing sessions found in {}. Skip importing'.format(ticker_pair)) # continue # elif interval == '1m' and timedelta(minutes=(len(df.index) + 60)) != end_date - start_date: # # print('Missing sessions found in {}. 
Skip importing'.format(ticker_pair)) # continue yield (sid, df.sort_index()), symbol, start, end, first_traded, auto_close_date, exchange except Exception as e: import traceback traceback.print_exc() print(f"error while processig {(sid, symbol)}: {e}") def metadata_df(): metadata_dtype = [ ('symbol', 'object'), # ('asset_name', 'object'), ('start_date', 'datetime64[ns]'), ('end_date', 'datetime64[ns]'), ('first_traded', 'datetime64[ns]'), ('auto_close_date', 'datetime64[ns]'), ('exchange', 'object'), ] metadata_df = pd.DataFrame( np.empty(len(list_assets()), dtype=metadata_dtype)) return metadata_df @bundles.register('polygon_api', calendar_name="NYSE", minutes_per_day=390) def api_to_bundle(interval=['1m']): def ingest(environ, asset_db_writer, minute_bar_writer, daily_bar_writer, adjustment_writer, calendar, start_session, end_session, cache, show_progress, output_dir ): def minute_data_generator(): return (sid_df for (sid_df, *metadata.iloc[sid_df[0]]) in df_generator(interval='1m', start=start_session, end=end_session)) def daily_data_generator(): return (sid_df for (sid_df, *metadata.iloc[sid_df[0]]) in df_generator(interval='1d', start=start_session, end=end_session)) for _interval in interval: metadata = metadata_df() if _interval == '1d': daily_bar_writer.write(daily_data_generator(), show_progress=True) elif _interval == '1m': minute_bar_writer.write( minute_data_generator(), show_progress=True) # Drop the ticker rows which have missing sessions in their data sets metadata.dropna(inplace=True) asset_db_writer.write(equities=metadata) print(metadata) adjustment_writer.write() return ingest if __name__ == '__main__': from zipline.data.bundles import register from zipline.data import bundles as bundles_module import trading_calendars import os cal: TradingCalendar = trading_calendars.get_calendar('NYSE') start_date = pd.Timestamp('2019-08-03 0:00', tz='utc') while not cal.is_session(start_date): start_date += timedelta(days=1) end_date = pd.Timestamp('now', tz='utc').date() - timedelta(days=1) while not cal.is_session(end_date): end_date -= timedelta(days=1) end_date = pd.Timestamp(end_date, tz='utc') initialize_client() register( 'polygon_api', # api_to_bundle(interval=['1d', '1m']), # api_to_bundle(interval=['1m']), api_to_bundle(interval=['1d']), calendar_name='NYSE', start_session=start_date, end_session=end_date ) assets_version = ((),)[0] # just a weird way to create an empty tuple bundles_module.ingest( "polygon_api", os.environ, assets_versions=assets_version, show_progress=True, )
zipline-trader
/zipline_trader-1.6.1-cp36-cp36m-win32.whl/zipline/data/bundles/polygon_api.py
polygon_api.py
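A sketch of registering and ingesting the polygon_api bundle, mirroring the module's ``__main__`` block above. ``initialize_client()`` reads a ``polygon.yaml`` file (with ``key_id``, ``secret``, and ``base_url`` entries) from the working directory; the dates below are illustrative.

# Hedged sketch mirroring polygon_api.py's __main__ block; dates are illustrative.
import os
import pandas as pd
from zipline.data import bundles as bundles_module
from zipline.data.bundles import register
from zipline.data.bundles.polygon_api import api_to_bundle, initialize_client

initialize_client()                       # requires polygon.yaml in the cwd
register(
    'polygon_api',
    api_to_bundle(interval=['1d']),
    calendar_name='NYSE',
    start_session=pd.Timestamp('2019-08-05', tz='utc'),  # a valid NYSE session
    end_session=pd.Timestamp('2020-12-31', tz='utc'),
)
bundles_module.ingest('polygon_api', os.environ, show_progress=True)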
import bs4 as bs from binance.client import Client import csv from datetime import datetime as dt from datetime import timedelta import numpy as np from os import listdir, mkdir, remove from os.path import exists, isfile, join from pathlib import Path import pandas as pd import pickle import requests from trading_calendars import register_calendar # from trading_calendars.exchange_calendar_binance import BinanceExchangeCalendar import yaml from zipline.data.bundles import core as bundles user_home = str(Path.home()) custom_data_path = join(user_home, '.zipline/custom_data') CLIENT: Client = None def initialize_client(): global CLIENT with open("binance.yaml", mode='r') as f: o = yaml.safe_load(f) key = o["key_id"] secret = o["secret"] CLIENT = Client(key, secret) def get_binance_pairs(**kwargs): base_currencies = kwargs.get('base_currencies', '') quote_currencies = kwargs.get('quote_currencies', '') binance_pairs = list() all_tickers = CLIENT.get_all_tickers() # if not self.futures: # all_tickers = CLIENT.get_all_tickers() # else: # all_tickers = CLIENT.futures_ticker() if base_currencies and quote_currencies: input_pairs = [x + y for x in quote_currencies for y in base_currencies] for x, currency_pair in enumerate(all_tickers): if base_currencies and quote_currencies: for pair in input_pairs: if currency_pair['symbol'] == pair.upper(): binance_pairs.append(currency_pair['symbol']) break elif base_currencies: for base_currency in base_currencies: if currency_pair['symbol'][-len(base_currency):] == base_currency.upper(): binance_pairs.append(currency_pair['symbol']) break elif quote_currencies: for quote_currency in quote_currencies: if currency_pair['symbol'][:len(quote_currency)] == quote_currency.upper(): binance_pairs.append(currency_pair['symbol']) break else: binance_pairs.append(currency_pair['symbol']) if binance_pairs: return binance_pairs else: raise ValueError('Invalid Input: Binance returned no matching currency pairs.') def tickers(): """ Save Binance trading pair tickers to a pickle file Return a list of trading ticker pairs """ cmc_binance_url = 'https://coinmarketcap.com/exchanges/binance/' response = requests.get(cmc_binance_url) if response.ok: soup = bs.BeautifulSoup(response.text, 'html.parser') table = soup.find('table', {'id': 'exchange-markets'}) ticker_pairs = [] for row in table.findAll('tr')[1:]: ticker_pair = row.findAll('td')[2].text ticker_pairs.append(ticker_pair.strip().replace('/', '')) if not exists(custom_data_path): mkdir(custom_data_path) with open(join(custom_data_path, 'binance_ticker_pairs.pickle'), 'wb') as f: pickle.dump(ticker_pairs, f) return ticker_pairs def tickers_generator(): """ Return a tuple (sid, ticker_pair) """ tickers_file = join(custom_data_path, 'binance_ticker_pairs.pickle') if not isfile(tickers_file): ticker_pairs = get_binance_pairs() else: with open(tickers_file, 'rb') as f: ticker_pairs = pickle.load(f)[:] return (tuple((sid, ticker)) for sid, ticker in enumerate(ticker_pairs)) def df_generator(interval): start = '2017-7-14' # Binance launch date end = dt.utcnow().strftime('%Y-%m-%d') # Current day for item in tickers_generator(): try: sid = item[0] ticker_pair = item[1] df = pd.DataFrame( columns=['date', 'open', 'high', 'low', 'close', 'volume']) symbol = ticker_pair print(symbol, interval) asset_name = ticker_pair exchange = 'Binance' klines = CLIENT.get_historical_klines_generator( ticker_pair, interval, start, end) for kline in klines: line = kline[:] del line[6:] # Make a real copy of kline # Binance API forbids the change 
of open time line[0] = np.datetime64(line[0], 'ms') line[0] = pd.Timestamp(line[0], 'ms') df.loc[len(df)] = line df['date'] = pd.to_datetime(df['date']) df.set_index('date', inplace=True) df = df.astype({'open': 'float64', 'high': 'float64', 'low': 'float64', 'close': 'float64', 'volume': 'float64'}) start_date = df.index[0] end_date = df.index[-1] first_traded = start_date auto_close_date = end_date + pd.Timedelta(days=1) # Check if there is any missing session; skip the ticker pair otherwise if interval == '1d' and len(df.index) - 1 != pd.Timedelta(end_date - start_date).days: # print('Missing sessions found in {}. Skip importing'.format(ticker_pair)) continue elif interval == '1m' and timedelta(minutes=(len(df.index) + 60)) != end_date - start_date: # print('Missing sessions found in {}. Skip importing'.format(ticker_pair)) continue yield (sid, df), symbol, asset_name, start_date, end_date, first_traded, auto_close_date, exchange except Exception as e: print(f"error while processig {ticker_pair}: {e}") def metadata_df(): metadata_dtype = [ ('symbol', 'object'), ('asset_name', 'object'), ('start_date', 'datetime64[ns]'), ('end_date', 'datetime64[ns]'), ('first_traded', 'datetime64[ns]'), ('auto_close_date', 'datetime64[ns]'), ('exchange', 'object'), ] metadata_df = pd.DataFrame( np.empty(len(get_binance_pairs()), dtype=metadata_dtype)) return metadata_df @bundles.register('binance_api', calendar_name="24/7", minutes_per_day=1440) def api_to_bundle(interval=['1m']): def ingest(environ, asset_db_writer, minute_bar_writer, daily_bar_writer, adjustment_writer, calendar, start_session, end_session, cache, show_progress, output_dir ): def minute_data_generator(): return (sid_df for (sid_df, *metadata.iloc[sid_df[0]]) in df_generator(interval='1m')) def daily_data_generator(): return (sid_df for (sid_df, *metadata.iloc[sid_df[0]]) in df_generator(interval='1d')) for _interval in interval: metadata = metadata_df() if _interval == '1d': daily_bar_writer.write( daily_data_generator(), show_progress=True) elif _interval == '1m': minute_bar_writer.write( minute_data_generator(), show_progress=True) # Drop the ticker rows which have missing sessions in their data sets metadata.dropna(inplace=True) asset_db_writer.write(equities=metadata) print(metadata) adjustment_writer.write() return ingest if __name__ == '__main__': from zipline.data.bundles import register from zipline.data import bundles as bundles_module import os initialize_client() register( 'binance_api', # api_to_bundle(interval=['1d', '1m']), api_to_bundle(interval=['1m']), # api_to_bundle(interval=['1d']), calendar_name='24/7', ) assets_version = ((),)[0] # just a weird way to create an empty tuple bundles_module.ingest( "binance_api", os.environ, pd.Timestamp.utcnow(), assets_version, True, )
zipline-trader
/zipline_trader-1.6.1-cp36-cp36m-win32.whl/zipline/data/bundles/binance_api.py
binance_api.py
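A sketch of registering and ingesting the binance_api bundle, mirroring the module's ``__main__`` block above. ``initialize_client()`` reads a ``binance.yaml`` file (with ``key_id`` and ``secret`` entries) from the working directory; the 24/7 crypto calendar needs no explicit start or end session here.

# Hedged sketch mirroring binance_api.py's __main__ block; the daily interval
# is chosen for brevity (the module also supports '1m').
import os
from zipline.data import bundles as bundles_module
from zipline.data.bundles import register
from zipline.data.bundles.binance_api import api_to_bundle, initialize_client

initialize_client()                       # requires binance.yaml in the cwd
register(
    'binance_api',
    api_to_bundle(interval=['1d']),
    calendar_name='24/7',
)
bundles_module.ingest('binance_api', os.environ, show_progress=True)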
from io import BytesIO import tarfile from zipfile import ZipFile from click import progressbar from logbook import Logger import pandas as pd import requests from six.moves.urllib.parse import urlencode from six import iteritems from trading_calendars import register_calendar_alias from . import core as bundles import numpy as np log = Logger(__name__) ONE_MEGABYTE = 1024 * 1024 QUANDL_DATA_URL = ( 'https://www.quandl.com/api/v3/datatables/WIKI/PRICES.csv?' ) def format_metadata_url(api_key): """ Build the query URL for Quandl WIKI Prices metadata. """ query_params = [('api_key', api_key), ('qopts.export', 'true')] return ( QUANDL_DATA_URL + urlencode(query_params) ) def load_data_table(file, index_col, show_progress=False): """ Load data table from zip file provided by Quandl. """ with ZipFile(file) as zip_file: file_names = zip_file.namelist() assert len(file_names) == 1, "Expected a single file from Quandl." wiki_prices = file_names.pop() with zip_file.open(wiki_prices) as table_file: if show_progress: log.info('Parsing raw data.') data_table = pd.read_csv( table_file, parse_dates=['date'], index_col=index_col, usecols=[ 'ticker', 'date', 'open', 'high', 'low', 'close', 'volume', 'ex-dividend', 'split_ratio', ], ) data_table.rename( columns={ 'ticker': 'symbol', 'ex-dividend': 'ex_dividend', }, inplace=True, copy=False, ) return data_table def fetch_data_table(api_key, show_progress, retries): """ Fetch WIKI Prices data table from Quandl """ for _ in range(retries): try: if show_progress: log.info('Downloading WIKI metadata.') metadata = pd.read_csv( format_metadata_url(api_key) ) # Extract link from metadata and download zip file. table_url = metadata.loc[0, 'file.link'] if show_progress: raw_file = download_with_progress( table_url, chunk_size=ONE_MEGABYTE, label="Downloading WIKI Prices table from Quandl" ) else: raw_file = download_without_progress(table_url) return load_data_table( file=raw_file, index_col=None, show_progress=show_progress, ) except Exception: log.exception("Exception raised reading Quandl data. Retrying.") else: raise ValueError( "Failed to download Quandl data after %d attempts." 
% (retries) ) def gen_asset_metadata(data, show_progress): if show_progress: log.info('Generating asset metadata.') data = data.groupby( by='symbol' ).agg( {'date': [np.min, np.max]} ) data.reset_index(inplace=True) data['start_date'] = data.date.amin data['end_date'] = data.date.amax del data['date'] data.columns = data.columns.get_level_values(0) data['exchange'] = 'QUANDL' data['auto_close_date'] = data['end_date'].values + pd.Timedelta(days=1) return data def parse_splits(data, show_progress): if show_progress: log.info('Parsing split data.') data['split_ratio'] = 1.0 / data.split_ratio data.rename( columns={ 'split_ratio': 'ratio', 'date': 'effective_date', }, inplace=True, copy=False, ) return data def parse_dividends(data, show_progress): if show_progress: log.info('Parsing dividend data.') data['record_date'] = data['declared_date'] = data['pay_date'] = pd.NaT data.rename( columns={ 'ex_dividend': 'amount', 'date': 'ex_date', }, inplace=True, copy=False, ) return data def parse_pricing_and_vol(data, sessions, symbol_map): for asset_id, symbol in iteritems(symbol_map): asset_data = data.xs( symbol, level=1 ).reindex( sessions.tz_localize(None) ).fillna(0.0) yield asset_id, asset_data @bundles.register('quandl') def quandl_bundle(environ, asset_db_writer, minute_bar_writer, daily_bar_writer, adjustment_writer, calendar, start_session, end_session, cache, show_progress, output_dir): """ quandl_bundle builds a daily dataset using Quandl's WIKI Prices dataset. For more information on Quandl's API and how to obtain an API key, please visit https://docs.quandl.com/docs#section-authentication """ api_key = environ.get('QUANDL_API_KEY') if api_key is None: raise ValueError( "Please set your QUANDL_API_KEY environment variable and retry." ) raw_data = fetch_data_table( api_key, show_progress, environ.get('QUANDL_DOWNLOAD_ATTEMPTS', 5) ) asset_metadata = gen_asset_metadata( raw_data[['symbol', 'date']], show_progress ) asset_db_writer.write(asset_metadata) symbol_map = asset_metadata.symbol sessions = calendar.sessions_in_range(start_session, end_session) raw_data.set_index(['date', 'symbol'], inplace=True) daily_bar_writer.write( parse_pricing_and_vol( raw_data, sessions, symbol_map ), show_progress=show_progress ) raw_data.reset_index(inplace=True) raw_data['symbol'] = raw_data['symbol'].astype('category') raw_data['sid'] = raw_data.symbol.cat.codes adjustment_writer.write( splits=parse_splits( raw_data[[ 'sid', 'date', 'split_ratio', ]].loc[raw_data.split_ratio != 1], show_progress=show_progress ), dividends=parse_dividends( raw_data[[ 'sid', 'date', 'ex_dividend', ]].loc[raw_data.ex_dividend != 0], show_progress=show_progress ) ) def download_with_progress(url, chunk_size, **progress_kwargs): """ Download streaming data from a URL, printing progress information to the terminal. Parameters ---------- url : str A URL that can be understood by ``requests.get``. chunk_size : int Number of bytes to read at a time from requests. **progress_kwargs Forwarded to click.progressbar. Returns ------- data : BytesIO A BytesIO containing the downloaded data. """ resp = requests.get(url, stream=True) resp.raise_for_status() total_size = int(resp.headers['content-length']) data = BytesIO() with progressbar(length=total_size, **progress_kwargs) as pbar: for chunk in resp.iter_content(chunk_size=chunk_size): data.write(chunk) pbar.update(len(chunk)) data.seek(0) return data def download_without_progress(url): """ Download data from a URL, returning a BytesIO containing the loaded data. 
Parameters ---------- url : str A URL that can be understood by ``requests.get``. Returns ------- data : BytesIO A BytesIO containing the downloaded data. """ resp = requests.get(url) resp.raise_for_status() return BytesIO(resp.content) QUANTOPIAN_QUANDL_URL = ( 'https://s3.amazonaws.com/quantopian-public-zipline-data/quandl' ) @bundles.register('quantopian-quandl', create_writers=False) def quantopian_quandl_bundle(environ, asset_db_writer, minute_bar_writer, daily_bar_writer, adjustment_writer, calendar, start_session, end_session, cache, show_progress, output_dir): if show_progress: data = download_with_progress( QUANTOPIAN_QUANDL_URL, chunk_size=ONE_MEGABYTE, label="Downloading Bundle: quantopian-quandl", ) else: data = download_without_progress(QUANTOPIAN_QUANDL_URL) with tarfile.open('r', fileobj=data) as tar: if show_progress: log.info("Writing data to %s." % output_dir) tar.extractall(output_dir) register_calendar_alias("QUANDL", "NYSE")
zipline-trader
/zipline_trader-1.6.1-cp36-cp36m-win32.whl/zipline/data/bundles/quandl.py
quandl.py
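A sketch of ingesting the built-in 'quandl' bundle registered above. The WIKI Prices download requires an API key in ``QUANDL_API_KEY``; the key value below is a placeholder.

# Hedged sketch: ingest the 'quandl' bundle defined in quandl.py above.
import os
from zipline.data import bundles as bundles_module

os.environ['QUANDL_API_KEY'] = 'your-quandl-api-key'   # placeholder value
bundles_module.ingest('quandl', os.environ, show_progress=True)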
from io import BytesIO from zipfile import ZipFile from click import progressbar from logbook import Logger import pandas as pd import requests from six.moves.urllib.parse import urlencode from six import iteritems from trading_calendars import register_calendar_alias from zipline.data.bundles import core as bundles # looking in .zipline/extensions.py import numpy as np # Code from: # Quantopian Zipline Issues: # "Cannot find data bundle during ingest #2275" # https://github.com/quantopian/zipline/issues/2275 log = Logger(__name__) ONE_MEGABYTE = 1024 * 1024 QUANDL_DATA_URL = ( 'https://www.quandl.com/api/v3/datatables/SHARADAR/SEP.csv?' ) @bundles.register('sharadar-prices') def sharadar_prices_bundle(environ, asset_db_writer, minute_bar_writer, daily_bar_writer, adjustment_writer, calendar, start_session, end_session, cache, show_progress, output_dir): api_key = environ.get('QUANDL_API_KEY') if api_key is None: raise ValueError( "Please set your QUANDL_API_KEY environment variable and retry." ) ###ticker2sid_map = {} raw_data = fetch_data_table( api_key, show_progress, environ.get('QUANDL_DOWNLOAD_ATTEMPTS', 5) ) asset_metadata = gen_asset_metadata( raw_data[['symbol', 'date']], show_progress ) asset_db_writer.write(asset_metadata) symbol_map = asset_metadata.symbol sessions = calendar.sessions_in_range(start_session, end_session) raw_data.set_index(['date', 'symbol'], inplace=True) daily_bar_writer.write( parse_pricing_and_vol( raw_data, sessions, symbol_map ), show_progress=show_progress ) raw_data.reset_index(inplace=True) # raw_data.index = pd.DatetimeIndex(raw_data.date) ###ajjc changes raw_data['symbol'] = raw_data['symbol'].astype('category') raw_data['sid'] = raw_data.symbol.cat.codes # read in Dividend History # ajjc pharrin---------------------- ###uv = raw_data.symbol.unique() # get unique m_tickers (Zacks primary key) # iterate over all the unique securities and pack data, and metadata # for writing # counter of valid securites, this will be our primary key ###sec_counter = 0 ###for tkr in uv: ### #df_tkr = raw_data[raw_data['symbol'] == tkr] ### ticker2sid_map[tkr] = sec_counter # record the sid for use later ### sec_counter += 1 ### dfd = pd.read_csv(file_name, index_col='date', ### parse_dates=['date'], na_values=['NA']) # drop rows where dividends == 0.0 raw_data = raw_data[raw_data["dividends"] != 0.0] raw_data.set_index(['date', 'sid'], inplace=True) # raw_data.loc[:, 'ex_date'] = raw_data.loc[:, 'record_date'] = raw_data.date # raw_data.loc[:, 'declared_date'] = raw_data.loc[:, 'pay_date'] = raw_data.date raw_data.loc[:, 'ex_date'] = raw_data.loc[:, 'record_date'] = raw_data.index.get_level_values('date') raw_data.loc[:, 'declared_date'] = raw_data.loc[:, 'pay_date'] = raw_data.index.get_level_values('date') # raw_data.loc[:, 'sid'] = raw_data.loc[:, 'symbol'].apply(lambda x: ticker2sid_map[x]) raw_data = raw_data.rename(columns={'dividends': 'amount'}) # raw_data = raw_data.drop(['open', 'high', 'low', 'close', 'volume','symbol'], axis=1) raw_data.reset_index(inplace=True) raw_data = raw_data.drop(['open', 'high', 'low', 'close', 'volume', 'symbol', 'date'], axis=1) # raw_data = raw_data.drop(['open', 'high', 'low', 'close', 'volume', 'lastupdated', 'ticker', 'closeunadj'], axis=1) # # format dfd to have sid adjustment_writer.write(dividends=raw_data) # ajjc ---------------------------------- def format_metadata_url(api_key): """ Build the query URL for Quandl Prices metadata. 
""" query_params = [('api_key', api_key), ('qopts.export', 'true')] return ( QUANDL_DATA_URL + urlencode(query_params) ) def load_data_table(file, index_col, show_progress=False): """ Load data table from zip file provided by Quandl. """ with ZipFile(file) as zip_file: file_names = zip_file.namelist() assert len(file_names) == 1, "Expected a single file from Quandl." wiki_prices = file_names.pop() with zip_file.open(wiki_prices) as table_file: if show_progress: log.info('Parsing raw data.') data_table = pd.read_csv( table_file, parse_dates=['date'], index_col=index_col, usecols=[ 'ticker', 'date', 'open', 'high', 'low', 'close', 'volume', 'dividends', ##'closeunadj', ##'lastupdated' #prune last two columns for zipline bundle load ], ) data_table.rename( columns={ 'ticker': 'symbol' }, inplace=True, copy=False, ) return data_table def fetch_data_table(api_key, show_progress, retries): for _ in range(retries): try: if show_progress: log.info('Downloading Sharadar Price metadata.') metadata = pd.read_csv( format_metadata_url(api_key) ) # Extract link from metadata and download zip file. table_url = metadata.loc[0, 'file.link'] if show_progress: raw_file = download_with_progress( table_url, chunk_size=ONE_MEGABYTE, label="Downloading Prices table from Quandl Sharadar" ) else: raw_file = download_without_progress(table_url) return load_data_table( file=raw_file, index_col=None, show_progress=show_progress, ) except Exception: log.exception("Exception raised reading Quandl data. Retrying.") else: raise ValueError( "Failed to download Quandl data after %d attempts." % (retries) ) def gen_asset_metadata(data, show_progress): if show_progress: log.info('Generating asset metadata.') data = data.groupby( by='symbol' ).agg( {'date': [np.min, np.max]} ) data.reset_index(inplace=True) data['start_date'] = data.date.amin data['end_date'] = data.date.amax del data['date'] data.columns = data.columns.get_level_values(0) data['exchange'] = 'QUANDL' data['auto_close_date'] = data['end_date'].values + pd.Timedelta(days=1) return data def parse_pricing_and_vol(data, sessions, symbol_map): for asset_id, symbol in iteritems(symbol_map): asset_data = data.xs( symbol, level=1 ).reindex( sessions.tz_localize(None) ).fillna(0.0) yield asset_id, asset_data def download_with_progress(url, chunk_size, **progress_kwargs): """ Download streaming data from a URL, printing progress information to the terminal. Parameters ---------- url : str A URL that can be understood by ``requests.get``. chunk_size : int Number of bytes to read at a time from requests. **progress_kwargs Forwarded to click.progressbar. Returns ------- data : BytesIO A BytesIO containing the downloaded data. """ resp = requests.get(url, stream=True) resp.raise_for_status() total_size = int(resp.headers['content-length']) data = BytesIO() with progressbar(length=total_size, **progress_kwargs) as pbar: for chunk in resp.iter_content(chunk_size=chunk_size): data.write(chunk) pbar.update(len(chunk)) data.seek(0) return data def download_without_progress(url): """ Download data from a URL, returning a BytesIO containing the loaded data. Parameters ---------- url : str A URL that can be understood by ``requests.get``. Returns ------- data : BytesIO A BytesIO containing the downloaded data. """ resp = requests.get(url) resp.raise_for_status() return BytesIO(resp.content) register_calendar_alias("sharadar-prices", "NYSE")
zipline-trader
/zipline_trader-1.6.1-cp36-cp36m-win32.whl/zipline/data/bundles/sharadar.py
sharadar.py
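A sketch of ingesting the 'sharadar-prices' bundle defined above. The ``@bundles.register`` decorator only runs once the module has been imported (for example from a zipline ``extension.py``), and ``QUANDL_API_KEY`` must be set; the key value below is a placeholder.

# Hedged sketch: trigger the decorator registration, then ingest.
import os
from zipline.data import bundles as bundles_module
import zipline.data.bundles.sharadar  # noqa: F401  (runs @bundles.register)

os.environ['QUANDL_API_KEY'] = 'your-quandl-api-key'   # placeholder value
bundles_module.ingest('sharadar-prices', os.environ, show_progress=True)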
from interface import default, Interface import numpy as np import pandas as pd from zipline.utils.sentinel import sentinel from zipline.lib._factorize import factorize_strings DEFAULT_FX_RATE = sentinel('DEFAULT_FX_RATE') class FXRateReader(Interface): """ Interface for reading foreign exchange (fx) rates. An FX rate reader contains one or more distinct "rates", each of which corresponds to a collection of mappings from (quote, base, dt) -> float. The value produced for a given (quote, base, dt) triple is the exchange rate to use when converting from ``base`` to ``quote`` on ``dt``. The specific set of rates contained in a particular reader is user-defined. We infer no particular semantics from their names, other than that they are distinct rates. Examples of possible rate names might be things like "bid", "mid", and "ask", or "london_close", "tokyo_close", "nyse_close". Implementations of :class:`FXRateReader` must provide at least one method:: def get_rates(self, rate, quote, bases, dts): which takes a rate, a quote currency, an array of base currencies, and an array of dts, and produces a (len(dts), len(base))-shape array containing a conversion rates for all pairs in the cartesian product of bases and dts. Given a definition of :meth:`get_rates`, this interface automatically generates two additional methods:: def get_rates_scalar(self, rate, quote, base, dt): and:: def get_rates_columnar(self, rate, quote, bases, dts): :meth:`get_rates_scalar` takes scalar-valued ``base`` and ``dt`` values, and returns a scalar float value for the requested fx rate. :meth:`get_rates_columnar` takes parallel arrays of ``bases`` and ``dts`` and returns a same-length array of fx rates by performing a lookup on the (base, dt) pairs drawn from zipping together ``bases``, and ``dts``. In other words, its behavior is equivalent to:: def get_rates_columnnar(self, rate, quote, bases, dts): out = [] for base, dt in zip(bases, dts): out.append(self.get_rate_scalar(rate, quote, base, dt)) return np.array(out) """ def get_rates(self, rate, quote, bases, dts): """ Load a 2D array of fx rates. Parameters ---------- rate : str Name of the rate to load. quote : str Currency code of the currency to convert into. bases : np.array[object] Array of codes of the currencies to convert from. The same currency may appear multiple times. dts : pd.DatetimeIndex Datetimes for which to load rates. Must be sorted in ascending order and localized to UTC. Returns ------- rates : np.array Array of shape ``(len(dts), len(bases))`` containing foreign exchange rates mapping currencies from ``bases`` to ``quote``. The row at index i corresponds to the dt in dts[i]. The column at index j corresponds to the base currency in bases[j]. """ @default def get_rate_scalar(self, rate, quote, base, dt): """ Load a scalar FX rate value. Parameters ---------- rate : str Name of the rate to load. quote : str Currency code of the currency to convert into. base : str Currency code of the currency to convert from. dt : np.datetime64 or pd.Timestamp Datetime on which to load rate. Returns ------- rate : np.float64 Exchange rate from base -> quote on dt. """ rates_2d = self.get_rates( rate, quote, bases=np.array([base], dtype=object), dts=pd.DatetimeIndex([dt], tz='UTC'), ) return rates_2d[0, 0] @default def get_rates_columnar(self, rate, quote, bases, dts): """ Load a 1D array of FX rates. Parameters ---------- rate : str Name of the rate to load. quote : str Currency code of the currency to convert into. 
bases : np.array[object] Array of codes of the currencies to convert from. The same currency may appear multiple times. dts : np.DatetimeIndex Datetimes for which to load rates. The same value may appear multiple times. Datetimes do not need to be sorted. """ if len(bases) != len(dts): raise ValueError( "len(bases) ({}) != len(dts) ({})".format(len(bases), len(dts)) ) bases_ix, unique_bases, _ = factorize_strings( bases, missing_value=None, # Only dts need to be sorted, not bases. sort=False, ) # NOTE: np.unique returns unique_dts in sorted order, which is required # for calling get_rates. unique_dts, dts_ix = np.unique(dts.values, return_inverse=True) rates_2d = self.get_rates( rate, quote, unique_bases, pd.DatetimeIndex(unique_dts, tz='utc') ) return rates_2d[dts_ix, bases_ix]
zipline-trader
/zipline_trader-1.6.1-cp36-cp36m-win32.whl/zipline/data/fx/base.py
base.py
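To make the interface above concrete, a toy reader that only defines ``get_rates`` and inherits ``get_rate_scalar`` and ``get_rates_columnar`` from the interface defaults; ``ConstantFXRateReader`` is an illustration, not part of zipline-trader.

# Hedged sketch: a constant-rate FXRateReader built on the interface above.
import numpy as np
import pandas as pd
from interface import implements
from zipline.data.fx.base import FXRateReader

class ConstantFXRateReader(implements(FXRateReader)):
    """Returns the same rate for every (quote, base, dt) triple."""

    def __init__(self, value):
        self._value = value

    def get_rates(self, rate, quote, bases, dts):
        # Must return an array of shape (len(dts), len(bases)).
        return np.full((len(dts), len(bases)), self._value)

reader = ConstantFXRateReader(1.25)
dts = pd.DatetimeIndex(['2020-01-02', '2020-01-03'], tz='UTC')
bases = np.array(['EUR', 'GBP'], dtype=object)
reader.get_rates('mid', 'USD', bases, dts)             # 2x2 array of 1.25
reader.get_rate_scalar('mid', 'USD', 'EUR', dts[0])    # 1.25 (interface default)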
from interface import implements import h5py from logbook import Logger import numpy as np import pandas as pd from zipline.utils.memoize import lazyval from zipline.utils.numpy_utils import bytes_array_to_native_str_object_array from .base import FXRateReader, DEFAULT_FX_RATE from .utils import check_dts, is_sorted_ascending HDF5_FX_VERSION = 0 HDF5_FX_DEFAULT_CHUNK_SIZE = 75 INDEX = 'index' DATA = 'data' CURRENCIES = 'currencies' DTS = 'dts' RATES = 'rates' log = Logger(__name__) class HDF5FXRateReader(implements(FXRateReader)): """An FXRateReader backed by HDF5. Parameters ---------- group : h5py.Group Top-level group written by an :class:`HDF5FXRateWriter`. default_rate : str Rate to use when ``get_rates`` is called requesting the default rate. """ def __init__(self, group, default_rate): self._group = group self._default_rate = default_rate if self.version != HDF5_FX_VERSION: raise ValueError( "FX Reader version ({}) != File Version ({})".format( HDF5_FX_VERSION, self.version, ) ) @classmethod def from_path(cls, path, default_rate): """ Construct from a file path. Parameters ---------- path : str Path to an HDF5 fx rates file. default_rate : str Rate to use when ``get_rates`` is called requesting the default rate. """ return cls(h5py.File(path), default_rate=default_rate) @lazyval def version(self): try: return self._group.attrs['version'] except KeyError: # TODO: Remove this. return 0 @lazyval def dts(self): """Column labels for rate groups. """ raw_dts = self._group[INDEX][DTS][:].astype('M8[ns]') if not is_sorted_ascending(raw_dts): raise ValueError("dts are not sorted for {}!".format(self._group)) return pd.DatetimeIndex(raw_dts, tz='UTC') @lazyval def currencies(self): """Row labels for rate groups. """ # Currencies are stored as fixed-length bytes in the file, but we want # `str` objects in memory. bytes_array = self._group[INDEX][CURRENCIES][:] objects = bytes_array_to_native_str_object_array(bytes_array) return pd.Index(objects) def get_rates(self, rate, quote, bases, dts): """Get rates to convert ``bases`` into ``quote``. See :class:`zipline.data.fx.base.FXRateReader` for details. """ if rate == DEFAULT_FX_RATE: rate = self._default_rate check_dts(dts) col_ixs = self.dts.searchsorted(dts, side='right') - 1 row_ixs = self.currencies.get_indexer(bases) try: dataset = self._group[DATA][rate][quote][RATES] except KeyError: raise ValueError( "FX rates not available for rate={}, quote_currency={}." .format(rate, quote) ) # OPTIMIZATION: Column indices correspond to dates, which must be in # sorted order. Rather than reading the entire dataset from h5, we can # read just the interval from min_col to max_col inclusive # # However, we also need to handle two important edge cases: # # 1. row_ixs contains -1 for any currencies we don't know about. # 2. col_ixs contains -1 for dts before the start of self.dts. # # If either of the above cases obtains, we want to return NaN for the # corresponding output locations. # We handle each of these cases by reading raw data into a buffer with # one extra column and one extra row. When we then permute the raw data # into the correct order, any row or column indices with values of -1 # will pull from the extra row/column, which will always contain NaN. slice_begin = max(col_ixs[0], 0) slice_end = max(col_ixs[-1], 0) + 1 # +1 to be inclusive of end date. # Allocate a buffer full of NaNs with one extra column and row. See # OPTIMIZATION notes above. 
buf = np.full( (len(self.currencies) + 1, slice_end - slice_begin + 1), np.nan, ) buf[:-1, :-1] = dataset[:, slice_begin:slice_end] # Permute the rows into place, pulling from the empty NaN locations for # row and column indices of -1. out = buf[:, col_ixs - slice_begin][row_ixs] # Transpose everything to maintain dts as row labels, currencies as col # labels which is expected everywhere else. return out.transpose() class HDF5FXRateWriter(object): """Writer class for HDF5 files consumed by HDF5FXRateReader. """ def __init__(self, group, date_chunk_size=HDF5_FX_DEFAULT_CHUNK_SIZE): self._group = group self._date_chunk_size = date_chunk_size def write(self, dts, currencies, data): """Write data to the file. Parameters ---------- dts : pd.DatetimeIndex Index of row labels for rates to be written. currencies : np.array[object] Array of column labels for rates to be written. data : iterator[(str, str, np.array[float64])] Iterator of (rate, quote_currency, array) tuples. Each array should be of shape ``(len(dts), len(currencies))``, and should contain a table of rates where each column is a timeseries of rates mapping its column label's currency to ``quote_currency``. """ if len(currencies): chunks = (len(currencies), min(self._date_chunk_size, len(dts))) else: # h5py crashes if we provide chunks for empty data. chunks = None self._write_metadata() self._write_index_group(dts, currencies) self._write_data_group(dts, currencies, data, chunks) def _write_metadata(self): self._group.attrs['version'] = HDF5_FX_VERSION self._group.attrs['last_updated_utc'] = str(pd.Timestamp.utcnow()) def _write_index_group(self, dts, currencies): """Write content of /index. """ if not is_sorted_ascending(dts): raise ValueError("dts is not sorted") for c in currencies: if not isinstance(c, str) or len(c) != 3: raise ValueError("Invalid currency: {!r}".format(c)) index_group = self._group.create_group(INDEX) self._log_writing(INDEX, DTS) index_group.create_dataset(DTS, data=dts.astype('int64')) self._log_writing(INDEX, CURRENCIES) index_group.create_dataset(CURRENCIES, data=currencies.astype('S3')) def _write_data_group(self, dts, currencies, data, chunks): """Write content of /data. """ data_group = self._group.create_group(DATA) expected_shape = (len(dts), len(currencies)) for rate, quote, array in data: if array.shape != expected_shape: raise ValueError( "Unexpected shape for rate={}, quote={}." "\nExpected shape: {}. Got {}." .format(rate, quote, expected_shape, array.shape) ) self._log_writing(DATA, rate, quote) target = data_group.require_group('/'.join((rate, quote))) # Transpose the rates array so that the hdf5 file holds arrays # with currencies as row labels and dates as column labels. This # helps with compression, as the *rows* (rather than the columns) # all have similar values, which lends itself to the HDF5 file's # C-contiguous storage. target.create_dataset(RATES, data=array.transpose(), chunks=chunks, compression='lzf', shuffle=True) def _log_writing(self, *path): log.debug("Writing {}", '/'.join(path))
zipline-trader
/zipline_trader-1.6.1-cp36-cp36m-win32.whl/zipline/data/fx/hdf5.py
hdf5.py
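A sketch of the writer/reader round trip for the HDF5 FX classes above: write one rate table and read a sub-selection back. The file path, rate name ('mid'), quote currency, and currency codes are all illustrative.

# Hedged sketch: write a tiny FX rate file and read it back.
import h5py
import numpy as np
import pandas as pd
from zipline.data.fx.hdf5 import HDF5FXRateReader, HDF5FXRateWriter

dts = pd.date_range('2020-01-02', periods=3)               # row labels (dates)
currencies = np.array(['EUR', 'GBP', 'JPY'], dtype=object)  # column labels
rates = np.full((len(dts), len(currencies)), 1.1)           # one table per (rate, quote)

with h5py.File('fx_rates.h5', 'w') as f:
    HDF5FXRateWriter(f).write(dts, currencies, [('mid', 'USD', rates)])

reader = HDF5FXRateReader.from_path('fx_rates.h5', default_rate='mid')
reader.get_rates(
    'mid', 'USD',
    bases=np.array(['EUR', 'JPY'], dtype=object),
    dts=pd.DatetimeIndex(dts, tz='UTC'),   # sorted, UTC-localized as required
)  # -> array of shape (3, 2) filled with 1.1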
import sys from textwrap import dedent class _Sentinel(object): """Base class for Sentinel objects. """ __slots__ = ('__weakref__',) def is_sentinel(obj): return isinstance(obj, _Sentinel) def sentinel(name, doc=None): try: value = sentinel._cache[name] # memoized except KeyError: pass else: if doc == value.__doc__: return value raise ValueError(dedent( """\ New sentinel value %r conflicts with an existing sentinel of the same name. Old sentinel docstring: %r New sentinel docstring: %r The old sentinel was created at: %s Resolve this conflict by changing the name of one of the sentinels. """, ) % (name, value.__doc__, doc, value._created_at)) try: frame = sys._getframe(1) except ValueError: frame = None if frame is None: created_at = '<unknown>' else: created_at = '%s:%s' % (frame.f_code.co_filename, frame.f_lineno) @object.__new__ # bind a single instance to the name 'Sentinel' class Sentinel(_Sentinel): __doc__ = doc __name__ = name # store created_at so that we can report this in case of a duplicate # name violation _created_at = created_at def __new__(cls): raise TypeError('cannot create %r instances' % name) def __repr__(self): return 'sentinel(%r)' % name def __reduce__(self): return sentinel, (name, doc) def __deepcopy__(self, _memo): return self def __copy__(self): return self cls = type(Sentinel) try: cls.__module__ = frame.f_globals['__name__'] except (AttributeError, KeyError): # Couldn't get the name from the calling scope, just use None. # AttributeError is when frame is None, KeyError is when f_globals # doesn't hold '__name__' cls.__module__ = None sentinel._cache[name] = Sentinel # cache result return Sentinel sentinel._cache = {}
zipline-trader
/zipline_trader-1.6.1-cp36-cp36m-win32.whl/zipline/utils/sentinel.py
sentinel.py
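A sketch of the intended use of ``sentinel``: a unique default-argument marker checked by identity, which is how the rest of the codebase uses it (for example ``DEFAULT_FX_RATE`` in fx/base.py and ``_no_default`` in the functional utilities). The ``NO_VALUE`` name and ``lookup`` helper are illustrative.

# Hedged sketch: a sentinel as a "no value passed" marker.
from zipline.utils.sentinel import sentinel, is_sentinel

NO_VALUE = sentinel('NO_VALUE', 'Marker for "no value was passed".')

def lookup(mapping, key, default=NO_VALUE):
    if key in mapping:
        return mapping[key]
    if default is NO_VALUE:        # identity check, not equality
        raise KeyError(key)
    return default

is_sentinel(NO_VALUE)              # True
lookup({'a': 1}, 'b', default=0)   # 0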
import datetime from copy import deepcopy import numpy as np import pandas as pd def _ensure_index(x): if not isinstance(x, pd.Index): x = pd.Index(sorted(x)) return x class RollingPanel(object): """ Preallocation strategies for rolling window over expanding data set Restrictions: major_axis can only be a DatetimeIndex for now """ def __init__(self, window, items, sids, cap_multiple=2, dtype=np.float64, initial_dates=None): self._pos = window self._window = window self.items = _ensure_index(items) self.minor_axis = _ensure_index(sids) self.cap_multiple = cap_multiple self.dtype = dtype if initial_dates is None: self.date_buf = np.empty(self.cap, dtype='M8[ns]') * pd.NaT elif len(initial_dates) != window: raise ValueError('initial_dates must be of length window') else: self.date_buf = np.hstack( ( initial_dates, np.empty( window * (cap_multiple - 1), dtype='datetime64[ns]', ), ), ) self.buffer = self._create_buffer() @property def cap(self): return self.cap_multiple * self._window @property def _start_index(self): return self._pos - self._window @property def start_date(self): return self.date_buf[self._start_index] def oldest_frame(self, raw=False): """ Get the oldest frame in the panel. """ if raw: return self.buffer.values[:, self._start_index, :] return self.buffer.iloc[:, self._start_index, :] def set_minor_axis(self, minor_axis): self.minor_axis = _ensure_index(minor_axis) self.buffer = self.buffer.reindex(minor_axis=self.minor_axis) def set_items(self, items): self.items = _ensure_index(items) self.buffer = self.buffer.reindex(items=self.items) def _create_buffer(self): panel = pd.Panel( items=self.items, minor_axis=self.minor_axis, major_axis=range(self.cap), dtype=self.dtype, ) return panel def extend_back(self, missing_dts): """ Resizes the buffer to hold a new window with a new cap_multiple. If cap_multiple is None, then the old cap_multiple is used. """ delta = len(missing_dts) if not delta: raise ValueError( 'missing_dts must be a non-empty index', ) self._window += delta self._pos += delta self.date_buf = self.date_buf.copy() self.date_buf.resize(self.cap) self.date_buf = np.roll(self.date_buf, delta) old_vals = self.buffer.values shape = old_vals.shape nan_arr = np.empty((shape[0], delta, shape[2])) nan_arr.fill(np.nan) new_vals = np.column_stack( (nan_arr, old_vals, np.empty((shape[0], delta * (self.cap_multiple - 1), shape[2]))), ) self.buffer = pd.Panel( data=new_vals, items=self.items, minor_axis=self.minor_axis, major_axis=np.arange(self.cap), dtype=self.dtype, ) # Fill the delta with the dates we calculated. where = slice(self._start_index, self._start_index + delta) self.date_buf[where] = missing_dts def add_frame(self, tick, frame, minor_axis=None, items=None): """ """ if self._pos == self.cap: self._roll_data() values = frame if isinstance(frame, pd.DataFrame): values = frame.values self.buffer.values[:, self._pos, :] = values.astype(self.dtype) self.date_buf[self._pos] = tick self._pos += 1 def get_current(self, item=None, raw=False, start=None, end=None): """ Get a Panel that is the current data in view. 
It is not safe to persist these objects because internal data might change """ item_indexer = slice(None) if item: item_indexer = self.items.get_loc(item) start_index = self._start_index end_index = self._pos # get inital date window where = slice(start_index, end_index) current_dates = self.date_buf[where] def convert_datelike_to_long(dt): if isinstance(dt, pd.Timestamp): return dt.asm8 if isinstance(dt, datetime.datetime): return np.datetime64(dt) return dt # constrict further by date if start: start = convert_datelike_to_long(start) start_index += current_dates.searchsorted(start) if end: end = convert_datelike_to_long(end) _end = current_dates.searchsorted(end, 'right') end_index -= len(current_dates) - _end where = slice(start_index, end_index) values = self.buffer.values[item_indexer, where, :] current_dates = self.date_buf[where] if raw: # return copy so we can change it without side effects here return values.copy() major_axis = pd.DatetimeIndex(deepcopy(current_dates), tz='utc') if values.ndim == 3: return pd.Panel(values, self.items, major_axis, self.minor_axis, dtype=self.dtype) elif values.ndim == 2: return pd.DataFrame(values, major_axis, self.minor_axis, dtype=self.dtype) def set_current(self, panel): """ Set the values stored in our current in-view data to be values of the passed panel. The passed panel must have the same indices as the panel that would be returned by self.get_current. """ where = slice(self._start_index, self._pos) self.buffer.values[:, where, :] = panel.values def current_dates(self): where = slice(self._start_index, self._pos) return pd.DatetimeIndex(deepcopy(self.date_buf[where]), tz='utc') def _roll_data(self): """ Roll window worth of data up to position zero. Save the effort of having to expensively roll at each iteration """ self.buffer.values[:, :self._window, :] = \ self.buffer.values[:, -self._window:, :] self.date_buf[:self._window] = self.date_buf[-self._window:] self._pos = self._window @property def window_length(self): return self._window class MutableIndexRollingPanel(object): """ A version of RollingPanel that exists for backwards compatibility with batch_transform. This is a copy to allow behavior of RollingPanel to drift away from this without breaking this class. This code should be considered frozen, and should not be used in the future. Instead, see RollingPanel. """ def __init__(self, window, items, sids, cap_multiple=2, dtype=np.float64): self._pos = 0 self._window = window self.items = _ensure_index(items) self.minor_axis = _ensure_index(sids) self.cap_multiple = cap_multiple self.cap = cap_multiple * window self.dtype = dtype self.date_buf = np.empty(self.cap, dtype='M8[ns]') self.buffer = self._create_buffer() def _oldest_frame_idx(self): return max(self._pos - self._window, 0) def oldest_frame(self, raw=False): """ Get the oldest frame in the panel. """ if raw: return self.buffer.values[:, self._oldest_frame_idx(), :] return self.buffer.iloc[:, self._oldest_frame_idx(), :] def set_sids(self, sids): self.minor_axis = _ensure_index(sids) self.buffer = self.buffer.reindex(minor_axis=self.minor_axis) def _create_buffer(self): panel = pd.Panel( items=self.items, minor_axis=self.minor_axis, major_axis=range(self.cap), dtype=self.dtype, ) return panel def get_current(self): """ Get a Panel that is the current data in view. 
It is not safe to persist these objects because internal data might change """ where = slice(self._oldest_frame_idx(), self._pos) major_axis = pd.DatetimeIndex(deepcopy(self.date_buf[where]), tz='utc') return pd.Panel(self.buffer.values[:, where, :], self.items, major_axis, self.minor_axis, dtype=self.dtype) def set_current(self, panel): """ Set the values stored in our current in-view data to be values of the passed panel. The passed panel must have the same indices as the panel that would be returned by self.get_current. """ where = slice(self._oldest_frame_idx(), self._pos) self.buffer.values[:, where, :] = panel.values def current_dates(self): where = slice(self._oldest_frame_idx(), self._pos) return pd.DatetimeIndex(deepcopy(self.date_buf[where]), tz='utc') def _roll_data(self): """ Roll window worth of data up to position zero. Save the effort of having to expensively roll at each iteration """ self.buffer.values[:, :self._window, :] = \ self.buffer.values[:, -self._window:, :] self.date_buf[:self._window] = self.date_buf[-self._window:] self._pos = self._window def add_frame(self, tick, frame, minor_axis=None, items=None): """ """ if self._pos == self.cap: self._roll_data() if isinstance(frame, pd.DataFrame): minor_axis = frame.columns items = frame.index if set(minor_axis).difference(set(self.minor_axis)) or \ set(items).difference(set(self.items)): self._update_buffer(frame) vals = frame.T.astype(self.dtype) self.buffer.loc[:, self._pos, :] = vals self.date_buf[self._pos] = tick self._pos += 1 def _update_buffer(self, frame): # Get current frame as we only need to care about the data that is in # the active window old_buffer = self.get_current() if self._pos >= self._window: # Don't count the last major_axis entry if we're past our window, # since it's about to roll off the end of the panel. old_buffer = old_buffer.iloc[:, 1:, :] nans = pd.isnull(old_buffer) # Find minor_axes that have only nans # Note that minor is axis 2 non_nan_cols = set(old_buffer.minor_axis[~np.all(nans, axis=(0, 1))]) # Determine new columns to be added new_cols = set(frame.columns).difference(non_nan_cols) # Update internal minor axis self.minor_axis = _ensure_index(new_cols.union(non_nan_cols)) # Same for items (fields) # Find items axes that have only nans # Note that items is axis 0 non_nan_items = set(old_buffer.items[~np.all(nans, axis=(1, 2))]) new_items = set(frame.index).difference(non_nan_items) self.items = _ensure_index(new_items.union(non_nan_items)) # :NOTE: # There is a simpler and 10x faster way to do this: # # Reindex buffer to update axes (automatically adds nans) # self.buffer = self.buffer.reindex(items=self.items, # major_axis=np.arange(self.cap), # minor_axis=self.minor_axis) # # However, pandas==0.12.0, for which we remain backwards compatible, # has a bug in .reindex() that this triggers. Using .update() as before # seems to work fine. new_buffer = self._create_buffer() new_buffer.update( self.buffer.loc[non_nan_items, :, non_nan_cols]) self.buffer = new_buffer
zipline-trader
/zipline_trader-1.6.1-cp36-cp36m-win32.whl/zipline/utils/data.py
data.py
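The RollingPanel / MutableIndexRollingPanel classes in data.py above amortize rolling by keeping a buffer of cap = cap_multiple * window slots and only copying the newest window back to the front when the write position hits the end. A minimal standalone sketch of that idea using a plain numpy buffer rather than the deprecated pd.Panel; the RingWindow name and its add/current methods are invented for illustration and are not part of zipline's API.

import numpy as np

class RingWindow(object):
    """Keep the last `window` rows, rolling only when the buffer fills."""

    def __init__(self, window, ncols, cap_multiple=2, dtype=np.float64):
        self._window = window
        self._cap = cap_multiple * window
        self._buf = np.empty((self._cap, ncols), dtype=dtype)
        self._pos = 0

    def add(self, row):
        if self._pos == self._cap:
            # Copy the most recent `window` rows to the front, as
            # RollingPanel._roll_data does, so rolls are amortized over
            # (cap - window) cheap appends.
            self._buf[:self._window] = self._buf[-self._window:]
            self._pos = self._window
        self._buf[self._pos] = row
        self._pos += 1

    def current(self):
        start = max(self._pos - self._window, 0)
        return self._buf[start:self._pos]

rw = RingWindow(window=3, ncols=2)
for i in range(10):
    rw.add([i, i * 10.0])
print(rw.current())  # the last three rows: 7, 8, 9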
from functools import reduce from operator import itemgetter from pprint import pformat from six import viewkeys, iteritems from six.moves import map, zip from toolz import curry, flip from .sentinel import sentinel @curry def apply(f, *args, **kwargs): """Apply a function to arguments. Parameters ---------- f : callable The function to call. *args, **kwargs **kwargs Arguments to feed to the callable. Returns ------- a : any The result of ``f(*args, **kwargs)`` Examples -------- >>> from toolz.curried.operator import add, sub >>> fs = add(1), sub(1) >>> tuple(map(apply, fs, (1, 2))) (2, -1) Class decorator >>> instance = apply >>> @instance ... class obj: ... def f(self): ... return 'f' ... >>> obj.f() 'f' >>> issubclass(obj, object) Traceback (most recent call last): ... TypeError: issubclass() arg 1 must be a class >>> isinstance(obj, type) False See Also -------- unpack_apply mapply """ return f(*args, **kwargs) # Alias for use as a class decorator. instance = apply def mapall(funcs, seq): """ Parameters ---------- funcs : iterable[function] Sequence of functions to map over `seq`. seq : iterable Sequence over which to map funcs. Yields ------ elem : object Concatenated result of mapping each ``func`` over ``seq``. Examples -------- >>> list(mapall([lambda x: x + 1, lambda x: x - 1], [1, 2, 3])) [2, 3, 4, 0, 1, 2] """ for func in funcs: for elem in seq: yield func(elem) def same(*values): """ Check if all values in a sequence are equal. Returns True on empty sequences. Examples -------- >>> same(1, 1, 1, 1) True >>> same(1, 2, 1) False >>> same() True """ if not values: return True first, rest = values[0], values[1:] return all(value == first for value in rest) def _format_unequal_keys(dicts): return pformat([sorted(d.keys()) for d in dicts]) def dzip_exact(*dicts): """ Parameters ---------- *dicts : iterable[dict] A sequence of dicts all sharing the same keys. Returns ------- zipped : dict A dict whose keys are the union of all keys in *dicts, and whose values are tuples of length len(dicts) containing the result of looking up each key in each dict. Raises ------ ValueError If dicts don't all have the same keys. Examples -------- >>> result = dzip_exact({'a': 1, 'b': 2}, {'a': 3, 'b': 4}) >>> result == {'a': (1, 3), 'b': (2, 4)} True """ if not same(*map(viewkeys, dicts)): raise ValueError( "dict keys not all equal:\n\n%s" % _format_unequal_keys(dicts) ) return {k: tuple(d[k] for d in dicts) for k in dicts[0]} def _gen_unzip(it, elem_len): """Helper for unzip which checks the lengths of each element in it. Parameters ---------- it : iterable[tuple] An iterable of tuples. ``unzip`` should map ensure that these are already tuples. elem_len : int or None The expected element length. If this is None it is infered from the length of the first element. Yields ------ elem : tuple Each element of ``it``. Raises ------ ValueError Raised when the lengths do not match the ``elem_len``. 
""" try: elem = next(it) except: # in python 3.7 this raises a RuntimeError: generator raised StopIteration return first_elem_len = len(elem) if elem_len is not None and elem_len != first_elem_len: raise ValueError( 'element at index 0 was length %d, expected %d' % ( first_elem_len, elem_len, ) ) else: elem_len = first_elem_len yield elem for n, elem in enumerate(it, 1): if len(elem) != elem_len: raise ValueError( 'element at index %d was length %d, expected %d' % ( n, len(elem), elem_len, ), ) yield elem def unzip(seq, elem_len=None): """Unzip a length n sequence of length m sequences into m seperate length n sequences. Parameters ---------- seq : iterable[iterable] The sequence to unzip. elem_len : int, optional The expected length of each element of ``seq``. If not provided this will be infered from the length of the first element of ``seq``. This can be used to ensure that code like: ``a, b = unzip(seq)`` does not fail even when ``seq`` is empty. Returns ------- seqs : iterable[iterable] The new sequences pulled out of the first iterable. Raises ------ ValueError Raised when ``seq`` is empty and ``elem_len`` is not provided. Raised when elements of ``seq`` do not match the given ``elem_len`` or the length of the first element of ``seq``. Examples -------- >>> seq = [('a', 1), ('b', 2), ('c', 3)] >>> cs, ns = unzip(seq) >>> cs ('a', 'b', 'c') >>> ns (1, 2, 3) # checks that the elements are the same length >>> seq = [('a', 1), ('b', 2), ('c', 3, 'extra')] >>> cs, ns = unzip(seq) Traceback (most recent call last): ... ValueError: element at index 2 was length 3, expected 2 # allows an explicit element length instead of infering >>> seq = [('a', 1, 'extra'), ('b', 2), ('c', 3)] >>> cs, ns = unzip(seq, 2) Traceback (most recent call last): ... ValueError: element at index 0 was length 3, expected 2 # handles empty sequences when a length is given >>> cs, ns = unzip([], elem_len=2) >>> cs == ns == () True Notes ----- This function will force ``seq`` to completion. """ ret = tuple(zip(*_gen_unzip(map(tuple, seq), elem_len))) if ret: return ret if elem_len is None: raise ValueError("cannot unzip empty sequence without 'elem_len'") return ((),) * elem_len _no_default = sentinel('_no_default') def getattrs(value, attrs, default=_no_default): """ Perform a chained application of ``getattr`` on ``value`` with the values in ``attrs``. If ``default`` is supplied, return it if any of the attribute lookups fail. Parameters ---------- value : object Root of the lookup chain. attrs : iterable[str] Sequence of attributes to look up. default : object, optional Value to return if any of the lookups fail. Returns ------- result : object Result of the lookup sequence. Examples -------- >>> class EmptyObject(object): ... pass ... >>> obj = EmptyObject() >>> obj.foo = EmptyObject() >>> obj.foo.bar = "value" >>> getattrs(obj, ('foo', 'bar')) 'value' >>> getattrs(obj, ('foo', 'buzz')) Traceback (most recent call last): ... AttributeError: 'EmptyObject' object has no attribute 'buzz' >>> getattrs(obj, ('foo', 'buzz'), 'default') 'default' """ try: for attr in attrs: value = getattr(value, attr) except AttributeError: if default is _no_default: raise value = default return value @curry def set_attribute(name, value): """ Decorator factory for setting attributes on a function. Doesn't change the behavior of the wrapped function. Examples -------- >>> @set_attribute('__name__', 'foo') ... def bar(): ... return 3 ... 
>>> bar() 3 >>> bar.__name__ 'foo' """ def decorator(f): setattr(f, name, value) return f return decorator # Decorators for setting the __name__ and __doc__ properties of a decorated # function. # Example: with_name = set_attribute('__name__') with_doc = set_attribute('__doc__') def foldr(f, seq, default=_no_default): """Fold a function over a sequence with right associativity. Parameters ---------- f : callable[any, any] The function to reduce the sequence with. The first argument will be the element of the sequence; the second argument will be the accumulator. seq : iterable[any] The sequence to reduce. default : any, optional The starting value to reduce with. If not provided, the sequence cannot be empty, and the last value of the sequence will be used. Returns ------- folded : any The folded value. Notes ----- This function works by reducing the list in a right associative way. For example, imagine we are folding with ``operator.add`` or ``+``: .. code-block:: python foldr(add, seq) -> seq[0] + (seq[1] + (seq[2] + (...seq[-1], default))) In the more general case with an arbitrary function, ``foldr`` will expand like so: .. code-block:: python foldr(f, seq) -> f(seq[0], f(seq[1], f(seq[2], ...f(seq[-1], default)))) For a more in depth discussion of left and right folds, see: `https://en.wikipedia.org/wiki/Fold_(higher-order_function)`_ The images in that page are very good for showing the differences between ``foldr`` and ``foldl`` (``reduce``). .. note:: For performance reasons it is best to pass a strict (non-lazy) sequence, for example, a list. See Also -------- :func:`functools.reduce` :func:`sum` """ return reduce( flip(f), reversed(seq), *(default,) if default is not _no_default else () ) def invert(d): """ Invert a dictionary into a dictionary of sets. >>> invert({'a': 1, 'b': 2, 'c': 1}) # doctest: +SKIP {1: {'a', 'c'}, 2: {'b'}} """ out = {} for k, v in iteritems(d): try: out[v].add(k) except KeyError: out[v] = {k} return out def keysorted(d): """Get the items from a dict, sorted by key. Example ------- >>> keysorted({'c': 1, 'b': 2, 'a': 3}) [('a', 3), ('b', 2), ('c', 1)] """ return sorted(iteritems(d), key=itemgetter(0))
zipline-trader
/zipline_trader-1.6.1-cp36-cp36m-win32.whl/zipline/utils/functional.py
functional.py
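A short usage sketch for a few of the helpers in functional.py, assuming zipline-trader is importable; the sample data is invented. With a non-associative operation such as subtraction, the right fold gives a different answer than functools.reduce, matching the expansion shown in foldr's docstring.

from functools import reduce
from operator import sub

from zipline.utils.functional import dzip_exact, foldr, unzip

# Right fold vs. left fold with a non-associative operation.
print(reduce(sub, [1, 2, 3, 4]))   # ((1 - 2) - 3) - 4 == -8
print(foldr(sub, [1, 2, 3, 4]))    # 1 - (2 - (3 - 4)) == -2

# unzip with an explicit element length survives empty input.
pairs = [('AAPL', 1), ('MSFT', 2)]
symbols, sids = unzip(pairs, elem_len=2)
print(symbols, sids)               # ('AAPL', 'MSFT') (1, 2)

# dzip_exact lines up values for identical key sets.
print(dzip_exact({'a': 1, 'b': 2}, {'a': 3, 'b': 4}))  # {'a': (1, 3), 'b': (2, 4)}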
from six.moves import map as imap from toolz import compose, identity class ApplyAsyncResult(object): """An object that boxes results for calls to :meth:`~zipline.utils.pool.SequentialPool.apply_async`. Parameters ---------- value : any The result of calling the function, or any exception that was raised. successful : bool If ``True``, ``value`` is the return value of the function. If ``False``, ``value`` is the exception that was raised when calling the functions. """ def __init__(self, value, successful): self._value = value self._successful = successful def successful(self): """Did the function execute without raising an exception? """ return self._successful def get(self): """Return the result of calling the function or reraise any exceptions that were raised. """ if not self._successful: raise self._value return self._value def ready(self): """Has the function finished executing. Notes ----- In the :class:`~zipline.utils.pool.SequentialPool` case, this is always ``True``. """ return True def wait(self): """Wait until the function is finished executing. Notes ----- In the :class:`~zipline.utils.pool.SequentialPool` case, this is a nop because the function is computed eagerly in the same thread as the call to :meth:`~zipline.utils.pool.SequentialPool.apply_async`. """ pass class SequentialPool(object): """A dummy pool object that iterates sequentially in a single thread. Methods ------- map(f: callable[A, B], iterable: iterable[A]) -> list[B] Apply a function to each of the elements of ``iterable``. imap(f: callable[A, B], iterable: iterable[A]) -> iterable[B] Lazily apply a function to each of the elements of ``iterable``. imap_unordered(f: callable[A, B], iterable: iterable[A]) -> iterable[B] Lazily apply a function to each of the elements of ``iterable`` but yield values as they become available. The resulting iterable is unordered. Notes ----- This object is useful for testing to mock out the ``Pool`` interface provided by gevent or multiprocessing. See Also -------- :class:`multiprocessing.Pool` """ map = staticmethod(compose(list, imap)) imap = imap_unordered = staticmethod(imap) @staticmethod def apply_async(f, args=(), kwargs=None, callback=None): """Apply a function but emulate the API of an asynchronous call. Parameters ---------- f : callable The function to call. args : tuple, optional The positional arguments. kwargs : dict, optional The keyword arguments. Returns ------- future : ApplyAsyncResult The result of calling the function boxed in a future-like api. Notes ----- This calls the function eagerly but wraps it so that ``SequentialPool`` can be used where a :class:`multiprocessing.Pool` or :class:`gevent.pool.Pool` would be used. """ try: value = (identity if callback is None else callback)( f(*args, **kwargs or {}), ) successful = True except Exception as e: value = e successful = False return ApplyAsyncResult(value, successful) @staticmethod def apply(f, args=(), kwargs=None): """Apply a function. Parameters ---------- f : callable The function to call. args : tuple, optional The positional arguments. kwargs : dict, optional The keyword arguments. Returns ------- result : any f(*args, **kwargs) """ return f(*args, **kwargs or {}) @staticmethod def close(): pass @staticmethod def join(): pass
zipline-trader
/zipline_trader-1.6.1-cp36-cp36m-win32.whl/zipline/utils/pool.py
pool.py
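SequentialPool implements just enough of the multiprocessing.Pool surface (map, apply_async, get) to stand in for a real pool in tests. A hedged usage sketch, assuming zipline-trader is importable; the square worker is invented.

from zipline.utils.pool import SequentialPool

def square(x):
    return x * x

pool = SequentialPool()

# map runs eagerly in the calling thread.
print(pool.map(square, [1, 2, 3]))          # [1, 4, 9]

# apply_async returns an ApplyAsyncResult that is already "done".
future = pool.apply_async(square, args=(4,))
print(future.ready(), future.successful())  # True True
print(future.get())                         # 16

# Exceptions are captured and re-raised from get().
bad = pool.apply_async(square, args=('a',))
print(bad.successful())                     # False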
import re from six import iteritems from textwrap import dedent from toolz import curry PIPELINE_DOWNSAMPLING_FREQUENCY_DOC = dedent( """\ frequency : {'year_start', 'quarter_start', 'month_start', 'week_start'} A string indicating desired sampling dates: * 'year_start' -> first trading day of each year * 'quarter_start' -> first trading day of January, April, July, October * 'month_start' -> first trading day of each month * 'week_start' -> first trading_day of each week """ ) PIPELINE_ALIAS_NAME_DOC = dedent( """\ name : str The name to alias this term as. """, ) def pad_lines_after_first(prefix, s): """Apply a prefix to each line in s after the first.""" return ('\n' + prefix).join(s.splitlines()) def format_docstring(owner_name, docstring, formatters): """ Template ``formatters`` into ``docstring``. Parameters ---------- owner_name : str The name of the function or class whose docstring is being templated. Only used for error messages. docstring : str The docstring to template. formatters : dict[str -> str] Parameters for a a str.format() call on ``docstring``. Multi-line values in ``formatters`` will have leading whitespace padded to match the leading whitespace of the substitution string. """ # Build a dict of parameters to a vanilla format() call by searching for # each entry in **formatters and applying any leading whitespace to each # line in the desired substitution. format_params = {} for target, doc_for_target in iteritems(formatters): # Search for '{name}', with optional leading whitespace. regex = re.compile(r'^(\s*)' + '({' + target + '})$', re.MULTILINE) matches = regex.findall(docstring) if not matches: raise ValueError( "Couldn't find template for parameter {!r} in docstring " "for {}." "\nParameter name must be alone on a line surrounded by " "braces.".format(target, owner_name), ) elif len(matches) > 1: raise ValueError( "Couldn't found multiple templates for parameter {!r}" "in docstring for {}." "\nParameter should only appear once.".format( target, owner_name ) ) (leading_whitespace, _) = matches[0] format_params[target] = pad_lines_after_first( leading_whitespace, doc_for_target, ) return docstring.format(**format_params) def templated_docstring(**docs): """ Decorator allowing the use of templated docstrings. Examples -------- >>> @templated_docstring(foo='bar') ... def my_func(self, foo): ... '''{foo}''' ... >>> my_func.__doc__ 'bar' """ def decorator(f): f.__doc__ = format_docstring(f.__name__, f.__doc__, docs) return f return decorator @curry def copydoc(from_, to): """Copies the docstring from one function to another. Parameters ---------- from_ : any The object to copy the docstring from. to : any The object to copy the docstring to. Returns ------- to : any ``to`` with the docstring from ``from_`` """ to.__doc__ = from_.__doc__ return to
zipline-trader
/zipline_trader-1.6.1-cp36-cp36m-win32.whl/zipline/utils/sharedoc.py
sharedoc.py
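templated_docstring fills a {name} placeholder that sits alone on a docstring line, padding multi-line substitutions to the placeholder's indentation. A small usage sketch, assuming zipline-trader is importable; the window_length text is invented.

from zipline.utils.sharedoc import templated_docstring

WINDOW_LENGTH_DOC = "window_length : int\n    Number of trailing sessions to use."

@templated_docstring(window_length=WINDOW_LENGTH_DOC)
def my_factor(window_length):
    """Compute something over a trailing window.

    Parameters
    ----------
    {window_length}
    """

# The placeholder is replaced and continuation lines pick up its indentation.
print(my_factor.__doc__)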
from abc import ABCMeta, abstractmethod from six import with_metaclass, iteritems # Consistent error to be thrown in various cases regarding overriding # `final` attributes. _type_error = TypeError('Cannot override final attribute') def bases_mro(bases): """ Yield classes in the order that methods should be looked up from the base classes of an object. """ for base in bases: for class_ in base.__mro__: yield class_ def is_final(name, mro): """ Checks if `name` is a `final` object in the given `mro`. We need to check the mro because we need to directly go into the __dict__ of the classes. Because `final` objects are descriptor, we need to grab them _BEFORE_ the `__call__` is invoked. """ return any(isinstance(getattr(c, '__dict__', {}).get(name), final) for c in bases_mro(mro)) class FinalMeta(type): """A metaclass template for classes the want to prevent subclassess from overriding a some methods or attributes. """ def __new__(mcls, name, bases, dict_): for k, v in iteritems(dict_): if is_final(k, bases): raise _type_error setattr_ = dict_.get('__setattr__') if setattr_ is None: # No `__setattr__` was explicitly defined, look up the super # class's. `bases[0]` will have a `__setattr__` because # `object` does so we don't need to worry about the mro. setattr_ = bases[0].__setattr__ if not is_final('__setattr__', bases) \ and not isinstance(setattr_, final): # implicitly make the `__setattr__` a `final` object so that # users cannot just avoid the descriptor protocol. dict_['__setattr__'] = final(setattr_) return super(FinalMeta, mcls).__new__(mcls, name, bases, dict_) def __setattr__(self, name, value): """This stops the `final` attributes from being reassigned on the class object. """ if is_final(name, self.__mro__): raise _type_error super(FinalMeta, self).__setattr__(name, value) class final(with_metaclass(ABCMeta)): """ An attribute that cannot be overridden. This is like the final modifier in Java. Example usage: >>> from six import with_metaclass >>> class C(with_metaclass(FinalMeta, object)): ... @final ... def f(self): ... return 'value' ... This constructs a class with final method `f`. This cannot be overridden on the class object or on any instance. You cannot override this by subclassing `C`; attempting to do so will raise a `TypeError` at class construction time. """ def __new__(cls, attr): # Decide if this is a method wrapper or an attribute wrapper. # We are going to cache the `callable` check by creating a # method or attribute wrapper. if hasattr(attr, '__get__'): return object.__new__(finaldescriptor) else: return object.__new__(finalvalue) def __init__(self, attr): self._attr = attr def __set__(self, instance, value): """ `final` objects cannot be reassigned. This is the most import concept about `final`s. Unlike a `property` object, this will raise a `TypeError` when you attempt to reassign it. """ raise _type_error @abstractmethod def __get__(self, instance, owner): raise NotImplementedError('__get__') class finalvalue(final): """ A wrapper for a non-descriptor attribute. """ def __get__(self, instance, owner): return self._attr class finaldescriptor(final): """ A final wrapper around a descriptor. """ def __get__(self, instance, owner): return self._attr.__get__(instance, owner)
zipline-trader
/zipline_trader-1.6.1-cp36-cp36m-win32.whl/zipline/utils/final.py
final.py
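A brief demonstration of the final / FinalMeta contract above, assuming zipline-trader is importable; the Base and Child names are illustrative. Both overriding in a subclass and reassigning on an instance should raise the "Cannot override final attribute" TypeError.

from six import with_metaclass

from zipline.utils.final import FinalMeta, final

class Base(with_metaclass(FinalMeta, object)):
    @final
    def f(self):
        return 'value'

try:
    class Child(Base):
        def f(self):               # overriding a final attribute
            return 'other'
except TypeError as e:
    print(e)                        # Cannot override final attribute

b = Base()
try:
    b.f = lambda: 'other'           # reassignment through an instance is blocked too
except TypeError as e:
    print(e)                        # Cannot override final attribute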
import warnings from datetime import datetime from os import listdir import os.path import pandas as pd import pytz import zipline from zipline.errors import SymbolNotFound from zipline.finance.asset_restrictions import SecurityListRestrictions from zipline.zipline_warnings import ZiplineDeprecationWarning DATE_FORMAT = "%Y%m%d" zipline_dir = os.path.dirname(zipline.__file__) SECURITY_LISTS_DIR = os.path.join(zipline_dir, 'resources', 'security_lists') class SecurityList(object): def __init__(self, data, current_date_func, asset_finder): """ data: a nested dictionary: knowledge_date -> lookup_date -> {add: [symbol list], 'delete': []}, delete: [symbol list]} current_date_func: function taking no parameters, returning current datetime """ self.data = data self._cache = {} self._knowledge_dates = self.make_knowledge_dates(self.data) self.current_date = current_date_func self.count = 0 self._current_set = set() self.asset_finder = asset_finder def make_knowledge_dates(self, data): knowledge_dates = sorted( [pd.Timestamp(k) for k in data.keys()]) return knowledge_dates def __iter__(self): warnings.warn( 'Iterating over security_lists is deprecated. Use ' '`for sid in <security_list>.current_securities(dt)` instead.', category=ZiplineDeprecationWarning, stacklevel=2 ) return iter(self.current_securities(self.current_date())) def __contains__(self, item): warnings.warn( 'Evaluating inclusion in security_lists is deprecated. Use ' '`sid in <security_list>.current_securities(dt)` instead.', category=ZiplineDeprecationWarning, stacklevel=2 ) return item in self.current_securities(self.current_date()) def current_securities(self, dt): for kd in self._knowledge_dates: if dt < kd: break if kd in self._cache: self._current_set = self._cache[kd] continue for effective_date, changes in iter(self.data[kd].items()): self.update_current( effective_date, changes['add'], self._current_set.add ) self.update_current( effective_date, changes['delete'], self._current_set.remove ) self._cache[kd] = self._current_set return self._current_set def update_current(self, effective_date, symbols, change_func): for symbol in symbols: try: asset = self.asset_finder.lookup_symbol( symbol, as_of_date=effective_date ) # Pass if no Asset exists for the symbol except SymbolNotFound: continue change_func(asset.sid) class SecurityListSet(object): # provide a cut point to substitute other security # list implementations. security_list_type = SecurityList def __init__(self, current_date_func, asset_finder): self.current_date_func = current_date_func self.asset_finder = asset_finder self._leveraged_etf = None @property def leveraged_etf_list(self): if self._leveraged_etf is None: self._leveraged_etf = self.security_list_type( load_from_directory('leveraged_etf_list'), self.current_date_func, asset_finder=self.asset_finder ) return self._leveraged_etf @property def restrict_leveraged_etfs(self): return SecurityListRestrictions(self.leveraged_etf_list) def load_from_directory(list_name): """ To resolve the symbol in the LEVERAGED_ETF list, the date on which the symbol was in effect is needed. Furthermore, to maintain a point in time record of our own maintenance of the restricted list, we need a knowledge date. Thus, restricted lists are dictionaries of datetime->symbol lists. new symbols should be entered as a new knowledge date entry. 
This method assumes a directory structure of: SECURITY_LISTS_DIR/listname/knowledge_date/lookup_date/add.txt SECURITY_LISTS_DIR/listname/knowledge_date/lookup_date/delete.txt The return value is a dictionary with: knowledge_date -> lookup_date -> {add: [symbol list], 'delete': [symbol list]} """ data = {} dir_path = os.path.join(SECURITY_LISTS_DIR, list_name) for kd_name in listdir(dir_path): kd = datetime.strptime(kd_name, DATE_FORMAT).replace( tzinfo=pytz.utc) data[kd] = {} kd_path = os.path.join(dir_path, kd_name) for ld_name in listdir(kd_path): ld = datetime.strptime(ld_name, DATE_FORMAT).replace( tzinfo=pytz.utc) data[kd][ld] = {} ld_path = os.path.join(kd_path, ld_name) for fname in listdir(ld_path): fpath = os.path.join(ld_path, fname) with open(fpath) as f: symbols = f.read().splitlines() data[kd][ld][fname] = symbols return data
zipline-trader
/zipline_trader-1.6.1-cp36-cp36m-win32.whl/zipline/utils/security_list.py
security_list.py
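security_list.py stores restrictions as knowledge_date -> lookup_date -> {'add': [...], 'delete': [...]}. The sketch below mirrors the accumulation loop in SecurityList.current_securities but works directly on symbols instead of resolving them through an asset_finder; the dates and tickers are invented, and set.difference_update is used as a forgiving stand-in for the remove call.

import pandas as pd

# knowledge_date -> lookup_date -> {'add': [...], 'delete': [...]},
# shaped like the output of load_from_directory() (contents made up).
data = {
    pd.Timestamp('2016-01-04', tz='UTC'): {
        pd.Timestamp('2016-01-04', tz='UTC'): {'add': ['TVIX', 'UVXY'], 'delete': []},
    },
    pd.Timestamp('2017-06-01', tz='UTC'): {
        pd.Timestamp('2017-06-01', tz='UTC'): {'add': ['SQQQ'], 'delete': ['TVIX']},
    },
}

def symbols_as_of(data, dt):
    current = set()
    for kd in sorted(data):
        if dt < kd:
            # Only knowledge dates at or before ``dt`` are applied.
            break
        for _lookup_date, changes in data[kd].items():
            current.update(changes['add'])
            current.difference_update(changes['delete'])
    return current

print(symbols_as_of(data, pd.Timestamp('2016-06-01', tz='UTC')))  # {'TVIX', 'UVXY'}
print(symbols_as_of(data, pd.Timestamp('2018-01-01', tz='UTC')))  # {'UVXY', 'SQQQ'}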
import pandas as pd import pytz # import warnings from datetime import datetime from dateutil import rrule from functools import partial # from zipline.zipline_warnings import ZiplineDeprecationWarning # IMPORTANT: This module is deprecated and is only here for temporary backwards # compatibility. Look at the `trading-calendars` # module, as well as the calendar definitions in `trading_calendars`. # TODO: The new calendar API is currently in flux, so the deprecation # warning for this module is currently disabled. Re-enable once # the new API is stabilized. # # warnings.warn( # "The `tradingcalendar` module is deprecated. See the " # "`trading-calendars` module, as well as the " # "calendar definitions in `trading-calendars`.", # category=ZiplineDeprecationWarning, # stacklevel=1, # ) start = pd.Timestamp('1990-01-01', tz='UTC') end_base = pd.Timestamp('today', tz='UTC') # Give an aggressive buffer for logic that needs to use the next trading # day or minute. end = end_base + pd.Timedelta(days=365) def canonicalize_datetime(dt): # Strip out any HHMMSS or timezone info in the user's datetime, so that # all the datetimes we return will be 00:00:00 UTC. return datetime(dt.year, dt.month, dt.day, tzinfo=pytz.utc) def get_non_trading_days(start, end): non_trading_rules = [] start = canonicalize_datetime(start) end = canonicalize_datetime(end) weekends = rrule.rrule( rrule.YEARLY, byweekday=(rrule.SA, rrule.SU), cache=True, dtstart=start, until=end ) non_trading_rules.append(weekends) new_years = rrule.rrule( rrule.MONTHLY, byyearday=1, cache=True, dtstart=start, until=end ) non_trading_rules.append(new_years) new_years_sunday = rrule.rrule( rrule.MONTHLY, byyearday=2, byweekday=rrule.MO, cache=True, dtstart=start, until=end ) non_trading_rules.append(new_years_sunday) mlk_day = rrule.rrule( rrule.MONTHLY, bymonth=1, byweekday=(rrule.MO(+3)), cache=True, dtstart=datetime(1998, 1, 1, tzinfo=pytz.utc), until=end ) non_trading_rules.append(mlk_day) presidents_day = rrule.rrule( rrule.MONTHLY, bymonth=2, byweekday=(rrule.MO(3)), cache=True, dtstart=start, until=end ) non_trading_rules.append(presidents_day) good_friday = rrule.rrule( rrule.DAILY, byeaster=-2, cache=True, dtstart=start, until=end ) non_trading_rules.append(good_friday) memorial_day = rrule.rrule( rrule.MONTHLY, bymonth=5, byweekday=(rrule.MO(-1)), cache=True, dtstart=start, until=end ) non_trading_rules.append(memorial_day) july_4th = rrule.rrule( rrule.MONTHLY, bymonth=7, bymonthday=4, cache=True, dtstart=start, until=end ) non_trading_rules.append(july_4th) july_4th_sunday = rrule.rrule( rrule.MONTHLY, bymonth=7, bymonthday=5, byweekday=rrule.MO, cache=True, dtstart=start, until=end ) non_trading_rules.append(july_4th_sunday) july_4th_saturday = rrule.rrule( rrule.MONTHLY, bymonth=7, bymonthday=3, byweekday=rrule.FR, cache=True, dtstart=start, until=end ) non_trading_rules.append(july_4th_saturday) labor_day = rrule.rrule( rrule.MONTHLY, bymonth=9, byweekday=(rrule.MO(1)), cache=True, dtstart=start, until=end ) non_trading_rules.append(labor_day) thanksgiving = rrule.rrule( rrule.MONTHLY, bymonth=11, byweekday=(rrule.TH(4)), cache=True, dtstart=start, until=end ) non_trading_rules.append(thanksgiving) christmas = rrule.rrule( rrule.MONTHLY, bymonth=12, bymonthday=25, cache=True, dtstart=start, until=end ) non_trading_rules.append(christmas) christmas_sunday = rrule.rrule( rrule.MONTHLY, bymonth=12, bymonthday=26, byweekday=rrule.MO, cache=True, dtstart=start, until=end ) non_trading_rules.append(christmas_sunday) # If Christmas is 
a Saturday then 24th, a Friday is observed. christmas_saturday = rrule.rrule( rrule.MONTHLY, bymonth=12, bymonthday=24, byweekday=rrule.FR, cache=True, dtstart=start, until=end ) non_trading_rules.append(christmas_saturday) non_trading_ruleset = rrule.rruleset() for rule in non_trading_rules: non_trading_ruleset.rrule(rule) non_trading_days = non_trading_ruleset.between(start, end, inc=True) # Add September 11th closings # https://en.wikipedia.org/wiki/Aftermath_of_the_September_11_attacks # Due to the terrorist attacks, the stock market did not open on 9/11/2001 # It did not open again until 9/17/2001. # # September 2001 # Su Mo Tu We Th Fr Sa # 1 # 2 3 4 5 6 7 8 # 9 10 11 12 13 14 15 # 16 17 18 19 20 21 22 # 23 24 25 26 27 28 29 # 30 for day_num in range(11, 17): non_trading_days.append( datetime(2001, 9, day_num, tzinfo=pytz.utc)) # Add closings due to Hurricane Sandy in 2012 # https://en.wikipedia.org/wiki/Hurricane_sandy # # The stock exchange was closed due to Hurricane Sandy's # impact on New York. # It closed on 10/29 and 10/30, reopening on 10/31 # October 2012 # Su Mo Tu We Th Fr Sa # 1 2 3 4 5 6 # 7 8 9 10 11 12 13 # 14 15 16 17 18 19 20 # 21 22 23 24 25 26 27 # 28 29 30 31 for day_num in range(29, 31): non_trading_days.append( datetime(2012, 10, day_num, tzinfo=pytz.utc)) # Misc closings from NYSE listing. # http://www.nyse.com/pdfs/closings.pdf # # National Days of Mourning # - President Richard Nixon non_trading_days.append(datetime(1994, 4, 27, tzinfo=pytz.utc)) # - President Ronald W. Reagan - June 11, 2004 non_trading_days.append(datetime(2004, 6, 11, tzinfo=pytz.utc)) # - President Gerald R. Ford - Jan 2, 2007 non_trading_days.append(datetime(2007, 1, 2, tzinfo=pytz.utc)) non_trading_days.sort() return pd.DatetimeIndex(non_trading_days) non_trading_days = get_non_trading_days(start, end) trading_day = pd.tseries.offsets.CDay(holidays=non_trading_days) def get_trading_days(start, end, trading_day=trading_day): return pd.date_range(start=start.date(), end=end.date(), freq=trading_day).tz_localize('UTC') trading_days = get_trading_days(start, end) def get_early_closes(start, end): # 1:00 PM close rules based on # https://quant.stackexchange.com/questions/4083/nyse-early-close-rules-july-4th-and-dec-25th # noqa # and verified against http://www.nyse.com/pdfs/closings.pdf # These rules are valid starting in 1993 start = canonicalize_datetime(start) end = canonicalize_datetime(end) start = max(start, datetime(1993, 1, 1, tzinfo=pytz.utc)) end = max(end, datetime(1993, 1, 1, tzinfo=pytz.utc)) # Not included here are early closes prior to 1993 # or unplanned early closes early_close_rules = [] day_after_thanksgiving = rrule.rrule( rrule.MONTHLY, bymonth=11, # 4th Friday isn't correct if month starts on Friday, so restrict to # day range: byweekday=(rrule.FR), bymonthday=range(23, 30), cache=True, dtstart=start, until=end ) early_close_rules.append(day_after_thanksgiving) christmas_eve = rrule.rrule( rrule.MONTHLY, bymonth=12, bymonthday=24, byweekday=(rrule.MO, rrule.TU, rrule.WE, rrule.TH), cache=True, dtstart=start, until=end ) early_close_rules.append(christmas_eve) friday_after_christmas = rrule.rrule( rrule.MONTHLY, bymonth=12, bymonthday=26, byweekday=rrule.FR, cache=True, dtstart=start, # valid 1993-2007 until=min(end, datetime(2007, 12, 31, tzinfo=pytz.utc)) ) early_close_rules.append(friday_after_christmas) day_before_independence_day = rrule.rrule( rrule.MONTHLY, bymonth=7, bymonthday=3, byweekday=(rrule.MO, rrule.TU, rrule.TH), cache=True, dtstart=start, until=end ) 
early_close_rules.append(day_before_independence_day) day_after_independence_day = rrule.rrule( rrule.MONTHLY, bymonth=7, bymonthday=5, byweekday=rrule.FR, cache=True, dtstart=start, # starting in 2013: wednesday before independence day until=min(end, datetime(2012, 12, 31, tzinfo=pytz.utc)) ) early_close_rules.append(day_after_independence_day) wednesday_before_independence_day = rrule.rrule( rrule.MONTHLY, bymonth=7, bymonthday=3, byweekday=rrule.WE, cache=True, # starting in 2013 dtstart=max(start, datetime(2013, 1, 1, tzinfo=pytz.utc)), until=max(end, datetime(2013, 1, 1, tzinfo=pytz.utc)) ) early_close_rules.append(wednesday_before_independence_day) early_close_ruleset = rrule.rruleset() for rule in early_close_rules: early_close_ruleset.rrule(rule) early_closes = early_close_ruleset.between(start, end, inc=True) # Misc early closings from NYSE listing. # http://www.nyse.com/pdfs/closings.pdf # # New Year's Eve nye_1999 = datetime(1999, 12, 31, tzinfo=pytz.utc) if start <= nye_1999 and nye_1999 <= end: early_closes.append(nye_1999) early_closes.sort() return pd.DatetimeIndex(early_closes) early_closes = get_early_closes(start, end) def get_open_and_close(day, early_closes): market_open = pd.Timestamp( datetime( year=day.year, month=day.month, day=day.day, hour=9, minute=31), tz='US/Eastern').tz_convert('UTC') # 1 PM if early close, 4 PM otherwise close_hour = 13 if day in early_closes else 16 market_close = pd.Timestamp( datetime( year=day.year, month=day.month, day=day.day, hour=close_hour), tz='US/Eastern').tz_convert('UTC') return market_open, market_close def get_open_and_closes(trading_days, early_closes, get_open_and_close): open_and_closes = pd.DataFrame(index=trading_days, columns=('market_open', 'market_close')) get_o_and_c = partial(get_open_and_close, early_closes=early_closes) open_and_closes['market_open'], open_and_closes['market_close'] = \ zip(*open_and_closes.index.map(get_o_and_c)) return open_and_closes open_and_closes = get_open_and_closes(trading_days, early_closes, get_open_and_close)
zipline-trader
/zipline_trader-1.6.1-cp36-cp36m-win32.whl/zipline/utils/tradingcalendar.py
tradingcalendar.py
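tradingcalendar.py derives sessions by collecting dateutil rrules into an rruleset and feeding the result to a CustomBusinessDay offset. A stripped-down sketch of that pattern covering only weekends and a fixed-date New Year's rule; the date range and the expected session count in the comment are my own illustration, not values from the module.

from datetime import datetime

import pandas as pd
import pytz
from dateutil import rrule

start = datetime(2020, 1, 1, tzinfo=pytz.utc)
end = datetime(2020, 1, 31, tzinfo=pytz.utc)

ruleset = rrule.rruleset()
# Weekends, plus a fixed-date New Year's rule.
ruleset.rrule(rrule.rrule(rrule.YEARLY, byweekday=(rrule.SA, rrule.SU),
                          dtstart=start, until=end))
ruleset.rrule(rrule.rrule(rrule.YEARLY, byyearday=1, dtstart=start, until=end))
holidays = [d.date() for d in ruleset.between(start, end, inc=True)]

trading_day = pd.tseries.offsets.CDay(holidays=holidays)
sessions = pd.date_range(start=start.date(), end=end.date(), freq=trading_day)
print(len(sessions))  # should be 22: 23 January weekdays minus New Year's Day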
from ctypes import ( Structure, c_ubyte, c_uint, c_ulong, c_ulonglong, c_ushort, sizeof, ) import numpy as np import pandas as pd from six.moves import range _inttypes_map = { sizeof(t) - 1: t for t in { c_ubyte, c_uint, c_ulong, c_ulonglong, c_ushort } } _inttypes = pd.Series(_inttypes_map).\ reindex(range(max(_inttypes_map.keys())+1)).\ bfill().\ tolist() def enum(option, *options): """ Construct a new enum object. Parameters ---------- *options : iterable of str The names of the fields for the enum. Returns ------- enum A new enum collection. Examples -------- >>> e = enum('a', 'b', 'c') >>> e <enum: ('a', 'b', 'c')> >>> e.a 0 >>> e.b 1 >>> e.a in e True >>> tuple(e) (0, 1, 2) Notes ----- Identity checking is not guaranteed to work with enum members, instead equality checks should be used. From CPython's documentation: "The current implementation keeps an array of integer objects for all integers between -5 and 256, when you create an int in that range you actually just get back a reference to the existing object. So it should be possible to change the value of 1. I suspect the behaviour of Python in this case is undefined. :-)" """ options = (option,) + options rangeob = range(len(options)) try: inttype = _inttypes[int(np.log2(len(options) - 1)) // 8] except IndexError: raise OverflowError( 'Cannot store enums with more than sys.maxsize elements, got %d' % len(options), ) class _enum(Structure): _fields_ = [(o, inttype) for o in options] def __iter__(self): return iter(rangeob) def __contains__(self, value): return 0 <= value < len(options) def __repr__(self): return '<enum: %s>' % ( ('%d fields' % len(options)) if len(options) > 10 else repr(options) ) return _enum(*rangeob)
zipline-trader
/zipline_trader-1.6.1-cp36-cp36m-win32.whl/zipline/utils/enum.py
enum.py
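A usage sketch for the ctypes-backed enum helper, assuming zipline-trader is importable; the ORDER_STATUS name and its fields are invented for illustration.

from zipline.utils.enum import enum

ORDER_STATUS = enum('OPEN', 'FILLED', 'CANCELLED')

print(ORDER_STATUS.OPEN)     # 0
print(ORDER_STATUS.FILLED)   # 1
print(1 in ORDER_STATUS)     # True
print(tuple(ORDER_STATUS))   # (0, 1, 2)
print(ORDER_STATUS)          # <enum: ('OPEN', 'FILLED', 'CANCELLED')>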
from operator import attrgetter import six def compose_types(a, *cs): """Compose multiple classes together. Parameters ---------- *mcls : tuple[type] The classes that you would like to compose Returns ------- cls : type A type that subclasses all of the types in ``mcls``. Notes ----- A common use case for this is to build composed metaclasses, for example, imagine you have some simple metaclass ``M`` and some instance of ``M`` named ``C`` like so: .. code-block:: python >>> class M(type): ... def __new__(mcls, name, bases, dict_): ... dict_['ayy'] = 'lmao' ... return super(M, mcls).__new__(mcls, name, bases, dict_) >>> from six import with_metaclass >>> class C(with_metaclass(M, object)): ... pass We now want to create a sublclass of ``C`` that is also an abstract class. We can use ``compose_types`` to create a new metaclass that is a subclass of ``M`` and ``ABCMeta``. This is needed because a subclass of a class with a metaclass must have a metaclass which is a subclass of the metaclass of the superclass. .. code-block:: python >>> from abc import ABCMeta, abstractmethod >>> class D(with_metaclass(compose_types(M, ABCMeta), C)): ... @abstractmethod ... def f(self): ... raise NotImplementedError('f') We can see that this class has both metaclasses applied to it: .. code-block:: python >>> D.ayy 'lmao' >>> D() Traceback (most recent call last): ... TypeError: Can't instantiate abstract class D with abstract methods f An important note here is that ``M`` did not use ``type.__new__`` and instead used ``super()``. This is to support cooperative multiple inheritance which is needed for ``compose_types`` to work as intended. After we have composed these types ``M.__new__``\'s super will actually go to ``ABCMeta.__new__`` and not ``type.__new__``. Always using ``super()`` to dispatch to your superclass is best practices anyways so most classes should compose without much special considerations. """ if not cs: # if there are no types to compose then just return the single type return a mcls = (a,) + cs return type( 'compose_types(%s)' % ', '.join(map(attrgetter('__name__'), mcls)), mcls, {}, ) def with_metaclasses(metaclasses, *bases): """Make a class inheriting from ``bases`` whose metaclass inherits from all of ``metaclasses``. Like :func:`six.with_metaclass`, but allows multiple metaclasses. Parameters ---------- metaclasses : iterable[type] A tuple of types to use as metaclasses. *bases : tuple[type] A tuple of types to use as bases. Returns ------- base : type A subtype of ``bases`` whose metaclass is a subtype of ``metaclasses``. Notes ----- The metaclasses must be written to support cooperative multiple inheritance. This means that they must delegate all calls to ``super()`` instead of inlining their super class by name. """ return six.with_metaclass(compose_types(*metaclasses), *bases)
zipline-trader
/zipline_trader-1.6.1-cp36-cp36m-win32.whl/zipline/utils/metautils.py
metautils.py
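compose_types / with_metaclasses only work when each metaclass dispatches through super(), as the docstring above stresses. A small sketch with two invented cooperative metaclasses, assuming zipline-trader is importable.

from zipline.utils.metautils import with_metaclasses

class AddsFoo(type):
    def __new__(mcls, name, bases, dict_):
        dict_['foo'] = 'foo'
        return super(AddsFoo, mcls).__new__(mcls, name, bases, dict_)

class AddsBar(type):
    def __new__(mcls, name, bases, dict_):
        dict_['bar'] = 'bar'
        return super(AddsBar, mcls).__new__(mcls, name, bases, dict_)

# Both metaclasses dispatch through super(), so they compose cleanly.
class Both(with_metaclasses((AddsFoo, AddsBar), object)):
    pass

print(Both.foo, Both.bar)   # foo bar
print(type(Both).__name__)  # compose_types(AddsFoo, AddsBar)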
from collections import OrderedDict from datetime import datetime from distutils.version import StrictVersion from warnings import ( catch_warnings, filterwarnings, ) import six import numpy as np from numpy import ( array_equal, broadcast, busday_count, datetime64, diff, dtype, empty, flatnonzero, hstack, isnan, nan, vectorize, where ) from numpy.lib.stride_tricks import as_strided from toolz import flip numpy_version = StrictVersion(np.__version__) uint8_dtype = dtype('uint8') bool_dtype = dtype('bool') uint32_dtype = dtype('uint32') uint64_dtype = dtype('uint64') int64_dtype = dtype('int64') float32_dtype = dtype('float32') float64_dtype = dtype('float64') complex128_dtype = dtype('complex128') datetime64D_dtype = dtype('datetime64[D]') datetime64ns_dtype = dtype('datetime64[ns]') object_dtype = dtype('O') # We use object arrays for strings. categorical_dtype = object_dtype make_datetime64ns = flip(datetime64, 'ns') make_datetime64D = flip(datetime64, 'D') # Array compare that works across versions of numpy try: assert_array_compare = np.testing.utils.assert_array_compare except AttributeError: assert_array_compare = np.testing.assert_array_compare NaTmap = { dtype('datetime64[%s]' % unit): datetime64('NaT', unit) for unit in ('ns', 'us', 'ms', 's', 'm', 'D') } def NaT_for_dtype(dtype): """Retrieve NaT with the same units as ``dtype``. Parameters ---------- dtype : dtype-coercable The dtype to lookup the NaT value for. Returns ------- NaT : dtype The NaT value for the given dtype. """ return NaTmap[np.dtype(dtype)] NaTns = NaT_for_dtype(datetime64ns_dtype) NaTD = NaT_for_dtype(datetime64D_dtype) _FILLVALUE_DEFAULTS = { bool_dtype: False, float32_dtype: nan, float64_dtype: nan, datetime64ns_dtype: NaTns, object_dtype: None, } INT_DTYPES_BY_SIZE_BYTES = OrderedDict([ (1, dtype('int8')), (2, dtype('int16')), (4, dtype('int32')), (8, dtype('int64')), ]) UNSIGNED_INT_DTYPES_BY_SIZE_BYTES = OrderedDict([ (1, dtype('uint8')), (2, dtype('uint16')), (4, dtype('uint32')), (8, dtype('uint64')), ]) def int_dtype_with_size_in_bytes(size): try: return INT_DTYPES_BY_SIZE_BYTES[size] except KeyError: raise ValueError("No integral dtype whose size is %d bytes." % size) def unsigned_int_dtype_with_size_in_bytes(size): try: return UNSIGNED_INT_DTYPES_BY_SIZE_BYTES[size] except KeyError: raise ValueError( "No unsigned integral dtype whose size is %d bytes." % size ) class NoDefaultMissingValue(Exception): pass def make_kind_check(python_types, numpy_kind): """ Make a function that checks whether a scalar or array is of a given kind (e.g. float, int, datetime, timedelta). """ def check(value): if hasattr(value, 'dtype'): return value.dtype.kind == numpy_kind return isinstance(value, python_types) return check is_float = make_kind_check(float, 'f') is_int = make_kind_check(int, 'i') is_datetime = make_kind_check(datetime, 'M') is_object = make_kind_check(object, 'O') def coerce_to_dtype(dtype, value): """ Make a value with the specified numpy dtype. Only datetime64[ns] and datetime64[D] are supported for datetime dtypes. """ name = dtype.name if name.startswith('datetime64'): if name == 'datetime64[D]': return make_datetime64D(value) elif name == 'datetime64[ns]': return make_datetime64ns(value) else: raise TypeError( "Don't know how to coerce values of dtype %s" % dtype ) return dtype.type(value) def default_missing_value_for_dtype(dtype): """ Get the default fill value for `dtype`. 
""" try: return _FILLVALUE_DEFAULTS[dtype] except KeyError: raise NoDefaultMissingValue( "No default value registered for dtype %s." % dtype ) def repeat_first_axis(array, count): """ Restride `array` to repeat `count` times along the first axis. Parameters ---------- array : np.array The array to restride. count : int Number of times to repeat `array`. Returns ------- result : array Array of shape (count,) + array.shape, composed of `array` repeated `count` times along the first axis. Example ------- >>> from numpy import arange >>> a = arange(3); a array([0, 1, 2]) >>> repeat_first_axis(a, 2) array([[0, 1, 2], [0, 1, 2]]) >>> repeat_first_axis(a, 4) array([[0, 1, 2], [0, 1, 2], [0, 1, 2], [0, 1, 2]]) Notes ---- The resulting array will share memory with `array`. If you need to assign to the input or output, you should probably make a copy first. See Also -------- repeat_last_axis """ return as_strided(array, (count,) + array.shape, (0,) + array.strides) def repeat_last_axis(array, count): """ Restride `array` to repeat `count` times along the last axis. Parameters ---------- array : np.array The array to restride. count : int Number of times to repeat `array`. Returns ------- result : array Array of shape array.shape + (count,) composed of `array` repeated `count` times along the last axis. Example ------- >>> from numpy import arange >>> a = arange(3); a array([0, 1, 2]) >>> repeat_last_axis(a, 2) array([[0, 0], [1, 1], [2, 2]]) >>> repeat_last_axis(a, 4) array([[0, 0, 0, 0], [1, 1, 1, 1], [2, 2, 2, 2]]) Notes ---- The resulting array will share memory with `array`. If you need to assign to the input or output, you should probably make a copy first. See Also -------- repeat_last_axis """ return as_strided(array, array.shape + (count,), array.strides + (0,)) def rolling_window(array, length): """ Restride an array of shape (X_0, ... X_N) into an array of shape (length, X_0 - length + 1, ... X_N) where each slice at index i along the first axis is equivalent to result[i] = array[length * i:length * (i + 1)] Parameters ---------- array : np.ndarray The base array. length : int Length of the synthetic first axis to generate. Returns ------- out : np.ndarray Example ------- >>> from numpy import arange >>> a = arange(25).reshape(5, 5) >>> a array([[ 0, 1, 2, 3, 4], [ 5, 6, 7, 8, 9], [10, 11, 12, 13, 14], [15, 16, 17, 18, 19], [20, 21, 22, 23, 24]]) >>> rolling_window(a, 2) array([[[ 0, 1, 2, 3, 4], [ 5, 6, 7, 8, 9]], <BLANKLINE> [[ 5, 6, 7, 8, 9], [10, 11, 12, 13, 14]], <BLANKLINE> [[10, 11, 12, 13, 14], [15, 16, 17, 18, 19]], <BLANKLINE> [[15, 16, 17, 18, 19], [20, 21, 22, 23, 24]]]) """ orig_shape = array.shape if not orig_shape: raise IndexError("Can't restride a scalar.") elif orig_shape[0] <= length: raise IndexError( "Can't restride array of shape {shape} with" " a window length of {len}".format( shape=orig_shape, len=length, ) ) num_windows = (orig_shape[0] - length + 1) new_shape = (num_windows, length) + orig_shape[1:] new_strides = (array.strides[0],) + array.strides return as_strided(array, new_shape, new_strides) # Sentinel value that isn't NaT. _notNaT = make_datetime64D(0) iNaT = int(NaTns.view(int64_dtype)) assert iNaT == NaTD.view(int64_dtype), "iNaTns != iNaTD" def isnat(obj): """ Check if a value is np.NaT. """ if obj.dtype.kind not in ('m', 'M'): raise ValueError("%s is not a numpy datetime or timedelta") return obj.view(int64_dtype) == iNaT def is_missing(data, missing_value): """ Generic is_missing function that handles NaN and NaT. 
""" if is_float(data) and isnan(missing_value): return isnan(data) elif is_datetime(data) and isnat(missing_value): return isnat(data) elif is_object(data) and missing_value is None: # XXX: Older versions of numpy returns True/False for array == # None. Work around this by boxing None in a 1x1 array, which causes # numpy to do the broadcasted comparison we want. return data == np.array([missing_value]) return (data == missing_value) def same(x, y): """ Check if two scalar values are "the same". Returns True if `x == y`, or if x and y are both NaN or both NaT. """ if is_float(x) and isnan(x) and is_float(y) and isnan(y): return True elif is_datetime(x) and isnat(x) and is_datetime(y) and isnat(y): return True else: return x == y def busday_count_mask_NaT(begindates, enddates, out=None): """ Simple of numpy.busday_count that returns `float` arrays rather than int arrays, and handles `NaT`s by returning `NaN`s where the inputs were `NaT`. Doesn't support custom weekdays or calendars, but probably should in the future. See Also -------- np.busday_count """ if out is None: out = empty(broadcast(begindates, enddates).shape, dtype=float) beginmask = isnat(begindates) endmask = isnat(enddates) out = busday_count( # Temporarily fill in non-NaT values. where(beginmask, _notNaT, begindates), where(endmask, _notNaT, enddates), out=out, ) # Fill in entries where either comparison was NaT with nan in the output. out[beginmask | endmask] = nan return out class WarningContext(object): """ Re-usable contextmanager for contextually managing warnings. """ def __init__(self, *warning_specs): self._warning_specs = warning_specs self._catchers = [] def __enter__(self): catcher = catch_warnings() catcher.__enter__() self._catchers.append(catcher) for args, kwargs in self._warning_specs: filterwarnings(*args, **kwargs) return self def __exit__(self, *exc_info): catcher = self._catchers.pop() return catcher.__exit__(*exc_info) def ignore_nanwarnings(): """ Helper for building a WarningContext that ignores warnings from numpy's nanfunctions. """ return WarningContext( ( ('ignore',), {'category': RuntimeWarning, 'module': 'numpy.lib.nanfunctions'}, ) ) def vectorized_is_element(array, choices): """ Check if each element of ``array`` is in choices. Parameters ---------- array : np.ndarray choices : object Object implementing __contains__. Returns ------- was_element : np.ndarray[bool] Array indicating whether each element of ``array`` was in ``choices``. """ return vectorize(choices.__contains__, otypes=[bool])(array) def as_column(a): """ Convert an array of shape (N,) into an array of shape (N, 1). This is equivalent to `a[:, np.newaxis]`. Parameters ---------- a : np.ndarray Example ------- >>> import numpy as np >>> a = np.arange(5) >>> a array([0, 1, 2, 3, 4]) >>> as_column(a) array([[0], [1], [2], [3], [4]]) >>> as_column(a).shape (5, 1) """ if a.ndim != 1: raise ValueError( "as_column expected an 1-dimensional array, " "but got an array of shape %s" % (a.shape,) ) return a[:, None] def changed_locations(a, include_first): """ Compute indices of values in ``a`` that differ from the previous value. Parameters ---------- a : np.ndarray The array on which to indices of change. include_first : bool Whether or not to consider the first index of the array as "changed". 
Example ------- >>> import numpy as np >>> changed_locations(np.array([0, 0, 5, 5, 1, 1]), include_first=False) array([2, 4], dtype=int32) >>> changed_locations(np.array([0, 0, 5, 5, 1, 1]), include_first=True) array([0, 2, 4]) """ if a.ndim > 1: raise ValueError("changed_locations only supports 1D arrays.") indices = flatnonzero(diff(a)) + 1 if not include_first: return indices return hstack([[0], indices]) def compare_datetime_arrays(x, y): """ Compare datetime64 ndarrays, treating NaT values as equal. """ return array_equal(x.view('int64'), y.view('int64')) def bytes_array_to_native_str_object_array(a): """Convert an array of dtype S to an object array containing `str`. """ if six.PY2: return a.astype(object) else: return a.astype(str).astype(object)
zipline-trader
/zipline_trader-1.6.1-cp36-cp36m-win32.whl/zipline/utils/numpy_utils.py
numpy_utils.py
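rolling_window, repeat_first_axis and repeat_last_axis are all zero-copy restrides built on as_strided. A quick sketch showing that the results are views over the original memory, assuming zipline-trader is importable; the array contents are arbitrary.

import numpy as np

from zipline.utils.numpy_utils import repeat_first_axis, rolling_window

a = np.arange(10.0)

windows = rolling_window(a, 3)
print(windows.shape)                    # (8, 3)
print(np.may_share_memory(windows, a))  # True: no data was copied

rows = repeat_first_axis(a, 4)
print(rows.shape)                       # (4, 10)

# Because these are views, writing through them writes the base array;
# copy() first if you need to mutate.
means = windows.mean(axis=1)
print(means[:3])                        # [1. 2. 3.]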
import functools import inspect from operator import methodcaller import sys from six import PY2 if PY2: from abc import ABCMeta from types import DictProxyType from cgi import escape as escape_html import contextlib from contextlib2 import ExitStack from ctypes import py_object, pythonapi _new_mappingproxy = pythonapi.PyDictProxy_New _new_mappingproxy.argtypes = [py_object] _new_mappingproxy.restype = py_object # Make mappingproxy a "class" so that we can use multipledispatch # with it or do an ``isinstance(ob, mappingproxy)`` check in Python 2. # You will never actually get an instance of this object, you will just # get instances of ``types.DictProxyType``; however, ``mappingproxy`` is # registered as a virtual super class so ``isinstance`` and ``issubclass`` # will work as expected. The only thing that will appear strange is that: # ``type(mappingproxy({})) is not mappingproxy``, but you shouldn't do # that. class mappingproxy(object): __metaclass__ = ABCMeta def __new__(cls, *args, **kwargs): return _new_mappingproxy(*args, **kwargs) mappingproxy.register(DictProxyType) # clear names not imported in the other branch del DictProxyType del ABCMeta del py_object del pythonapi def exc_clear(): sys.exc_clear() def consistent_round(val): return round(val) def update_wrapper(wrapper, wrapped, assigned=functools.WRAPPER_ASSIGNMENTS, updated=functools.WRAPPER_UPDATES): """Backport of Python 3's functools.update_wrapper for __wrapped__. """ for attr in assigned: try: value = getattr(wrapped, attr) except AttributeError: pass else: setattr(wrapper, attr, value) for attr in updated: getattr(wrapper, attr).update(getattr(wrapped, attr, {})) # Issue #17482: set __wrapped__ last so we don't inadvertently copy it # from the wrapped function when updating __dict__ wrapper.__wrapped__ = wrapped # Return the wrapper so this can be used as a decorator via partial() return wrapper def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS, updated=functools.WRAPPER_UPDATES): """Decorator factory to apply update_wrapper() to a wrapper function Returns a decorator that invokes update_wrapper() with the decorated function as the wrapper argument and the arguments to wraps() as the remaining arguments. Default arguments are as for update_wrapper(). This is a convenience function to simplify applying partial() to update_wrapper(). """ return functools.partial(update_wrapper, wrapped=wrapped, assigned=assigned, updated=updated) values_as_list = methodcaller('values') # This is deprecated in python 3.6+. getargspec = inspect.getargspec # Updated version of contextlib.contextmanager that uses our updated # `wraps` to preserve function signatures. @wraps(contextlib.contextmanager) def contextmanager(f): @wraps(f) def helper(*args, **kwargs): return contextlib.GeneratorContextManager(f(*args, **kwargs)) return helper else: from contextlib import contextmanager, ExitStack from html import escape as escape_html from types import MappingProxyType as mappingproxy from math import ceil def exc_clear(): # exc_clear was removed in Python 3. The except statement automatically # clears the exception. pass def consistent_round(val): if (val % 1) >= 0.5: return ceil(val) else: return round(val) update_wrapper = functools.update_wrapper wraps = functools.wraps def values_as_list(dictionary): """Return the dictionary values as a list without forcing a copy in Python 2. 
""" return list(dictionary.values()) def getargspec(f): full_argspec = inspect.getfullargspec(f) return inspect.ArgSpec( args=full_argspec.args, varargs=full_argspec.varargs, keywords=full_argspec.varkw, defaults=full_argspec.defaults, ) unicode = type(u'') __all__ = [ 'PY2', 'ExitStack', 'consistent_round', 'contextmanager', 'escape_html', 'exc_clear', 'mappingproxy', 'unicode', 'update_wrapper', 'values_as_list', 'wraps', ]
zipline-trader
/zipline_trader-1.6.1-cp36-cp36m-win32.whl/zipline/utils/compat.py
compat.py
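consistent_round papers over the difference between Python 3's round-half-to-even and Python 2's behaviour by rounding .5 cases up on Python 3. A tiny demonstration, assuming zipline-trader is importable.

from zipline.utils.compat import consistent_round

# Python 3's built-in round() is round-half-to-even ("banker's rounding").
print(round(2.5), round(3.5))                        # 2 4
# consistent_round always rounds the .5 case up.
print(consistent_round(2.5), consistent_round(3.5))  # 3 4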
from collections import MutableMapping import errno from functools import partial import os import pickle from distutils import dir_util from shutil import rmtree, move from tempfile import mkdtemp, NamedTemporaryFile import pandas as pd from .compat import PY2 from .context_tricks import nop_context from .paths import ensure_directory from .sentinel import sentinel class Expired(Exception): """Marks that a :class:`CachedObject` has expired. """ ExpiredCachedObject = sentinel('ExpiredCachedObject') AlwaysExpired = sentinel('AlwaysExpired') class CachedObject(object): """ A simple struct for maintaining a cached object with an expiration date. Parameters ---------- value : object The object to cache. expires : datetime-like Expiration date of `value`. The cache is considered invalid for dates **strictly greater** than `expires`. Examples -------- >>> from pandas import Timestamp, Timedelta >>> expires = Timestamp('2014', tz='UTC') >>> obj = CachedObject(1, expires) >>> obj.unwrap(expires - Timedelta('1 minute')) 1 >>> obj.unwrap(expires) 1 >>> obj.unwrap(expires + Timedelta('1 minute')) ... # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... Expired: 2014-01-01 00:00:00+00:00 """ def __init__(self, value, expires): self._value = value self._expires = expires @classmethod def expired(cls): """Construct a CachedObject that's expired at any time. """ return cls(ExpiredCachedObject, expires=AlwaysExpired) def unwrap(self, dt): """ Get the cached value. Returns ------- value : object The cached value. Raises ------ Expired Raised when `dt` is greater than self.expires. """ expires = self._expires if expires is AlwaysExpired or expires < dt: raise Expired(self._expires) return self._value def _unsafe_get_value(self): """You almost certainly shouldn't use this.""" return self._value class ExpiringCache(object): """ A cache of multiple CachedObjects, which returns the wrapped the value or raises and deletes the CachedObject if the value has expired. Parameters ---------- cache : dict-like, optional An instance of a dict-like object which needs to support at least: `__del__`, `__getitem__`, `__setitem__` If `None`, than a dict is used as a default. cleanup : callable, optional A method that takes a single argument, a cached object, and is called upon expiry of the cached object, prior to deleting the object. If not provided, defaults to a no-op. Examples -------- >>> from pandas import Timestamp, Timedelta >>> expires = Timestamp('2014', tz='UTC') >>> value = 1 >>> cache = ExpiringCache() >>> cache.set('foo', value, expires) >>> cache.get('foo', expires - Timedelta('1 minute')) 1 >>> cache.get('foo', expires + Timedelta('1 minute')) Traceback (most recent call last): ... KeyError: 'foo' """ def __init__(self, cache=None, cleanup=lambda value_to_clean: None): if cache is not None: self._cache = cache else: self._cache = {} self.cleanup = cleanup def get(self, key, dt): """Get the value of a cached object. Parameters ---------- key : any The key to lookup. dt : datetime The time of the lookup. Returns ------- result : any The value for ``key``. Raises ------ KeyError Raised if the key is not in the cache or the value for the key has expired. """ try: return self._cache[key].unwrap(dt) except Expired: self.cleanup(self._cache[key]._unsafe_get_value()) del self._cache[key] raise KeyError(key) def set(self, key, value, expiration_dt): """Adds a new key value pair to the cache. Parameters ---------- key : any The key to use for the pair. 
value : any The value to store under the name ``key``. expiration_dt : datetime When should this mapping expire? The cache is considered invalid for dates **strictly greater** than ``expiration_dt``. """ self._cache[key] = CachedObject(value, expiration_dt) class dataframe_cache(MutableMapping): """A disk-backed cache for dataframes. ``dataframe_cache`` is a mutable mapping from string names to pandas DataFrame objects. This object may be used as a context manager to delete the cache directory on exit. Parameters ---------- path : str, optional The directory path to the cache. Files will be written as ``path/<keyname>``. lock : Lock, optional Thread lock for multithreaded/multiprocessed access to the cache. If not provided no locking will be used. clean_on_failure : bool, optional Should the directory be cleaned up if an exception is raised in the context manager. serialize : {'msgpack', 'pickle:<n>'}, optional How should the data be serialized. If ``'pickle'`` is passed, an optional pickle protocol can be passed like: ``'pickle:3'`` which says to use pickle protocol 3. Notes ----- The syntax ``cache[:]`` will load all key:value pairs into memory as a dictionary. The cache uses a temporary file format that is subject to change between versions of zipline. """ def __init__(self, path=None, lock=None, clean_on_failure=True, serialization='msgpack'): self.path = path if path is not None else mkdtemp() self.lock = lock if lock is not None else nop_context self.clean_on_failure = clean_on_failure if serialization == 'msgpack': self.serialize = self._serialize_msgpack self.deserialize = self._read_msgpack self._protocol = None else: s = serialization.split(':', 1) if s[0] != 'pickle': raise ValueError( "'serialization' must be either 'msgpack' or 'pickle[:n]'", ) self._protocol = int(s[1]) if len(s) == 2 else None self.serialize = self._serialize_pickle self.deserialize = ( pickle.load if PY2 else partial(pickle.load, encoding='latin-1') ) ensure_directory(self.path) def _read_msgpack(self): pass def _serialize_msgpack(self, df, path): print('serialize msgpack') exit() def _serialize_pickle(self, df, path): with open(path, 'wb') as f: pickle.dump(df, f, protocol=self._protocol) def _keypath(self, key): return os.path.join(self.path, key) def __enter__(self): return self def __exit__(self, type_, value, tb): if not (self.clean_on_failure or value is None): # we are not cleaning up after a failure and there was an exception return with self.lock: rmtree(self.path) def __getitem__(self, key): if key == slice(None): return dict(self.items()) with self.lock: try: with open(self._keypath(key), 'rb') as f: return self.deserialize(f) except IOError as e: if e.errno != errno.ENOENT: raise raise KeyError(key) def __setitem__(self, key, value): with self.lock: self.serialize(value, self._keypath(key)) def __delitem__(self, key): with self.lock: try: os.remove(self._keypath(key)) except OSError as e: if e.errno == errno.ENOENT: # raise a keyerror if this directory did not exist raise KeyError(key) # reraise the actual oserror otherwise raise def __iter__(self): return iter(os.listdir(self.path)) def __len__(self): return len(os.listdir(self.path)) def __repr__(self): return '<%s: keys={%s}>' % ( type(self).__name__, ', '.join(map(repr, sorted(self))), ) class working_file(object): """A context manager for managing a temporary file that will be moved to a non-temporary location if no exceptions are raised in the context. Parameters ---------- final_path : str The location to move the file when committing. 
*args, **kwargs Forwarded to NamedTemporaryFile. Notes ----- The file is moved on __exit__ if there are no exceptions. ``working_file`` uses :func:`shutil.move` to move the actual files, meaning it has as strong of guarantees as :func:`shutil.move`. """ def __init__(self, final_path, *args, **kwargs): self._tmpfile = NamedTemporaryFile(delete=False, *args, **kwargs) self._final_path = final_path @property def path(self): """Alias for ``name`` to be consistent with :class:`~zipline.utils.cache.working_dir`. """ return self._tmpfile.name def _commit(self): """Sync the temporary file to the final path. """ move(self.path, self._final_path) def __enter__(self): self._tmpfile.__enter__() return self def __exit__(self, *exc_info): self._tmpfile.__exit__(*exc_info) if exc_info[0] is None: self._commit() class working_dir(object): """A context manager for managing a temporary directory that will be moved to a non-temporary location if no exceptions are raised in the context. Parameters ---------- final_path : str The location to move the file when committing. *args, **kwargs Forwarded to tmp_dir. Notes ----- The file is moved on __exit__ if there are no exceptions. ``working_dir`` uses :func:`dir_util.copy_tree` to move the actual files, meaning it has as strong of guarantees as :func:`dir_util.copy_tree`. """ def __init__(self, final_path, *args, **kwargs): self.path = mkdtemp() self._final_path = final_path def ensure_dir(self, *path_parts): """Ensures a subdirectory of the working directory. Parameters ---------- path_parts : iterable[str] The parts of the path after the working directory. """ path = self.getpath(*path_parts) ensure_directory(path) return path def getpath(self, *path_parts): """Get a path relative to the working directory. Parameters ---------- path_parts : iterable[str] The parts of the path after the working directory. """ return os.path.join(self.path, *path_parts) def _commit(self): """Sync the temporary directory to the final path. """ dir_util.copy_tree(self.path, self._final_path) def __enter__(self): return self def __exit__(self, *exc_info): if exc_info[0] is None: self._commit() rmtree(self.path)
zipline-trader
/zipline_trader-1.6.1-cp36-cp36m-win32.whl/zipline/utils/cache.py
cache.py
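A minimal usage sketch of the caching helpers defined above, assuming the module layout shown in the path trailer; the keys, frames, and the ``'pickle:4'`` protocol choice are illustrative.

import pandas as pd

from zipline.utils.cache import ExpiringCache, dataframe_cache

# ExpiringCache: values are readable until the lookup time passes their
# expiration timestamp, after which lookups raise KeyError.
expires = pd.Timestamp('2014-01-02', tz='UTC')
cache = ExpiringCache()
cache.set('prices', pd.DataFrame({'close': [1.0, 2.0]}), expires)
cache.get('prices', expires)                     # returns the frame
# cache.get('prices', expires + pd.Timedelta('1 day'))  # -> KeyError

# dataframe_cache: a disk-backed name -> DataFrame mapping. Pickle
# serialization is used here so the example does not depend on pandas
# msgpack support; the temporary directory is removed on exit.
with dataframe_cache(serialization='pickle:4') as dfc:
    dfc['prices'] = pd.DataFrame({'close': [1.0, 2.0]})
    frame = dfc['prices']       # read a single cached frame back
    everything = dfc[:]         # load all cached frames as a dict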
from contextlib import contextmanager from copy import deepcopy from itertools import product import operator as op import warnings import numpy as np import pandas as pd from distutils.version import StrictVersion from trading_calendars.utils.pandas_utils import days_at_time # noqa: reexport pandas_version = StrictVersion(pd.__version__) new_pandas = pandas_version >= StrictVersion('0.19') skip_pipeline_new_pandas = \ 'Pipeline categoricals are not yet compatible with pandas >=0.19' if pandas_version >= StrictVersion('0.20'): def normalize_date(dt): """ Normalize datetime.datetime value to midnight. Returns datetime.date as a datetime.datetime at midnight Returns ------- normalized : datetime.datetime or Timestamp """ return dt.normalize() else: from pandas.tseries.tools import normalize_date # noqa def july_5th_holiday_observance(datetime_index): return datetime_index[datetime_index.year != 2013] def explode(df): """ Take a DataFrame and return a triple of (df.index, df.columns, df.values) """ return df.index, df.columns, df.values def _time_to_micros(time): """Convert a time into microseconds since midnight. Parameters ---------- time : datetime.time The time to convert. Returns ------- us : int The number of microseconds since midnight. Notes ----- This does not account for leap seconds or daylight savings. """ seconds = time.hour * 60 * 60 + time.minute * 60 + time.second return 1000000 * seconds + time.microsecond _opmap = dict(zip( product((True, False), repeat=3), product((op.le, op.lt), (op.le, op.lt), (op.and_, op.or_)), )) def mask_between_time(dts, start, end, include_start=True, include_end=True): """Return a mask of all of the datetimes in ``dts`` that are between ``start`` and ``end``. Parameters ---------- dts : pd.DatetimeIndex The index to mask. start : time Mask away times less than the start. end : time Mask away times greater than the end. include_start : bool, optional Inclusive on ``start``. include_end : bool, optional Inclusive on ``end``. Returns ------- mask : np.ndarray[bool] A bool array masking ``dts``. See Also -------- :meth:`pandas.DatetimeIndex.indexer_between_time` """ # This function is adapted from # `pandas.Datetime.Index.indexer_between_time` which was originally # written by Wes McKinney, Chang She, and Grant Roch. time_micros = dts._get_time_micros() start_micros = _time_to_micros(start) end_micros = _time_to_micros(end) left_op, right_op, join_op = _opmap[ bool(include_start), bool(include_end), start_micros <= end_micros, ] return join_op( left_op(start_micros, time_micros), right_op(time_micros, end_micros), ) def find_in_sorted_index(dts, dt): """ Find the index of ``dt`` in ``dts``. This function should be used instead of `dts.get_loc(dt)` if the index is large enough that we don't want to initialize a hash table in ``dts``. In particular, this should always be used on minutely trading calendars. Parameters ---------- dts : pd.DatetimeIndex Index in which to look up ``dt``. **Must be sorted**. dt : pd.Timestamp ``dt`` to be looked up. Returns ------- ix : int Integer index such that dts[ix] == dt. Raises ------ KeyError If dt is not in ``dts``. """ ix = dts.searchsorted(dt) if ix == len(dts) or dts[ix] != dt: raise LookupError("{dt} is not in {dts}".format(dt=dt, dts=dts)) return ix def nearest_unequal_elements(dts, dt): """ Find values in ``dts`` closest but not equal to ``dt``. Returns a pair of (last_before, first_after). When ``dt`` is less than any element in ``dts``, ``last_before`` is None. 
When ``dt`` is greater any element in ``dts``, ``first_after`` is None. ``dts`` must be unique and sorted in increasing order. Parameters ---------- dts : pd.DatetimeIndex Dates in which to search. dt : pd.Timestamp Date for which to find bounds. """ if not dts.is_unique: raise ValueError("dts must be unique") if not dts.is_monotonic_increasing: raise ValueError("dts must be sorted in increasing order") if not len(dts): return None, None sortpos = dts.searchsorted(dt, side='left') try: sortval = dts[sortpos] except IndexError: # dt is greater than any value in the array. return dts[-1], None if dt < sortval: lower_ix = sortpos - 1 upper_ix = sortpos elif dt == sortval: lower_ix = sortpos - 1 upper_ix = sortpos + 1 else: lower_ix = sortpos upper_ix = sortpos + 1 lower_value = dts[lower_ix] if lower_ix >= 0 else None upper_value = dts[upper_ix] if upper_ix < len(dts) else None return lower_value, upper_value def timedelta_to_integral_seconds(delta): """ Convert a pd.Timedelta to a number of seconds as an int. """ return int(delta.total_seconds()) def timedelta_to_integral_minutes(delta): """ Convert a pd.Timedelta to a number of minutes as an int. """ return timedelta_to_integral_seconds(delta) // 60 @contextmanager def ignore_pandas_nan_categorical_warning(): with warnings.catch_warnings(): # Pandas >= 0.18 doesn't like null-ish values in categories, but # avoiding that requires a broader change to how missing values are # handled in pipeline, so for now just silence the warning. warnings.filterwarnings( 'ignore', category=FutureWarning, ) yield # pd==.21.x enumarted list from pd.Index.get_indexers_list() # https://github.com/pandas-dev/pandas/blob/0.21.x/pandas/core/indexing.py#L29 # in pd==1 now mixins. maybe do not have to remove these? _INDEXER_NAMES = ['_ix', '_iloc', '_loc', '_at', '_iat'] def clear_dataframe_indexer_caches(df): """ Clear cached attributes from a pandas DataFrame. By default pandas memoizes indexers (`iloc`, `loc`, `ix`, etc.) objects on DataFrames, resulting in refcycles that can lead to unexpectedly long-lived DataFrames. This function attempts to clear those cycles by deleting the cached indexers from the frame. Parameters ---------- df : pd.DataFrame """ for attr in _INDEXER_NAMES: try: delattr(df, attr) except AttributeError: pass def categorical_df_concat(df_list, inplace=False): """ Prepare list of pandas DataFrames to be used as input to pd.concat. Ensure any columns of type 'category' have the same categories across each dataframe. Parameters ---------- df_list : list List of dataframes with same columns. inplace : bool True if input list can be modified. Default is False. Returns ------- concatenated : df Dataframe of concatenated list. """ if not inplace: df_list = deepcopy(df_list) # Assert each dataframe has the same columns/dtypes df = df_list[0] if not all([(df.dtypes.equals(df_i.dtypes)) for df_i in df_list[1:]]): raise ValueError("Input DataFrames must have the same columns/dtypes.") categorical_columns = df.columns[df.dtypes == 'category'] for col in categorical_columns: new_categories = _sort_set_none_first( _union_all(frame[col].cat.categories for frame in df_list) ) with ignore_pandas_nan_categorical_warning(): for df in df_list: df[col].cat.set_categories(new_categories, inplace=True) return pd.concat(df_list) def _union_all(iterables): """Union entries in ``iterables`` into a set. """ return set().union(*iterables) def _sort_set_none_first(set_): """Sort a set, sorting ``None`` before other elements, if present. 
""" if None in set_: set_.remove(None) out = [None] out.extend(sorted(set_)) set_.add(None) return out else: return sorted(set_) def empty_dataframe(*columns): """Create an empty dataframe with columns of particular types. Parameters ---------- *columns The (column_name, column_dtype) pairs. Returns ------- typed_dataframe : pd.DataFrame The empty typed dataframe. Examples -------- >>> df = empty_dataframe( ... ('a', 'int64'), ... ('b', 'float64'), ... ('c', 'datetime64[ns]'), ... ) >>> df Empty DataFrame Columns: [a, b, c] Index: [] df.dtypes a int64 b float64 c datetime64[ns] dtype: object """ return pd.DataFrame(np.array([], dtype=list(columns))) def check_indexes_all_same(indexes, message="Indexes are not equal."): """Check that a list of Index objects are all equal. Parameters ---------- indexes : iterable[pd.Index] Iterable of indexes to check. Raises ------ ValueError If the indexes are not all the same. """ iterator = iter(indexes) first = next(iterator) for other in iterator: same = (first == other) if not same.all(): bad_loc = np.flatnonzero(~same)[0] raise ValueError( "{}\nFirst difference is at index {}: " "{} != {}".format( message, bad_loc, first[bad_loc], other[bad_loc] ), )
zipline-trader
/zipline_trader-1.6.1-cp36-cp36m-win32.whl/zipline/utils/pandas_utils.py
pandas_utils.py
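A short sketch exercising two of the index helpers above; the session dates are illustrative.

import pandas as pd

from zipline.utils.pandas_utils import (
    find_in_sorted_index,
    nearest_unequal_elements,
)

sessions = pd.date_range('2014-01-02', periods=5, freq='B', tz='UTC')

# Exact lookup without building a hash table on the index.
find_in_sorted_index(sessions, sessions[2])          # -> 2

# Nearest sessions strictly before/after a date not in the index
# (2014-01-04 is a Saturday).
before, after = nearest_unequal_elements(
    sessions,
    pd.Timestamp('2014-01-04', tz='UTC'),
)
# before == 2014-01-03, after == 2014-01-06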
from errno import EEXIST import os from os.path import exists, expanduser, join import pandas as pd def hidden(path): """Check if a path is hidden. Parameters ---------- path : str A filepath. """ return os.path.split(path)[1].startswith('.') def ensure_directory(path): """ Ensure that a directory named "path" exists. """ try: os.makedirs(path) except OSError as exc: if exc.errno == EEXIST and os.path.isdir(path): return raise def ensure_directory_containing(path): """ Ensure that the directory containing `path` exists. This is just a convenience wrapper for doing:: ensure_directory(os.path.dirname(path)) """ ensure_directory(os.path.dirname(path)) def ensure_file(path): """ Ensure that a file exists. This will create any parent directories needed and create an empty file if it does not exist. Parameters ---------- path : str The file path to ensure exists. """ ensure_directory_containing(path) open(path, 'a+').close() # touch the file def update_modified_time(path, times=None): """ Updates the modified time of an existing file. This will create any parent directories needed and create an empty file if it does not exist. Parameters ---------- path : str The file path to update. times : tuple A tuple of size two; access time and modified time """ ensure_directory_containing(path) os.utime(path, times) def last_modified_time(path): """ Get the last modified time of path as a Timestamp. """ return pd.Timestamp(os.path.getmtime(path), unit='s', tz='UTC') def modified_since(path, dt): """ Check whether `path` was modified since `dt`. Returns False if path doesn't exist. Parameters ---------- path : str Path to the file to be checked. dt : pd.Timestamp The date against which to compare last_modified_time(path). Returns ------- was_modified : bool Will be ``False`` if path doesn't exists, or if its last modified date is earlier than or equal to `dt` """ return exists(path) and last_modified_time(path) > dt def zipline_root(environ=None): """ Get the root directory for all zipline-managed files. For testing purposes, this accepts a dictionary to interpret as the os environment. Parameters ---------- environ : dict, optional A dict to interpret as the os environment. Returns ------- root : string Path to the zipline root dir. """ if environ is None: environ = os.environ root = environ.get('ZIPLINE_ROOT', None) if root is None: root = expanduser('~/.zipline') return root def zipline_path(paths, environ=None): """ Get a path relative to the zipline root. Parameters ---------- paths : list[str] List of requested path pieces. environ : dict, optional An environment dict to forward to zipline_root. Returns ------- newpath : str The requested path joined with the zipline root. """ return join(zipline_root(environ=environ), *paths) def default_extension(environ=None): """ Get the path to the default zipline extension file. Parameters ---------- environ : dict, optional An environment dict to forwart to zipline_root. Returns ------- default_extension_path : str The file path to the default zipline extension file. """ return zipline_path(['extension.py'], environ=environ) def data_root(environ=None): """ The root directory for zipline data files. Parameters ---------- environ : dict, optional An environment dict to forward to zipline_root. Returns ------- data_root : str The zipline data root. """ return zipline_path(['data'], environ=environ) def ensure_data_root(environ=None): """ Ensure that the data root exists. 
""" ensure_directory(data_root(environ=environ)) def data_path(paths, environ=None): """ Get a path relative to the zipline data directory. Parameters ---------- paths : iterable[str] List of requested path pieces. environ : dict, optional An environment dict to forward to zipline_root. Returns ------- newpath : str The requested path joined with the zipline data root. """ return zipline_path(['data'] + list(paths), environ=environ) def cache_root(environ=None): """ The root directory for zipline cache files. Parameters ---------- environ : dict, optional An environment dict to forward to zipline_root. Returns ------- cache_root : str The zipline cache root. """ return zipline_path(['cache'], environ=environ) def ensure_cache_root(environ=None): """ Ensure that the data root exists. """ ensure_directory(cache_root(environ=environ)) def cache_path(paths, environ=None): """ Get a path relative to the zipline cache directory. Parameters ---------- paths : iterable[str] List of requested path pieces. environ : dict, optional An environment dict to forward to zipline_root. Returns ------- newpath : str The requested path joined with the zipline cache root. """ return zipline_path(['cache'] + list(paths), environ=environ)
zipline-trader
/zipline_trader-1.6.1-cp36-cp36m-win32.whl/zipline/utils/paths.py
paths.py
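A small sketch of how the path helpers resolve against an explicit environment dict instead of ``os.environ``; the root value is illustrative and the joined results are shown with POSIX separators.

from zipline.utils.paths import cache_path, data_path, zipline_root

environ = {'ZIPLINE_ROOT': '/tmp/zipline-example'}

zipline_root(environ=environ)
# '/tmp/zipline-example'

data_path(['quandl', 'assets.sqlite'], environ=environ)
# '/tmp/zipline-example/data/quandl/assets.sqlite'

cache_path(['bundle.cache'], environ=environ)
# '/tmp/zipline-example/cache/bundle.cache'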
from collections import namedtuple from itertools import chain from six.moves import map, zip_longest from zipline.errors import ZiplineError from zipline.utils.compat import getargspec Argspec = namedtuple('Argspec', ['args', 'starargs', 'kwargs']) def singleton(cls): instances = {} def getinstance(): if cls not in instances: instances[cls] = cls() return instances[cls] return getinstance @singleton class Ignore(object): def __str__(self): return 'Argument.ignore' __repr__ = __str__ @singleton class NoDefault(object): def __str__(self): return 'Argument.no_default' __repr__ = __str__ @singleton class AnyDefault(object): def __str__(self): return 'Argument.any_default' __repr__ = __str__ class Argument(namedtuple('Argument', ['name', 'default'])): """ An argument to a function. Argument.no_default is a value representing no default to the argument. Argument.ignore is a value that says you should ignore the default value. """ no_default = NoDefault() any_default = AnyDefault() ignore = Ignore() def __new__(cls, name=ignore, default=ignore): return super(Argument, cls).__new__(cls, name, default) def __str__(self): if self.has_no_default(self) or self.ignore_default(self): return str(self.name) else: return '='.join([str(self.name), str(self.default)]) def __repr__(self): return 'Argument(%s, %s)' % (repr(self.name), repr(self.default)) def _defaults_match(self, arg): return any(map(Argument.ignore_default, [self, arg])) \ or (self.default is Argument.any_default and arg.default is not Argument.no_default) \ or (arg.default is Argument.any_default and self.default is not Argument.no_default) \ or self.default == arg.default def _names_match(self, arg): return self.name == arg.name \ or self.name is Argument.ignore \ or arg.name is Argument.ignore def matches(self, arg): return self._names_match(arg) and self._defaults_match(arg) __eq__ = matches @staticmethod def parse_argspec(callable_): """ Takes a callable and returns a tuple with the list of Argument objects, the name of *args, and the name of **kwargs. If *args or **kwargs is not present, it will be None. This returns a namedtuple called Argspec that has three fields named: args, starargs, and kwargs. """ args, varargs, keywords, defaults = getargspec(callable_) defaults = list(defaults or []) if getattr(callable_, '__self__', None) is not None: # This is a bound method, drop the self param. args = args[1:] first_default = len(args) - len(defaults) return Argspec( [Argument(arg, Argument.no_default if n < first_default else defaults[n - first_default]) for n, arg in enumerate(args)], varargs, keywords, ) @staticmethod def has_no_default(arg): return arg.default is Argument.no_default @staticmethod def ignore_default(arg): return arg.default is Argument.ignore def _expect_extra(expected, present, exc_unexpected, exc_missing, exc_args): """ Checks for the presence of an extra to the argument list. Raises expections if this is unexpected or if it is missing and expected. """ if present: if not expected: raise exc_unexpected(*exc_args) elif expected and expected is not Argument.ignore: raise exc_missing(*exc_args) def verify_callable_argspec(callable_, expected_args=Argument.ignore, expect_starargs=Argument.ignore, expect_kwargs=Argument.ignore): """ Checks the callable_ to make sure that it satisfies the given expectations. expected_args should be an iterable of Arguments in the order you expect to receive them. expect_starargs means that the function should or should not take a *args param. 
expect_kwargs says the callable should or should not take **kwargs param. If expected_args, expect_starargs, or expect_kwargs is Argument.ignore, then the checks related to that argument will not occur. Example usage: callable_check( f, [Argument('a'), Argument('b', 1)], expect_starargs=True, expect_kwargs=Argument.ignore ) """ if not callable(callable_): raise NotCallable(callable_) expected_arg_list = list( expected_args if expected_args is not Argument.ignore else [] ) args, starargs, kwargs = Argument.parse_argspec(callable_) exc_args = callable_, args, starargs, kwargs # Check the *args. _expect_extra( expect_starargs, starargs, UnexpectedStarargs, NoStarargs, exc_args, ) # Check the **kwargs. _expect_extra( expect_kwargs, kwargs, UnexpectedKwargs, NoKwargs, exc_args, ) if expected_args is Argument.ignore: # Ignore the argument list checks. return if len(args) < len(expected_arg_list): # One or more argument that we expected was not present. raise NotEnoughArguments( callable_, args, starargs, kwargs, [arg for arg in expected_arg_list if arg not in args], ) elif len(args) > len(expected_arg_list): raise TooManyArguments( callable_, args, starargs, kwargs ) # Empty argument that will not match with any actual arguments. missing_arg = Argument(object(), object()) for expected, provided in zip_longest(expected_arg_list, args, fillvalue=missing_arg): if not expected.matches(provided): raise MismatchedArguments( callable_, args, starargs, kwargs ) class BadCallable(TypeError, AssertionError, ZiplineError): """ The given callable is not structured in the expected way. """ _lambda_name = (lambda: None).__name__ def __init__(self, callable_, args, starargs, kwargs): self.callable_ = callable_ self.args = args self.starargs = starargs self.kwargsname = kwargs self.kwargs = {} def format_callable(self): if self.callable_.__name__ == self._lambda_name: fmt = '%s %s' name = 'lambda' else: fmt = '%s(%s)' name = self.callable_.__name__ return fmt % ( name, ', '.join( chain( (str(arg) for arg in self.args), ('*' + sa for sa in (self.starargs,) if sa is not None), ('**' + ka for ka in (self.kwargsname,) if ka is not None), ) ) ) @property def msg(self): return str(self) class NoStarargs(BadCallable): def __str__(self): return '%s does not allow for *args' % self.format_callable() class UnexpectedStarargs(BadCallable): def __str__(self): return '%s should not allow for *args' % self.format_callable() class NoKwargs(BadCallable): def __str__(self): return '%s does not allow for **kwargs' % self.format_callable() class UnexpectedKwargs(BadCallable): def __str__(self): return '%s should not allow for **kwargs' % self.format_callable() class NotCallable(BadCallable): """ The provided 'callable' is not actually a callable. """ def __init__(self, callable_): self.callable_ = callable_ def __str__(self): return '%s is not callable' % self.format_callable() def format_callable(self): try: return self.callable_.__name__ except AttributeError: return str(self.callable_) class NotEnoughArguments(BadCallable): """ The callback does not accept enough arguments. 
""" def __init__(self, callable_, args, starargs, kwargs, missing_args): super(NotEnoughArguments, self).__init__( callable_, args, starargs, kwargs ) self.missing_args = missing_args def __str__(self): missing_args = list(map(str, self.missing_args)) return '%s is missing argument%s: %s' % ( self.format_callable(), 's' if len(missing_args) > 1 else '', ', '.join(missing_args), ) class TooManyArguments(BadCallable): """ The callback cannot be called by passing the expected number of arguments. """ def __str__(self): return '%s accepts too many arguments' % self.format_callable() class MismatchedArguments(BadCallable): """ The argument lists are of the same lengths, but not in the correct order. """ def __str__(self): return '%s accepts mismatched parameters' % self.format_callable()
zipline-trader
/zipline_trader-1.6.1-cp36-cp36m-win32.whl/zipline/utils/argcheck.py
argcheck.py
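A sketch of validating a callback's signature with ``verify_callable_argspec``; the callback names are illustrative.

from zipline.utils.argcheck import Argument, verify_callable_argspec

def handle_data(context, data):
    pass

# Passes: the callback accepts exactly the two expected arguments.
verify_callable_argspec(
    handle_data,
    expected_args=[Argument('context'), Argument('data')],
)

def bad_callback(context):
    pass

# Raises NotEnoughArguments because 'data' is missing.
try:
    verify_callable_argspec(
        bad_callback,
        expected_args=[Argument('context'), Argument('data')],
    )
except Exception as e:
    print(type(e).__name__, ':', e)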
@object.__new__ class nop_context(object): """A nop context manager. """ def __enter__(self): pass def __exit__(self, *excinfo): pass def _nop(*args, **kwargs): pass class CallbackManager(object): """Create a context manager from a pre-execution callback and a post-execution callback. Parameters ---------- pre : (...) -> any, optional A pre-execution callback. This will be passed ``*args`` and ``**kwargs``. post : (...) -> any, optional A post-execution callback. This will be passed ``*args`` and ``**kwargs``. Notes ----- The enter value of this context manager will be the result of calling ``pre(*args, **kwargs)`` Examples -------- >>> def pre(where): ... print('entering %s block' % where) >>> def post(where): ... print('exiting %s block' % where) >>> manager = CallbackManager(pre, post) >>> with manager('example'): ... print('inside example block') entering example block inside example block exiting example block These are reusable with different args: >>> with manager('another'): ... print('inside another block') entering another block inside another block exiting another block """ def __init__(self, pre=None, post=None): self.pre = pre if pre is not None else _nop self.post = post if post is not None else _nop def __call__(self, *args, **kwargs): return _ManagedCallbackContext(self.pre, self.post, args, kwargs) # special case, if no extra args are passed make this a context manager # which forwards no args to pre and post def __enter__(self): return self.pre() def __exit__(self, *excinfo): self.post() class _ManagedCallbackContext(object): def __init__(self, pre, post, args, kwargs): self._pre = pre self._post = post self._args = args self._kwargs = kwargs def __enter__(self): return self._pre(*self._args, **self._kwargs) def __exit__(self, *excinfo): self._post(*self._args, **self._kwargs)
zipline-trader
/zipline_trader-1.6.1-cp36-cp36m-win32.whl/zipline/utils/context_tricks.py
context_tricks.py
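The doctests above cover the argument-forwarding form; the sketch below shows the no-argument form, where the manager itself is the context manager, plus ``nop_context`` as a stand-in lock.

from zipline.utils.context_tricks import CallbackManager, nop_context

manager = CallbackManager(
    pre=lambda: print('start'),
    post=lambda: print('stop'),
)

# No-argument form: __enter__ calls ``pre()`` and __exit__ calls ``post()``.
with manager:
    print('working')

# nop_context can be passed wherever an optional lock/context is expected,
# e.g. it is the default lock used by dataframe_cache above.
with nop_context:
    pass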
import operator as op from six import PY2 from toolz import peek from zipline.utils.functional import foldr if PY2: class range(object): """Lazy range object with constant time containment check. The arguments are the same as ``range``. """ __slots__ = 'start', 'stop', 'step' def __init__(self, stop, *args): if len(args) > 2: raise TypeError( 'range takes at most 3 arguments (%d given)' % len(args) ) if not args: self.start = 0 self.stop = stop self.step = 1 else: self.start = stop self.stop = args[0] try: self.step = args[1] except IndexError: self.step = 1 if self.step == 0: raise ValueError('range step must not be zero') def __iter__(self): """ Examples -------- >>> list(range(1)) [0] >>> list(range(5)) [0, 1, 2, 3, 4] >>> list(range(1, 5)) [1, 2, 3, 4] >>> list(range(0, 5, 2)) [0, 2, 4] >>> list(range(5, 0, -1)) [5, 4, 3, 2, 1] >>> list(range(5, 0, 1)) [] """ n = self.start stop = self.stop step = self.step cmp_ = op.lt if step > 0 else op.gt while cmp_(n, stop): yield n n += step _ops = ( (op.gt, op.ge), (op.le, op.lt), ) def __contains__(self, other, _ops=_ops): # Algorithm taken from CPython # Objects/rangeobject.c:range_contains_long start = self.start step = self.step cmp_start, cmp_stop = _ops[step > 0] return ( cmp_start(start, other) and cmp_stop(other, self.stop) and (other - start) % step == 0 ) del _ops def __len__(self): """ Examples -------- >>> len(range(1)) 1 >>> len(range(5)) 5 >>> len(range(1, 5)) 4 >>> len(range(0, 5, 2)) 3 >>> len(range(5, 0, -1)) 5 >>> len(range(5, 0, 1)) 0 """ # Algorithm taken from CPython # rangeobject.c:compute_range_length step = self.step if step > 0: low = self.start high = self.stop else: low = self.stop high = self.start step = -step if low >= high: return 0 return (high - low - 1) // step + 1 def __repr__(self): return '%s(%s, %s%s)' % ( type(self).__name__, self.start, self.stop, (', ' + str(self.step)) if self.step != 1 else '', ) def __hash__(self): return hash((type(self), self.start, self.stop, self.step)) def __eq__(self, other): """ Examples -------- >>> range(1) == range(1) True >>> range(0, 5, 2) == range(0, 5, 2) True >>> range(5, 0, -2) == range(5, 0, -2) True >>> range(1) == range(2) False >>> range(0, 5, 2) == range(0, 5, 3) False """ return all( getattr(self, attr) == getattr(other, attr) for attr in self.__slots__ ) else: range = range def from_tuple(tup): """Convert a tuple into a range with error handling. Parameters ---------- tup : tuple (len 2 or 3) The tuple to turn into a range. Returns ------- range : range The range from the tuple. Raises ------ ValueError Raised when the tuple length is not 2 or 3. """ if len(tup) not in (2, 3): raise ValueError( 'tuple must contain 2 or 3 elements, not: %d (%r' % ( len(tup), tup, ), ) return range(*tup) def maybe_from_tuple(tup_or_range): """Convert a tuple into a range but pass ranges through silently. This is useful to ensure that input is a range so that attributes may be accessed with `.start`, `.stop` or so that containment checks are constant time. Parameters ---------- tup_or_range : tuple or range A tuple to pass to from_tuple or a range to return. Returns ------- range : range The input to convert to a range. Raises ------ ValueError Raised when the input is not a tuple or a range. ValueError is also raised if the input is a tuple whose length is not 2 or 3. 
""" if isinstance(tup_or_range, tuple): return from_tuple(tup_or_range) elif isinstance(tup_or_range, range): return tup_or_range raise ValueError( 'maybe_from_tuple expects a tuple or range, got %r: %r' % ( type(tup_or_range).__name__, tup_or_range, ), ) def _check_steps(a, b): """Check that the steps of ``a`` and ``b`` are both 1. Parameters ---------- a : range The first range to check. b : range The second range to check. Raises ------ ValueError Raised when either step is not 1. """ if a.step != 1: raise ValueError('a.step must be equal to 1, got: %s' % a.step) if b.step != 1: raise ValueError('b.step must be equal to 1, got: %s' % b.step) def overlap(a, b): """Check if two ranges overlap. Parameters ---------- a : range The first range. b : range The second range. Returns ------- overlaps : bool Do these ranges overlap. Notes ----- This function does not support ranges with step != 1. """ _check_steps(a, b) return a.stop >= b.start and b.stop >= a.start def merge(a, b): """Merge two ranges with step == 1. Parameters ---------- a : range The first range. b : range The second range. """ _check_steps(a, b) return range(min(a.start, b.start), max(a.stop, b.stop)) def _combine(n, rs): """helper for ``_group_ranges`` """ try: r, rs = peek(rs) except StopIteration: yield n return if overlap(n, r): yield merge(n, r) next(rs) for r in rs: yield r else: yield n for r in rs: yield r def group_ranges(ranges): """Group any overlapping ranges into a single range. Parameters ---------- ranges : iterable[ranges] A sorted sequence of ranges to group. Returns ------- grouped : iterable[ranges] A sorted sequence of ranges with overlapping ranges merged together. """ return foldr(_combine, ranges, ()) def sorted_diff(rs, ss): try: r, rs = peek(rs) except StopIteration: return try: s, ss = peek(ss) except StopIteration: for r in rs: yield r return rtup = (r.start, r.stop) stup = (s.start, s.stop) if rtup == stup: next(rs) next(ss) elif rtup < stup: yield next(rs) else: next(ss) for t in sorted_diff(rs, ss): yield t def intersecting_ranges(ranges): """Return any ranges that intersect. Parameters ---------- ranges : iterable[ranges] A sequence of ranges to check for intersections. Returns ------- intersections : iterable[ranges] A sequence of all of the ranges that intersected in ``ranges``. Examples -------- >>> ranges = [range(0, 1), range(2, 5), range(4, 7)] >>> list(intersecting_ranges(ranges)) [range(2, 5), range(4, 7)] >>> ranges = [range(0, 1), range(2, 3)] >>> list(intersecting_ranges(ranges)) [] >>> ranges = [range(0, 1), range(1, 2)] >>> list(intersecting_ranges(ranges)) [range(0, 1), range(1, 2)] """ ranges = sorted(ranges, key=op.attrgetter('start')) return sorted_diff(ranges, group_ranges(ranges))
zipline-trader
/zipline_trader-1.6.1-cp36-cp36m-win32.whl/zipline/utils/range.py
range.py
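A quick sketch of the tuple conversion and grouping helpers; the endpoints are illustrative.

from zipline.utils.range import from_tuple, group_ranges, overlap

a = from_tuple((0, 5))          # range(0, 5)
b = from_tuple((3, 8))          # range(3, 8)
c = from_tuple((10, 12))        # range(10, 12)

overlap(a, b)                   # True
overlap(b, c)                   # False

# Overlapping ranges are merged; disjoint ranges are left alone.
list(group_ranges([a, b, c]))   # [range(0, 8), range(10, 12)]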
import click import os import sys import warnings from functools import partial import pandas as pd try: from pygments import highlight from pygments.lexers import PythonLexer from pygments.formatters import TerminalFormatter PYGMENTS = True except ImportError: PYGMENTS = False import logbook import pandas as pd import six from toolz import concatv from trading_calendars import get_calendar from zipline.data import bundles from zipline.data.benchmarks import get_benchmark_returns_from_file from zipline.data.data_portal import DataPortal from zipline.data.data_portal_live import DataPortalLive from zipline.finance import metrics from zipline.finance.trading import SimulationParameters from zipline.pipeline.data import USEquityPricing from zipline.pipeline.loaders import USEquityPricingLoader import zipline.utils.paths as pth from zipline.extensions import load from zipline.errors import SymbolNotFound from zipline.algorithm import TradingAlgorithm, NoBenchmark from zipline.algorithm_live import LiveTradingAlgorithm from zipline.finance.blotter import Blotter log = logbook.Logger(__name__) class _RunAlgoError(click.ClickException, ValueError): """Signal an error that should have a different message if invoked from the cli. Parameters ---------- pyfunc_msg : str The message that will be shown when called as a python function. cmdline_msg : str, optional The message that will be shown on the command line. If not provided, this will be the same as ``pyfunc_msg` """ exit_code = 1 def __init__(self, pyfunc_msg, cmdline_msg=None): if cmdline_msg is None: cmdline_msg = pyfunc_msg super(_RunAlgoError, self).__init__(cmdline_msg) self.pyfunc_msg = pyfunc_msg def __str__(self): return self.pyfunc_msg def _run(handle_data, initialize, before_trading_start, analyze, algofile, algotext, defines, data_frequency, capital_base, bundle, bundle_timestamp, start, end, output, trading_calendar, print_algo, metrics_set, local_namespace, environ, blotter, benchmark_spec, broker, state_filename, realtime_bar_target, performance_callback, stop_execution_callback, teardown, execution_id): """Run a backtest for the given algorithm. This is shared between the cli and :func:`zipline.run_algo`. zipline-trader additions: broker - wrapper to connect to a real broker state_filename - saving the context of the algo to be able to restart performance_callback - a callback to send performance results everyday and not only at the end of the backtest. this allows to run live, and monitor the performance of the algorithm stop_execution_callback - A callback to check if execution should be stopped. it is used to be able to stop live trading (also simulation could be stopped using this) execution. if the callback returns True, then algo execution will be aborted. 
teardown - algo method like handle_data() or before_trading_start() that is called when the algo execution stops execution_id - unique id to identify this execution (backtest or live instance) """ bundle_data = bundles.load( bundle, environ, bundle_timestamp, ) if trading_calendar is None: trading_calendar = get_calendar('XNYS') # date parameter validation if trading_calendar.session_distance(start, end) < 1: raise _RunAlgoError( 'There are no trading days between %s and %s' % ( start.date(), end.date(), ), ) benchmark_sid, benchmark_returns = benchmark_spec.resolve( asset_finder=bundle_data.asset_finder, start_date=start, end_date=end, ) emission_rate = 'daily' if broker: emission_rate = 'minute' # if we run zipline as a command line tool, these will probably not be initiated if not start: start = pd.Timestamp.utcnow() if not end: # in cli mode, sessions are 1 day only. and it will be re-ran each day by user end = start + pd.Timedelta('1 day') if algotext is not None: if local_namespace: ip = get_ipython() # noqa namespace = ip.user_ns else: namespace = {} for assign in defines: try: name, value = assign.split('=', 2) except ValueError: raise ValueError( 'invalid define %r, should be of the form name=value' % assign, ) try: # evaluate in the same namespace so names may refer to # eachother namespace[name] = eval(value, namespace) except Exception as e: raise ValueError( 'failed to execute definition for name %r: %s' % (name, e), ) elif defines: raise _RunAlgoError( 'cannot pass define without `algotext`', "cannot pass '-D' / '--define' without '-t' / '--algotext'", ) else: namespace = {} if algofile is not None: algotext = algofile.read() if print_algo: if PYGMENTS: highlight( algotext, PythonLexer(), TerminalFormatter(), outfile=sys.stdout, ) else: click.echo(algotext) #first_trading_day = \ # bundle_data.equity_minute_bar_reader.first_trading_day first_trading_day = \ bundle_data.equity_daily_bar_reader.first_trading_day DataPortalClass = (partial(DataPortalLive, broker) if broker else DataPortal) data = DataPortalClass( bundle_data.asset_finder, trading_calendar=trading_calendar, first_trading_day=first_trading_day, equity_minute_reader=bundle_data.equity_minute_bar_reader, equity_daily_reader=bundle_data.equity_daily_bar_reader, adjustment_reader=bundle_data.adjustment_reader, ) pipeline_loader = USEquityPricingLoader.without_fx( bundle_data.equity_daily_bar_reader, bundle_data.adjustment_reader, ) def choose_loader(column): # TODO Domain bypass return pipeline_loader if column in USEquityPricing.columns: return pipeline_loader raise ValueError( "No PipelineLoader registered for column %s." 
% column ) if isinstance(metrics_set, six.string_types): try: metrics_set = metrics.load(metrics_set) except ValueError as e: raise _RunAlgoError(str(e)) if isinstance(blotter, six.string_types): try: blotter = load(Blotter, blotter) except ValueError as e: raise _RunAlgoError(str(e)) TradingAlgorithmClass = (partial(LiveTradingAlgorithm, broker=broker, state_filename=state_filename, realtime_bar_target=realtime_bar_target) if broker else TradingAlgorithm) try: perf = TradingAlgorithmClass( namespace=namespace, data_portal=data, get_pipeline_loader=choose_loader, trading_calendar=trading_calendar, sim_params=SimulationParameters( start_session=start, end_session=end, trading_calendar=trading_calendar, capital_base=capital_base, emission_rate=emission_rate, data_frequency=data_frequency, execution_id=execution_id ), metrics_set=metrics_set, blotter=blotter, benchmark_returns=benchmark_returns, benchmark_sid=benchmark_sid, performance_callback=performance_callback, stop_execution_callback=stop_execution_callback, **{ 'initialize': initialize, 'handle_data': handle_data, 'before_trading_start': before_trading_start, 'analyze': analyze, 'teardown': teardown, } if algotext is None else { 'algo_filename': getattr(algofile, 'name', '<algorithm>'), 'script': algotext, } ).run() except NoBenchmark: raise _RunAlgoError( ( 'No ``benchmark_spec`` was provided, and' ' ``zipline.api.set_benchmark`` was not called in' ' ``initialize``.' ), ( "Neither '--benchmark-symbol' nor '--benchmark-sid' was" " provided, and ``zipline.api.set_benchmark`` was not called" " in ``initialize``. Did you mean to pass '--no-benchmark'?" ), ) if output == '-': click.echo(str(perf)) elif output != os.devnull: # make the zipline magic not write any data perf.to_pickle(output) return perf # All of the loaded extensions. We don't want to load an extension twice. _loaded_extensions = set() def load_extensions(default, extensions, strict, environ, reload=False): """Load all of the given extensions. This should be called by run_algo or the cli. Parameters ---------- default : bool Load the default exension (~/.zipline/extension.py)? extension : iterable[str] The paths to the extensions to load. If the path ends in ``.py`` it is treated as a script and executed. If it does not end in ``.py`` it is treated as a module to be imported. strict : bool Should failure to load an extension raise. If this is false it will still warn. environ : mapping The environment to use to find the default extension path. reload : bool, optional Reload any extensions that have already been loaded. 
""" if default: default_extension_path = pth.default_extension(environ=environ) pth.ensure_file(default_extension_path) # put the default extension first so other extensions can depend on # the order they are loaded extensions = concatv([default_extension_path], extensions) for ext in extensions: if ext in _loaded_extensions and not reload: continue try: # load all of the zipline extensionss if ext.endswith('.py'): with open(ext) as f: ns = {} six.exec_(compile(f.read(), ext, 'exec'), ns, ns) else: __import__(ext) except Exception as e: if strict: # if `strict` we should raise the actual exception and fail raise # without `strict` we should just log the failure warnings.warn( 'Failed to load extension: %r\n%s' % (ext, e), stacklevel=2 ) else: _loaded_extensions.add(ext) def run_algorithm(start, end, initialize, capital_base, handle_data=None, before_trading_start=None, analyze=None, teardown=None, data_frequency='daily', bundle='quantopian-quandl', bundle_timestamp=None, trading_calendar=None, metrics_set='default', benchmark_returns=None, default_extension=True, extensions=(), strict_extensions=True, environ=os.environ, blotter='default', broker=None, performance_callback=None, stop_execution_callback=None, execution_id=None, state_filename=None, realtime_bar_target=None ): """ Run a trading algorithm. Parameters ---------- start : datetime The start date of the backtest. end : datetime The end date of the backtest.. initialize : callable[context -> None] The initialize function to use for the algorithm. This is called once at the very begining of the backtest and should be used to set up any state needed by the algorithm. capital_base : float The starting capital for the backtest. handle_data : callable[(context, BarData) -> None], optional The handle_data function to use for the algorithm. This is called every minute when ``data_frequency == 'minute'`` or every day when ``data_frequency == 'daily'``. before_trading_start : callable[(context, BarData) -> None], optional The before_trading_start function for the algorithm. This is called once before each trading day (after initialize on the first day). analyze : callable[(context, pd.DataFrame) -> None], optional The analyze function to use for the algorithm. This function is called once at the end of the backtest and is passed the context and the performance data. data_frequency : {'daily', 'minute'}, optional The data frequency to run the algorithm at. bundle : str, optional The name of the data bundle to use to load the data to run the backtest with. This defaults to 'quantopian-quandl'. bundle_timestamp : datetime, optional The datetime to lookup the bundle data for. This defaults to the current time. trading_calendar : TradingCalendar, optional The trading calendar to use for your backtest. metrics_set : iterable[Metric] or str, optional The set of metrics to compute in the simulation. If a string is passed, resolve the set with :func:`zipline.finance.metrics.load`. benchmark_returns : pd.Series, optional Series of returns to use as the benchmark. default_extension : bool, optional Should the default zipline extension be loaded. This is found at ``$ZIPLINE_ROOT/extension.py`` extensions : iterable[str], optional The names of any other extensions to load. Each element may either be a dotted module path like ``a.b.c`` or a path to a python file ending in ``.py`` like ``a/b/c.py``. strict_extensions : bool, optional Should the run fail if any extensions fail to load. If this is false, a warning will be raised instead. 
environ : mapping[str -> str], optional The os environment to use. Many extensions use this to get parameters. This defaults to ``os.environ``. blotter : str or zipline.finance.blotter.Blotter, optional Blotter to use with this algorithm. If passed as a string, we look for a blotter construction function registered with ``zipline.extensions.register`` and call it with no parameters. Default is a :class:`zipline.finance.blotter.SimulationBlotter` that never cancels orders. broker : instance of zipline.gens.brokers.broker.Broker performance_callback : a callback to send performance results everyday and not only at the end of the backtest. this allows to run live, and monitor the performance of the algorithm stop_execution_callback : A callback to check if execution should be stopped. it is used to be able to stop live trading (also simulation could be stopped using this) execution. if the callback returns True, then algo execution will be aborted. teardown : algo method like handle_data() or before_trading_start() that is called when the algo execution stops and allows the developer to nicely kill the algo execution execution_id : unique id to identify this execution instance (backtest or live) will be used to mark and get logs for this specific execution instance. state_filename : path to pickle file storing the algorithm "context" (similar to self) Returns ------- perf : pd.DataFrame The daily performance of the algorithm. See Also -------- zipline.data.bundles.bundles : The available data bundles. """ load_extensions(default_extension, extensions, strict_extensions, environ) benchmark_spec = BenchmarkSpec.from_returns(benchmark_returns) return _run( handle_data=handle_data, initialize=initialize, before_trading_start=before_trading_start, analyze=analyze, teardown=teardown, algofile=None, algotext=None, defines=(), data_frequency=data_frequency, capital_base=capital_base, bundle=bundle, bundle_timestamp=bundle_timestamp, start=start, end=end, output=os.devnull, trading_calendar=trading_calendar, print_algo=False, metrics_set=metrics_set, local_namespace=False, environ=environ, blotter=blotter, benchmark_spec=benchmark_spec, broker=broker, state_filename=state_filename, realtime_bar_target=realtime_bar_target, performance_callback=performance_callback, stop_execution_callback=stop_execution_callback, execution_id=execution_id ) class BenchmarkSpec(object): """ Helper for different ways we can get benchmark data for the Zipline CLI and zipline.utils.run_algo.run_algorithm. Parameters ---------- benchmark_returns : pd.Series, optional Series of returns to use as the benchmark. benchmark_file : str or file File containing a csv with `date` and `return` columns, to be read as the benchmark. benchmark_sid : int, optional Sid of the asset to use as a benchmark. benchmark_symbol : str, optional Symbol of the asset to use as a benchmark. Symbol will be looked up as of the end date of the backtest. no_benchmark : bool Flag indicating that no benchmark is configured. Benchmark-dependent metrics will be calculated using a dummy benchmark of all-zero returns. 
""" def __init__(self, benchmark_returns, benchmark_file, benchmark_sid, benchmark_symbol, no_benchmark): self.benchmark_returns = benchmark_returns self.benchmark_file = benchmark_file self.benchmark_sid = benchmark_sid self.benchmark_symbol = benchmark_symbol self.no_benchmark = no_benchmark @classmethod def from_cli_params(cls, benchmark_sid, benchmark_symbol, benchmark_file, no_benchmark): return cls( benchmark_returns=None, benchmark_sid=benchmark_sid, benchmark_symbol=benchmark_symbol, benchmark_file=benchmark_file, no_benchmark=no_benchmark, ) @classmethod def from_returns(cls, benchmark_returns): return cls( benchmark_returns=benchmark_returns, benchmark_file=None, benchmark_sid=None, benchmark_symbol=None, no_benchmark=benchmark_returns is None, ) def resolve(self, asset_finder, start_date, end_date): """ Resolve inputs into values to be passed to TradingAlgorithm. Returns a pair of ``(benchmark_sid, benchmark_returns)`` with at most one non-None value. Both values may be None if no benchmark source has been configured. Parameters ---------- asset_finder : zipline.assets.AssetFinder Asset finder for the algorithm to be run. start_date : pd.Timestamp Start date of the algorithm to be run. end_date : pd.Timestamp End date of the algorithm to be run. Returns ------- benchmark_sid : int Sid to use as benchmark. benchmark_returns : pd.Series Series of returns to use as benchmark. """ if self.benchmark_returns is not None: benchmark_sid = None benchmark_returns = self.benchmark_returns elif self.benchmark_file is not None: benchmark_sid = None benchmark_returns = get_benchmark_returns_from_file( self.benchmark_file, ) elif self.benchmark_sid is not None: benchmark_sid = self.benchmark_sid benchmark_returns = None elif self.benchmark_symbol is not None: try: asset = asset_finder.lookup_symbol( self.benchmark_symbol, as_of_date=end_date, ) benchmark_sid = asset.sid benchmark_returns = None except SymbolNotFound: raise _RunAlgoError( "Symbol %r as a benchmark not found in this bundle." % self.benchmark_symbol ) elif self.no_benchmark: benchmark_sid = None benchmark_returns = self._zero_benchmark_returns( start_date=start_date, end_date=end_date, ) else: log.warn( "No benchmark configured. " "Assuming algorithm calls set_benchmark." ) log.warn( "Pass --benchmark-sid, --benchmark-symbol, or" " --benchmark-file to set a source of benchmark returns." ) log.warn( "Pass --no-benchmark to use a dummy benchmark " "of zero returns.", ) benchmark_sid = None benchmark_returns = None return benchmark_sid, benchmark_returns @staticmethod def _zero_benchmark_returns(start_date, end_date): return pd.Series( index=pd.date_range(start_date, end_date, tz='utc'), data=0.0, )
zipline-trader
/zipline_trader-1.6.1-cp36-cp36m-win32.whl/zipline/utils/run_algo.py
run_algo.py
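A minimal daily backtest driven through ``run_algorithm``, assuming the named bundle has already been ingested; the symbol, dates, and capital are illustrative.

import pandas as pd

from zipline.api import order_target_percent, symbol
from zipline.utils.run_algo import run_algorithm

def initialize(context):
    context.asset = symbol('AAPL')

def handle_data(context, data):
    order_target_percent(context.asset, 1.0)

perf = run_algorithm(
    start=pd.Timestamp('2017-01-03', tz='utc'),
    end=pd.Timestamp('2017-12-29', tz='utc'),
    initialize=initialize,
    handle_data=handle_data,
    capital_base=100000,
    bundle='quantopian-quandl',
    benchmark_returns=None,   # resolves to a zero-return benchmark
)
print(perf[['portfolio_value', 'returns']].tail())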
from datetime import tzinfo from functools import partial from operator import attrgetter from numpy import dtype import pandas as pd from pytz import timezone from six import iteritems, string_types, PY3 from toolz import valmap, complement, compose import toolz.curried.operator as op from zipline.utils.compat import wraps from zipline.utils.functional import getattrs from zipline.utils.preprocess import call, preprocess if PY3: _qualified_name = attrgetter('__qualname__') else: def _qualified_name(obj): """ Return the fully-qualified name (ignoring inner classes) of a type. """ # If the obj has an explicitly-set __qualname__, use it. try: return getattr(obj, '__qualname__') except AttributeError: pass # If not, build our own __qualname__ as best we can. module = obj.__module__ if module in ('__builtin__', '__main__', 'builtins'): return obj.__name__ return '.'.join([module, obj.__name__]) def verify_indices_all_unique(obj): """ Check that all axes of a pandas object are unique. Parameters ---------- obj : pd.Series / pd.DataFrame / pd.Panel The object to validate. Returns ------- obj : pd.Series / pd.DataFrame / pd.Panel The validated object, unchanged. Raises ------ ValueError If any axis has duplicate entries. """ axis_names = [ ('index',), # Series ('index', 'columns'), # DataFrame ('items', 'major_axis', 'minor_axis') # Panel ][obj.ndim - 1] # ndim = 1 should go to entry 0, for axis_name, index in zip(axis_names, obj.axes): if index.is_unique: continue raise ValueError( "Duplicate entries in {type}.{axis}: {dupes}.".format( type=type(obj).__name__, axis=axis_name, dupes=sorted(index[index.duplicated()]), ) ) return obj def optionally(preprocessor): """Modify a preprocessor to explicitly allow `None`. Parameters ---------- preprocessor : callable[callable, str, any -> any] A preprocessor to delegate to when `arg is not None`. Returns ------- optional_preprocessor : callable[callable, str, any -> any] A preprocessor that delegates to `preprocessor` when `arg is not None`. Examples -------- >>> def preprocessor(func, argname, arg): ... if not isinstance(arg, int): ... raise TypeError('arg must be int') ... return arg ... >>> @preprocess(a=optionally(preprocessor)) ... def f(a): ... return a ... >>> f(1) # call with int 1 >>> f('a') # call with not int Traceback (most recent call last): ... TypeError: arg must be int >>> f(None) is None # call with explicit None True """ @wraps(preprocessor) def wrapper(func, argname, arg): return arg if arg is None else preprocessor(func, argname, arg) return wrapper def ensure_upper_case(func, argname, arg): if isinstance(arg, string_types): return arg.upper() else: raise TypeError( "{0}() expected argument '{1}' to" " be a string, but got {2} instead.".format( func.__name__, argname, arg, ), ) def ensure_dtype(func, argname, arg): """ Argument preprocessor that converts the input into a numpy dtype. Examples -------- >>> import numpy as np >>> from zipline.utils.preprocess import preprocess >>> @preprocess(dtype=ensure_dtype) ... def foo(dtype): ... return dtype ... >>> foo(float) dtype('float64') """ try: return dtype(arg) except TypeError: raise TypeError( "{func}() couldn't convert argument " "{argname}={arg!r} to a numpy dtype.".format( func=_qualified_name(func), argname=argname, arg=arg, ), ) def ensure_timezone(func, argname, arg): """Argument preprocessor that converts the input into a tzinfo object. Examples -------- >>> from zipline.utils.preprocess import preprocess >>> @preprocess(tz=ensure_timezone) ... def foo(tz): ... 
return tz >>> foo('utc') <UTC> """ if isinstance(arg, tzinfo): return arg if isinstance(arg, string_types): return timezone(arg) raise TypeError( "{func}() couldn't convert argument " "{argname}={arg!r} to a timezone.".format( func=_qualified_name(func), argname=argname, arg=arg, ), ) def ensure_timestamp(func, argname, arg): """Argument preprocessor that converts the input into a pandas Timestamp object. Examples -------- >>> from zipline.utils.preprocess import preprocess >>> @preprocess(ts=ensure_timestamp) ... def foo(ts): ... return ts >>> foo('2014-01-01') Timestamp('2014-01-01 00:00:00') """ try: return pd.Timestamp(arg) except ValueError as e: raise TypeError( "{func}() couldn't convert argument " "{argname}={arg!r} to a pandas Timestamp.\n" "Original error was: {t}: {e}".format( func=_qualified_name(func), argname=argname, arg=arg, t=_qualified_name(type(e)), e=e, ), ) def expect_dtypes(__funcname=_qualified_name, **named): """ Preprocessing decorator that verifies inputs have expected numpy dtypes. Examples -------- >>> from numpy import dtype, arange, int8, float64 >>> @expect_dtypes(x=dtype(int8)) ... def foo(x, y): ... return x, y ... >>> foo(arange(3, dtype=int8), 'foo') (array([0, 1, 2], dtype=int8), 'foo') >>> foo(arange(3, dtype=float64), 'foo') # doctest: +NORMALIZE_WHITESPACE ... # doctest: +ELLIPSIS Traceback (most recent call last): ... TypeError: ...foo() expected a value with dtype 'int8' for argument 'x', but got 'float64' instead. """ for name, type_ in iteritems(named): if not isinstance(type_, (dtype, tuple)): raise TypeError( "expect_dtypes() expected a numpy dtype or tuple of dtypes" " for argument {name!r}, but got {dtype} instead.".format( name=name, dtype=dtype, ) ) if isinstance(__funcname, str): def get_funcname(_): return __funcname else: get_funcname = __funcname @preprocess(dtypes=call(lambda x: x if isinstance(x, tuple) else (x,))) def _expect_dtype(dtypes): """ Factory for dtype-checking functions that work with the @preprocess decorator. """ def error_message(func, argname, value): # If the bad value has a dtype, but it's wrong, show the dtype # name. Otherwise just show the value. try: value_to_show = value.dtype.name except AttributeError: value_to_show = value return ( "{funcname}() expected a value with dtype {dtype_str} " "for argument {argname!r}, but got {value!r} instead." ).format( funcname=get_funcname(func), dtype_str=' or '.join(repr(d.name) for d in dtypes), argname=argname, value=value_to_show, ) def _actual_preprocessor(func, argname, argvalue): if getattr(argvalue, 'dtype', object()) not in dtypes: raise TypeError(error_message(func, argname, argvalue)) return argvalue return _actual_preprocessor return preprocess(**valmap(_expect_dtype, named)) def expect_kinds(**named): """ Preprocessing decorator that verifies inputs have expected dtype kinds. Examples -------- >>> from numpy import int64, int32, float32 >>> @expect_kinds(x='i') ... def foo(x): ... return x ... >>> foo(int64(2)) 2 >>> foo(int32(2)) 2 >>> foo(float32(2)) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS Traceback (most recent call last): ... TypeError: ...foo() expected a numpy object of kind 'i' for argument 'x', but got 'f' instead. 
""" for name, kind in iteritems(named): if not isinstance(kind, (str, tuple)): raise TypeError( "expect_dtype_kinds() expected a string or tuple of strings" " for argument {name!r}, but got {kind} instead.".format( name=name, kind=dtype, ) ) @preprocess(kinds=call(lambda x: x if isinstance(x, tuple) else (x,))) def _expect_kind(kinds): """ Factory for kind-checking functions that work the @preprocess decorator. """ def error_message(func, argname, value): # If the bad value has a dtype, but it's wrong, show the dtype # kind. Otherwise just show the value. try: value_to_show = value.dtype.kind except AttributeError: value_to_show = value return ( "{funcname}() expected a numpy object of kind {kinds} " "for argument {argname!r}, but got {value!r} instead." ).format( funcname=_qualified_name(func), kinds=' or '.join(map(repr, kinds)), argname=argname, value=value_to_show, ) def _actual_preprocessor(func, argname, argvalue): if getattrs(argvalue, ('dtype', 'kind'), object()) not in kinds: raise TypeError(error_message(func, argname, argvalue)) return argvalue return _actual_preprocessor return preprocess(**valmap(_expect_kind, named)) def expect_types(__funcname=_qualified_name, **named): """ Preprocessing decorator that verifies inputs have expected types. Examples -------- >>> @expect_types(x=int, y=str) ... def foo(x, y): ... return x, y ... >>> foo(2, '3') (2, '3') >>> foo(2.0, '3') # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS Traceback (most recent call last): ... TypeError: ...foo() expected a value of type int for argument 'x', but got float instead. Notes ----- A special argument, __funcname, can be provided as a string to override the function name shown in error messages. This is most often used on __init__ or __new__ methods to make errors refer to the class name instead of the function name. """ for name, type_ in iteritems(named): if not isinstance(type_, (type, tuple)): raise TypeError( "expect_types() expected a type or tuple of types for " "argument '{name}', but got {type_} instead.".format( name=name, type_=type_, ) ) def _expect_type(type_): # Slightly different messages for type and tuple of types. _template = ( "%(funcname)s() expected a value of type {type_or_types} " "for argument '%(argname)s', but got %(actual)s instead." ) if isinstance(type_, tuple): template = _template.format( type_or_types=' or '.join(map(_qualified_name, type_)) ) else: template = _template.format(type_or_types=_qualified_name(type_)) return make_check( exc_type=TypeError, template=template, pred=lambda v: not isinstance(v, type_), actual=compose(_qualified_name, type), funcname=__funcname, ) return preprocess(**valmap(_expect_type, named)) def make_check(exc_type, template, pred, actual, funcname): """ Factory for making preprocessing functions that check a predicate on the input value. Parameters ---------- exc_type : Exception The exception type to raise if the predicate fails. template : str A template string to use to create error messages. Should have %-style named template parameters for 'funcname', 'argname', and 'actual'. pred : function[object -> bool] A function to call on the argument being preprocessed. If the predicate returns `True`, we raise an instance of `exc_type`. actual : function[object -> object] A function to call on bad values to produce the value to display in the error message. funcname : str or callable Name to use in error messages, or function to call on decorated functions to produce a name. 
Passing an explicit name is useful when creating checks for __init__ or __new__ methods when you want the error to refer to the class name instead of the method name. """ if isinstance(funcname, str): def get_funcname(_): return funcname else: get_funcname = funcname def _check(func, argname, argvalue): if pred(argvalue): raise exc_type( template % { 'funcname': get_funcname(func), 'argname': argname, 'actual': actual(argvalue), }, ) return argvalue return _check def optional(type_): """ Helper for use with `expect_types` when an input can be `type_` or `None`. Returns an object such that both `None` and instances of `type_` pass checks of the form `isinstance(obj, optional(type_))`. Parameters ---------- type_ : type Type for which to produce an option. Examples -------- >>> isinstance({}, optional(dict)) True >>> isinstance(None, optional(dict)) True >>> isinstance(1, optional(dict)) False """ return (type_, type(None)) def expect_element(__funcname=_qualified_name, **named): """ Preprocessing decorator that verifies inputs are elements of some expected collection. Examples -------- >>> @expect_element(x=('a', 'b')) ... def foo(x): ... return x.upper() ... >>> foo('a') 'A' >>> foo('b') 'B' >>> foo('c') # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS Traceback (most recent call last): ... ValueError: ...foo() expected a value in ('a', 'b') for argument 'x', but got 'c' instead. Notes ----- A special argument, __funcname, can be provided as a string to override the function name shown in error messages. This is most often used on __init__ or __new__ methods to make errors refer to the class name instead of the function name. This uses the `in` operator (__contains__) to make the containment check. This allows us to use any custom container as long as the object supports the container protocol. """ def _expect_element(collection): if isinstance(collection, (set, frozenset)): # Special case the error message for set and frozen set to make it # less verbose. collection_for_error_message = tuple(sorted(collection)) else: collection_for_error_message = collection template = ( "%(funcname)s() expected a value in {collection} " "for argument '%(argname)s', but got %(actual)s instead." ).format(collection=collection_for_error_message) return make_check( ValueError, template, complement(op.contains(collection)), repr, funcname=__funcname, ) return preprocess(**valmap(_expect_element, named)) def expect_bounded(__funcname=_qualified_name, **named): """ Preprocessing decorator verifying that inputs fall INCLUSIVELY between bounds. Bounds should be passed as a pair of ``(min_value, max_value)``. ``None`` may be passed as ``min_value`` or ``max_value`` to signify that the input is only bounded above or below. Examples -------- >>> @expect_bounded(x=(1, 5)) ... def foo(x): ... return x + 1 ... >>> foo(1) 2 >>> foo(5) 6 >>> foo(6) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS Traceback (most recent call last): ... ValueError: ...foo() expected a value inclusively between 1 and 5 for argument 'x', but got 6 instead. >>> @expect_bounded(x=(2, None)) ... def foo(x): ... return x ... >>> foo(100000) 100000 >>> foo(1) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS Traceback (most recent call last): ... ValueError: ...foo() expected a value greater than or equal to 2 for argument 'x', but got 1 instead. >>> @expect_bounded(x=(None, 5)) ... def foo(x): ... return x ... >>> foo(6) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS Traceback (most recent call last): ... 
ValueError: ...foo() expected a value less than or equal to 5 for argument 'x', but got 6 instead. """ def _make_bounded_check(bounds): (lower, upper) = bounds if lower is None: def should_fail(value): return value > upper predicate_descr = "less than or equal to " + str(upper) elif upper is None: def should_fail(value): return value < lower predicate_descr = "greater than or equal to " + str(lower) else: def should_fail(value): return not (lower <= value <= upper) predicate_descr = "inclusively between %s and %s" % bounds template = ( "%(funcname)s() expected a value {predicate}" " for argument '%(argname)s', but got %(actual)s instead." ).format(predicate=predicate_descr) return make_check( exc_type=ValueError, template=template, pred=should_fail, actual=repr, funcname=__funcname, ) return _expect_bounded(_make_bounded_check, __funcname=__funcname, **named) def expect_strictly_bounded(__funcname=_qualified_name, **named): """ Preprocessing decorator verifying that inputs fall EXCLUSIVELY between bounds. Bounds should be passed as a pair of ``(min_value, max_value)``. ``None`` may be passed as ``min_value`` or ``max_value`` to signify that the input is only bounded above or below. Examples -------- >>> @expect_strictly_bounded(x=(1, 5)) ... def foo(x): ... return x + 1 ... >>> foo(2) 3 >>> foo(4) 5 >>> foo(5) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS Traceback (most recent call last): ... ValueError: ...foo() expected a value exclusively between 1 and 5 for argument 'x', but got 5 instead. >>> @expect_strictly_bounded(x=(2, None)) ... def foo(x): ... return x ... >>> foo(100000) 100000 >>> foo(2) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS Traceback (most recent call last): ... ValueError: ...foo() expected a value strictly greater than 2 for argument 'x', but got 2 instead. >>> @expect_strictly_bounded(x=(None, 5)) ... def foo(x): ... return x ... >>> foo(5) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS Traceback (most recent call last): ... ValueError: ...foo() expected a value strictly less than 5 for argument 'x', but got 5 instead. """ def _make_bounded_check(bounds): (lower, upper) = bounds if lower is None: def should_fail(value): return value >= upper predicate_descr = "strictly less than " + str(upper) elif upper is None: def should_fail(value): return value <= lower predicate_descr = "strictly greater than " + str(lower) else: def should_fail(value): return not (lower < value < upper) predicate_descr = "exclusively between %s and %s" % bounds template = ( "%(funcname)s() expected a value {predicate}" " for argument '%(argname)s', but got %(actual)s instead." ).format(predicate=predicate_descr) return make_check( exc_type=ValueError, template=template, pred=should_fail, actual=repr, funcname=__funcname, ) return _expect_bounded(_make_bounded_check, __funcname=__funcname, **named) def _expect_bounded(make_bounded_check, __funcname, **named): def valid_bounds(t): return ( isinstance(t, tuple) and len(t) == 2 and t != (None, None) ) for name, bounds in iteritems(named): if not valid_bounds(bounds): raise TypeError( "expect_bounded() expected a tuple of bounds for" " argument '{name}', but got {bounds} instead.".format( name=name, bounds=bounds, ) ) return preprocess(**valmap(make_bounded_check, named)) def expect_dimensions(__funcname=_qualified_name, **dimensions): """ Preprocessing decorator that verifies inputs are numpy arrays with a specific dimensionality. Examples -------- >>> from numpy import array >>> @expect_dimensions(x=1, y=2) ... def foo(x, y): ... 
return x[0] + y[0, 0] ... >>> foo(array([1, 1]), array([[1, 1], [2, 2]])) 2 >>> foo(array([1, 1]), array([1, 1])) # doctest: +NORMALIZE_WHITESPACE ... # doctest: +ELLIPSIS Traceback (most recent call last): ... ValueError: ...foo() expected a 2-D array for argument 'y', but got a 1-D array instead. """ if isinstance(__funcname, str): def get_funcname(_): return __funcname else: get_funcname = __funcname def _expect_dimension(expected_ndim): def _check(func, argname, argvalue): actual_ndim = argvalue.ndim if actual_ndim != expected_ndim: if actual_ndim == 0: actual_repr = 'scalar' else: actual_repr = "%d-D array" % actual_ndim raise ValueError( "{func}() expected a {expected:d}-D array" " for argument {argname!r}, but got a {actual}" " instead.".format( func=get_funcname(func), expected=expected_ndim, argname=argname, actual=actual_repr, ) ) return argvalue return _check return preprocess(**valmap(_expect_dimension, dimensions)) def coerce(from_, to, **to_kwargs): """ A preprocessing decorator that coerces inputs of a given type by passing them to a callable. Parameters ---------- from : type or tuple or types Inputs types on which to call ``to``. to : function Coercion function to call on inputs. **to_kwargs Additional keywords to forward to every call to ``to``. Examples -------- >>> @preprocess(x=coerce(float, int), y=coerce(float, int)) ... def floordiff(x, y): ... return x - y ... >>> floordiff(3.2, 2.5) 1 >>> @preprocess(x=coerce(str, int, base=2), y=coerce(str, int, base=2)) ... def add_binary_strings(x, y): ... return bin(x + y)[2:] ... >>> add_binary_strings('101', '001') '110' """ def preprocessor(func, argname, arg): if isinstance(arg, from_): return to(arg, **to_kwargs) return arg return preprocessor def coerce_types(**kwargs): """ Preprocessing decorator that applies type coercions. Parameters ---------- **kwargs : dict[str -> (type, callable)] Keyword arguments mapping function parameter names to pairs of (from_type, to_type). Examples -------- >>> @coerce_types(x=(float, int), y=(int, str)) ... def func(x, y): ... return (x, y) ... >>> func(1.0, 3) (1, '3') """ def _coerce(types): return coerce(*types) return preprocess(**valmap(_coerce, kwargs)) class error_keywords(object): def __init__(self, *args, **kwargs): self.messages = kwargs def __call__(self, func): @wraps(func) def assert_keywords_and_call(*args, **kwargs): for field, message in iteritems(self.messages): if field in kwargs: raise TypeError(message) return func(*args, **kwargs) return assert_keywords_and_call coerce_string = partial(coerce, string_types) def validate_keys(dict_, expected, funcname): """Validate that a dictionary has an expected set of keys. """ expected = set(expected) received = set(dict_) missing = expected - received if missing: raise ValueError( "Missing keys in {}:\n" "Expected Keys: {}\n" "Received Keys: {}".format( funcname, sorted(expected), sorted(received), ) ) unexpected = received - expected if unexpected: raise ValueError( "Unexpected keys in {}:\n" "Expected Keys: {}\n" "Received Keys: {}".format( funcname, sorted(expected), sorted(received), ) )
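Because every decorator in this module is built on ``preprocess`` and preserves the wrapped function's signature, they stack cleanly. A minimal usage sketch follows; ``build_signal`` and its arguments are invented for illustration.

from zipline.utils.input_validation import (
    expect_bounded,
    expect_element,
    expect_types,
    optional,
)


@expect_types(symbol=str, max_weight=optional(float))
@expect_element(side=('long', 'short'))
@expect_bounded(lookback=(1, 252))
def build_signal(symbol, lookback, side='long', max_weight=None):
    # Bad arguments are rejected before the body ever runs.
    return symbol, lookback, side, max_weight


build_signal('AAPL', 20)             # ok
# build_signal('AAPL', 0)            # ValueError: not inclusively between 1 and 252
# build_signal('AAPL', 20, 'flat')   # ValueError: not in ('long', 'short')
# build_signal(123, 20)              # TypeError: expected a value of type str for 'symbol'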
zipline-trader | /zipline_trader-1.6.1-cp36-cp36m-win32.whl/zipline/utils/input_validation.py | input_validation.py
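The array-oriented validators compose the same way. A short sketch, with ``normalize`` invented for illustration:

import numpy as np

from zipline.utils.input_validation import expect_dimensions, expect_dtypes


@expect_dtypes(weights=np.dtype('float64'))
@expect_dimensions(weights=1)
def normalize(weights):
    # Scale a 1-D float64 vector so that it sums to 1.0.
    return weights / weights.sum()


normalize(np.array([1.0, 3.0]))       # array([0.25, 0.75])
# normalize(np.array([1, 3]))         # TypeError: expected dtype 'float64', got 'int64'
# normalize(np.array([[1.0, 3.0]]))   # ValueError: expected a 1-D array, got a 2-D array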
from textwrap import dedent from types import CodeType from uuid import uuid4 from toolz.curried.operator import getitem from six import viewkeys, exec_, PY3 from zipline.utils.compat import getargspec, wraps _code_argorder = ( ('co_argcount', 'co_kwonlyargcount') if PY3 else ('co_argcount',) ) + ( 'co_nlocals', 'co_stacksize', 'co_flags', 'co_code', 'co_consts', 'co_names', 'co_varnames', 'co_filename', 'co_name', 'co_firstlineno', 'co_lnotab', 'co_freevars', 'co_cellvars', ) NO_DEFAULT = object() def preprocess(*_unused, **processors): """ Decorator that applies pre-processors to the arguments of a function before calling the function. Parameters ---------- **processors : dict Map from argument name -> processor function. A processor function takes three arguments: (func, argname, argvalue). `func` is the the function for which we're processing args. `argname` is the name of the argument we're processing. `argvalue` is the value of the argument we're processing. Examples -------- >>> def _ensure_tuple(func, argname, arg): ... if isinstance(arg, tuple): ... return argvalue ... try: ... return tuple(arg) ... except TypeError: ... raise TypeError( ... "%s() expected argument '%s' to" ... " be iterable, but got %s instead." % ( ... func.__name__, argname, arg, ... ) ... ) ... >>> @preprocess(arg=_ensure_tuple) ... def foo(arg): ... return arg ... >>> foo([1, 2, 3]) (1, 2, 3) >>> foo("a") ('a',) >>> foo(2) Traceback (most recent call last): ... TypeError: foo() expected argument 'arg' to be iterable, but got 2 instead. """ if _unused: raise TypeError("preprocess() doesn't accept positional arguments") def _decorator(f): args, varargs, varkw, defaults = argspec = getargspec(f) if defaults is None: defaults = () no_defaults = (NO_DEFAULT,) * (len(args) - len(defaults)) args_defaults = list(zip(args, no_defaults + defaults)) if varargs: args_defaults.append((varargs, NO_DEFAULT)) if varkw: args_defaults.append((varkw, NO_DEFAULT)) argset = set(args) | {varargs, varkw} - {None} # Arguments can be declared as tuples in Python 2. if not all(isinstance(arg, str) for arg in args): raise TypeError( "Can't validate functions using tuple unpacking: %s" % (argspec,) ) # Ensure that all processors map to valid names. bad_names = viewkeys(processors) - argset if bad_names: raise TypeError( "Got processors for unknown arguments: %s." % bad_names ) return _build_preprocessed_function( f, processors, args_defaults, varargs, varkw, ) return _decorator def call(f): """ Wrap a function in a processor that calls `f` on the argument before passing it along. Useful for creating simple arguments to the `@preprocess` decorator. Parameters ---------- f : function Function accepting a single argument and returning a replacement. Examples -------- >>> @preprocess(x=call(lambda x: x + 1)) ... def foo(x): ... return x ... >>> foo(1) 2 """ @wraps(f) def processor(func, argname, arg): return f(arg) return processor def _build_preprocessed_function(func, processors, args_defaults, varargs, varkw): """ Build a preprocessed function with the same signature as `func`. Uses `exec` internally to build a function that actually has the same signature as `func. 
""" format_kwargs = {'func_name': func.__name__} def mangle(name): return 'a' + uuid4().hex + name format_kwargs['mangled_func'] = mangled_funcname = mangle(func.__name__) def make_processor_assignment(arg, processor_name): template = "{arg} = {processor}({func}, '{arg}', {arg})" return template.format( arg=arg, processor=processor_name, func=mangled_funcname, ) exec_globals = {mangled_funcname: func, 'wraps': wraps} defaults_seen = 0 default_name_template = 'a' + uuid4().hex + '_%d' signature = [] call_args = [] assignments = [] star_map = { varargs: '*', varkw: '**', } def name_as_arg(arg): return star_map.get(arg, '') + arg for arg, default in args_defaults: if default is NO_DEFAULT: signature.append(name_as_arg(arg)) else: default_name = default_name_template % defaults_seen exec_globals[default_name] = default signature.append('='.join([name_as_arg(arg), default_name])) defaults_seen += 1 if arg in processors: procname = mangle('_processor_' + arg) exec_globals[procname] = processors[arg] assignments.append(make_processor_assignment(arg, procname)) call_args.append(name_as_arg(arg)) exec_str = dedent( """\ @wraps({wrapped_funcname}) def {func_name}({signature}): {assignments} return {wrapped_funcname}({call_args}) """ ).format( func_name=func.__name__, signature=', '.join(signature), assignments='\n '.join(assignments), wrapped_funcname=mangled_funcname, call_args=', '.join(call_args), ) compiled = compile( exec_str, func.__code__.co_filename, mode='exec', ) exec_locals = {} exec_(compiled, exec_globals, exec_locals) new_func = exec_locals[func.__name__] code = new_func.__code__ args = { attr: getattr(code, attr) for attr in dir(code) if attr.startswith('co_') } # Copy the firstlineno out of the underlying function so that exceptions # get raised with the correct traceback. # This also makes dynamic source inspection (like IPython `??` operator) # work as intended. try: # Try to get the pycode object from the underlying function. original_code = func.__code__ except AttributeError: try: # The underlying callable was not a function, try to grab the # `__func__.__code__` which exists on method objects. original_code = func.__func__.__code__ except AttributeError: # The underlying callable does not have a `__code__`. There is # nothing for us to correct. return new_func args['co_firstlineno'] = original_code.co_firstlineno new_func.__code__ = CodeType(*map(getitem(args), _code_argorder)) return new_func
zipline-trader | /zipline_trader-1.6.1-cp36-cp36m-win32.whl/zipline/utils/preprocess.py | preprocess.py
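Building the wrapper with ``exec`` rather than ``*args, **kwargs`` is what lets the decorated function keep its original signature, so introspection keeps working. A quick sketch (Python 3, names invented):

from inspect import signature

from zipline.utils.preprocess import preprocess


@preprocess(x=lambda func, argname, arg: int(arg))
def f(x, y=3):
    return x + y


print(signature(f))   # (x, y=3) -- the original signature survives
print(f('4'))         # 7 -- '4' was coerced to 4 before the body ran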
from abc import ABCMeta, abstractmethod from collections import namedtuple import six import warnings import datetime import numpy as np import pandas as pd import pytz from toolz import curry from zipline.utils.input_validation import preprocess from zipline.utils.memoize import lazyval from zipline.utils.sentinel import sentinel from .context_tricks import nop_context __all__ = [ 'EventManager', 'Event', 'EventRule', 'StatelessRule', 'ComposedRule', 'Always', 'Never', 'AfterOpen', 'BeforeClose', 'NotHalfDay', 'NthTradingDayOfWeek', 'NDaysBeforeLastTradingDayOfWeek', 'NthTradingDayOfMonth', 'NDaysBeforeLastTradingDayOfMonth', 'StatefulRule', 'OncePerDay', # Factory API 'date_rules', 'time_rules', 'calendars', 'make_eventrule', ] MAX_MONTH_RANGE = 23 MAX_WEEK_RANGE = 5 def naive_to_utc(ts): """ Converts a UTC tz-naive timestamp to a tz-aware timestamp. """ # Drop the nanoseconds field. warn=False suppresses the warning # that we are losing the nanoseconds; however, this is intended. return pd.Timestamp(ts.to_pydatetime(warn=False), tz='UTC') def ensure_utc(time, tz='UTC'): """ Normalize a time. If the time is tz-naive, assume it is UTC. """ if not time.tzinfo: time = time.replace(tzinfo=pytz.timezone(tz)) return time.replace(tzinfo=pytz.utc) def _out_of_range_error(a, b=None, var='offset'): start = 0 if b is None: end = a - 1 else: start = a end = b - 1 return ValueError( '{var} must be in between {start} and {end} inclusive'.format( var=var, start=start, end=end, ) ) def _td_check(td): seconds = td.total_seconds() # 43200 seconds = 12 hours if 60 <= seconds <= 43200: return td else: raise ValueError('offset must be in between 1 minute and 12 hours, ' 'inclusive.') def _build_offset(offset, kwargs, default): """ Builds the offset argument for event rules. """ # Filter down to just kwargs that were actually passed. kwargs = {k: v for k, v in six.iteritems(kwargs) if v is not None} if offset is None: if not kwargs: return default # use the default. else: return _td_check(datetime.timedelta(**kwargs)) elif kwargs: raise ValueError('Cannot pass kwargs and an offset') elif isinstance(offset, datetime.timedelta): return _td_check(offset) else: raise TypeError("Must pass 'hours' and/or 'minutes' as keywords") def _build_date(date, kwargs): """ Builds the date argument for event rules. """ if date is None: if not kwargs: raise ValueError('Must pass a date or kwargs') else: return datetime.date(**kwargs) elif kwargs: raise ValueError('Cannot pass kwargs and a date') else: return date def _build_time(time, kwargs): """ Builds the time argument for event rules. """ tz = kwargs.pop('tz', 'UTC') if time: if kwargs: raise ValueError('Cannot pass kwargs and a time') else: return ensure_utc(time, tz) elif not kwargs: raise ValueError('Must pass a time or kwargs') else: return datetime.time(**kwargs) @curry def lossless_float_to_int(funcname, func, argname, arg): """ A preprocessor that coerces integral floats to ints. Receipt of non-integral floats raises a TypeError. """ if not isinstance(arg, float): return arg arg_as_int = int(arg) if arg == arg_as_int: warnings.warn( "{f} expected an int for argument {name!r}, but got float {arg}." " Coercing to int.".format( f=funcname, name=argname, arg=arg, ), ) return arg_as_int raise TypeError(arg) class EventManager(object): """Manages a list of Event objects. This manages the logic for checking the rules and dispatching to the handle_data function of the Events. 
Parameters ---------- create_context : (BarData) -> context manager, optional An optional callback to produce a context manager to wrap the calls to handle_data. This will be passed the current BarData. """ def __init__(self, create_context=None): self._events = [] self._create_context = ( create_context if create_context is not None else lambda *_: nop_context ) def add_event(self, event, prepend=False): """ Adds an event to the manager. """ if prepend: self._events.insert(0, event) else: self._events.append(event) def handle_data(self, context, data, dt): with self._create_context(data): for event in self._events: event.handle_data( context, data, dt, ) class Event(namedtuple('Event', ['rule', 'callback'])): """ An event is a pairing of an EventRule and a callable that will be invoked with the current algorithm context, data, and datetime only when the rule is triggered. """ def __new__(cls, rule, callback=None): callback = callback or (lambda *args, **kwargs: None) return super(cls, cls).__new__(cls, rule=rule, callback=callback) def handle_data(self, context, data, dt): """ Calls the callable only when the rule is triggered. """ if self.rule.should_trigger(dt): self.callback(context, data) class EventRule(six.with_metaclass(ABCMeta)): """A rule defining when a scheduled function should execute. """ # Instances of EventRule are assigned a calendar instance when scheduling # a function. _cal = None @property def cal(self): return self._cal @cal.setter def cal(self, value): self._cal = value @abstractmethod def should_trigger(self, dt): """ Checks if the rule should trigger with its current state. This method should be pure and NOT mutate any state on the object. """ raise NotImplementedError('should_trigger') class StatelessRule(EventRule): """ A stateless rule has no observable side effects. This is reentrant and will always give the same result for the same datetime. Because these are pure, they can be composed to create new rules. """ def and_(self, rule): """ Logical and of two rules, triggers only when both rules trigger. This follows the short circuiting rules for normal and. """ return ComposedRule(self, rule, ComposedRule.lazy_and) __and__ = and_ class ComposedRule(StatelessRule): """ A rule that composes the results of two rules with some composing function. The composing function should be a binary function that accepts the results first(dt) and second(dt) as positional arguments. For example, operator.and_. If lazy=True, then the lazy composer is used instead. The lazy composer expects a function that takes the two should_trigger functions and the datetime. This is useful of you don't always want to call should_trigger for one of the rules. For example, this is used to implement the & and | operators so that they will have the same short circuit logic that is expected. """ def __init__(self, first, second, composer): if not (isinstance(first, StatelessRule) and isinstance(second, StatelessRule)): raise ValueError('Only two StatelessRules can be composed') self.first = first self.second = second self.composer = composer def should_trigger(self, dt): """ Composes the two rules with a lazy composer. """ return self.composer( self.first.should_trigger, self.second.should_trigger, dt ) @staticmethod def lazy_and(first_should_trigger, second_should_trigger, dt): """ Lazily ands the two rules. This will NOT call the should_trigger of the second rule if the first one returns False. 
""" return first_should_trigger(dt) and second_should_trigger(dt) @property def cal(self): return self.first.cal @cal.setter def cal(self, value): # Thread the calendar through to the underlying rules. self.first.cal = self.second.cal = value class Always(StatelessRule): """ A rule that always triggers. """ @staticmethod def always_trigger(dt): """ A should_trigger implementation that will always trigger. """ return True should_trigger = always_trigger class Never(StatelessRule): """ A rule that never triggers. """ @staticmethod def never_trigger(dt): """ A should_trigger implementation that will never trigger. """ return False should_trigger = never_trigger class AfterOpen(StatelessRule): """ A rule that triggers for some offset after the market opens. Example that triggers after 30 minutes of the market opening: >>> AfterOpen(minutes=30) # doctest: +ELLIPSIS <zipline.utils.events.AfterOpen object at ...> """ def __init__(self, offset=None, **kwargs): self.offset = _build_offset( offset, kwargs, datetime.timedelta(minutes=1), # Defaults to the first minute. ) self._period_start = None self._period_end = None self._period_close = None self._one_minute = datetime.timedelta(minutes=1) def calculate_dates(self, dt): """ Given a date, find that day's open and period end (open + offset). """ period_start, period_close = self.cal.open_and_close_for_session( self.cal.minute_to_session_label(dt), ) # Align the market open and close times here with the execution times # used by the simulation clock. This ensures that scheduled functions # trigger at the correct times. self._period_start = self.cal.execution_time_from_open(period_start) self._period_close = self.cal.execution_time_from_close(period_close) self._period_end = self._period_start + self.offset - self._one_minute def should_trigger(self, dt): # There are two reasons why we might want to recalculate the dates. # One is the first time we ever call should_trigger, when # self._period_start is none. The second is when we're on a new day, # and need to recalculate the dates. For performance reasons, we rely # on the fact that our clock only ever ticks forward, since it's # cheaper to do dt1 <= dt2 than dt1.date() != dt2.date(). This means # that we will NOT correctly recognize a new date if we go backwards # in time(which should never happen in a simulation, or in live # trading) if ( self._period_start is None or self._period_close <= dt ): self.calculate_dates(dt) return dt == self._period_end class BeforeClose(StatelessRule): """ A rule that triggers for some offset time before the market closes. Example that triggers for the last 30 minutes every day: >>> BeforeClose(minutes=30) # doctest: +ELLIPSIS <zipline.utils.events.BeforeClose object at ...> """ def __init__(self, offset=None, **kwargs): self.offset = _build_offset( offset, kwargs, datetime.timedelta(minutes=1), # Defaults to the last minute. ) self._period_start = None self._period_close = None self._period_end = None self._one_minute = datetime.timedelta(minutes=1) def calculate_dates(self, dt): """ Given a dt, find that day's close and period start (close - offset). """ period_end = self.cal.open_and_close_for_session( self.cal.minute_to_session_label(dt), )[1] # Align the market close time here with the execution time used by the # simulation clock. This ensures that scheduled functions trigger at # the correct times. 
self._period_end = self.cal.execution_time_from_close(period_end) self._period_start = self._period_end - self.offset self._period_close = self._period_end def should_trigger(self, dt): # There are two reasons why we might want to recalculate the dates. # One is the first time we ever call should_trigger, when # self._period_start is none. The second is when we're on a new day, # and need to recalculate the dates. For performance reasons, we rely # on the fact that our clock only ever ticks forward, since it's # cheaper to do dt1 <= dt2 than dt1.date() != dt2.date(). This means # that we will NOT correctly recognize a new date if we go backwards # in time(which should never happen in a simulation, or in live # trading) if self._period_start is None or self._period_close <= dt: self.calculate_dates(dt) return self._period_start == dt class NotHalfDay(StatelessRule): """ A rule that only triggers when it is not a half day. """ def should_trigger(self, dt): return self.cal.minute_to_session_label(dt) \ not in self.cal.early_closes class TradingDayOfWeekRule(six.with_metaclass(ABCMeta, StatelessRule)): @preprocess(n=lossless_float_to_int('TradingDayOfWeekRule')) def __init__(self, n, invert): if not 0 <= n < MAX_WEEK_RANGE: raise _out_of_range_error(MAX_WEEK_RANGE) self.td_delta = (-n - 1) if invert else n def should_trigger(self, dt): # is this market minute's period in the list of execution periods? val = self.cal.minute_to_session_label(dt, direction="none").value return val in self.execution_period_values @lazyval def execution_period_values(self): # calculate the list of periods that match the given criteria sessions = self.cal.all_sessions return set( pd.Series(data=sessions) # Group by ISO year (0) and week (1) .groupby(sessions.map(lambda x: x.isocalendar()[0:2])) .nth(self.td_delta) .astype(np.int64) ) class NthTradingDayOfWeek(TradingDayOfWeekRule): """ A rule that triggers on the nth trading day of the week. This is zero-indexed, n=0 is the first trading day of the week. """ def __init__(self, n): super(NthTradingDayOfWeek, self).__init__(n, invert=False) class NDaysBeforeLastTradingDayOfWeek(TradingDayOfWeekRule): """ A rule that triggers n days before the last trading day of the week. """ def __init__(self, n): super(NDaysBeforeLastTradingDayOfWeek, self).__init__(n, invert=True) class TradingDayOfMonthRule(six.with_metaclass(ABCMeta, StatelessRule)): @preprocess(n=lossless_float_to_int('TradingDayOfMonthRule')) def __init__(self, n, invert): if not 0 <= n < MAX_MONTH_RANGE: raise _out_of_range_error(MAX_MONTH_RANGE) if invert: self.td_delta = -n - 1 else: self.td_delta = n def should_trigger(self, dt): # is this market minute's period in the list of execution periods? value = self.cal.minute_to_session_label(dt, direction="none").value return value in self.execution_period_values @lazyval def execution_period_values(self): # calculate the list of periods that match the given criteria sessions = self.cal.all_sessions return set( pd.Series(data=sessions) .groupby([sessions.year, sessions.month]) .nth(self.td_delta) .astype(np.int64) ) class NthTradingDayOfMonth(TradingDayOfMonthRule): """ A rule that triggers on the nth trading day of the month. This is zero-indexed, n=0 is the first trading day of the month. """ def __init__(self, n): super(NthTradingDayOfMonth, self).__init__(n, invert=False) class NDaysBeforeLastTradingDayOfMonth(TradingDayOfMonthRule): """ A rule that triggers n days before the last trading day of the month. 
""" def __init__(self, n): super(NDaysBeforeLastTradingDayOfMonth, self).__init__(n, invert=True) # Stateful rules class StatefulRule(EventRule): """ A stateful rule has state. This rule will give different results for the same datetimes depending on the internal state that this holds. StatefulRules wrap other rules as state transformers. """ def __init__(self, rule=None): self.rule = rule or Always() @property def cal(self): return self.rule.cal @cal.setter def cal(self, value): # Thread the calendar through to the underlying rule. self.rule.cal = value class OncePerDay(StatefulRule): def __init__(self, rule=None): self.triggered = False self.date = None self.next_date = None super(OncePerDay, self).__init__(rule) def should_trigger(self, dt): if self.date is None or dt >= self.next_date: # initialize or reset for new date self.triggered = False self.date = dt # record the timestamp for the next day, so that we can use it # to know if we've moved to the next day self.next_date = dt + pd.Timedelta(1, unit="d") if not self.triggered and self.rule.should_trigger(dt): self.triggered = True return True # Factory API class date_rules(object): """ Factories for date-based :func:`~zipline.api.schedule_function` rules. See Also -------- :func:`~zipline.api.schedule_function` """ @staticmethod def every_day(): """Create a rule that triggers every day. Returns ------- rule : zipline.utils.events.EventRule """ return Always() @staticmethod def month_start(days_offset=0): """ Create a rule that triggers a fixed number of trading days after the start of each month. Parameters ---------- days_offset : int, optional Number of trading days to wait before triggering each month. Default is 0, i.e., trigger on the first trading day of the month. Returns ------- rule : zipline.utils.events.EventRule """ return NthTradingDayOfMonth(n=days_offset) @staticmethod def month_end(days_offset=0): """ Create a rule that triggers a fixed number of trading days before the end of each month. Parameters ---------- days_offset : int, optional Number of trading days prior to month end to trigger. Default is 0, i.e., trigger on the last day of the month. Returns ------- rule : zipline.utils.events.EventRule """ return NDaysBeforeLastTradingDayOfMonth(n=days_offset) @staticmethod def week_start(days_offset=0): """ Create a rule that triggers a fixed number of trading days after the start of each week. Parameters ---------- days_offset : int, optional Number of trading days to wait before triggering each week. Default is 0, i.e., trigger on the first trading day of the week. """ return NthTradingDayOfWeek(n=days_offset) @staticmethod def week_end(days_offset=0): """ Create a rule that triggers a fixed number of trading days before the end of each week. Parameters ---------- days_offset : int, optional Number of trading days prior to week end to trigger. Default is 0, i.e., trigger on the last trading day of the week. """ return NDaysBeforeLastTradingDayOfWeek(n=days_offset) class time_rules(object): """Factories for time-based :func:`~zipline.api.schedule_function` rules. See Also -------- :func:`~zipline.api.schedule_function` """ @staticmethod def market_open(offset=None, hours=None, minutes=None): """ Create a rule that triggers at a fixed offset from market open. The offset can be specified either as a :class:`datetime.timedelta`, or as a number of hours and minutes. Parameters ---------- offset : datetime.timedelta, optional If passed, the offset from market open at which to trigger. Must be at least 1 minute. 
hours : int, optional If passed, number of hours to wait after market open. minutes : int, optional If passed, number of minutes to wait after market open. Returns ------- rule : zipline.utils.events.EventRule Notes ----- If no arguments are passed, the default offset is one minute after market open. If ``offset`` is passed, ``hours`` and ``minutes`` must not be passed. Conversely, if either ``hours`` or ``minutes`` are passed, ``offset`` must not be passed. """ return AfterOpen(offset=offset, hours=hours, minutes=minutes) @staticmethod def market_close(offset=None, hours=None, minutes=None): """ Create a rule that triggers at a fixed offset from market close. The offset can be specified either as a :class:`datetime.timedelta`, or as a number of hours and minutes. Parameters ---------- offset : datetime.timedelta, optional If passed, the offset from market close at which to trigger. Must be at least 1 minute. hours : int, optional If passed, number of hours to wait before market close. minutes : int, optional If passed, number of minutes to wait before market close. Returns ------- rule : zipline.utils.events.EventRule Notes ----- If no arguments are passed, the default offset is one minute before market close. If ``offset`` is passed, ``hours`` and ``minutes`` must not be passed. Conversely, if either ``hours`` or ``minutes`` are passed, ``offset`` must not be passed. """ return BeforeClose(offset=offset, hours=hours, minutes=minutes) every_minute = Always class calendars(object): US_EQUITIES = sentinel('US_EQUITIES') US_FUTURES = sentinel('US_FUTURES') def _invert(d): return dict(zip(d.values(), d.keys())) _uncalled_rules = _invert(vars(date_rules)) _uncalled_rules.update(_invert(vars(time_rules))) def _check_if_not_called(v): try: name = _uncalled_rules[v] except KeyError: if not issubclass(v, EventRule): return name = getattr(v, '__name__', None) msg = 'invalid rule: %r' % (v,) if name is not None: msg += ' (hint: did you mean %s())' % name raise TypeError(msg) def make_eventrule(date_rule, time_rule, cal, half_days=True): """ Constructs an event rule from the factory api. """ _check_if_not_called(date_rule) _check_if_not_called(time_rule) if half_days: inner_rule = date_rule & time_rule else: inner_rule = date_rule & time_rule & NotHalfDay() opd = OncePerDay(rule=inner_rule) # This is where a scheduled function's rule is associated with a calendar. opd.cal = cal return opd
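User algorithms normally reach these rules through ``zipline.api.schedule_function`` inside ``initialize``; a typical sketch, where ``rebalance`` is an invented callback:

from zipline.api import schedule_function
from zipline.utils.events import date_rules, time_rules


def initialize(context):
    # Run `rebalance` 30 minutes after the open on the first trading day
    # of each week.
    schedule_function(
        rebalance,
        date_rule=date_rules.week_start(days_offset=0),
        time_rule=time_rules.market_open(minutes=30),
    )


def rebalance(context, data):
    pass  # order placement would go here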
zipline-trader | /zipline_trader-1.6.1-cp36-cp36m-win32.whl/zipline/utils/events.py | events.py
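``make_eventrule`` is the piece that ties a date rule, a time rule, and a trading calendar together into the ``OncePerDay`` rule the simulation clock consults. A direct sketch:

from trading_calendars import get_calendar

from zipline.utils.events import date_rules, make_eventrule, time_rules

# Trigger one session after each month start, 15 minutes before the close,
# with half days included.
rule = make_eventrule(
    date_rules.month_start(days_offset=1),
    time_rules.market_close(minutes=15),
    cal=get_calendar('NYSE'),
    half_days=True,
)
# rule.should_trigger(dt) is then evaluated for every simulation minute dt.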
from collections import OrderedDict, Sequence from itertools import compress from weakref import WeakKeyDictionary, ref from six.moves._thread import allocate_lock as Lock from toolz.sandbox import unzip from trading_calendars.utils.memoize import lazyval from zipline.utils.compat import wraps class classlazyval(lazyval): """ Decorator that marks that an attribute of a class should not be computed until needed, and that the value should be memoized. Example ------- >>> from zipline.utils.memoize import classlazyval >>> class C(object): ... count = 0 ... @classlazyval ... def val(cls): ... cls.count += 1 ... return "val" ... >>> C.count 0 >>> C.val, C.count ('val', 1) >>> C.val, C.count ('val', 1) """ # We don't reassign the name on the class to implement the caching because # then we would need to use a metaclass to track the name of the # descriptor. def __get__(self, instance, owner): return super(classlazyval, self).__get__(owner, owner) def _weak_lru_cache(maxsize=100): """ Users should only access the lru_cache through its public API: cache_info, cache_clear The internals of the lru_cache are encapsulated for thread safety and to allow the implementation to change. """ def decorating_function( user_function, tuple=tuple, sorted=sorted, len=len, KeyError=KeyError): hits, misses = [0], [0] kwd_mark = (object(),) # separates positional and keyword args lock = Lock() # needed because OrderedDict isn't threadsafe if maxsize is None: cache = _WeakArgsDict() # cache without ordering or size limit @wraps(user_function) def wrapper(*args, **kwds): key = args if kwds: key += kwd_mark + tuple(sorted(kwds.items())) try: result = cache[key] hits[0] += 1 return result except KeyError: pass result = user_function(*args, **kwds) cache[key] = result misses[0] += 1 return result else: # ordered least recent to most recent cache = _WeakArgsOrderedDict() cache_popitem = cache.popitem cache_renew = cache.move_to_end @wraps(user_function) def wrapper(*args, **kwds): key = args if kwds: key += kwd_mark + tuple(sorted(kwds.items())) with lock: try: result = cache[key] cache_renew(key) # record recent use of this key hits[0] += 1 return result except KeyError: pass result = user_function(*args, **kwds) with lock: cache[key] = result # record recent use of this key misses[0] += 1 if len(cache) > maxsize: # purge least recently used cache entry cache_popitem(False) return result def cache_info(): """Report cache statistics""" with lock: return hits[0], misses[0], maxsize, len(cache) def cache_clear(): """Clear the cache and cache statistics""" with lock: cache.clear() hits[0] = misses[0] = 0 wrapper.cache_info = cache_info wrapper.cache_clear = cache_clear return wrapper return decorating_function class _WeakArgs(Sequence): """ Works with _WeakArgsDict to provide a weak cache for function args. When any of those args are gc'd, the pair is removed from the cache. 
""" def __init__(self, items, dict_remove=None): def remove(k, selfref=ref(self), dict_remove=dict_remove): self = selfref() if self is not None and dict_remove is not None: dict_remove(self) self._items, self._selectors = unzip(self._try_ref(item, remove) for item in items) self._items = tuple(self._items) self._selectors = tuple(self._selectors) def __getitem__(self, index): return self._items[index] def __len__(self): return len(self._items) @staticmethod def _try_ref(item, callback): try: return ref(item, callback), True except TypeError: return item, False @property def alive(self): return all(item() is not None for item in compress(self._items, self._selectors)) def __eq__(self, other): return self._items == other._items def __hash__(self): try: return self.__hash except AttributeError: h = self.__hash = hash(self._items) return h class _WeakArgsDict(WeakKeyDictionary, object): def __delitem__(self, key): del self.data[_WeakArgs(key)] def __getitem__(self, key): return self.data[_WeakArgs(key)] def __repr__(self): return '%s(%r)' % (type(self).__name__, self.data) def __setitem__(self, key, value): self.data[_WeakArgs(key, self._remove)] = value def __contains__(self, key): try: wr = _WeakArgs(key) except TypeError: return False return wr in self.data def pop(self, key, *args): return self.data.pop(_WeakArgs(key), *args) class _WeakArgsOrderedDict(_WeakArgsDict, object): def __init__(self): super(_WeakArgsOrderedDict, self).__init__() self.data = OrderedDict() def popitem(self, last=True): while True: key, value = self.data.popitem(last) if key.alive: return tuple(key), value def move_to_end(self, key): """Move an existing element to the end. Raises KeyError if the element does not exist. """ self[key] = self.pop(key) def weak_lru_cache(maxsize=100): """Weak least-recently-used cache decorator. If *maxsize* is set to None, the LRU features are disabled and the cache can grow without bound. Arguments to the cached function must be hashable. Any that are weak- referenceable will be stored by weak reference. Once any of the args have been garbage collected, the entry will be removed from the cache. View the cache statistics named tuple (hits, misses, maxsize, currsize) with f.cache_info(). Clear the cache and statistics with f.cache_clear(). See: http://en.wikipedia.org/wiki/Cache_algorithms#Least_Recently_Used """ class desc(lazyval): def __get__(self, instance, owner): if instance is None: return self try: return self._cache[instance] except KeyError: inst = ref(instance) @_weak_lru_cache(maxsize) @wraps(self._get) def wrapper(*args, **kwargs): return self._get(inst(), *args, **kwargs) self._cache[instance] = wrapper return wrapper @_weak_lru_cache(maxsize) def __call__(self, *args, **kwargs): return self._get(*args, **kwargs) return desc remember_last = weak_lru_cache(1)
zipline-trader | /zipline_trader-1.6.1-cp36-cp36m-win32.whl/zipline/utils/memoize.py | memoize.py
import zipline.api from zipline.utils.compat import wraps from zipline.utils.algo_instance import get_algo_instance, set_algo_instance class ZiplineAPI(object): """ Context manager for making an algorithm instance available to zipline API functions within a scoped block. """ def __init__(self, algo_instance): self.algo_instance = algo_instance def __enter__(self): """ Set the given algo instance, storing any previously-existing instance. """ self.old_algo_instance = get_algo_instance() set_algo_instance(self.algo_instance) def __exit__(self, _type, _value, _tb): """ Restore the algo instance stored in __enter__. """ set_algo_instance(self.old_algo_instance) def api_method(f): # Decorator that adds the decorated class method as a callable # function (wrapped) to zipline.api @wraps(f) def wrapped(*args, **kwargs): # Get the instance and call the method algo_instance = get_algo_instance() if algo_instance is None: raise RuntimeError( 'zipline api method %s must be called during a simulation.' % f.__name__ ) return getattr(algo_instance, f.__name__)(*args, **kwargs) # Add functor to zipline.api setattr(zipline.api, f.__name__, wrapped) zipline.api.__all__.append(f.__name__) f.is_api_method = True return f def require_not_initialized(exception): """ Decorator for API methods that should only be called during or before TradingAlgorithm.initialize. `exception` will be raised if the method is called after initialize. Examples -------- @require_not_initialized(SomeException("Don't do that!")) def method(self): # Do stuff that should only be allowed during initialize. """ def decorator(method): @wraps(method) def wrapped_method(self, *args, **kwargs): if self.initialized: raise exception return method(self, *args, **kwargs) return wrapped_method return decorator def require_initialized(exception): """ Decorator for API methods that should only be called after TradingAlgorithm.initialize. `exception` will be raised if the method is called before initialize has completed. Examples -------- @require_initialized(SomeException("Don't do that!")) def method(self): # Do stuff that should only be allowed after initialize. """ def decorator(method): @wraps(method) def wrapped_method(self, *args, **kwargs): if not self.initialized: raise exception return method(self, *args, **kwargs) return wrapped_method return decorator def disallowed_in_before_trading_start(exception): """ Decorator for API methods that cannot be called from within TradingAlgorithm.before_trading_start. `exception` will be raised if the method is called inside `before_trading_start`. Examples -------- @disallowed_in_before_trading_start(SomeException("Don't do that!")) def method(self): # Do stuff that is not allowed inside before_trading_start. """ def decorator(method): @wraps(method) def wrapped_method(self, *args, **kwargs): if self._in_before_trading_start: raise exception return method(self, *args, **kwargs) return wrapped_method return decorator def allowed_only_in_before_trading_start(exception): """ Decorator for API methods that can be called only from within TradingAlgorithm.before_trading_start. `exception` will be raised if the method is called outside `before_trading_start`. Usage ----- @allowed_only_in_before_trading_start(SomeException("Don't do that!")) def method(self): # Do stuff that is only allowed inside before_trading_start. 
""" def decorator(method): @wraps(method) def wrapped_method(self, *args, **kwargs): if not self._in_before_trading_start: raise exception return method(self, *args, **kwargs) return wrapped_method return decorator
zipline-trader | /zipline_trader-1.6.1-cp36-cp36m-win32.whl/zipline/utils/api_support.py | api_support.py
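``ZiplineAPI`` itself is a scoped switch; a minimal sketch, where the ``algo`` object below is a placeholder for a real ``TradingAlgorithm`` instance:

from zipline.utils.algo_instance import get_algo_instance
from zipline.utils.api_support import ZiplineAPI

algo = object()  # placeholder; in practice a TradingAlgorithm instance

with ZiplineAPI(algo):
    # Inside the block, zipline.api functions resolve against `algo`.
    assert get_algo_instance() is algo
# On exit the previously active instance (if any) is restored.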
import pandas as pd import numpy as np from datetime import timedelta, datetime from trading_calendars import get_calendar from zipline.sources import SpecificEquityTrades from zipline.finance.trading import SimulationParameters from zipline.sources.test_source import create_trade def create_simulation_parameters(year=2006, start=None, end=None, capital_base=float("1.0e5"), num_days=None, data_frequency='daily', emission_rate='daily', trading_calendar=None): if not trading_calendar: trading_calendar = get_calendar("NYSE") if start is None: start = pd.Timestamp("{0}-01-01".format(year), tz='UTC') elif type(start) == datetime: start = pd.Timestamp(start) if end is None: if num_days: start_index = trading_calendar.all_sessions.searchsorted(start) end = trading_calendar.all_sessions[start_index + num_days - 1] else: end = pd.Timestamp("{0}-12-31".format(year), tz='UTC') elif type(end) == datetime: end = pd.Timestamp(end) sim_params = SimulationParameters( start_session=start, end_session=end, capital_base=capital_base, data_frequency=data_frequency, emission_rate=emission_rate, trading_calendar=trading_calendar, ) return sim_params def get_next_trading_dt(current, interval, trading_calendar): next_dt = pd.Timestamp(current).tz_convert(trading_calendar.tz) while True: # Convert timestamp to naive before adding day, otherwise the when # stepping over EDT an hour is added. next_dt = pd.Timestamp(next_dt.replace(tzinfo=None)) next_dt = next_dt + interval next_dt = pd.Timestamp(next_dt, tz=trading_calendar.tz) next_dt_utc = next_dt.tz_convert('UTC') if trading_calendar.is_open_on_minute(next_dt_utc): break next_dt = next_dt_utc.tz_convert(trading_calendar.tz) return next_dt_utc def create_trade_history(sid, prices, amounts, interval, sim_params, trading_calendar, source_id="test_factory"): trades = [] current = sim_params.first_open oneday = timedelta(days=1) use_midnight = interval >= oneday for price, amount in zip(prices, amounts): if use_midnight: trade_dt = current.replace(hour=0, minute=0) else: trade_dt = current trade = create_trade(sid, price, amount, trade_dt, source_id) trades.append(trade) current = get_next_trading_dt(current, interval, trading_calendar) assert len(trades) == len(prices) return trades def create_returns_from_range(sim_params): return pd.Series(index=sim_params.sessions, data=np.random.rand(len(sim_params.sessions))) def create_returns_from_list(returns, sim_params): return pd.Series(index=sim_params.sessions[:len(returns)], data=returns) def create_daily_trade_source(sids, sim_params, asset_finder, trading_calendar): """ creates trade_count trades for each sid in sids list. first trade will be on sim_params.start_session, and daily thereafter for each sid. Thus, two sids should result in two trades per day. 
""" return create_trade_source( sids, timedelta(days=1), sim_params, asset_finder, trading_calendar=trading_calendar, ) def create_trade_source(sids, trade_time_increment, sim_params, asset_finder, trading_calendar): # If the sim_params define an end that is during market hours, that will be # used as the end of the data source if trading_calendar.is_open_on_minute(sim_params.end_session): end = sim_params.end_session # Otherwise, the last_close after the end_session is used as the end of the # data source else: end = sim_params.last_close args = tuple() kwargs = { 'sids': sids, 'start': sim_params.first_open, 'end': end, 'delta': trade_time_increment, 'trading_calendar': trading_calendar, 'asset_finder': asset_finder, } source = SpecificEquityTrades(*args, **kwargs) return source
zipline-trader | /zipline_trader-1.6.1-cp36-cp36m-win32.whl/zipline/utils/factory.py | factory.py
import click
import pandas as pd

from .context_tricks import CallbackManager


def maybe_show_progress(it, show_progress, **kwargs):
    """Optionally show a progress bar for the given iterator.

    Parameters
    ----------
    it : iterable
        The underlying iterator.
    show_progress : bool
        Should progress be shown.
    **kwargs
        Forwarded to the click progress bar.

    Returns
    -------
    itercontext : context manager
        A context manager whose enter is the actual iterator to use.

    Examples
    --------
    .. code-block:: python

       with maybe_show_progress([1, 2, 3], True) as ns:
            for n in ns:
                ...
    """
    if show_progress:
        return click.progressbar(it, **kwargs)

    # context manager that just returns `it` when we enter it
    return CallbackManager(lambda it=it: it)


class _DatetimeParam(click.ParamType):
    def __init__(self, tz=None):
        self.tz = tz

    def parser(self, value):
        return pd.Timestamp(value, tz=self.tz)

    @property
    def name(self):
        return type(self).__name__.upper()

    def convert(self, value, param, ctx):
        try:
            return self.parser(value)
        except ValueError:
            self.fail(
                '%s is not a valid %s' % (value, self.name.lower()),
                param,
                ctx,
            )


class Timestamp(_DatetimeParam):
    """A click parameter that parses the value into pandas.Timestamp objects.

    Parameters
    ----------
    tz : timezone-coercable, optional
        The timezone to parse the string as.
        By default the timezone will be inferred from the string or left
        naive.
    """


class Date(_DatetimeParam):
    """A click parameter that parses the value into datetime.date objects.

    Parameters
    ----------
    tz : timezone-coercable, optional
        The timezone to parse the string as.
        By default the timezone will be inferred from the string or left
        naive.
    as_timestamp : bool, optional
        If True, return the value as a pd.Timestamp object normalized to
        midnight.
    """
    def __init__(self, tz=None, as_timestamp=False):
        super(Date, self).__init__(tz=tz)
        self.as_timestamp = as_timestamp

    def parser(self, value):
        ts = super(Date, self).parser(value)
        return ts.normalize() if self.as_timestamp else ts.date()


class Time(_DatetimeParam):
    """A click parameter that parses the value into datetime.time objects.

    Parameters
    ----------
    tz : timezone-coercable, optional
        The timezone to parse the string as.
        By default the timezone will be inferred from the string or left
        naive.
    """
    def parser(self, value):
        return super(Time, self).parser(value).time()


class Timedelta(_DatetimeParam):
    """A click parameter that parses values into pd.Timedelta objects.

    Parameters
    ----------
    unit : {'D', 'h', 'm', 's', 'ms', 'us', 'ns'}, optional
        Denotes the unit of the input if the input is an integer.
    """
    def __init__(self, unit='ns'):
        self.unit = unit

    def parser(self, value):
        return pd.Timedelta(value, unit=self.unit)
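These parameter types plug into ``click`` options; a minimal sketch of a command accepting a date range (the command and option names are invented):

import click

from zipline.utils.cli import Date


@click.command()
@click.option('--start', type=Date(tz='utc', as_timestamp=True))
@click.option('--end', type=Date(tz='utc', as_timestamp=True))
def backtest(start, end):
    click.echo('running from %s to %s' % (start, end))


if __name__ == '__main__':
    backtest()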
zipline-trader | /zipline_trader-1.6.1-cp36-cp36m-win32.whl/zipline/utils/cli.py | cli.py
import abc import logbook from datetime import datetime import pandas as pd from six import with_metaclass from zipline.errors import ( AccountControlViolation, TradingControlViolation, ) from zipline.utils.input_validation import ( expect_bounded, expect_types, ) log = logbook.Logger('TradingControl') class TradingControl(with_metaclass(abc.ABCMeta)): """ Abstract base class representing a fail-safe control on the behavior of any algorithm. """ def __init__(self, on_error, **kwargs): """ Track any arguments that should be printed in the error message generated by self.fail. """ self.on_error = on_error self.__fail_args = kwargs @abc.abstractmethod def validate(self, asset, amount, portfolio, algo_datetime, algo_current_data): """ Before any order is executed by TradingAlgorithm, this method should be called *exactly once* on each registered TradingControl object. If the specified asset and amount do not violate this TradingControl's restraint given the information in `portfolio`, this method should return None and have no externally-visible side-effects. If the desired order violates this TradingControl's contraint, this method should call self.fail(asset, amount). """ raise NotImplementedError def _constraint_msg(self, metadata): constraint = repr(self) if metadata: constraint = "{constraint} (Metadata: {metadata})".format( constraint=constraint, metadata=metadata ) return constraint def handle_violation(self, asset, amount, datetime, metadata=None): """ Handle a TradingControlViolation, either by raising or logging and error with information about the failure. If dynamic information should be displayed as well, pass it in via `metadata`. """ constraint = self._constraint_msg(metadata) if self.on_error == 'fail': raise TradingControlViolation( asset=asset, amount=amount, datetime=datetime, constraint=constraint) elif self.on_error == 'log': log.error("Order for {amount} shares of {asset} at {dt} " "violates trading constraint {constraint}", amount=amount, asset=asset, dt=datetime, constraint=constraint) def __repr__(self): return "{name}({attrs})".format(name=self.__class__.__name__, attrs=self.__fail_args) class MaxOrderCount(TradingControl): """ TradingControl representing a limit on the number of orders that can be placed in a given trading day. """ def __init__(self, on_error, max_count): super(MaxOrderCount, self).__init__(on_error, max_count=max_count) self.orders_placed = 0 self.max_count = max_count self.current_date = None def validate(self, asset, amount, portfolio, algo_datetime, algo_current_data): """ Fail if we've already placed self.max_count orders today. """ algo_date = algo_datetime.date() # Reset order count if it's a new day. if self.current_date and self.current_date != algo_date: self.orders_placed = 0 self.current_date = algo_date if self.orders_placed >= self.max_count: self.handle_violation(asset, amount, algo_datetime) self.orders_placed += 1 class RestrictedListOrder(TradingControl): """TradingControl representing a restricted list of assets that cannot be ordered by the algorithm. Parameters ---------- restrictions : zipline.finance.asset_restrictions.Restrictions Object representing restrictions of a group of assets. """ def __init__(self, on_error, restrictions): super(RestrictedListOrder, self).__init__(on_error) self.restrictions = restrictions def validate(self, asset, amount, portfolio, algo_datetime, algo_current_data): """ Fail if the asset is in the restricted_list. 
""" if self.restrictions.is_restricted(asset, algo_datetime): self.handle_violation(asset, amount, algo_datetime) class MaxOrderSize(TradingControl): """ TradingControl representing a limit on the magnitude of any single order placed with the given asset. Can be specified by share or by dollar value. """ def __init__(self, on_error, asset=None, max_shares=None, max_notional=None): super(MaxOrderSize, self).__init__(on_error, asset=asset, max_shares=max_shares, max_notional=max_notional) self.asset = asset self.max_shares = max_shares self.max_notional = max_notional if max_shares is None and max_notional is None: raise ValueError( "Must supply at least one of max_shares and max_notional" ) if max_shares and max_shares < 0: raise ValueError( "max_shares cannot be negative." ) if max_notional and max_notional < 0: raise ValueError( "max_notional must be positive." ) def validate(self, asset, amount, portfolio, algo_datetime, algo_current_data): """ Fail if the magnitude of the given order exceeds either self.max_shares or self.max_notional. """ if self.asset is not None and self.asset != asset: return if self.max_shares is not None and abs(amount) > self.max_shares: self.handle_violation(asset, amount, algo_datetime) current_asset_price = algo_current_data.current(asset, "price") order_value = amount * current_asset_price too_much_value = (self.max_notional is not None and abs(order_value) > self.max_notional) if too_much_value: self.handle_violation(asset, amount, algo_datetime) class MaxPositionSize(TradingControl): """ TradingControl representing a limit on the maximum position size that can be held by an algo for a given asset. """ def __init__(self, on_error, asset=None, max_shares=None, max_notional=None): super(MaxPositionSize, self).__init__(on_error, asset=asset, max_shares=max_shares, max_notional=max_notional) self.asset = asset self.max_shares = max_shares self.max_notional = max_notional if max_shares is None and max_notional is None: raise ValueError( "Must supply at least one of max_shares and max_notional" ) if max_shares and max_shares < 0: raise ValueError( "max_shares cannot be negative." ) if max_notional and max_notional < 0: raise ValueError( "max_notional must be positive." ) def validate(self, asset, amount, portfolio, algo_datetime, algo_current_data): """ Fail if the given order would cause the magnitude of our position to be greater in shares than self.max_shares or greater in dollar value than self.max_notional. """ if self.asset is not None and self.asset != asset: return current_share_count = portfolio.positions[asset].amount shares_post_order = current_share_count + amount too_many_shares = (self.max_shares is not None and abs(shares_post_order) > self.max_shares) if too_many_shares: self.handle_violation(asset, amount, algo_datetime) current_price = algo_current_data.current(asset, "price") value_post_order = shares_post_order * current_price too_much_value = (self.max_notional is not None and abs(value_post_order) > self.max_notional) if too_much_value: self.handle_violation(asset, amount, algo_datetime) class LongOnly(TradingControl): """ TradingControl representing a prohibition against holding short positions. """ def __init__(self, on_error): super(LongOnly, self).__init__(on_error) def validate(self, asset, amount, portfolio, algo_datetime, algo_current_data): """ Fail if we would hold negative shares of asset after completing this order. 
""" if portfolio.positions[asset].amount + amount < 0: self.handle_violation(asset, amount, algo_datetime) class AssetDateBounds(TradingControl): """ TradingControl representing a prohibition against ordering an asset before its start_date, or after its end_date. """ def __init__(self, on_error): super(AssetDateBounds, self).__init__(on_error) def validate(self, asset, amount, portfolio, algo_datetime, algo_current_data): """ Fail if the algo has passed this Asset's end_date, or before the Asset's start date. """ # If the order is for 0 shares, then silently pass through. if amount == 0: return normalized_algo_dt = pd.Timestamp(algo_datetime).normalize() # Fail if the algo is before this Asset's start_date if asset.start_date: normalized_start = pd.Timestamp(asset.start_date).normalize() if normalized_algo_dt < normalized_start: metadata = { 'asset_start_date': normalized_start } self.handle_violation( asset, amount, algo_datetime, metadata=metadata) # Fail if the algo has passed this Asset's end_date if asset.end_date: normalized_end = pd.Timestamp(asset.end_date).normalize() if normalized_algo_dt > normalized_end: metadata = { 'asset_end_date': normalized_end } self.handle_violation( asset, amount, algo_datetime, metadata=metadata) class AccountControl(with_metaclass(abc.ABCMeta)): """ Abstract base class representing a fail-safe control on the behavior of any algorithm. """ def __init__(self, **kwargs): """ Track any arguments that should be printed in the error message generated by self.fail. """ self.__fail_args = kwargs @abc.abstractmethod def validate(self, _portfolio, _account, _algo_datetime, _algo_current_data): """ On each call to handle data by TradingAlgorithm, this method should be called *exactly once* on each registered AccountControl object. If the check does not violate this AccountControl's restraint given the information in `portfolio` and `account`, this method should return None and have no externally-visible side-effects. If the desired order violates this AccountControl's contraint, this method should call self.fail(). """ raise NotImplementedError def fail(self): """ Raise an AccountControlViolation with information about the failure. """ raise AccountControlViolation(constraint=repr(self)) def __repr__(self): return "{name}({attrs})".format(name=self.__class__.__name__, attrs=self.__fail_args) class MaxLeverage(AccountControl): """ AccountControl representing a limit on the maximum leverage allowed by the algorithm. """ def __init__(self, max_leverage): """ max_leverage is the gross leverage in decimal form. For example, 2, limits an algorithm to trading at most double the account value. """ super(MaxLeverage, self).__init__(max_leverage=max_leverage) self.max_leverage = max_leverage if max_leverage is None: raise ValueError( "Must supply max_leverage" ) if max_leverage < 0: raise ValueError( "max_leverage must be positive" ) def validate(self, _portfolio, _account, _algo_datetime, _algo_current_data): """ Fail if the leverage is greater than the allowed leverage. """ if _account.leverage > self.max_leverage: self.fail() class MinLeverage(AccountControl): """AccountControl representing a limit on the minimum leverage allowed by the algorithm after a threshold period of time. Parameters ---------- min_leverage : float The gross leverage in decimal form. deadline : datetime The date the min leverage must be achieved by. For example, min_leverage=2 limits an algorithm to trading at minimum double the account value by the deadline date. 
""" @expect_types( __funcname='MinLeverage', min_leverage=(int, float), deadline=datetime ) @expect_bounded(__funcname='MinLeverage', min_leverage=(0, None)) def __init__(self, min_leverage, deadline): super(MinLeverage, self).__init__(min_leverage=min_leverage, deadline=deadline) self.min_leverage = min_leverage self.deadline = deadline def validate(self, _portfolio, account, algo_datetime, _algo_current_data): """ Make validation checks if we are after the deadline. Fail if the leverage is less than the min leverage. """ if (algo_datetime > self.deadline and account.leverage < self.min_leverage): self.fail()
zipline-trader
/zipline_trader-1.6.1-cp36-cp36m-win32.whl/zipline/finance/controls.py
controls.py
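Illustrative sketch (not part of the module above): exercising MaxOrderCount directly with stub arguments. In a real backtest these controls are registered for you and fed the live portfolio/data objects; MaxOrderCount.validate never touches those, so plain None placeholders are enough here.

import pandas as pd
from zipline.finance.controls import MaxOrderCount

control = MaxOrderCount(on_error='log', max_count=2)
dt = pd.Timestamp('2018-01-03 15:30', tz='UTC')

for _ in range(3):
    # The third call exceeds max_count; with on_error='log' it logs the
    # violation instead of raising TradingControlViolation.
    control.validate(asset=None, amount=10, portfolio=None,
                     algo_datetime=dt, algo_current_data=None)

control.orders_placed   # 3 -- the counter keeps incrementing after a violation

With on_error='fail', the same third call would raise TradingControlViolation instead of logging.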
TRADING_DAYS_IN_YEAR = 250 TRADING_HOURS_IN_DAY = 6.5 MINUTES_IN_HOUR = 60 ANNUALIZER = {'daily': TRADING_DAYS_IN_YEAR, 'hourly': TRADING_DAYS_IN_YEAR * TRADING_HOURS_IN_DAY, 'minute': TRADING_DAYS_IN_YEAR * TRADING_HOURS_IN_DAY * MINUTES_IN_HOUR} # NOTE: It may be worth revisiting how the keys for this dictionary are # specified, for instance making them ContinuousFuture objects instead of # static strings. FUTURE_EXCHANGE_FEES_BY_SYMBOL = { 'AD': 1.60, # AUD 'AI': 0.96, # Bloomberg Commodity Index 'BD': 1.50, # Big Dow 'BO': 1.95, # Soybean Oil 'BP': 1.60, # GBP 'CD': 1.60, # CAD 'CL': 1.50, # Crude Oil 'CM': 1.03, # Corn e-mini 'CN': 1.95, # Corn 'DJ': 1.50, # Dow Jones 'EC': 1.60, # Euro FX 'ED': 1.25, # Eurodollar 'EE': 1.50, # Euro FX e-mini 'EI': 1.50, # MSCI Emerging Markets mini 'EL': 1.50, # Eurodollar NYSE LIFFE 'ER': 0.65, # Russell2000 e-mini 'ES': 1.18, # SP500 e-mini 'ET': 1.50, # Ethanol 'EU': 1.50, # Eurodollar e-micro 'FC': 2.03, # Feeder Cattle 'FF': 0.96, # 3-Day Federal Funds 'FI': 0.56, # Deliverable Interest Rate Swap 5y 'FS': 1.50, # Interest Rate Swap 5y 'FV': 0.65, # US 5y 'GC': 1.50, # Gold 'HG': 1.50, # Copper 'HO': 1.50, # Heating Oil 'HU': 1.50, # Unleaded Gasoline 'JE': 0.16, # JPY e-mini 'JY': 1.60, # JPY 'LB': 2.03, # Lumber 'LC': 2.03, # Live Cattle 'LH': 2.03, # Lean Hogs 'MB': 1.50, # Municipal Bonds 'MD': 1.50, # SP400 Midcap 'ME': 1.60, # MXN 'MG': 1.50, # MSCI EAFE mini 'MI': 1.18, # SP400 Midcap e-mini 'MS': 1.03, # Soybean e-mini 'MW': 1.03, # Wheat e-mini 'ND': 1.50, # Nasdaq100 'NG': 1.50, # Natural Gas 'NK': 2.15, # Nikkei225 'NQ': 1.18, # Nasdaq100 e-mini 'NZ': 1.60, # NZD 'OA': 1.95, # Oats 'PA': 1.50, # Palladium 'PB': 1.50, # Pork Bellies 'PL': 1.50, # Platinum 'QG': 0.50, # Natural Gas e-mini 'QM': 1.20, # Crude Oil e-mini 'RM': 1.50, # Russell1000 e-mini 'RR': 1.95, # Rough Rice 'SB': 2.10, # Sugar 'SF': 1.60, # CHF 'SM': 1.95, # Soybean Meal 'SP': 2.40, # SP500 'SV': 1.50, # Silver 'SY': 1.95, # Soybean 'TB': 1.50, # Treasury Bills 'TN': 0.56, # Deliverable Interest Rate Swap 10y 'TS': 1.50, # Interest Rate Swap 10y 'TU': 1.50, # US 2y 'TY': 0.75, # US 10y 'UB': 0.85, # Ultra Tbond 'US': 0.80, # US 30y 'VX': 1.50, # VIX 'WC': 1.95, # Wheat 'XB': 1.50, # RBOB Gasoline 'XG': 0.75, # Gold e-mini 'YM': 1.50, # Dow Jones e-mini 'YS': 0.75, # Silver e-mini } # See `zipline.finance.slippage.VolatilityVolumeShare` for more information on # how these constants are used. 
DEFAULT_ETA = 0.049018143225019836 ROOT_SYMBOL_TO_ETA = { 'AD': DEFAULT_ETA, # AUD 'AI': DEFAULT_ETA, # Bloomberg Commodity Index 'BD': 0.050346811117733474, # Big Dow 'BO': 0.054930995070046298, # Soybean Oil 'BP': 0.047841544238716338, # GBP 'CD': 0.051124420640250717, # CAD 'CL': 0.04852544628414196, # Crude Oil 'CM': 0.052683478163348625, # Corn e-mini 'CN': 0.053499718390037809, # Corn 'DJ': 0.02313009072076987, # Dow Jones 'EC': 0.04885131067661861, # Euro FX 'ED': 0.094184297090245755, # Eurodollar 'EE': 0.048713151357687556, # Euro FX e-mini 'EI': 0.031712708439692663, # MSCI Emerging Markets mini 'EL': 0.044207422018209361, # Eurodollar NYSE LIFFE 'ER': 0.045930567737711307, # Russell2000 e-mini 'ES': 0.047304418321993502, # SP500 e-mini 'ET': DEFAULT_ETA, # Ethanol 'EU': 0.049750396084029064, # Eurodollar e-micro 'FC': 0.058728734202178494, # Feeder Cattle 'FF': 0.048970591527624042, # 3-Day Federal Funds 'FI': 0.033477176738170772, # Deliverable Interest Rate Swap 5y 'FS': 0.034557788010453824, # Interest Rate Swap 5y 'FV': 0.046544427716056963, # US 5y 'GC': 0.048933313546125207, # Gold 'HG': 0.052238417524987799, # Copper 'HO': 0.045061318412156062, # Heating Oil 'HU': 0.017154313062463938, # Unleaded Gasoline 'JE': 0.013948949613401812, # JPY e-mini 'JY': DEFAULT_ETA, # JPY 'LB': 0.06146586386903994, # Lumber 'LC': 0.055853801862858619, # Live Cattle 'LH': 0.057557004630219781, # Lean Hogs 'MB': DEFAULT_ETA, # Municipal Bonds 'MD': DEFAULT_ETA, # SP400 Midcap 'ME': 0.030383767727818548, # MXN 'MG': 0.029579261656151684, # MSCI EAFE mini 'MI': 0.041026288873007355, # SP400 Midcap e-mini 'MS': DEFAULT_ETA, # Soybean e-mini 'MW': 0.052579919663880245, # Wheat e-mini 'ND': DEFAULT_ETA, # Nasdaq100 'NG': 0.047897809233755716, # Natural Gas 'NK': 0.044555435054791433, # Nikkei225 'NQ': 0.044772425085977945, # Nasdaq100 e-mini 'NZ': 0.049170418073872041, # NZD 'OA': 0.056973267232775522, # Oats 'PA': DEFAULT_ETA, # Palladium 'PB': DEFAULT_ETA, # Pork Bellies 'PL': 0.054579379665647493, # Platinum 'QG': DEFAULT_ETA, # Natural Gas e-mini 'QM': DEFAULT_ETA, # Crude Oil e-mini 'RM': 0.037425041244579654, # Russell1000 e-mini 'RR': DEFAULT_ETA, # Rough Rice 'SB': 0.057388160345668134, # Sugar 'SF': 0.047784825569615726, # CHF 'SM': 0.048552860559844223, # Soybean Meal 'SP': DEFAULT_ETA, # SP500 'SV': 0.052691435039931109, # Silver 'SY': 0.052041703657281613, # Soybean 'TB': DEFAULT_ETA, # Treasury Bills 'TN': 0.033363465365262503, # Deliverable Interest Rate Swap 10y 'TS': 0.032908878455069152, # Interest Rate Swap 10y 'TU': 0.063867646063840794, # US 2y 'TY': 0.050586988554700826, # US 10y 'UB': DEFAULT_ETA, # Ultra Tbond 'US': 0.047984179873590722, # US 30y 'VX': DEFAULT_ETA, # VIX 'WC': 0.052636542119329242, # Wheat 'XB': 0.044444916388854484, # RBOB Gasoline 'XG': DEFAULT_ETA, # Gold e-mini 'YM': DEFAULT_ETA, # Dow Jones e-mini 'YS': DEFAULT_ETA, # Silver e-mini }
zipline-trader
/zipline_trader-1.6.1-cp36-cp36m-win32.whl/zipline/finance/constants.py
constants.py
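Small usage sketch (illustrative only): the tables above are plain dicts, so they can be read directly, and PerContract in commission.py further down layers user overrides on top of FUTURE_EXCHANGE_FEES_BY_SYMBOL in the same way shown here with toolz.merge.

from toolz import merge
from zipline.finance.constants import (
    ANNUALIZER,
    FUTURE_EXCHANGE_FEES_BY_SYMBOL,
)

ANNUALIZER['minute']                    # 97500.0 minute bars per year (250 * 6.5 * 60)
FUTURE_EXCHANGE_FEES_BY_SYMBOL['ES']    # 1.18 -- SP500 e-mini exchange fee

# Override one symbol while keeping the published defaults for the rest.
fees = merge(FUTURE_EXCHANGE_FEES_BY_SYMBOL, {'ES': 1.30})
fees['ES'], fees['CL']                  # (1.30, 1.5)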
import abc from numpy import vectorize from functools import partial, reduce import operator import pandas as pd from six import with_metaclass, iteritems from collections import namedtuple from toolz import groupby from zipline.utils.enum import enum from zipline.utils.numpy_utils import vectorized_is_element from zipline.assets import Asset Restriction = namedtuple( 'Restriction', ['asset', 'effective_date', 'state'] ) RESTRICTION_STATES = enum( 'ALLOWED', 'FROZEN', ) class Restrictions(with_metaclass(abc.ABCMeta)): """ Abstract restricted list interface, representing a set of assets that an algorithm is restricted from trading. """ @abc.abstractmethod def is_restricted(self, assets, dt): """ Is the asset restricted (RestrictionStates.FROZEN) on the given dt? Parameters ---------- asset : Asset of iterable of Assets The asset(s) for which we are querying a restriction dt : pd.Timestamp The timestamp of the restriction query Returns ------- is_restricted : bool or pd.Series[bool] indexed by asset Is the asset or assets restricted on this dt? """ raise NotImplementedError('is_restricted') def __or__(self, other_restriction): """Base implementation for combining two restrictions. """ # If the right side is a _UnionRestrictions, defers to the # _UnionRestrictions implementation of `|`, which intelligently # flattens restricted lists if isinstance(other_restriction, _UnionRestrictions): return other_restriction | self return _UnionRestrictions([self, other_restriction]) class _UnionRestrictions(Restrictions): """ A union of a number of sub restrictions. Parameters ---------- sub_restrictions : iterable of Restrictions (but not _UnionRestrictions) The Restrictions to be added together Notes ----- - Consumers should not construct instances of this class directly, but instead use the `|` operator to combine restrictions """ def __new__(cls, sub_restrictions): # Filter out NoRestrictions and deal with resulting cases involving # one or zero sub_restrictions sub_restrictions = [ r for r in sub_restrictions if not isinstance(r, NoRestrictions) ] if len(sub_restrictions) == 0: return NoRestrictions() elif len(sub_restrictions) == 1: return sub_restrictions[0] new_instance = super(_UnionRestrictions, cls).__new__(cls) new_instance.sub_restrictions = sub_restrictions return new_instance def __or__(self, other_restriction): """ Overrides the base implementation for combining two restrictions, of which the left side is a _UnionRestrictions. """ # Flatten the underlying sub restrictions of _UnionRestrictions if isinstance(other_restriction, _UnionRestrictions): new_sub_restrictions = \ self.sub_restrictions + other_restriction.sub_restrictions else: new_sub_restrictions = self.sub_restrictions + [other_restriction] return _UnionRestrictions(new_sub_restrictions) def is_restricted(self, assets, dt): if isinstance(assets, Asset): return any( r.is_restricted(assets, dt) for r in self.sub_restrictions ) return reduce( operator.or_, (r.is_restricted(assets, dt) for r in self.sub_restrictions) ) class NoRestrictions(Restrictions): """ A no-op restrictions that contains no restrictions. """ def is_restricted(self, assets, dt): if isinstance(assets, Asset): return False return pd.Series(index=pd.Index(assets), data=False) class StaticRestrictions(Restrictions): """ Static restrictions stored in memory that are constant regardless of dt for each asset. 
Parameters ---------- restricted_list : iterable of assets The assets to be restricted """ def __init__(self, restricted_list): self._restricted_set = frozenset(restricted_list) def is_restricted(self, assets, dt): """ An asset is restricted for all dts if it is in the static list. """ if isinstance(assets, Asset): return assets in self._restricted_set return pd.Series( index=pd.Index(assets), data=vectorized_is_element(assets, self._restricted_set) ) class HistoricalRestrictions(Restrictions): """ Historical restrictions stored in memory with effective dates for each asset. Parameters ---------- restrictions : iterable of namedtuple Restriction The restrictions, each defined by an asset, effective date and state """ def __init__(self, restrictions): # A dict mapping each asset to its restrictions, which are sorted by # ascending order of effective_date self._restrictions_by_asset = { asset: sorted( restrictions_for_asset, key=lambda x: x.effective_date ) for asset, restrictions_for_asset in iteritems(groupby(lambda x: x.asset, restrictions)) } def is_restricted(self, assets, dt): """ Returns whether or not an asset or iterable of assets is restricted on a dt. """ if isinstance(assets, Asset): return self._is_restricted_for_asset(assets, dt) is_restricted = partial(self._is_restricted_for_asset, dt=dt) return pd.Series( index=pd.Index(assets), data=vectorize(is_restricted, otypes=[bool])(assets) ) def _is_restricted_for_asset(self, asset, dt): state = RESTRICTION_STATES.ALLOWED for r in self._restrictions_by_asset.get(asset, ()): if r.effective_date > dt: break state = r.state return state == RESTRICTION_STATES.FROZEN class SecurityListRestrictions(Restrictions): """ Restrictions based on a security list. Parameters ---------- restrictions : zipline.utils.security_list.SecurityList The restrictions defined by a SecurityList """ def __init__(self, security_list_by_dt): self.current_securities = security_list_by_dt.current_securities def is_restricted(self, assets, dt): securities_in_list = self.current_securities(dt) if isinstance(assets, Asset): return assets in securities_in_list return pd.Series( index=pd.Index(assets), data=vectorized_is_element(assets, securities_in_list) )
zipline-trader
/zipline_trader-1.6.1-cp36-cp36m-win32.whl/zipline/finance/asset_restrictions.py
asset_restrictions.py
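A short sketch of how these restriction classes compose. The two Equity objects `aapl` and `msft` are hypothetical placeholders; in practice they come from an asset finder, so this is a shape-of-the-API sketch rather than a copy-paste script.

import pandas as pd
from zipline.finance.asset_restrictions import (
    Restriction,
    RESTRICTION_STATES,
    StaticRestrictions,
    HistoricalRestrictions,
)

# `aapl` and `msft` stand in for real Asset objects looked up elsewhere.
always_frozen = StaticRestrictions([aapl])
frozen_for_q3 = HistoricalRestrictions([
    Restriction(msft, pd.Timestamp('2017-07-01', tz='UTC'),
                RESTRICTION_STATES.FROZEN),
    Restriction(msft, pd.Timestamp('2017-10-01', tz='UTC'),
                RESTRICTION_STATES.ALLOWED),
])

# `|` builds a _UnionRestrictions; an asset is restricted if any member says so.
combined = always_frozen | frozen_for_q3
combined.is_restricted(aapl, pd.Timestamp('2017-08-15', tz='UTC'))   # True
combined.is_restricted(msft, pd.Timestamp('2017-08-15', tz='UTC'))   # True
combined.is_restricted(msft, pd.Timestamp('2017-11-01', tz='UTC'))   # False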
from abc import abstractmethod from collections import defaultdict from six import with_metaclass from toolz import merge from zipline.assets import Equity, Future from zipline.finance.constants import FUTURE_EXCHANGE_FEES_BY_SYMBOL from zipline.finance.shared import AllowedAssetMarker, FinancialModelMeta from zipline.utils.dummy import DummyMapping DEFAULT_PER_SHARE_COST = 0.001 # 0.1 cents per share DEFAULT_PER_CONTRACT_COST = 0.85 # $0.85 per future contract DEFAULT_PER_DOLLAR_COST = 0.0015 # 0.15 cents per dollar DEFAULT_MINIMUM_COST_PER_EQUITY_TRADE = 0.0 # $0 per trade DEFAULT_MINIMUM_COST_PER_FUTURE_TRADE = 0.0 # $0 per trade class CommissionModel(with_metaclass(FinancialModelMeta)): """Abstract base class for commission models. Commission models are responsible for accepting order/transaction pairs and calculating how much commission should be charged to an algorithm's account on each transaction. To implement a new commission model, create a subclass of :class:`~zipline.finance.commission.CommissionModel` and implement :meth:`calculate`. """ # Asset types that are compatible with the given model. allowed_asset_types = (Equity, Future) @abstractmethod def calculate(self, order, transaction): """ Calculate the amount of commission to charge on ``order`` as a result of ``transaction``. Parameters ---------- order : zipline.finance.order.Order The order being processed. The ``commission`` field of ``order`` is a float indicating the amount of commission already charged on this order. transaction : zipline.finance.transaction.Transaction The transaction being processed. A single order may generate multiple transactions if there isn't enough volume in a given bar to fill the full amount requested in the order. Returns ------- amount_charged : float The additional commission, in dollars, that we should attribute to this order. """ raise NotImplementedError('calculate') class NoCommission(CommissionModel): """Model commissions as free. Notes ----- This is primarily used for testing. """ @staticmethod def calculate(order, transaction): return 0.0 class EquityCommissionModel(with_metaclass(AllowedAssetMarker, CommissionModel)): """ Base class for commission models which only support equities. """ allowed_asset_types = (Equity,) class FutureCommissionModel(with_metaclass(AllowedAssetMarker, CommissionModel)): """ Base class for commission models which only support futures. """ allowed_asset_types = (Future,) def calculate_per_unit_commission(order, transaction, cost_per_unit, initial_commission, min_trade_cost): """ If there is a minimum commission: If the order hasn't had a commission paid yet, pay the minimum commission. If the order has paid a commission, start paying additional commission once the minimum commission has been reached. If there is no minimum commission: Pay commission based on number of units in the transaction. """ additional_commission = abs(transaction.amount * cost_per_unit) if order.commission == 0: # no commission paid yet, pay at least the minimum plus a one-time # exchange fee. return max(min_trade_cost, additional_commission + initial_commission) else: # we've already paid some commission, so figure out how much we # would be paying if we only counted per unit. per_unit_total = \ abs(order.filled * cost_per_unit) + \ additional_commission + \ initial_commission if per_unit_total < min_trade_cost: # if we haven't hit the minimum threshold yet, don't pay # additional commission return 0 else: # we've exceeded the threshold, so pay more commission. 
return per_unit_total - order.commission class PerShare(EquityCommissionModel): """ Calculates a commission for a transaction based on a per share cost with an optional minimum cost per trade. Parameters ---------- cost : float, optional The amount of commissions paid per share traded. Default is one tenth of a cent per share. min_trade_cost : float, optional The minimum amount of commissions paid per trade. Default is no minimum. Notes ----- This is zipline's default commission model for equities. """ def __init__(self, cost=DEFAULT_PER_SHARE_COST, min_trade_cost=DEFAULT_MINIMUM_COST_PER_EQUITY_TRADE): self.cost_per_share = float(cost) self.min_trade_cost = min_trade_cost or 0 def __repr__(self): return ( '{class_name}(cost_per_share={cost_per_share}, ' 'min_trade_cost={min_trade_cost})' .format( class_name=self.__class__.__name__, cost_per_share=self.cost_per_share, min_trade_cost=self.min_trade_cost, ) ) def calculate(self, order, transaction): return calculate_per_unit_commission( order=order, transaction=transaction, cost_per_unit=self.cost_per_share, initial_commission=0, min_trade_cost=self.min_trade_cost, ) class PerContract(FutureCommissionModel): """ Calculates a commission for a transaction based on a per contract cost with an optional minimum cost per trade. Parameters ---------- cost : float or dict The amount of commissions paid per contract traded. If given a float, the commission for all futures contracts is the same. If given a dictionary, it must map root symbols to the commission cost for contracts of that symbol. exchange_fee : float or dict A flat-rate fee charged by the exchange per trade. This value is a constant, one-time charge no matter how many contracts are being traded. If given a float, the fee for all contracts is the same. If given a dictionary, it must map root symbols to the fee for contracts of that symbol. min_trade_cost : float, optional The minimum amount of commissions paid per trade. """ def __init__(self, cost, exchange_fee, min_trade_cost=DEFAULT_MINIMUM_COST_PER_FUTURE_TRADE): # If 'cost' or 'exchange fee' are constants, use a dummy mapping to # treat them as a dictionary that always returns the same value. # NOTE: These dictionary does not handle unknown root symbols, so it # may be worth revisiting this behavior. if isinstance(cost, (int, float)): self._cost_per_contract = DummyMapping(float(cost)) else: # Cost per contract is a dictionary. If the user's dictionary does # not provide a commission cost for a certain contract, fall back # on the pre-defined cost values per root symbol. self._cost_per_contract = defaultdict( lambda: DEFAULT_PER_CONTRACT_COST, **cost ) if isinstance(exchange_fee, (int, float)): self._exchange_fee = DummyMapping(float(exchange_fee)) else: # Exchange fee is a dictionary. If the user's dictionary does not # provide an exchange fee for a certain contract, fall back on the # pre-defined exchange fees per root symbol. self._exchange_fee = merge( FUTURE_EXCHANGE_FEES_BY_SYMBOL, exchange_fee, ) self.min_trade_cost = min_trade_cost or 0 def __repr__(self): if isinstance(self._cost_per_contract, DummyMapping): # Cost per contract is a constant, so extract it. cost_per_contract = self._cost_per_contract['dummy key'] else: cost_per_contract = '<varies>' if isinstance(self._exchange_fee, DummyMapping): # Exchange fee is a constant, so extract it. 
exchange_fee = self._exchange_fee['dummy key'] else: exchange_fee = '<varies>' return ( '{class_name}(cost_per_contract={cost_per_contract}, ' 'exchange_fee={exchange_fee}, min_trade_cost={min_trade_cost})' .format( class_name=self.__class__.__name__, cost_per_contract=cost_per_contract, exchange_fee=exchange_fee, min_trade_cost=self.min_trade_cost, ) ) def calculate(self, order, transaction): root_symbol = order.asset.root_symbol cost_per_contract = self._cost_per_contract[root_symbol] exchange_fee = self._exchange_fee[root_symbol] return calculate_per_unit_commission( order=order, transaction=transaction, cost_per_unit=cost_per_contract, initial_commission=exchange_fee, min_trade_cost=self.min_trade_cost, ) class PerTrade(CommissionModel): """ Calculates a commission for a transaction based on a per trade cost. For orders that require multiple fills, the full commission is charged to the first fill. Parameters ---------- cost : float, optional The flat amount of commissions paid per equity trade. """ def __init__(self, cost=DEFAULT_MINIMUM_COST_PER_EQUITY_TRADE): """ Cost parameter is the cost of a trade, regardless of share count. $5.00 per trade is fairly typical of discount brokers. """ # Cost needs to be floating point so that calculation using division # logic does not floor to an integer. self.cost = float(cost) def __repr__(self): return '{class_name}(cost_per_trade={cost})'.format( class_name=self.__class__.__name__, cost=self.cost, ) def calculate(self, order, transaction): """ If the order hasn't had a commission paid yet, pay the fixed commission. """ if order.commission == 0: # if the order hasn't had a commission attributed to it yet, # that's what we need to pay. return self.cost else: # order has already had commission attributed, so no more # commission. return 0.0 class PerFutureTrade(PerContract): """ Calculates a commission for a transaction based on a per trade cost. Parameters ---------- cost : float or dict The flat amount of commissions paid per trade, regardless of the number of contracts being traded. If given a float, the commission for all futures contracts is the same. If given a dictionary, it must map root symbols to the commission cost for trading contracts of that symbol. """ def __init__(self, cost=DEFAULT_MINIMUM_COST_PER_FUTURE_TRADE): # The per-trade cost can be represented as the exchange fee in a # per-contract model because the exchange fee is just a one time cost # incurred on the first fill. super(PerFutureTrade, self).__init__( cost=0, exchange_fee=cost, min_trade_cost=0, ) self._cost_per_trade = self._exchange_fee def __repr__(self): if isinstance(self._cost_per_trade, DummyMapping): # Cost per trade is a constant, so extract it. cost_per_trade = self._cost_per_trade['dummy key'] else: cost_per_trade = '<varies>' return '{class_name}(cost_per_trade={cost_per_trade})'.format( class_name=self.__class__.__name__, cost_per_trade=cost_per_trade, ) class PerDollar(EquityCommissionModel): """ Model commissions by applying a fixed cost per dollar transacted. Parameters ---------- cost : float, optional The flat amount of commissions paid per dollar of equities traded. Default is a commission of $0.0015 per dollar transacted. """ def __init__(self, cost=DEFAULT_PER_DOLLAR_COST): """ Cost parameter is the cost of a trade per-dollar. 
0.0015 on $1 million means $1,500 commission (=1M * 0.0015) """ self.cost_per_dollar = float(cost) def __repr__(self): return "{class_name}(cost_per_dollar={cost})".format( class_name=self.__class__.__name__, cost=self.cost_per_dollar) def calculate(self, order, transaction): """ Pay commission based on dollar value of shares. """ cost_per_share = transaction.price * self.cost_per_dollar return abs(transaction.amount) * cost_per_share
zipline-trader
/zipline_trader-1.6.1-cp36-cp36m-win32.whl/zipline/finance/commission.py
commission.py
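Minimal sketch of the per-unit commission logic. PerShare.calculate only reads order.commission, order.filled and transaction.amount, so lightweight namedtuple stand-ins (not the real Order/Transaction classes) are enough for illustration.

from collections import namedtuple
from zipline.finance.commission import PerShare

StubOrder = namedtuple('StubOrder', 'commission filled')
StubTxn = namedtuple('StubTxn', 'amount')

model = PerShare(cost=0.001, min_trade_cost=1.0)

# First fill: 300 shares * $0.001 = $0.30, below the $1 minimum, so the
# minimum trade cost is charged up front.
model.calculate(StubOrder(commission=0, filled=0), StubTxn(amount=300))       # 1.0

# A later fill on the same order (300 shares filled, $1.00 already paid): the
# running per-unit total is (300 + 1200) * $0.001 = $1.50, which exceeds the
# minimum, so only the ~$0.50 not yet paid is charged now.
model.calculate(StubOrder(commission=1.0, filled=300), StubTxn(amount=1200))  # ~0.5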
import abc from sys import float_info from six import with_metaclass from numpy import isfinite import zipline.utils.math_utils as zp_math from zipline.errors import BadOrderParameters from zipline.utils.compat import consistent_round class ExecutionStyle(with_metaclass(abc.ABCMeta)): """Base class for order execution styles. """ _exchange = None @abc.abstractmethod def get_limit_price(self, is_buy): """ Get the limit price for this order. Returns either None or a numerical value >= 0. """ raise NotImplementedError @abc.abstractmethod def get_stop_price(self, is_buy): """ Get the stop price for this order. Returns either None or a numerical value >= 0. """ raise NotImplementedError @property def exchange(self): """ The exchange to which this order should be routed. """ return self._exchange class MarketOrder(ExecutionStyle): """ Execution style for orders to be filled at current market price. This is the default for orders placed with :func:`~zipline.api.order`. """ def __init__(self, exchange=None): self._exchange = exchange def get_limit_price(self, _is_buy): return None def get_stop_price(self, _is_buy): return None class LimitOrder(ExecutionStyle): """ Execution style for orders to be filled at a price equal to or better than a specified limit price. Parameters ---------- limit_price : float Maximum price for buys, or minimum price for sells, at which the order should be filled. """ def __init__(self, limit_price, asset=None, exchange=None): check_stoplimit_prices(limit_price, 'limit') self.limit_price = limit_price self._exchange = exchange self.asset = asset def get_limit_price(self, is_buy): return asymmetric_round_price( self.limit_price, is_buy, tick_size=(0.01 if self.asset is None else self.asset.tick_size) ) def get_stop_price(self, _is_buy): return None class StopOrder(ExecutionStyle): """ Execution style representing a market order to be placed if market price reaches a threshold. Parameters ---------- stop_price : float Price threshold at which the order should be placed. For sells, the order will be placed if market price falls below this value. For buys, the order will be placed if market price rises above this value. """ def __init__(self, stop_price, asset=None, exchange=None): check_stoplimit_prices(stop_price, 'stop') self.stop_price = stop_price self._exchange = exchange self.asset = asset def get_limit_price(self, _is_buy): return None def get_stop_price(self, is_buy): return asymmetric_round_price( self.stop_price, not is_buy, tick_size=(0.01 if self.asset is None else self.asset.tick_size) ) class StopLimitOrder(ExecutionStyle): """ Execution style representing a limit order to be placed if market price reaches a threshold. Parameters ---------- limit_price : float Maximum price for buys, or minimum price for sells, at which the order should be filled, if placed. stop_price : float Price threshold at which the order should be placed. For sells, the order will be placed if market price falls below this value. For buys, the order will be placed if market price rises above this value. 
""" def __init__(self, limit_price, stop_price, asset=None, exchange=None): check_stoplimit_prices(limit_price, 'limit') check_stoplimit_prices(stop_price, 'stop') self.limit_price = limit_price self.stop_price = stop_price self._exchange = exchange self.asset = asset def get_limit_price(self, is_buy): return asymmetric_round_price( self.limit_price, is_buy, tick_size=(0.01 if self.asset is None else self.asset.tick_size) ) def get_stop_price(self, is_buy): return asymmetric_round_price( self.stop_price, not is_buy, tick_size=(0.01 if self.asset is None else self.asset.tick_size) ) def asymmetric_round_price(price, prefer_round_down, tick_size, diff=0.95): """ Asymmetric rounding function for adjusting prices to the specified number of places in a way that "improves" the price. For limit prices, this means preferring to round down on buys and preferring to round up on sells. For stop prices, it means the reverse. If prefer_round_down == True: When .05 below to .95 above a specified decimal place, use it. If prefer_round_down == False: When .95 below to .05 above a specified decimal place, use it. In math-speak: If prefer_round_down: [<X-1>.0095, X.0195) -> round to X.01. If not prefer_round_down: (<X-1>.0005, X.0105] -> round to X.01. """ precision = zp_math.number_of_decimal_places(tick_size) multiplier = int(tick_size * (10 ** precision)) diff -= 0.5 # shift the difference down diff *= (10 ** -precision) # adjust diff to precision of tick size diff *= multiplier # adjust diff to value of tick_size # Subtracting an epsilon from diff to enforce the open-ness of the upper # bound on buys and the lower bound on sells. Using the actual system # epsilon doesn't quite get there, so use a slightly less epsilon-ey value. epsilon = float_info.epsilon * 10 diff = diff - epsilon # relies on rounding half away from zero, unlike numpy's bankers' rounding rounded = tick_size * consistent_round( (price - (diff if prefer_round_down else -diff)) / tick_size ) if zp_math.tolerant_equals(rounded, 0.0): return 0.0 return rounded def check_stoplimit_prices(price, label): """ Check to make sure the stop/limit prices are reasonable and raise a BadOrderParameters exception if not. """ try: if not isfinite(price): raise BadOrderParameters( msg="Attempted to place an order with a {} price " "of {}.".format(label, price) ) # This catches arbitrary objects except TypeError: raise BadOrderParameters( msg="Attempted to place an order with a {} price " "of {}.".format(label, type(price)) ) if price < 0: raise BadOrderParameters( msg="Can't place a {} order with a negative price.".format(label) )
zipline-trader
/zipline_trader-1.6.1-cp36-cp36m-win32.whl/zipline/finance/execution.py
execution.py
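A quick illustration of the asymmetric rounding, assuming the default 0.01 tick size used when no asset is attached to the style: limit prices are rounded toward the side that is "better" for the trader.

from zipline.finance.execution import LimitOrder, asymmetric_round_price

style = LimitOrder(10.006)           # no asset attached -> 0.01 tick size

# Buys round the limit down, sells round it up, so the rounded limit never
# crosses the price the user asked for.
style.get_limit_price(is_buy=True)   # 10.0
style.get_limit_price(is_buy=False)  # 10.01

# The helper can also be called directly; prefer_round_down=True is the
# buy-side behaviour for limit prices.
asymmetric_round_price(10.006, prefer_round_down=True, tick_size=0.01)   # 10.0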
import logbook import pandas as pd from zipline.utils.memoize import remember_last from zipline.utils.pandas_utils import normalize_date log = logbook.Logger('Trading') DEFAULT_CAPITAL_BASE = 1e5 class SimulationParameters(object): def __init__(self, start_session, end_session, trading_calendar, capital_base=DEFAULT_CAPITAL_BASE, emission_rate='daily', data_frequency='daily', arena='backtest', execution_id=None): # assert type(start_session) == pd.Timestamp # assert type(end_session) == pd.Timestamp assert trading_calendar is not None, \ "Must pass in trading calendar!" assert start_session <= end_session, \ "Period start falls after period end." assert start_session <= trading_calendar.last_trading_session, \ "Period start falls after the last known trading day." assert end_session >= trading_calendar.first_trading_session, \ "Period end falls before the first known trading day." # chop off any minutes or hours on the given start and end dates, # as we only support session labels here (and we represent session # labels as midnight UTC). # self._start_session = normalize_date(start_session) # self._end_session = normalize_date(end_session) self._start_session = start_session self._end_session = end_session self._capital_base = capital_base if execution_id: self._execution_id = execution_id self._emission_rate = emission_rate self._data_frequency = data_frequency # copied to algorithm's environment for runtime access self._arena = arena self._trading_calendar = trading_calendar if not trading_calendar.is_session(self._start_session): # if the start date is not a valid session in this calendar, # push it forward to the first valid session self._start_session = trading_calendar.minute_to_session_label( pd.Timestamp(self._start_session) ) if not trading_calendar.is_session(self._end_session): # if the end date is not a valid session in this calendar, # pull it backward to the last valid session before the given # end date. 
self._end_session = trading_calendar.minute_to_session_label( pd.Timestamp(self._end_session), direction="previous" ) self._first_open = trading_calendar.open_and_close_for_session( self._start_session )[0] self._last_close = trading_calendar.open_and_close_for_session( self._end_session )[1] @property def capital_base(self): return self._capital_base @property def emission_rate(self): return self._emission_rate @property def data_frequency(self): return self._data_frequency @data_frequency.setter def data_frequency(self, val): self._data_frequency = val @property def arena(self): return self._arena @arena.setter def arena(self, val): self._arena = val @property def start_session(self): return self._start_session @property def end_session(self): return self._end_session @property def first_open(self): return self._first_open @property def last_close(self): return self._last_close @property def trading_calendar(self): return self._trading_calendar @property @remember_last def sessions(self): return self._trading_calendar.sessions_in_range( self.start_session, self.end_session ) def create_new(self, start_session, end_session, data_frequency=None): if data_frequency is None: data_frequency = self.data_frequency return SimulationParameters( start_session, end_session, self._trading_calendar, capital_base=self.capital_base, emission_rate=self.emission_rate, data_frequency=data_frequency, arena=self.arena ) def __repr__(self): return """ {class_name}( start_session={start_session}, end_session={end_session}, capital_base={capital_base}, data_frequency={data_frequency}, emission_rate={emission_rate}, first_open={first_open}, last_close={last_close}, trading_calendar={trading_calendar} )\ """.format(class_name=self.__class__.__name__, start_session=self.start_session, end_session=self.end_session, capital_base=self.capital_base, data_frequency=self.data_frequency, emission_rate=self.emission_rate, first_open=self.first_open, last_close=self.last_close, trading_calendar=self._trading_calendar)
zipline-trader
/zipline_trader-1.6.1-cp36-cp36m-win32.whl/zipline/finance/trading.py
trading.py
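Sketch of constructing SimulationParameters by hand. It assumes the trading_calendars package that this generation of zipline depends on provides get_calendar('NYSE'); adjust the calendar import to whatever your install actually ships.

import pandas as pd
from trading_calendars import get_calendar   # assumption: standard dependency here
from zipline.finance.trading import SimulationParameters

sim_params = SimulationParameters(
    start_session=pd.Timestamp('2017-01-03', tz='UTC'),
    end_session=pd.Timestamp('2017-06-30', tz='UTC'),
    trading_calendar=get_calendar('NYSE'),
    capital_base=50000,
    data_frequency='daily',
)

sim_params.first_open, sim_params.last_close   # first market open / last close
len(sim_params.sessions)                       # number of NYSE sessions in range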
from __future__ import division from abc import abstractmethod import math import numpy as np from pandas import isnull from six import with_metaclass from toolz import merge from zipline.assets import Equity, Future from zipline.errors import HistoryWindowStartsBeforeData from zipline.finance.constants import ROOT_SYMBOL_TO_ETA from zipline.finance.shared import AllowedAssetMarker, FinancialModelMeta from zipline.finance.transaction import create_transaction from zipline.utils.cache import ExpiringCache from zipline.utils.dummy import DummyMapping from zipline.utils.input_validation import (expect_bounded, expect_strictly_bounded) SELL = 1 << 0 BUY = 1 << 1 STOP = 1 << 2 LIMIT = 1 << 3 SQRT_252 = math.sqrt(252) DEFAULT_EQUITY_VOLUME_SLIPPAGE_BAR_LIMIT = 0.025 DEFAULT_FUTURE_VOLUME_SLIPPAGE_BAR_LIMIT = 0.05 class LiquidityExceeded(Exception): pass def fill_price_worse_than_limit_price(fill_price, order): """ Checks whether the fill price is worse than the order's limit price. Parameters ---------- fill_price: float The price to check. order: zipline.finance.order.Order The order whose limit price to check. Returns ------- bool: Whether the fill price is above the limit price (for a buy) or below the limit price (for a sell). """ if order.limit: # this is tricky! if an order with a limit price has reached # the limit price, we will try to fill the order. do not fill # these shares if the impacted price is worse than the limit # price. return early to avoid creating the transaction. # buy order is worse if the impacted price is greater than # the limit price. sell order is worse if the impacted price # is less than the limit price if (order.direction > 0 and fill_price > order.limit) or \ (order.direction < 0 and fill_price < order.limit): return True return False class SlippageModel(with_metaclass(FinancialModelMeta)): """ Abstract base class for slippage models. Slippage models are responsible for the rates and prices at which orders fill during a simulation. To implement a new slippage model, create a subclass of :class:`~zipline.finance.slippage.SlippageModel` and implement :meth:`process_order`. Methods ------- process_order(data, order) Attributes ---------- volume_for_bar : int Number of shares that have already been filled for the currently-filling asset in the current minute. This attribute is maintained automatically by the base class. It can be used by subclasses to keep track of the total amount filled if there are multiple open orders for a single asset. Notes ----- Subclasses that define their own constructors should call ``super(<subclass name>, self).__init__()`` before performing other initialization. """ # Asset types that are compatible with the given model. allowed_asset_types = (Equity, Future) def __init__(self): self._volume_for_bar = 0 @property def volume_for_bar(self): return self._volume_for_bar @abstractmethod def process_order(self, data, order): """ Compute the number of shares and price to fill for ``order`` in the current minute. Parameters ---------- data : zipline.protocol.BarData The data for the given bar. order : zipline.finance.order.Order The order to simulate. Returns ------- execution_price : float The price of the fill. execution_volume : int The number of shares that should be filled. Must be between ``0`` and ``order.amount - order.filled``. If the amount filled is less than the amount remaining, ``order`` will remain open and will be passed again to this method in the next minute. 
Raises ------ zipline.finance.slippage.LiquidityExceeded May be raised if no more orders should be processed for the current asset during the current bar. Notes ----- Before this method is called, :attr:`volume_for_bar` will be set to the number of shares that have already been filled for ``order.asset`` in the current minute. :meth:`process_order` is not called by the base class on bars for which there was no historical volume. """ raise NotImplementedError('process_order') def simulate(self, data, asset, orders_for_asset): self._volume_for_bar = 0 volume = data.current(asset, "volume") if volume == 0: return # can use the close price, since we verified there's volume in this # bar. price = data.current(asset, "close") # BEGIN # # Remove this block after fixing data to ensure volume always has # corresponding price. if isnull(price): return # END dt = data.current_dt for order in orders_for_asset: if order.open_amount == 0: continue order.check_triggers(price, dt) if not order.triggered: continue txn = None try: execution_price, execution_volume = \ self.process_order(data, order) if execution_price is not None: txn = create_transaction( order, data.current_dt, execution_price, execution_volume ) except LiquidityExceeded: break if txn: self._volume_for_bar += abs(txn.amount) yield order, txn def asdict(self): return self.__dict__ class NoSlippage(SlippageModel): """A slippage model where all orders fill immediately and completely at the current close price. Notes ----- This is primarily used for testing. """ @staticmethod def process_order(data, order): return ( data.current(order.asset, 'close'), order.amount, ) class EquitySlippageModel(with_metaclass(AllowedAssetMarker, SlippageModel)): """ Base class for slippage models which only support equities. """ allowed_asset_types = (Equity,) class FutureSlippageModel(with_metaclass(AllowedAssetMarker, SlippageModel)): """ Base class for slippage models which only support futures. """ allowed_asset_types = (Future,) class VolumeShareSlippage(SlippageModel): """ Model slippage as a quadratic function of percentage of historical volume. Orders to buy will be filled at:: price * (1 + price_impact * (volume_share ** 2)) Orders to sell will be filled at:: price * (1 - price_impact * (volume_share ** 2)) where ``price`` is the close price for the bar, and ``volume_share`` is the percentage of minutely volume filled, up to a max of ``volume_limit``. Parameters ---------- volume_limit : float, optional Maximum percent of historical volume that can fill in each bar. 0.5 means 50% of historical volume. 1.0 means 100%. Default is 0.025 (i.e., 2.5%). price_impact : float, optional Scaling coefficient for price impact. Larger values will result in more simulated price impact. Smaller values will result in less simulated price impact. Default is 0.1. 
""" def __init__(self, volume_limit=DEFAULT_EQUITY_VOLUME_SLIPPAGE_BAR_LIMIT, price_impact=0.1): super(VolumeShareSlippage, self).__init__() self.volume_limit = volume_limit self.price_impact = price_impact def __repr__(self): return """ {class_name}( volume_limit={volume_limit}, price_impact={price_impact}) """.strip().format(class_name=self.__class__.__name__, volume_limit=self.volume_limit, price_impact=self.price_impact) def process_order(self, data, order): volume = data.current(order.asset, "volume") max_volume = self.volume_limit * volume # price impact accounts for the total volume of transactions # created against the current minute bar remaining_volume = max_volume - self.volume_for_bar if remaining_volume < 1: # we can't fill any more transactions raise LiquidityExceeded() # the current order amount will be the min of the # volume available in the bar or the open amount. cur_volume = int(min(remaining_volume, abs(order.open_amount))) if cur_volume < 1: return None, None # tally the current amount into our total amount ordered. # total amount will be used to calculate price impact total_volume = self.volume_for_bar + cur_volume volume_share = min(total_volume / volume, self.volume_limit) price = data.current(order.asset, "close") # BEGIN # # Remove this block after fixing data to ensure volume always has # corresponding price. if isnull(price): return # END simulated_impact = volume_share ** 2 \ * math.copysign(self.price_impact, order.direction) \ * price impacted_price = price + simulated_impact if fill_price_worse_than_limit_price(impacted_price, order): return None, None return ( impacted_price, math.copysign(cur_volume, order.direction) ) class FixedSlippage(SlippageModel): """ Simple model assuming a fixed-size spread for all assets. Parameters ---------- spread : float, optional Size of the assumed spread for all assets. Orders to buy will be filled at ``close + (spread / 2)``. Orders to sell will be filled at ``close - (spread / 2)``. Notes ----- This model does not impose limits on the size of fills. An order for an asset will always be filled as soon as any trading activity occurs in the order's asset, even if the size of the order is greater than the historical volume. """ def __init__(self, spread=0.0): super(FixedSlippage, self).__init__() self.spread = spread def __repr__(self): return '{class_name}(spread={spread})'.format( class_name=self.__class__.__name__, spread=self.spread, ) def process_order(self, data, order): price = data.current(order.asset, "close") return ( price + (self.spread / 2.0 * order.direction), order.amount ) class MarketImpactBase(SlippageModel): """ Base class for slippage models which compute a simulated price impact according to a history lookback. """ NO_DATA_VOLATILITY_SLIPPAGE_IMPACT = 10.0 / 10000 def __init__(self): super(MarketImpactBase, self).__init__() self._window_data_cache = ExpiringCache() @abstractmethod def get_txn_volume(self, data, order): """ Return the number of shares we would like to order in this minute. Parameters ---------- data : BarData order : Order Return ------ int : the number of shares """ raise NotImplementedError('get_txn_volume') @abstractmethod def get_simulated_impact(self, order, current_price, current_volume, txn_volume, mean_volume, volatility): """ Calculate simulated price impact. Parameters ---------- order : The order being processed. current_price : Current price of the asset being ordered. current_volume : Volume of the asset being ordered for the current bar. 
txn_volume : Number of shares/contracts being ordered. mean_volume : Trailing ADV of the asset. volatility : Annualized daily volatility of returns. Return ------ int : impact on the current price. """ raise NotImplementedError('get_simulated_impact') def process_order(self, data, order): if order.open_amount == 0: return None, None minute_data = data.current(order.asset, ['volume', 'high', 'low']) mean_volume, volatility = self._get_window_data(data, order.asset, 20) # Price to use is the average of the minute bar's open and close. price = np.mean([minute_data['high'], minute_data['low']]) volume = minute_data['volume'] if not volume: return None, None txn_volume = int( min(self.get_txn_volume(data, order), abs(order.open_amount)) ) # If the computed transaction volume is zero or a decimal value, 'int' # will round it down to zero. In that case just bail. if txn_volume == 0: return None, None if mean_volume == 0 or np.isnan(volatility): # If this is the first day the contract exists or there is no # volume history, default to a conservative estimate of impact. simulated_impact = price * self.NO_DATA_VOLATILITY_SLIPPAGE_IMPACT else: simulated_impact = self.get_simulated_impact( order=order, current_price=price, current_volume=volume, txn_volume=txn_volume, mean_volume=mean_volume, volatility=volatility, ) impacted_price = \ price + math.copysign(simulated_impact, order.direction) if fill_price_worse_than_limit_price(impacted_price, order): return None, None return impacted_price, math.copysign(txn_volume, order.direction) def _get_window_data(self, data, asset, window_length): """ Internal utility method to return the trailing mean volume over the past 'window_length' days, and volatility of close prices for a specific asset. Parameters ---------- data : The BarData from which to fetch the daily windows. asset : The Asset whose data we are fetching. window_length : Number of days of history used to calculate the mean volume and close price volatility. Returns ------- (mean volume, volatility) """ try: values = self._window_data_cache.get(asset, data.current_session) except KeyError: try: # Add a day because we want 'window_length' complete days, # excluding the current day. volume_history = data.history( asset, 'volume', window_length + 1, '1d', ) close_history = data.history( asset, 'close', window_length + 1, '1d', ) except HistoryWindowStartsBeforeData: # If there is not enough data to do a full history call, return # values as if there was no data. return 0, np.NaN # Exclude the first value of the percent change array because it is # always just NaN. close_volatility = close_history[:-1].pct_change()[1:].std( skipna=False, ) values = { 'volume': volume_history[:-1].mean(), 'close': close_volatility * SQRT_252, } self._window_data_cache.set(asset, values, data.current_session) return values['volume'], values['close'] class VolatilityVolumeShare(MarketImpactBase): """ Model slippage for futures contracts according to the following formula: new_price = price + (price * MI / 10000), where 'MI' is market impact, which is defined as: MI = eta * sigma * sqrt(psi) - ``eta`` is a constant which varies by root symbol. - ``sigma`` is 20-day annualized volatility. - ``psi`` is the volume traded in the given bar divided by 20-day ADV. Parameters ---------- volume_limit : float Maximum percentage (as a decimal) of a bar's total volume that can be traded. eta : float or dict Constant used in the market impact formula. If given a float, the eta for all futures contracts is the same. 
If given a dictionary, it must map root symbols to the eta for contracts of that symbol. """ NO_DATA_VOLATILITY_SLIPPAGE_IMPACT = 7.5 / 10000 allowed_asset_types = (Future,) def __init__(self, volume_limit, eta=ROOT_SYMBOL_TO_ETA): super(VolatilityVolumeShare, self).__init__() self.volume_limit = volume_limit # If 'eta' is a constant, use a dummy mapping to treat it as a # dictionary that always returns the same value. # NOTE: This dictionary does not handle unknown root symbols, so it may # be worth revisiting this behavior. if isinstance(eta, (int, float)): self._eta = DummyMapping(float(eta)) else: # Eta is a dictionary. If the user's dictionary does not provide a # value for a certain contract, fall back on the pre-defined eta # values per root symbol. self._eta = merge(ROOT_SYMBOL_TO_ETA, eta) def __repr__(self): if isinstance(self._eta, DummyMapping): # Eta is a constant, so extract it. eta = self._eta['dummy key'] else: eta = '<varies>' return '{class_name}(volume_limit={volume_limit}, eta={eta})'.format( class_name=self.__class__.__name__, volume_limit=self.volume_limit, eta=eta, ) def get_simulated_impact(self, order, current_price, current_volume, txn_volume, mean_volume, volatility): eta = self._eta[order.asset.root_symbol] psi = txn_volume / mean_volume market_impact = eta * volatility * math.sqrt(psi) # We divide by 10,000 because this model computes to basis points. # To convert from bps to % we need to divide by 100, then again to # convert from % to fraction. return (current_price * market_impact) / 10000 def get_txn_volume(self, data, order): volume = data.current(order.asset, 'volume') return volume * self.volume_limit class FixedBasisPointsSlippage(SlippageModel): """ Model slippage as a fixed percentage difference from historical minutely close price, limiting the size of fills to a fixed percentage of historical minutely volume. Orders to buy are filled at:: historical_price * (1 + (basis_points * 0.0001)) Orders to sell are filled at:: historical_price * (1 - (basis_points * 0.0001)) Fill sizes are capped at:: historical_volume * volume_limit Parameters ---------- basis_points : float, optional Number of basis points of slippage to apply for each fill. Default is 5 basis points. volume_limit : float, optional Fraction of trading volume that can be filled each minute. Default is 10% of trading volume. Notes ----- - A basis point is one one-hundredth of a percent. - This class, default-constructed, is zipline's default slippage model for equities. """ @expect_bounded( basis_points=(0, None), __funcname='FixedBasisPointsSlippage', ) @expect_strictly_bounded( volume_limit=(0, None), __funcname='FixedBasisPointsSlippage', ) def __init__(self, basis_points=5.0, volume_limit=0.1): super(FixedBasisPointsSlippage, self).__init__() self.basis_points = basis_points self.percentage = self.basis_points / 10000.0 self.volume_limit = volume_limit def __repr__(self): return """ {class_name}( basis_points={basis_points}, volume_limit={volume_limit}, ) """.strip().format( class_name=self.__class__.__name__, basis_points=self.basis_points, volume_limit=self.volume_limit, ) def process_order(self, data, order): volume = data.current(order.asset, "volume") max_volume = int(self.volume_limit * volume) price = data.current(order.asset, "close") shares_to_fill = min(abs(order.open_amount), max_volume - self.volume_for_bar) if shares_to_fill == 0: raise LiquidityExceeded() return ( price + price * (self.percentage * order.direction), shares_to_fill * order.direction )
zipline-trader
/zipline_trader-1.6.1-cp36-cp36m-win32.whl/zipline/finance/slippage.py
slippage.py
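The fill-price arithmetic of the two equity slippage models above, worked by hand in plain Python (no BarData needed; numbers are illustrative only).

# FixedBasisPointsSlippage with the defaults (5 bps, 10% volume cap): a buy
# against a $100.00 close on a 10,000-share bar fills at $100.05, and at most
# 1,000 shares of it can fill in that bar.
close, bar_volume, open_amount = 100.00, 10_000, 2_500
fill_price = close * (1 + 5.0 / 10000)                  # 100.05
max_fill = min(open_amount, int(0.1 * bar_volume))      # 1000 shares

# VolumeShareSlippage (volume_limit=0.025, price_impact=0.1): an order that
# consumes the full 2.5% cap of the bar's volume moves a buy's fill price by
# price_impact * volume_share ** 2.
volume_share = 0.025
impacted_price = close * (1 + 0.1 * volume_share ** 2)  # 100.00625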
import math import uuid from six import text_type import zipline.protocol as zp from zipline.assets import Asset from zipline.utils.enum import enum from zipline.utils.input_validation import expect_types ORDER_STATUS = enum( 'OPEN', 'FILLED', 'CANCELLED', 'REJECTED', 'HELD', ) SELL = 1 << 0 BUY = 1 << 1 STOP = 1 << 2 LIMIT = 1 << 3 ORDER_FIELDS_TO_IGNORE = {'type', 'direction', '_status', 'asset'} class Order(object): # using __slots__ to save on memory usage. Simulations can create many # Order objects and we keep them all in memory, so it's worthwhile trying # to cut down on the memory footprint of this object. __slots__ = ["id", "dt", "reason", "created", "asset", "amount", "filled", "commission", "_status", "stop", "limit", "stop_reached", "limit_reached", "direction", "type", "broker_order_id"] @expect_types(asset=Asset) def __init__(self, dt, asset, amount, stop=None, limit=None, filled=0, commission=0, id=None): """ @dt - datetime.datetime that the order was placed @asset - asset for the order. @amount - the number of shares to buy/sell a positive sign indicates a buy a negative sign indicates a sell @filled - how many shares of the order have been filled so far """ # get a string representation of the uuid. self.id = self.make_id() if id is None else id self.dt = dt self.reason = None self.created = dt self.asset = asset self.amount = amount self.filled = filled self.commission = commission self._status = ORDER_STATUS.OPEN self.stop = stop self.limit = limit self.stop_reached = False self.limit_reached = False self.direction = math.copysign(1, self.amount) self.type = zp.DATASOURCE_TYPE.ORDER self.broker_order_id = None @staticmethod def make_id(): return uuid.uuid4().hex def to_dict(self): dct = {name: getattr(self, name) for name in self.__slots__ if name not in ORDER_FIELDS_TO_IGNORE} if self.broker_order_id is None: del dct['broker_order_id'] # Adding 'sid' for backwards compatibility with downstream consumers. dct['sid'] = self.asset dct['status'] = self.status return dct @property def sid(self): # For backwards compatibility because we pass this object to # custom slippage models. return self.asset def to_api_obj(self): pydict = self.to_dict() obj = zp.Order(initial_values=pydict) return obj def check_triggers(self, price, dt): """ Update internal state based on price triggers and the trade event's price. """ stop_reached, limit_reached, sl_stop_reached = \ self.check_order_triggers(price) if (stop_reached, limit_reached) \ != (self.stop_reached, self.limit_reached): self.dt = dt self.stop_reached = stop_reached self.limit_reached = limit_reached if sl_stop_reached: # Change the STOP LIMIT order into a LIMIT order self.stop = None def check_order_triggers(self, current_price): """ Given an order and a trade event, return a tuple of (stop_reached, limit_reached). For market orders, will return (False, False). For stop orders, limit_reached will always be False. For limit orders, stop_reached will always be False. For stop limit orders a Boolean is returned to flag that the stop has been reached. Orders that have been triggered already (price targets reached), the order's current values are returned. 
""" if self.triggered: return (self.stop_reached, self.limit_reached, False) stop_reached = False limit_reached = False sl_stop_reached = False order_type = 0 if self.amount > 0: order_type |= BUY else: order_type |= SELL if self.stop is not None: order_type |= STOP if self.limit is not None: order_type |= LIMIT if order_type == BUY | STOP | LIMIT: if current_price >= self.stop: sl_stop_reached = True if current_price <= self.limit: limit_reached = True elif order_type == SELL | STOP | LIMIT: if current_price <= self.stop: sl_stop_reached = True if current_price >= self.limit: limit_reached = True elif order_type == BUY | STOP: if current_price >= self.stop: stop_reached = True elif order_type == SELL | STOP: if current_price <= self.stop: stop_reached = True elif order_type == BUY | LIMIT: if current_price <= self.limit: limit_reached = True elif order_type == SELL | LIMIT: # This is a SELL LIMIT order if current_price >= self.limit: limit_reached = True return (stop_reached, limit_reached, sl_stop_reached) def handle_split(self, ratio): # update the amount, limit_price, and stop_price # by the split's ratio # info here: http://finra.complinet.com/en/display/display_plain.html? # rbid=2403&element_id=8950&record_id=12208&print=1 # new_share_amount = old_share_amount / ratio # new_price = old_price * ratio self.amount = int(self.amount / ratio) if self.limit is not None: self.limit = round(self.limit * ratio, 2) if self.stop is not None: self.stop = round(self.stop * ratio, 2) @property def status(self): if not self.open_amount: return ORDER_STATUS.FILLED elif self._status == ORDER_STATUS.HELD and self.filled: return ORDER_STATUS.OPEN else: return self._status @status.setter def status(self, status): self._status = status def cancel(self): self.status = ORDER_STATUS.CANCELLED def reject(self, reason=''): self.status = ORDER_STATUS.REJECTED self.reason = reason def hold(self, reason=''): self.status = ORDER_STATUS.HELD self.reason = reason @property def open(self): return self.status in [ORDER_STATUS.OPEN, ORDER_STATUS.HELD] @property def triggered(self): """ For a market order, True. For a stop order, True IFF stop_reached. For a limit order, True IFF limit_reached. """ if self.stop is not None and not self.stop_reached: return False if self.limit is not None and not self.limit_reached: return False return True @property def open_amount(self): return self.amount - self.filled def __repr__(self): """ String representation for this object. """ return "Order(%s)" % self.to_dict().__repr__() def __unicode__(self): """ Unicode representation for this object. """ return text_type(repr(self))
zipline-trader :: zipline/finance/order.py
from __future__ import division from math import copysign import numpy as np import logbook from zipline.assets import Future import zipline.protocol as zp log = logbook.Logger('Performance') class Position(object): __slots__ = 'inner_position', 'protocol_position' def __init__(self, asset, amount=0, cost_basis=0.0, last_sale_price=0.0, last_sale_date=None): inner = zp.InnerPosition( asset=asset, amount=amount, cost_basis=cost_basis, last_sale_price=last_sale_price, last_sale_date=last_sale_date, ) object.__setattr__(self, 'inner_position', inner) object.__setattr__(self, 'protocol_position', zp.Position(inner)) def __getattr__(self, attr): return getattr(self.inner_position, attr) def __setattr__(self, attr, value): setattr(self.inner_position, attr, value) def earn_dividend(self, dividend): """ Register the number of shares we held at this dividend's ex date so that we can pay out the correct amount on the dividend's pay date. """ return { 'amount': self.amount * dividend.amount } def earn_stock_dividend(self, stock_dividend): """ Register the number of shares we held at this dividend's ex date so that we can pay out the correct amount on the dividend's pay date. """ return { 'payment_asset': stock_dividend.payment_asset, 'share_count': np.floor( self.amount * float(stock_dividend.ratio) ) } def handle_split(self, asset, ratio): """ Update the position by the split ratio, and return the resulting fractional share that will be converted into cash. Returns the unused cash. """ if self.asset != asset: raise Exception("updating split with the wrong asset!") # adjust the # of shares by the ratio # (if we had 100 shares, and the ratio is 3, # we now have 33 shares) # (old_share_count / ratio = new_share_count) # (old_price * ratio = new_price) # e.g., 33.333 raw_share_count = self.amount / float(ratio) # e.g., 33 full_share_count = np.floor(raw_share_count) # e.g., 0.333 fractional_share_count = raw_share_count - full_share_count # adjust the cost basis to the nearest cent, e.g., 60.0 new_cost_basis = round(self.cost_basis * ratio, 2) self.cost_basis = new_cost_basis self.amount = full_share_count return_cash = round(float(fractional_share_count * new_cost_basis), 2) log.info("after split: " + str(self)) log.info("returning cash: " + str(return_cash)) # return the leftover cash, which will be converted into cash # (rounded to the nearest cent) return return_cash def update(self, txn): if self.asset != txn.asset: raise Exception('updating position with txn for a ' 'different asset') total_shares = self.amount + txn.amount if total_shares == 0: self.cost_basis = 0.0 else: prev_direction = copysign(1, self.amount) txn_direction = copysign(1, txn.amount) if prev_direction != txn_direction: # we're covering a short or closing a position if abs(txn.amount) > abs(self.amount): # we've closed the position and gone short # or covered the short position and gone long self.cost_basis = txn.price else: prev_cost = self.cost_basis * self.amount txn_cost = txn.amount * txn.price total_cost = prev_cost + txn_cost self.cost_basis = total_cost / total_shares # Update the last sale price if txn is # best data we have so far if self.last_sale_date is None or txn.dt > self.last_sale_date: self.last_sale_price = txn.price self.last_sale_date = txn.dt self.amount = total_shares def adjust_commission_cost_basis(self, asset, cost): """ A note about cost-basis in zipline: all positions are considered to share a cost basis, even if they were executed in different transactions with different commission costs, different 
prices, etc. Due to limitations about how zipline handles positions, zipline will currently spread an externally-delivered commission charge across all shares in a position. """ if asset != self.asset: raise Exception('Updating a commission for a different asset?') if cost == 0.0: return # If we no longer hold this position, there is no cost basis to # adjust. if self.amount == 0: return # We treat cost basis as the share price where we have broken even. # For longs, commissions cause a relatively straight forward increase # in the cost basis. # # For shorts, you actually want to decrease the cost basis because you # break even and earn a profit when the share price decreases. # # Shorts are represented as having a negative `amount`. # # The multiplication and division by `amount` cancel out leaving the # cost_basis positive, while subtracting the commission. prev_cost = self.cost_basis * self.amount if isinstance(asset, Future): cost_to_use = cost / asset.price_multiplier else: cost_to_use = cost new_cost = prev_cost + cost_to_use self.cost_basis = new_cost / self.amount def __repr__(self): template = "asset: {asset}, amount: {amount}, cost_basis: {cost_basis}, \ last_sale_price: {last_sale_price}" return template.format( asset=self.asset, amount=self.amount, cost_basis=self.cost_basis, last_sale_price=self.last_sale_price ) def to_dict(self): """ Creates a dictionary representing the state of this position. Returns a dict object of the form: """ return { 'sid': self.asset, 'amount': self.amount, 'cost_basis': self.cost_basis, 'last_sale_price': self.last_sale_price }
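
# --- Hedged usage sketch (not part of the original module) -------------------
# Illustrates the cost-basis bookkeeping above. ``FakeTxn`` is a stand-in that
# carries only the four attributes ``Position.update`` actually reads (asset,
# amount, price, dt); a real simulation passes zipline Transaction objects.
# ``asset`` is assumed to be a real zipline Equity created elsewhere.
from collections import namedtuple

import pandas as pd

from zipline.finance.position import Position

FakeTxn = namedtuple('FakeTxn', ['asset', 'amount', 'price', 'dt'])


def demo_cost_basis(asset):
    dt = pd.Timestamp('2021-01-04', tz='UTC')
    pos = Position(asset)

    # Two buys at different prices average into a single cost basis.
    pos.update(FakeTxn(asset, 100, 10.0, dt))
    pos.update(FakeTxn(asset, 100, 12.0, dt + pd.Timedelta(minutes=1)))
    assert pos.amount == 200
    assert pos.cost_basis == 11.0       # (100 * 10 + 100 * 12) / 200

    # A commission is spread across the whole position, nudging the basis up.
    pos.adjust_commission_cost_basis(asset, 20.0)
    assert pos.cost_basis == 11.1       # (200 * 11 + 20) / 200
    return pos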
zipline-trader :: zipline/finance/position.py
from __future__ import division from collections import namedtuple, OrderedDict from functools import partial from math import isnan import logbook import numpy as np import pandas as pd from six import iteritems, itervalues, PY2 from zipline.assets import Future from zipline.finance.transaction import Transaction import zipline.protocol as zp from zipline.utils.sentinel import sentinel from .position import Position from ._finance_ext import ( PositionStats, calculate_position_tracker_stats, update_position_last_sale_prices, ) log = logbook.Logger('Performance') class PositionTracker(object): """The current state of the positions held. Parameters ---------- data_frequency : {'daily', 'minute'} The data frequency of the simulation. """ def __init__(self, data_frequency): self.positions = OrderedDict() self._unpaid_dividends = {} self._unpaid_stock_dividends = {} self._positions_store = zp.Positions() self.data_frequency = data_frequency # cache the stats until something alters our positions self._dirty_stats = True self._stats = PositionStats.new() def update_position(self, asset, amount=None, last_sale_price=None, last_sale_date=None, cost_basis=None): self._dirty_stats = True if asset not in self.positions: position = Position(asset) self.positions[asset] = position else: position = self.positions[asset] if amount is not None: position.amount = amount if last_sale_price is not None: position.last_sale_price = last_sale_price if last_sale_date is not None: position.last_sale_date = last_sale_date if cost_basis is not None: position.cost_basis = cost_basis if position.amount == 0: del self.positions[asset] try: # if this position exists in our user-facing dictionary, # remove it as well. del self._positions_store[asset] except KeyError: pass def execute_transaction(self, txn): self._dirty_stats = True asset = txn.asset if asset not in self.positions: position = Position(asset) self.positions[asset] = position else: position = self.positions[asset] position.update(txn) if position.amount == 0: del self.positions[asset] try: # if this position exists in our user-facing dictionary, # remove it as well. del self._positions_store[asset] except KeyError: pass def handle_commission(self, asset, cost): # Adjust the cost basis of the stock if we own it if asset in self.positions: self._dirty_stats = True self.positions[asset].adjust_commission_cost_basis(asset, cost) def handle_splits(self, splits): """Processes a list of splits by modifying any positions as needed. Parameters ---------- splits: list A list of splits. Each split is a tuple of (asset, ratio). Returns ------- int: The leftover cash from fractional shares after modifying each position. """ total_leftover_cash = 0 for asset, ratio in splits: if asset in self.positions: self._dirty_stats = True # Make the position object handle the split. It returns the # leftover cash from a fractional share, if there is any. position = self.positions[asset] leftover_cash = position.handle_split(asset, ratio) total_leftover_cash += leftover_cash return total_leftover_cash def earn_dividends(self, cash_dividends, stock_dividends): """Given a list of dividends whose ex_dates are all the next trading day, calculate and store the cash and/or stock payments to be paid on each dividend's pay date. Parameters ---------- cash_dividends : iterable of (asset, amount, pay_date) namedtuples stock_dividends: iterable of (asset, payment_asset, ratio, pay_date) namedtuples. 
""" for cash_dividend in cash_dividends: self._dirty_stats = True # only mark dirty if we pay a dividend # Store the earned dividends so that they can be paid on the # dividends' pay_dates. div_owed = self.positions[cash_dividend.asset].earn_dividend( cash_dividend, ) try: self._unpaid_dividends[cash_dividend.pay_date].append(div_owed) except KeyError: self._unpaid_dividends[cash_dividend.pay_date] = [div_owed] for stock_dividend in stock_dividends: self._dirty_stats = True # only mark dirty if we pay a dividend div_owed = self.positions[ stock_dividend.asset ].earn_stock_dividend(stock_dividend) try: self._unpaid_stock_dividends[stock_dividend.pay_date].append( div_owed, ) except KeyError: self._unpaid_stock_dividends[stock_dividend.pay_date] = [ div_owed, ] def pay_dividends(self, next_trading_day): """ Returns a cash payment based on the dividends that should be paid out according to the accumulated bookkeeping of earned, unpaid, and stock dividends. """ net_cash_payment = 0.0 try: payments = self._unpaid_dividends[next_trading_day] # Mark these dividends as paid by dropping them from our unpaid del self._unpaid_dividends[next_trading_day] except KeyError: payments = [] # representing the fact that we're required to reimburse the owner of # the stock for any dividends paid while borrowing. for payment in payments: net_cash_payment += payment['amount'] # Add stock for any stock dividends paid. Again, the values here may # be negative in the case of short positions. try: stock_payments = self._unpaid_stock_dividends[next_trading_day] except KeyError: stock_payments = [] for stock_payment in stock_payments: payment_asset = stock_payment['payment_asset'] share_count = stock_payment['share_count'] # note we create a Position for stock dividend if we don't # already own the asset if payment_asset in self.positions: position = self.positions[payment_asset] else: position = self.positions[payment_asset] = Position( payment_asset, ) position.amount += share_count return net_cash_payment def maybe_create_close_position_transaction(self, asset, dt, data_portal): if not self.positions.get(asset): return None amount = self.positions.get(asset).amount price = data_portal.get_spot_value( asset, 'price', dt, self.data_frequency) # Get the last traded price if price is no longer available if isnan(price): price = self.positions.get(asset).last_sale_price return Transaction( asset=asset, amount=-amount, dt=dt, price=price, order_id=None, ) def get_positions(self): positions = self._positions_store for asset, pos in iteritems(self.positions): # Adds the new position if we didn't have one before, or overwrite # one we have currently positions[asset] = pos.protocol_position return positions def get_position_list(self): return [ pos.to_dict() for asset, pos in iteritems(self.positions) if pos.amount != 0 ] def sync_last_sale_prices(self, dt, data_portal, handle_non_market_minutes=False): self._dirty_stats = True if handle_non_market_minutes: previous_minute = data_portal.trading_calendar.previous_minute(dt) get_price = partial( data_portal.get_adjusted_value, field='price', dt=previous_minute, perspective_dt=dt, data_frequency=self.data_frequency, ) else: get_price = partial( data_portal.get_scalar_asset_spot_value, field='price', dt=dt, data_frequency=self.data_frequency, ) update_position_last_sale_prices(self.positions, get_price, dt) @property def stats(self): """The current status of the positions. Returns ------- stats : PositionStats The current stats position stats. 
Notes ----- This is cached, repeated access will not recompute the stats until the stats may have changed. """ if self._dirty_stats: calculate_position_tracker_stats(self.positions, self._stats) self._dirty_stats = False return self._stats if PY2: def move_to_end(ordered_dict, key, last=False): if last: ordered_dict[key] = ordered_dict.pop(key) else: # please don't do this in python 2 ;_; new_first_element = ordered_dict.pop(key) # the items (without the given key) in the order they were inserted items = ordered_dict.items() # reset the ordered_dict to re-insert in the new order ordered_dict.clear() ordered_dict[key] = new_first_element # add the items back in their original order ordered_dict.update(items) else: move_to_end = OrderedDict.move_to_end PeriodStats = namedtuple( 'PeriodStats', 'net_liquidation gross_leverage net_leverage', ) not_overridden = sentinel( 'not_overridden', 'Mark that an account field has not been overridden', ) class Ledger(object): """The ledger tracks all orders and transactions as well as the current state of the portfolio and positions. Attributes ---------- portfolio : zipline.protocol.Portfolio The updated portfolio being managed. account : zipline.protocol.Account The updated account being managed. position_tracker : PositionTracker The current set of positions. todays_returns : float The current day's returns. In minute emission mode, this is the partial day's returns. In daily emission mode, this is ``daily_returns[session]``. daily_returns_series : pd.Series The daily returns series. Days that have not yet finished will hold a value of ``np.nan``. daily_returns_array : np.ndarray The daily returns as an ndarray. Days that have not yet finished will hold a value of ``np.nan``. """ def __init__(self, trading_sessions, capital_base, data_frequency): if len(trading_sessions): start = trading_sessions[0] else: start = None # Have some fields of the portfolio changed? This should be accessed # through ``self._dirty_portfolio`` self.__dirty_portfolio = False self._immutable_portfolio = zp.Portfolio(start, capital_base) self._portfolio = zp.MutableView(self._immutable_portfolio) self.daily_returns_series = pd.Series( np.nan, index=trading_sessions, ) # Get a view into the storage of the returns series. Metrics # can access this directly in minute mode for performance reasons. self.daily_returns_array = self.daily_returns_series.values self._previous_total_returns = 0 # this is a component of the cache key for the account self._position_stats = None # Have some fields of the account changed? self._dirty_account = True self._immutable_account = zp.Account() self._account = zp.MutableView(self._immutable_account) # The broker blotter can override some fields on the account. This is # way to tangled up at the moment but we aren't fixing it today. self._account_overrides = {} self.position_tracker = PositionTracker(data_frequency) self._processed_transactions = {} self._orders_by_modified = {} self._orders_by_id = OrderedDict() # Keyed by asset, the previous last sale price of positions with # payouts on price differences, e.g. Futures. # # This dt is not the previous minute to the minute for which the # calculation is done, but the last sale price either before the period # start, or when the price at execution. 
self._payout_last_sale_prices = {} @property def todays_returns(self): # compute today's returns in returns space instead of portfolio-value # space to work even when we have capital changes return ( (self.portfolio.returns + 1) / (self._previous_total_returns + 1) - 1 ) @property def _dirty_portfolio(self): return self.__dirty_portfolio @_dirty_portfolio.setter def _dirty_portfolio(self, value): if value: # marking the portfolio as dirty also marks the account as dirty self.__dirty_portfolio = self._dirty_account = value else: self.__dirty_portfolio = value def start_of_session(self, session_label): self._processed_transactions.clear() self._orders_by_modified.clear() self._orders_by_id.clear() # Save the previous day's total returns so that ``todays_returns`` # produces returns since yesterday. This does not happen in # ``end_of_session`` because we want ``todays_returns`` to produce the # correct value in metric ``end_of_session`` handlers. self._previous_total_returns = self.portfolio.returns def end_of_bar(self, session_ix): # make daily_returns hold the partial returns, this saves many # metrics from doing a concat and copying all of the previous # returns self.daily_returns_array[session_ix] = self.todays_returns def end_of_session(self, session_ix): # save the daily returns time-series self.daily_returns_series[session_ix] = self.todays_returns def sync_last_sale_prices(self, dt, data_portal, handle_non_market_minutes=False): self.position_tracker.sync_last_sale_prices( dt, data_portal, handle_non_market_minutes=handle_non_market_minutes, ) self._dirty_portfolio = True @staticmethod def _calculate_payout(multiplier, amount, old_price, price): return (price - old_price) * multiplier * amount def _cash_flow(self, amount): self._dirty_portfolio = True p = self._portfolio p.cash_flow += amount p.cash += amount def process_transaction(self, transaction): """Add a transaction to ledger, updating the current state as needed. Parameters ---------- transaction : zp.Transaction The transaction to execute. """ asset = transaction.asset if isinstance(asset, Future): try: old_price = self._payout_last_sale_prices[asset] except KeyError: self._payout_last_sale_prices[asset] = transaction.price else: position = self.position_tracker.positions[asset] amount = position.amount price = transaction.price self._cash_flow( self._calculate_payout( asset.price_multiplier, amount, old_price, price, ), ) if amount + transaction.amount == 0: del self._payout_last_sale_prices[asset] else: self._payout_last_sale_prices[asset] = price else: self._cash_flow(-(transaction.price * transaction.amount)) self.position_tracker.execute_transaction(transaction) # we only ever want the dict form from now on transaction_dict = transaction.to_dict() try: self._processed_transactions[transaction.dt].append( transaction_dict, ) except KeyError: self._processed_transactions[transaction.dt] = [transaction_dict] def process_splits(self, splits): """Processes a list of splits by modifying any positions as needed. Parameters ---------- splits: list[(Asset, float)] A list of splits. Each split is a tuple of (asset, ratio). """ leftover_cash = self.position_tracker.handle_splits(splits) if leftover_cash > 0: self._cash_flow(leftover_cash) def process_order(self, order): """Keep track of an order that was placed. Parameters ---------- order : zp.Order The order to record. 
""" try: dt_orders = self._orders_by_modified[order.dt] except KeyError: self._orders_by_modified[order.dt] = OrderedDict([ (order.id, order), ]) self._orders_by_id[order.id] = order else: self._orders_by_id[order.id] = dt_orders[order.id] = order # to preserve the order of the orders by modified date move_to_end(dt_orders, order.id, last=True) move_to_end(self._orders_by_id, order.id, last=True) def process_commission(self, commission): """Process the commission. Parameters ---------- commission : zp.Event The commission being paid. """ asset = commission['asset'] cost = commission['cost'] self.position_tracker.handle_commission(asset, cost) self._cash_flow(-cost) def close_position(self, asset, dt, data_portal): txn = self.position_tracker.maybe_create_close_position_transaction( asset, dt, data_portal, ) if txn is not None: self.process_transaction(txn) def process_dividends(self, next_session, asset_finder, adjustment_reader): """Process dividends for the next session. This will earn us any dividends whose ex-date is the next session as well as paying out any dividends whose pay-date is the next session """ position_tracker = self.position_tracker # Earn dividends whose ex_date is the next trading day. We need to # check if we own any of these stocks so we know to pay them out when # the pay date comes. held_sids = set(position_tracker.positions) if held_sids: cash_dividends = adjustment_reader.get_dividends_with_ex_date( held_sids, next_session, asset_finder ) stock_dividends = ( adjustment_reader.get_stock_dividends_with_ex_date( held_sids, next_session, asset_finder ) ) # Earning a dividend just marks that we need to get paid out on # the dividend's pay-date. This does not affect our cash yet. position_tracker.earn_dividends( cash_dividends, stock_dividends, ) # Pay out the dividends whose pay-date is the next session. This does # affect out cash. self._cash_flow( position_tracker.pay_dividends( next_session, ), ) def capital_change(self, change_amount): self.update_portfolio() portfolio = self._portfolio # we update the cash and total value so this is not dirty portfolio.portfolio_value += change_amount portfolio.cash += change_amount def transactions(self, dt=None): """Retrieve the dict-form of all of the transactions in a given bar or for the whole simulation. Parameters ---------- dt : pd.Timestamp or None, optional The particular datetime to look up transactions for. If not passed, or None is explicitly passed, all of the transactions will be returned. Returns ------- transactions : list[dict] The transaction information. """ if dt is None: # flatten the by-day transactions return [ txn for by_day in itervalues(self._processed_transactions) for txn in by_day ] return self._processed_transactions.get(dt, []) def orders(self, dt=None): """Retrieve the dict-form of all of the orders in a given bar or for the whole simulation. Parameters ---------- dt : pd.Timestamp or None, optional The particular datetime to look up order for. If not passed, or None is explicitly passed, all of the orders will be returned. Returns ------- orders : list[dict] The order information. 
""" if dt is None: # orders by id is already flattened return [o.to_dict() for o in itervalues(self._orders_by_id)] return [ o.to_dict() for o in itervalues(self._orders_by_modified.get(dt, {})) ] @property def positions(self): return self.position_tracker.get_position_list() def _get_payout_total(self, positions): calculate_payout = self._calculate_payout payout_last_sale_prices = self._payout_last_sale_prices total = 0 for asset, old_price in iteritems(payout_last_sale_prices): position = positions[asset] payout_last_sale_prices[asset] = price = position.last_sale_price amount = position.amount total += calculate_payout( asset.price_multiplier, amount, old_price, price, ) return total def update_portfolio(self): """Force a computation of the current portfolio state. """ if not self._dirty_portfolio: return portfolio = self._portfolio pt = self.position_tracker portfolio.positions = pt.get_positions() position_stats = pt.stats portfolio.positions_value = position_value = ( position_stats.net_value ) portfolio.positions_exposure = position_stats.net_exposure self._cash_flow(self._get_payout_total(pt.positions)) start_value = portfolio.portfolio_value # update the new starting value portfolio.portfolio_value = end_value = portfolio.cash + position_value pnl = end_value - start_value if start_value != 0: returns = pnl / start_value else: returns = 0.0 portfolio.pnl += pnl portfolio.returns = ( (1 + portfolio.returns) * (1 + returns) - 1 ) # the portfolio has been fully synced self._dirty_portfolio = False @property def portfolio(self): """Compute the current portfolio. Notes ----- This is cached, repeated access will not recompute the portfolio until the portfolio may have changed. """ self.update_portfolio() return self._immutable_portfolio def calculate_period_stats(self): position_stats = self.position_tracker.stats portfolio_value = self.portfolio.portfolio_value if portfolio_value == 0: gross_leverage = net_leverage = np.inf else: gross_leverage = position_stats.gross_exposure / portfolio_value net_leverage = position_stats.net_exposure / portfolio_value return portfolio_value, gross_leverage, net_leverage def override_account_fields(self, settled_cash=not_overridden, accrued_interest=not_overridden, buying_power=not_overridden, equity_with_loan=not_overridden, total_positions_value=not_overridden, total_positions_exposure=not_overridden, regt_equity=not_overridden, regt_margin=not_overridden, initial_margin_requirement=not_overridden, maintenance_margin_requirement=not_overridden, available_funds=not_overridden, excess_liquidity=not_overridden, cushion=not_overridden, day_trades_remaining=not_overridden, leverage=not_overridden, net_leverage=not_overridden, net_liquidation=not_overridden): """Override fields on ``self.account``. """ # mark that the portfolio is dirty to override the fields again self._dirty_account = True self._account_overrides = kwargs = { k: v for k, v in locals().items() if v is not not_overridden } del kwargs['self'] @property def account(self): if self._dirty_account: portfolio = self.portfolio account = self._account # If no attribute is found in the ``_account_overrides`` resort to # the following default values. If an attribute is found use the # existing value. For instance, a broker may provide updates to # these attributes. In this case we do not want to over write the # broker values with the default values. 
account.settled_cash = portfolio.cash account.accrued_interest = 0.0 account.buying_power = np.inf account.equity_with_loan = portfolio.portfolio_value account.total_positions_value = ( portfolio.portfolio_value - portfolio.cash ) account.total_positions_exposure = ( portfolio.positions_exposure ) account.regt_equity = portfolio.cash account.regt_margin = np.inf account.initial_margin_requirement = 0.0 account.maintenance_margin_requirement = 0.0 account.available_funds = portfolio.cash account.excess_liquidity = portfolio.cash account.cushion = ( (portfolio.cash / portfolio.portfolio_value) if portfolio.portfolio_value else np.nan ) account.day_trades_remaining = np.inf (account.net_liquidation, account.gross_leverage, account.net_leverage) = self.calculate_period_stats() account.leverage = account.gross_leverage # apply the overrides for k, v in iteritems(self._account_overrides): setattr(account, k, v) # the account has been fully synced self._dirty_account = False return self._immutable_account
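
# --- Hedged usage sketch (not part of the original module) -------------------
# Two small checks of the bookkeeping above: the futures payout helper, and
# the per-bar returns chaining that ``update_portfolio`` applies to
# ``portfolio.returns``. Both run standalone; no portfolio state is needed.
from zipline.finance.ledger import Ledger

# A 10-point move on 2 contracts with a 50x price multiplier pays out 1000.
assert Ledger._calculate_payout(
    multiplier=50, amount=2, old_price=3000.0, price=3010.0,
) == 1000.0

# Chaining +1% and -0.5% bar returns matches compounding them directly.
cumulative = 0.0
for bar_return in (0.01, -0.005):
    cumulative = (1 + cumulative) * (1 + bar_return) - 1
assert abs(cumulative - (1.01 * 0.995 - 1)) < 1e-12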
zipline-trader :: zipline/finance/ledger.py
from abc import ABCMeta, abstractmethod from six import with_metaclass from zipline.extensions import extensible from zipline.finance.cancel_policy import NeverCancel @extensible class Blotter(with_metaclass(ABCMeta)): def __init__(self, cancel_policy=None): self.cancel_policy = cancel_policy if cancel_policy else NeverCancel() self.current_dt = None def set_date(self, dt): self.current_dt = dt @abstractmethod def order(self, asset, amount, style, order_id=None): """Place an order. Parameters ---------- asset : zipline.assets.Asset The asset that this order is for. amount : int The amount of shares to order. If ``amount`` is positive, this is the number of shares to buy or cover. If ``amount`` is negative, this is the number of shares to sell or short. style : zipline.finance.execution.ExecutionStyle The execution style for the order. order_id : str, optional The unique identifier for this order. Returns ------- order_id : str or None The unique identifier for this order, or None if no order was placed. Notes ----- amount > 0 :: Buy/Cover amount < 0 :: Sell/Short Market order: order(asset, amount) Limit order: order(asset, amount, style=LimitOrder(limit_price)) Stop order: order(asset, amount, style=StopOrder(stop_price)) StopLimit order: order(asset, amount, style=StopLimitOrder(limit_price, stop_price)) """ raise NotImplementedError('order') def batch_order(self, order_arg_lists): """Place a batch of orders. Parameters ---------- order_arg_lists : iterable[tuple] Tuples of args that `order` expects. Returns ------- order_ids : list[str or None] The unique identifier (or None) for each of the orders placed (or not placed). Notes ----- This is required for `Blotter` subclasses to be able to place a batch of orders, instead of being passed the order requests one at a time. """ return [self.order(*order_args) for order_args in order_arg_lists] @abstractmethod def cancel(self, order_id, relay_status=True): """Cancel a single order Parameters ---------- order_id : int The id of the order relay_status : bool Whether or not to record the status of the order """ raise NotImplementedError('cancel') @abstractmethod def cancel_all_orders_for_asset(self, asset, warn=False, relay_status=True): """ Cancel all open orders for a given asset. """ raise NotImplementedError('cancel_all_orders_for_asset') @abstractmethod def execute_cancel_policy(self, event): raise NotImplementedError('execute_cancel_policy') @abstractmethod def reject(self, order_id, reason=''): """ Mark the given order as 'rejected', which is functionally similar to cancelled. The distinction is that rejections are involuntary (and usually include a message from a broker indicating why the order was rejected) while cancels are typically user-driven. """ raise NotImplementedError('reject') @abstractmethod def hold(self, order_id, reason=''): """ Mark the order with order_id as 'held'. Held is functionally similar to 'open'. When a fill (full or partial) arrives, the status will automatically change back to open/filled as necessary. """ raise NotImplementedError('hold') @abstractmethod def process_splits(self, splits): """ Processes a list of splits by modifying any open orders as needed. Parameters ---------- splits: list A list of splits. Each split is a tuple of (asset, ratio). Returns ------- None """ raise NotImplementedError('process_splits') @abstractmethod def get_transactions(self, bar_data): """ Creates a list of transactions based on the current open orders, slippage model, and commission model. 
Parameters ---------- bar_data: zipline._protocol.BarData Notes ----- This method book-keeps the blotter's open_orders dictionary, so that it is accurate by the time we're done processing open orders. Returns ------- transactions_list: List transactions_list: list of transactions resulting from the current open orders. If there were no open orders, an empty list is returned. commissions_list: List commissions_list: list of commissions resulting from filling the open orders. A commission is an object with "asset" and "cost" parameters. closed_orders: List closed_orders: list of all the orders that have filled. """ raise NotImplementedError('get_transactions') @abstractmethod def prune_orders(self, closed_orders): """ Removes all given orders from the blotter's open_orders list. Parameters ---------- closed_orders: iterable of orders that are closed. Returns ------- None """ raise NotImplementedError('prune_orders')
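
# --- Hedged sketch (not part of the original module) -------------------------
# The smallest shape a concrete Blotter can take: every abstract method is
# implemented, but no orders are placed and no fills are produced.
# ``RecordingBlotter`` is a hypothetical name; a real implementation would
# typically also be registered via zipline.extensions.register(Blotter, ...)
# the way the simulation blotter further below is.
from zipline.finance.blotter.blotter import Blotter


class RecordingBlotter(Blotter):
    def __init__(self, cancel_policy=None):
        super(RecordingBlotter, self).__init__(cancel_policy=cancel_policy)
        self.requests = []

    def order(self, asset, amount, style, order_id=None):
        # Remember what was asked for; place nothing.
        self.requests.append((asset, amount, style, order_id))
        return None

    def cancel(self, order_id, relay_status=True):
        pass

    def cancel_all_orders_for_asset(self, asset, warn=False,
                                    relay_status=True):
        pass

    def execute_cancel_policy(self, event):
        pass

    def reject(self, order_id, reason=''):
        pass

    def hold(self, order_id, reason=''):
        pass

    def process_splits(self, splits):
        pass

    def get_transactions(self, bar_data):
        return [], [], []   # no transactions, no commissions, no closed orders

    def prune_orders(self, closed_orders):
        pass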
zipline-trader :: zipline/finance/blotter/blotter.py
from logbook import Logger from collections import defaultdict from copy import copy from six import iteritems from zipline.assets import Equity, Future, Asset from .blotter import Blotter from zipline.extensions import register from zipline.finance.order import Order from zipline.finance.slippage import ( DEFAULT_FUTURE_VOLUME_SLIPPAGE_BAR_LIMIT, VolatilityVolumeShare, FixedBasisPointsSlippage, ) from zipline.finance.commission import ( DEFAULT_PER_CONTRACT_COST, FUTURE_EXCHANGE_FEES_BY_SYMBOL, PerContract, PerShare, ) from zipline.utils.input_validation import expect_types log = Logger('Blotter') warning_logger = Logger('AlgoWarning') @register(Blotter, 'default') class SimulationBlotter(Blotter): def __init__(self, equity_slippage=None, future_slippage=None, equity_commission=None, future_commission=None, cancel_policy=None): super(SimulationBlotter, self).__init__(cancel_policy=cancel_policy) # these orders are aggregated by asset self.open_orders = defaultdict(list) # keep a dict of orders by their own id self.orders = {} # holding orders that have come in since the last event. self.new_orders = [] self.max_shares = int(1e+11) self.slippage_models = { Equity: equity_slippage or FixedBasisPointsSlippage(), Future: future_slippage or VolatilityVolumeShare( volume_limit=DEFAULT_FUTURE_VOLUME_SLIPPAGE_BAR_LIMIT, ), } self.commission_models = { Equity: equity_commission or PerShare(), Future: future_commission or PerContract( cost=DEFAULT_PER_CONTRACT_COST, exchange_fee=FUTURE_EXCHANGE_FEES_BY_SYMBOL, ), } def __repr__(self): return """ {class_name}( slippage_models={slippage_models}, commission_models={commission_models}, open_orders={open_orders}, orders={orders}, new_orders={new_orders}, current_dt={current_dt}) """.strip().format(class_name=self.__class__.__name__, slippage_models=self.slippage_models, commission_models=self.commission_models, open_orders=self.open_orders, orders=self.orders, new_orders=self.new_orders, current_dt=self.current_dt) @expect_types(asset=Asset) def order(self, asset, amount, style, order_id=None): """Place an order. Parameters ---------- asset : zipline.assets.Asset The asset that this order is for. amount : int The amount of shares to order. If ``amount`` is positive, this is the number of shares to buy or cover. If ``amount`` is negative, this is the number of shares to sell or short. style : zipline.finance.execution.ExecutionStyle The execution style for the order. order_id : str, optional The unique identifier for this order. Returns ------- order_id : str or None The unique identifier for this order, or None if no order was placed. Notes ----- amount > 0 :: Buy/Cover amount < 0 :: Sell/Short Market order: order(asset, amount) Limit order: order(asset, amount, style=LimitOrder(limit_price)) Stop order: order(asset, amount, style=StopOrder(stop_price)) StopLimit order: order(asset, amount, style=StopLimitOrder(limit_price, stop_price)) """ # something could be done with amount to further divide # between buy by share count OR buy shares up to a dollar amount # numeric == share count AND "$dollar.cents" == cost amount if amount == 0: # Don't bother placing orders for 0 shares. return None elif amount > self.max_shares: # Arbitrary limit of 100 billion (US) shares will never be # exceeded except by a buggy algorithm. 
raise OverflowError("Can't order more than %d shares" % self.max_shares) is_buy = (amount > 0) order = Order( dt=self.current_dt, asset=asset, amount=amount, stop=style.get_stop_price(is_buy), limit=style.get_limit_price(is_buy), id=order_id ) self.open_orders[order.asset].append(order) self.orders[order.id] = order self.new_orders.append(order) return order.id def cancel(self, order_id, relay_status=True): if order_id not in self.orders: return cur_order = self.orders[order_id] if cur_order.open: order_list = self.open_orders[cur_order.asset] if cur_order in order_list: order_list.remove(cur_order) if cur_order in self.new_orders: self.new_orders.remove(cur_order) cur_order.cancel() cur_order.dt = self.current_dt if relay_status: # we want this order's new status to be relayed out # along with newly placed orders. self.new_orders.append(cur_order) def cancel_all_orders_for_asset(self, asset, warn=False, relay_status=True): """ Cancel all open orders for a given asset. """ # (sadly) open_orders is a defaultdict, so this will always succeed. orders = self.open_orders[asset] # We're making a copy here because `cancel` mutates the list of open # orders in place. The right thing to do here would be to make # self.open_orders no longer a defaultdict. If we do that, then we # should just remove the orders once here and be done with the matter. for order in orders[:]: self.cancel(order.id, relay_status) if warn: # Message appropriately depending on whether there's # been a partial fill or not. if order.filled > 0: warning_logger.warn( 'Your order for {order_amt} shares of ' '{order_sym} has been partially filled. ' '{order_filled} shares were successfully ' 'purchased. {order_failed} shares were not ' 'filled by the end of day and ' 'were canceled.'.format( order_amt=order.amount, order_sym=order.asset.symbol, order_filled=order.filled, order_failed=order.amount - order.filled, ) ) elif order.filled < 0: warning_logger.warn( 'Your order for {order_amt} shares of ' '{order_sym} has been partially filled. ' '{order_filled} shares were successfully ' 'sold. {order_failed} shares were not ' 'filled by the end of day and ' 'were canceled.'.format( order_amt=order.amount, order_sym=order.asset.symbol, order_filled=-1 * order.filled, order_failed=-1 * (order.amount - order.filled), ) ) else: warning_logger.warn( 'Your order for {order_amt} shares of ' '{order_sym} failed to fill by the end of day ' 'and was canceled.'.format( order_amt=order.amount, order_sym=order.asset.symbol, ) ) assert not orders del self.open_orders[asset] def execute_cancel_policy(self, event): if self.cancel_policy.should_cancel(event): warn = self.cancel_policy.warn_on_cancel for asset in copy(self.open_orders): self.cancel_all_orders_for_asset(asset, warn, relay_status=False) def reject(self, order_id, reason=''): """ Mark the given order as 'rejected', which is functionally similar to cancelled. The distinction is that rejections are involuntary (and usually include a message from a broker indicating why the order was rejected) while cancels are typically user-driven. """ if order_id not in self.orders: return cur_order = self.orders[order_id] order_list = self.open_orders[cur_order.asset] if cur_order in order_list: order_list.remove(cur_order) if cur_order in self.new_orders: self.new_orders.remove(cur_order) cur_order.reject(reason=reason) cur_order.dt = self.current_dt # we want this order's new status to be relayed out # along with newly placed orders. 
self.new_orders.append(cur_order) def hold(self, order_id, reason=''): """ Mark the order with order_id as 'held'. Held is functionally similar to 'open'. When a fill (full or partial) arrives, the status will automatically change back to open/filled as necessary. """ if order_id not in self.orders: return cur_order = self.orders[order_id] if cur_order.open: if cur_order in self.new_orders: self.new_orders.remove(cur_order) cur_order.hold(reason=reason) cur_order.dt = self.current_dt # we want this order's new status to be relayed out # along with newly placed orders. self.new_orders.append(cur_order) def process_splits(self, splits): """ Processes a list of splits by modifying any open orders as needed. Parameters ---------- splits: list A list of splits. Each split is a tuple of (asset, ratio). Returns ------- None """ for asset, ratio in splits: if asset not in self.open_orders: continue orders_to_modify = self.open_orders[asset] for order in orders_to_modify: order.handle_split(ratio) def get_transactions(self, bar_data): """ Creates a list of transactions based on the current open orders, slippage model, and commission model. Parameters ---------- bar_data: zipline._protocol.BarData Notes ----- This method book-keeps the blotter's open_orders dictionary, so that it is accurate by the time we're done processing open orders. Returns ------- transactions_list: List transactions_list: list of transactions resulting from the current open orders. If there were no open orders, an empty list is returned. commissions_list: List commissions_list: list of commissions resulting from filling the open orders. A commission is an object with "asset" and "cost" parameters. closed_orders: List closed_orders: list of all the orders that have filled. """ closed_orders = [] transactions = [] commissions = [] if self.open_orders: for asset, asset_orders in iteritems(self.open_orders): slippage = self.slippage_models[type(asset)] for order, txn in \ slippage.simulate(bar_data, asset, asset_orders): commission = self.commission_models[type(asset)] additional_commission = commission.calculate(order, txn) if additional_commission > 0: commissions.append({ "asset": order.asset, "order": order, "cost": additional_commission }) order.filled += txn.amount order.commission += additional_commission order.dt = txn.dt transactions.append(txn) if not order.open: closed_orders.append(order) return transactions, commissions, closed_orders def prune_orders(self, closed_orders): """ Removes all given orders from the blotter's open_orders list. Parameters ---------- closed_orders: iterable of orders that are closed. Returns ------- None """ # remove all closed orders from our open_orders dict for order in closed_orders: asset = order.asset asset_orders = self.open_orders[asset] try: asset_orders.remove(order) except ValueError: continue # now clear out the assets from our open_orders dict that have # zero open orders for asset in list(self.open_orders.keys()): if len(self.open_orders[asset]) == 0: del self.open_orders[asset]
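
# --- Hedged usage sketch (not part of the original module) -------------------
# Wires a SimulationBlotter with explicit equity models and places one market
# order. ``asset`` is assumed to be a real zipline Equity; the PerShare and
# slippage arguments are illustrative values, not recommendations.
import pandas as pd

from zipline.finance.blotter.simulation_blotter import SimulationBlotter
from zipline.finance.commission import PerShare
from zipline.finance.execution import MarketOrder
from zipline.finance.slippage import FixedBasisPointsSlippage


def demo_place_market_order(asset):
    blotter = SimulationBlotter(
        equity_slippage=FixedBasisPointsSlippage(),
        equity_commission=PerShare(cost=0.005, min_trade_cost=1.0),
    )
    blotter.set_date(pd.Timestamp('2021-01-04 14:31', tz='UTC'))

    order_id = blotter.order(asset, 100, MarketOrder())
    assert order_id in blotter.orders
    assert blotter.open_orders[asset][0].amount == 100
    return blotter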
zipline-trader :: zipline/finance/blotter/simulation_blotter.py
from logbook import Logger from collections import defaultdict from copy import copy from six import itervalues, iteritems from zipline.assets import Equity, Future, Asset from zipline.finance.blotter.blotter import Blotter from zipline.extensions import register from zipline.finance.order import Order from zipline.finance.slippage import ( DEFAULT_FUTURE_VOLUME_SLIPPAGE_BAR_LIMIT, VolatilityVolumeShare, FixedBasisPointsSlippage, ) from zipline.finance.commission import ( DEFAULT_PER_CONTRACT_COST, FUTURE_EXCHANGE_FEES_BY_SYMBOL, PerContract, PerShare, ) from zipline.utils.input_validation import expect_types import pandas as pd log = Logger('Blotter Live') warning_logger = Logger('AlgoWarning') class BlotterLive(Blotter): def __init__(self, data_frequency, broker): self.broker = broker self._processed_closed_orders = [] self._processed_transactions = [] self.data_frequency = data_frequency self.new_orders = [] self.max_shares = int(1e+11) self.slippage_models = { Equity: FixedBasisPointsSlippage(), Future: VolatilityVolumeShare( volume_limit=DEFAULT_FUTURE_VOLUME_SLIPPAGE_BAR_LIMIT, ), } self.commission_models = { Equity: PerShare(), Future: PerContract( cost=DEFAULT_PER_CONTRACT_COST, exchange_fee=FUTURE_EXCHANGE_FEES_BY_SYMBOL, ), } log.info('Initialized blotter_live') def __repr__(self): return """ {class_name}( open_orders={open_orders}, orders={orders}, new_orders={new_orders}, """.strip().format(class_name=self.__class__.__name__, open_orders=self.open_orders, orders=self.orders, new_orders=self.new_orders) @property def orders(self): # IB returns orders from previous days too. # Need to filter for today to be in sync with zipline's behavior # TODO: This logic needs to be extended once GTC orders are supported today = pd.to_datetime('now', utc=True).date() return {order_id: order for order_id, order in iteritems(self.broker.orders) if order.dt.date() == today} @property def open_orders(self): assets = set([order.asset for order in itervalues(self.orders) if order.open]) return { asset: [order for order in itervalues(self.orders) if order.asset == asset and order.open] for asset in assets } @expect_types(asset=Asset) def order(self, asset, amount, style, order_id=None): assert order_id is None if amount == 0: # it's a zipline fuck up.. we shouldn't get orders with amount 0. ignoring this order return '' order = self.broker.order(asset, amount, style) self.new_orders.append(order) return order.id def cancel(self, order_id, relay_status=True): return self.broker.cancel_order(order_id) def execute_cancel_policy(self, event): # Cancellation is handled at the broker pass def cancel_all_orders_for_asset(self, asset, warn=False, relay_status=True): """ Cancel all open orders for a given asset. """ # (sadly) open_orders is a defaultdict, so this will always succeed. orders = self.open_orders[asset] # We're making a copy here because `cancel` mutates the list of open # orders in place. The right thing to do here would be to make # self.open_orders no longer a defaultdict. If we do that, then we # should just remove the orders once here and be done with the matter. for order in orders[:]: self.cancel(order.id, relay_status) if warn: # Message appropriately depending on whether there's # been a partial fill or not. if order.filled > 0: warning_logger.warn( 'Your order for {order_amt} shares of ' '{order_sym} has been partially filled. ' '{order_filled} shares were successfully ' 'purchased. 
{order_failed} shares were not ' 'filled by the end of day and ' 'were canceled.'.format( order_amt=order.amount, order_sym=order.asset.symbol, order_filled=order.filled, order_failed=order.amount - order.filled, ) ) elif order.filled < 0: warning_logger.warn( 'Your order for {order_amt} shares of ' '{order_sym} has been partially filled. ' '{order_filled} shares were successfully ' 'sold. {order_failed} shares were not ' 'filled by the end of day and ' 'were canceled.'.format( order_amt=order.amount, order_sym=order.asset.symbol, order_filled=-1 * order.filled, order_failed=-1 * (order.amount - order.filled), ) ) else: warning_logger.warn( 'Your order for {order_amt} shares of ' '{order_sym} failed to fill by the end of day ' 'and was canceled.'.format( order_amt=order.amount, order_sym=order.asset.symbol, ) ) assert not orders del self.open_orders[asset] def reject(self, order_id, reason=''): log.warning("Unexpected reject request for {}: '{}'".format( order_id, reason)) def hold(self, order_id, reason=''): log.warning("Unexpected hold request for {}: '{}'".format( order_id, reason)) def get_transactions(self, bar_data): # All returned values from this function are delta between # the previous and actual call. def _list_delta(lst_a, lst_b): return [elem for elem in lst_a if elem not in set(lst_b)] today = pd.to_datetime('now', utc=True).date() all_transactions = [tx for tx in itervalues(self.broker.transactions) if tx.dt.date() == today] new_transactions = _list_delta(all_transactions, self._processed_transactions) self._processed_transactions = all_transactions new_commissions = [{'asset': tx.asset, 'cost': self.broker.orders[tx.order_id].commission, 'order': self.orders[tx.order_id]} for tx in new_transactions] all_closed_orders = [order for order in itervalues(self.orders) if not order.open] new_closed_orders = _list_delta(all_closed_orders, self._processed_closed_orders) self._processed_closed_orders = all_closed_orders return new_transactions, new_commissions, new_closed_orders def prune_orders(self, closed_orders): # Orders are handled at the broker pass def process_splits(self, splits): # Splits are handled at the broker pass
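
# --- Hedged sketch (not part of the original module) -------------------------
# BlotterLive leans entirely on its broker object, so this stub spells out the
# duck-typed surface the code above actually touches: ``orders`` and
# ``transactions`` mappings plus ``order``/``cancel_order`` calls. It is a
# hypothetical illustration of the contract, not a real broker integration.
class StubBroker(object):
    def __init__(self):
        self.orders = {}        # order_id -> zipline Order, read by BlotterLive.orders
        self.transactions = {}  # txn_id -> Transaction, read by get_transactions

    def order(self, asset, amount, style):
        # A real adapter submits to the broker and returns a zipline Order
        # carrying the broker-assigned id.
        raise NotImplementedError('submit to the real broker here')

    def cancel_order(self, order_id):
        raise NotImplementedError('cancel at the real broker here')

# BlotterLive(data_frequency='minute', broker=StubBroker()) would then be the
# wiring, with the broker filling in orders/transactions as fills arrive.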
zipline-trader :: zipline/finance/blotter/blotter_live.py
from __future__ import division import logbook from ..ledger import Ledger from zipline.utils.exploding_object import NamedExplodingObject log = logbook.Logger(__name__) class MetricsTracker(object): """The algorithm's interface to the registered risk and performance metrics. Parameters ---------- trading_calendar : TrandingCalendar The trading calendar used in the simulation. first_session : pd.Timestamp The label of the first trading session in the simulation. last_session : pd.Timestamp The label of the last trading session in the simulation. capital_base : float The starting capital for the simulation. emission_rate : {'daily', 'minute'} How frequently should a performance packet be generated? data_frequency : {'daily', 'minute'} The data frequency of the data portal. asset_finder : AssetFinder The asset finder used in the simulation. metrics : list[Metric] The metrics to track. """ _hooks = ( 'start_of_simulation', 'end_of_simulation', 'start_of_session', 'end_of_session', 'end_of_bar', ) @staticmethod def _execution_open_and_close(calendar, session): open_, close = calendar.open_and_close_for_session(session) execution_open = calendar.execution_time_from_open(open_) execution_close = calendar.execution_time_from_close(close) return execution_open, execution_close def __init__(self, trading_calendar, first_session, last_session, capital_base, emission_rate, data_frequency, asset_finder, metrics): self.emission_rate = emission_rate self._trading_calendar = trading_calendar self._first_session = first_session self._last_session = last_session self._capital_base = capital_base self._asset_finder = asset_finder self._current_session = first_session self._market_open, self._market_close = self._execution_open_and_close( trading_calendar, first_session, ) self._session_count = 0 self._sessions = sessions = trading_calendar.sessions_in_range( first_session, last_session, ) self._total_session_count = len(sessions) self._ledger = Ledger(sessions, capital_base, data_frequency) self._benchmark_source = NamedExplodingObject( 'self._benchmark_source', '_benchmark_source is not set until ``handle_start_of_simulation``' ' is called', ) if emission_rate == 'minute': def progress(self): return 1.0 # a fake value else: def progress(self): return self._session_count / self._total_session_count # don't compare these strings over and over again! self._progress = progress # bind all of the hooks from the passed metric objects. 
for hook in self._hooks: registered = [] for metric in metrics: try: registered.append(getattr(metric, hook)) except AttributeError: pass def closing_over_loop_variables_is_hard(registered=registered): def hook_implementation(*args, **kwargs): for impl in registered: impl(*args, **kwargs) return hook_implementation hook_implementation = closing_over_loop_variables_is_hard() hook_implementation.__name__ = hook setattr(self, hook, hook_implementation) def handle_start_of_simulation(self, benchmark_source): self._benchmark_source = benchmark_source self.start_of_simulation( self._ledger, self.emission_rate, self._trading_calendar, self._sessions, benchmark_source, ) @property def portfolio(self): return self._ledger.portfolio @property def account(self): return self._ledger.account @property def positions(self): return self._ledger.position_tracker.positions def update_position(self, asset, amount=None, last_sale_price=None, last_sale_date=None, cost_basis=None): self._ledger.position_tracker.update_position( asset, amount, last_sale_price, last_sale_date, cost_basis, ) def override_account_fields(self, **kwargs): self._ledger.override_account_fields(**kwargs) def process_transaction(self, transaction): self._ledger.process_transaction(transaction) def handle_splits(self, splits): self._ledger.process_splits(splits) def process_order(self, event): self._ledger.process_order(event) def process_commission(self, commission): self._ledger.process_commission(commission) def process_close_position(self, asset, dt, data_portal): self._ledger.close_position(asset, dt, data_portal) def capital_change(self, amount): self._ledger.capital_change(amount) def sync_last_sale_prices(self, dt, data_portal, handle_non_market_minutes=False): self._ledger.sync_last_sale_prices( dt, data_portal, handle_non_market_minutes=handle_non_market_minutes, ) def handle_minute_close(self, dt, data_portal): """ Handles the close of the given minute in minute emission. Parameters ---------- dt : Timestamp The minute that is ending Returns ------- A minute perf packet. """ self.sync_last_sale_prices(dt, data_portal) packet = { 'period_start': self._first_session, 'period_end': self._last_session, 'capital_base': self._capital_base, 'minute_perf': { 'period_open': self._market_open, 'period_close': dt, }, 'cumulative_perf': { 'period_open': self._first_session, 'period_close': self._last_session, }, 'progress': self._progress(self), 'cumulative_risk_metrics': {}, } ledger = self._ledger ledger.end_of_bar(self._session_count) self.end_of_bar( packet, ledger, dt, self._session_count, data_portal, ) return packet def handle_market_open(self, session_label, data_portal): """Handles the start of each session. Parameters ---------- session_label : Timestamp The label of the session that is about to begin. data_portal : DataPortal The current data portal. """ ledger = self._ledger ledger.start_of_session(session_label) adjustment_reader = data_portal.adjustment_reader if adjustment_reader is not None: # this is None when running with a dataframe source ledger.process_dividends( session_label, self._asset_finder, adjustment_reader, ) self._current_session = session_label cal = self._trading_calendar self._market_open, self._market_close = self._execution_open_and_close( cal, session_label, ) self.start_of_session(ledger, session_label, data_portal) def handle_market_close(self, dt, data_portal): """Handles the close of the given day. Parameters ---------- dt : Timestamp The most recently completed simulation datetime. 
data_portal : DataPortal The current data portal. Returns ------- A daily perf packet. """ completed_session = self._current_session if self.emission_rate == 'daily': # this method is called for both minutely and daily emissions, but # this chunk of code here only applies for daily emissions. (since # it's done every minute, elsewhere, for minutely emission). self.sync_last_sale_prices(dt, data_portal) session_ix = self._session_count # increment the day counter before we move markers forward. self._session_count += 1 packet = { 'period_start': self._first_session, 'period_end': self._last_session, 'capital_base': self._capital_base, 'daily_perf': { 'period_open': self._market_open, 'period_close': dt, }, 'cumulative_perf': { 'period_open': self._first_session, 'period_close': self._last_session, }, 'progress': self._progress(self), 'cumulative_risk_metrics': {}, } ledger = self._ledger ledger.end_of_session(session_ix) self.end_of_session( packet, ledger, completed_session, session_ix, data_portal, ) return packet def handle_simulation_end(self, data_portal): """ When the simulation is complete, run the full period risk report and send it out on the results socket. """ log.info( 'Simulated {} trading days\n' 'first open: {}\n' 'last close: {}', self._session_count, self._trading_calendar.session_open(self._first_session), self._trading_calendar.session_close(self._last_session), ) packet = {} self.end_of_simulation( packet, self._ledger, self._trading_calendar, self._sessions, data_portal, self._benchmark_source, ) return packet
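
# --- Hedged sketch (not part of the original module) -------------------------
# MetricsTracker binds only the hooks a metric actually defines, so a custom
# metric may implement any subset of ('start_of_simulation',
# 'end_of_simulation', 'start_of_session', 'end_of_session', 'end_of_bar').
# ``EndingCashMetric`` is a hypothetical example that would be passed in the
# ``metrics`` list given to MetricsTracker; it adds one field per daily packet.
class EndingCashMetric(object):
    def end_of_session(self, packet, ledger, session, session_ix, data_portal):
        # Mirror the ledger's cash into the daily performance packet.
        packet['daily_perf']['tracked_ending_cash'] = ledger.portfolio.cash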
zipline-trader :: zipline/finance/metrics/tracker.py
from functools import partial from zipline.utils.compat import mappingproxy def _make_metrics_set_core(): """Create a family of metrics sets functions that read from the same metrics set mapping. Returns ------- metrics_sets : mappingproxy The mapping of metrics sets to load functions. register : callable The function which registers new metrics sets in the ``metrics_sets`` mapping. unregister : callable The function which deregisters metrics sets from the ``metrics_sets`` mapping. load : callable The function which loads the ingested metrics sets back into memory. """ _metrics_sets = {} # Expose _metrics_sets through a proxy so that users cannot mutate this # accidentally. Users may go through `register` to update this which will # warn when trampling another metrics set. metrics_sets = mappingproxy(_metrics_sets) def register(name, function=None): """Register a new metrics set. Parameters ---------- name : str The name of the metrics set function : callable The callable which produces the metrics set. Notes ----- This may be used as a decorator if only ``name`` is passed. See Also -------- zipline.finance.metrics.get_metrics_set zipline.finance.metrics.unregister_metrics_set """ if function is None: # allow as decorator with just name. return partial(register, name) if name in _metrics_sets: raise ValueError('metrics set %r is already registered' % name) _metrics_sets[name] = function return function def unregister(name): """Unregister an existing metrics set. Parameters ---------- name : str The name of the metrics set See Also -------- zipline.finance.metrics.register_metrics_set """ try: del _metrics_sets[name] except KeyError: raise ValueError( 'metrics set %r was not already registered' % name, ) def load(name): """Return an instance of the metrics set registered with the given name. Returns ------- metrics : set[Metric] A new instance of the metrics set. Raises ------ ValueError Raised when no metrics set is registered to ``name`` """ try: function = _metrics_sets[name] except KeyError: raise ValueError( 'no metrics set registered as %r, options are: %r' % ( name, sorted(_metrics_sets), ), ) return function() return metrics_sets, register, unregister, load metrics_sets, register, unregister, load = _make_metrics_set_core()
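
# --- Hedged usage sketch (not part of the original module) -------------------
# Exercises the registry triple on a private core so the module-level registry
# stays untouched; 'toy' is a made-up metrics-set name.
from zipline.finance.metrics.core import _make_metrics_set_core

toy_sets, toy_register, toy_unregister, toy_load = _make_metrics_set_core()


@toy_register('toy')
def toy_metrics():
    return set()            # an empty metrics set is perfectly valid


assert 'toy' in toy_sets
assert toy_load('toy') == set()
toy_unregister('toy')
assert 'toy' not in toy_sets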
zipline-trader
/zipline_trader-1.6.1-cp36-cp36m-win32.whl/zipline/finance/metrics/core.py
core.py
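A usage sketch for the registry defined in core.py above: registering a custom metrics set by name, loading it, and removing it again. The name 'my_minimal' and the particular metrics chosen are illustrative assumptions; Returns and NumTradingDays are real classes from the metric module shown later in this file set.

from zipline.finance.metrics import register, load, unregister
from zipline.finance.metrics.metric import Returns, NumTradingDays


@register('my_minimal')          # decorator form: only the name is passed
def my_minimal_metrics():
    return {Returns(), NumTradingDays()}


metrics = load('my_minimal')     # calls my_minimal_metrics() and returns a fresh set
assert any(isinstance(m, Returns) for m in metrics)

unregister('my_minimal')         # registering the same name twice raises ValueError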
import empyrical from zipline.utils.deprecate import deprecated from .core import ( metrics_sets, register, unregister, load, ) from .metric import ( AlphaBeta, BenchmarkReturnsAndVolatility, CashFlow, DailyLedgerField, MaxLeverage, NumTradingDays, Orders, PeriodLabel, PNL, Returns, ReturnsStatistic, SimpleLedgerField, StartOfPeriodLedgerField, Transactions, _ConstantCumulativeRiskMetric, _ClassicRiskMetrics, ) from .tracker import MetricsTracker __all__ = ['MetricsTracker', 'unregister', 'metrics_sets', 'load'] register('none', set) @register('default') def default_metrics(): return { Returns(), ReturnsStatistic(empyrical.annual_volatility, 'algo_volatility'), BenchmarkReturnsAndVolatility(), PNL(), CashFlow(), Orders(), Transactions(), SimpleLedgerField('positions'), StartOfPeriodLedgerField( 'portfolio.positions_exposure', 'starting_exposure', ), DailyLedgerField( 'portfolio.positions_exposure', 'ending_exposure', ), StartOfPeriodLedgerField( 'portfolio.positions_value', 'starting_value' ), DailyLedgerField('portfolio.positions_value', 'ending_value'), StartOfPeriodLedgerField('portfolio.cash', 'starting_cash'), DailyLedgerField('portfolio.cash', 'ending_cash'), DailyLedgerField('portfolio.portfolio_value'), DailyLedgerField('position_tracker.stats.longs_count'), DailyLedgerField('position_tracker.stats.shorts_count'), DailyLedgerField('position_tracker.stats.long_value'), DailyLedgerField('position_tracker.stats.short_value'), DailyLedgerField('position_tracker.stats.long_exposure'), DailyLedgerField('position_tracker.stats.short_exposure'), DailyLedgerField('account.gross_leverage'), DailyLedgerField('account.net_leverage'), AlphaBeta(), ReturnsStatistic(empyrical.sharpe_ratio, 'sharpe'), ReturnsStatistic(empyrical.sortino_ratio, 'sortino'), ReturnsStatistic(empyrical.max_drawdown), MaxLeverage(), # Please kill these! _ConstantCumulativeRiskMetric('excess_return', 0.0), _ConstantCumulativeRiskMetric('treasury_period_return', 0.0), NumTradingDays(), PeriodLabel(), } @register('classic') @deprecated( 'The original risk packet has been deprecated and will be removed in a ' 'future release. Please use "default" metrics instead.' ) def classic_metrics(): metrics = default_metrics() metrics.add(_ClassicRiskMetrics()) return metrics
zipline-trader
/zipline_trader-1.6.1-cp36-cp36m-win32.whl/zipline/finance/metrics/__init__.py
__init__.py
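A small sketch exercising the metrics sets registered in the __init__.py above. Nothing here is new API; it only calls the load() function re-exported by this module, and the comparison with the 'default' set follows directly from the classic_metrics definition shown above.

import warnings

from zipline.finance.metrics import load
from zipline.finance.metrics.metric import _ClassicRiskMetrics

assert load('none') == set()     # 'none' is registered as the built-in set()

with warnings.catch_warnings():
    warnings.simplefilter('ignore')      # 'classic' emits a deprecation warning
    classic = load('classic')

# 'classic' is just the default set plus the legacy risk-packet metric.
assert any(isinstance(m, _ClassicRiskMetrics) for m in classic)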
import datetime from functools import partial import operator as op from dateutil.relativedelta import relativedelta import empyrical as ep import numpy as np import pandas as pd from six import iteritems from zipline.utils.exploding_object import NamedExplodingObject from zipline.finance._finance_ext import minute_annual_volatility class SimpleLedgerField(object): """Emit the current value of a ledger field every bar or every session. Parameters ---------- ledger_field : str The ledger field to read. packet_field : str, optional The name of the field to populate in the packet. If not provided, ``ledger_field`` will be used. """ def __init__(self, ledger_field, packet_field=None): self._get_ledger_field = op.attrgetter(ledger_field) if packet_field is None: self._packet_field = ledger_field.rsplit('.', 1)[-1] else: self._packet_field = packet_field def end_of_bar(self, packet, ledger, dt, session_ix, data_portal): packet['minute_perf'][self._packet_field] = self._get_ledger_field( ledger, ) def end_of_session(self, packet, ledger, session, session_ix, data_portal): packet['daily_perf'][self._packet_field] = self._get_ledger_field( ledger, ) class DailyLedgerField(object): """Like :class:`~zipline.finance.metrics.metric.SimpleLedgerField` but also puts the current value in the ``cumulative_perf`` section. Parameters ---------- ledger_field : str The ledger field to read. packet_field : str, optional The name of the field to populate in the packet. If not provided, ``ledger_field`` will be used. """ def __init__(self, ledger_field, packet_field=None): self._get_ledger_field = op.attrgetter(ledger_field) if packet_field is None: self._packet_field = ledger_field.rsplit('.', 1)[-1] else: self._packet_field = packet_field def end_of_bar(self, packet, ledger, dt, session_ix, data_portal): field = self._packet_field packet['cumulative_perf'][field] = packet['minute_perf'][field] = ( self._get_ledger_field(ledger) ) def end_of_session(self, packet, ledger, session, session_ix, data_portal): field = self._packet_field packet['cumulative_perf'][field] = packet['daily_perf'][field] = ( self._get_ledger_field(ledger) ) class StartOfPeriodLedgerField(object): """Keep track of the value of a ledger field at the start of the period. Parameters ---------- ledger_field : str The ledger field to read. packet_field : str, optional The name of the field to populate in the packet. If not provided, ``ledger_field`` will be used. """ def __init__(self, ledger_field, packet_field=None): self._get_ledger_field = op.attrgetter(ledger_field) if packet_field is None: self._packet_field = ledger_field.rsplit('.', 1)[-1] else: self._packet_field = packet_field def start_of_simulation(self, ledger, emission_rate, trading_calendar, sessions, benchmark_source): self._start_of_simulation = self._get_ledger_field(ledger) def start_of_session(self, ledger, session, data_portal): self._previous_day = self._get_ledger_field(ledger) def _end_of_period(self, sub_field, packet, ledger): packet_field = self._packet_field packet['cumulative_perf'][packet_field] = self._start_of_simulation packet[sub_field][packet_field] = self._previous_day def end_of_bar(self, packet, ledger, dt, session_ix, data_portal): self._end_of_period('minute_perf', packet, ledger) def end_of_session(self, packet, ledger, session, session_ix, data_portal): self._end_of_period('daily_perf', packet, ledger) class Returns(object): """Tracks the daily and cumulative returns of the algorithm. 
""" def _end_of_period(field, packet, ledger, dt, session_ix, data_portal): packet[field]['returns'] = ledger.todays_returns packet['cumulative_perf']['returns'] = ledger.portfolio.returns packet['cumulative_risk_metrics']['algorithm_period_return'] = ( ledger.portfolio.returns ) end_of_bar = partial(_end_of_period, 'minute_perf') end_of_session = partial(_end_of_period, 'daily_perf') class BenchmarkReturnsAndVolatility(object): """Tracks daily and cumulative returns for the benchmark as well as the volatility of the benchmark returns. """ def start_of_simulation(self, ledger, emission_rate, trading_calendar, sessions, benchmark_source): daily_returns_series = benchmark_source.daily_returns( sessions[0], sessions[-1], ) self._daily_returns = daily_returns_array = daily_returns_series.values self._daily_cumulative_returns = ( np.cumprod(1 + daily_returns_array) - 1 ) self._daily_annual_volatility = ( daily_returns_series.expanding(2).std(ddof=1) * np.sqrt(252) ).values if emission_rate == 'daily': self._minute_cumulative_returns = NamedExplodingObject( 'self._minute_cumulative_returns', 'does not exist in daily emission rate', ) self._minute_annual_volatility = NamedExplodingObject( 'self._minute_annual_volatility', 'does not exist in daily emission rate', ) else: open_ = trading_calendar.session_open(sessions[0]) close = trading_calendar.session_close(sessions[-1]) returns = benchmark_source.get_range(open_, close) self._minute_cumulative_returns = ( (1 + returns).cumprod() - 1 ) self._minute_annual_volatility = pd.Series( minute_annual_volatility( returns.index.normalize().view('int64'), returns.values, daily_returns_array, ), index=returns.index, ) def end_of_bar(self, packet, ledger, dt, session_ix, data_portal): r = self._minute_cumulative_returns[dt] if np.isnan(r): r = None packet['cumulative_risk_metrics']['benchmark_period_return'] = r v = self._minute_annual_volatility[dt] if np.isnan(v): v = None packet['cumulative_risk_metrics']['benchmark_volatility'] = v def end_of_session(self, packet, ledger, session, session_ix, data_portal): r = self._daily_cumulative_returns[session_ix] if np.isnan(r): r = None packet['cumulative_risk_metrics']['benchmark_period_return'] = r v = self._daily_annual_volatility[session_ix] if np.isnan(v): v = None packet['cumulative_risk_metrics']['benchmark_volatility'] = v class PNL(object): """Tracks daily and cumulative PNL. """ def start_of_simulation(self, ledger, emission_rate, trading_calendar, sessions, benchmark_source): self._previous_pnl = 0.0 def start_of_session(self, ledger, session, data_portal): self._previous_pnl = ledger.portfolio.pnl def _end_of_period(self, field, packet, ledger): pnl = ledger.portfolio.pnl packet[field]['pnl'] = pnl - self._previous_pnl packet['cumulative_perf']['pnl'] = ledger.portfolio.pnl def end_of_bar(self, packet, ledger, dt, session_ix, data_portal): self._end_of_period('minute_perf', packet, ledger) def end_of_session(self, packet, ledger, session, session_ix, data_portal): self._end_of_period('daily_perf', packet, ledger) class CashFlow(object): """Tracks daily and cumulative cash flow. Notes ----- For historical reasons, this field is named 'capital_used' in the packets. 
""" def start_of_simulation(self, ledger, emission_rate, trading_calendar, sessions, benchmark_source): self._previous_cash_flow = 0.0 def end_of_bar(self, packet, ledger, dt, session_ix, data_portal): cash_flow = ledger.portfolio.cash_flow packet['minute_perf']['capital_used'] = ( cash_flow - self._previous_cash_flow ) packet['cumulative_perf']['capital_used'] = cash_flow def end_of_session(self, packet, ledger, session, session_ix, data_portal): cash_flow = ledger.portfolio.cash_flow packet['daily_perf']['capital_used'] = ( cash_flow - self._previous_cash_flow ) packet['cumulative_perf']['capital_used'] = cash_flow self._previous_cash_flow = cash_flow class Orders(object): """Tracks daily orders. """ def end_of_bar(self, packet, ledger, dt, session_ix, data_portal): packet['minute_perf']['orders'] = ledger.orders(dt) def end_of_session(self, packet, ledger, dt, session_ix, data_portal): packet['daily_perf']['orders'] = ledger.orders() class Transactions(object): """Tracks daily transactions. """ def end_of_bar(self, packet, ledger, dt, session_ix, data_portal): packet['minute_perf']['transactions'] = ledger.transactions(dt) def end_of_session(self, packet, ledger, dt, session_ix, data_portal): packet['daily_perf']['transactions'] = ledger.transactions() class Positions(object): """Tracks daily positions. """ def end_of_bar(self, packet, ledger, dt, session_ix, data_portal): packet['minute_perf']['positions'] = ledger.positions(dt) def end_of_session(self, packet, ledger, dt, session_ix, data_portal): packet['daily_perf']['positions'] = ledger.positions() class ReturnsStatistic(object): """A metric that reports an end of simulation scalar or time series computed from the algorithm returns. Parameters ---------- function : callable The function to call on the daily returns. field_name : str, optional The name of the field. If not provided, it will be ``function.__name__``. """ def __init__(self, function, field_name=None): if field_name is None: field_name = function.__name__ self._function = function self._field_name = field_name def end_of_bar(self, packet, ledger, dt, session_ix, data_portal): res = self._function(ledger.daily_returns_array[:session_ix + 1]) if not np.isfinite(res): res = None packet['cumulative_risk_metrics'][self._field_name] = res end_of_session = end_of_bar class AlphaBeta(object): """End of simulation alpha and beta to the benchmark. """ def start_of_simulation(self, ledger, emission_rate, trading_calendar, sessions, benchmark_source): self._daily_returns_array = benchmark_source.daily_returns( sessions[0], sessions[-1], ).values def end_of_bar(self, packet, ledger, dt, session_ix, data_portal): risk = packet['cumulative_risk_metrics'] alpha, beta = ep.alpha_beta_aligned( ledger.daily_returns_array[:session_ix + 1], self._daily_returns_array[:session_ix + 1], ) if not np.isfinite(alpha): alpha = None if np.isnan(beta): beta = None risk['alpha'] = alpha risk['beta'] = beta end_of_session = end_of_bar class MaxLeverage(object): """Tracks the maximum account leverage. """ def start_of_simulation(self, *args): self._max_leverage = 0.0 def end_of_bar(self, packet, ledger, dt, session_ix, data_portal): self._max_leverage = max(self._max_leverage, ledger.account.leverage) packet['cumulative_risk_metrics']['max_leverage'] = self._max_leverage end_of_session = end_of_bar class NumTradingDays(object): """Report the number of trading days. 
""" def start_of_simulation(self, *args): self._num_trading_days = 0 def start_of_session(self, *args): self._num_trading_days += 1 def end_of_bar(self, packet, ledger, dt, session_ix, data_portal): packet['cumulative_risk_metrics']['trading_days'] = ( self._num_trading_days ) end_of_session = end_of_bar class _ConstantCumulativeRiskMetric(object): """A metric which does not change, ever. Notes ----- This exists to maintain the existing structure of the perf packets. We should kill this as soon as possible. """ def __init__(self, field, value): self._field = field self._value = value def end_of_bar(self, packet, *args): packet['cumulative_risk_metrics'][self._field] = self._value def end_of_session(self, packet, *args): packet['cumulative_risk_metrics'][self._field] = self._value class PeriodLabel(object): """Backwards compat, please kill me. """ def start_of_session(self, ledger, session, data_portal): self._label = session.strftime('%Y-%m') def end_of_bar(self, packet, *args): packet['cumulative_risk_metrics']['period_label'] = self._label end_of_session = end_of_bar class _ClassicRiskMetrics(object): """Produces original risk packet. """ def start_of_simulation(self, ledger, emission_rate, trading_calendar, sessions, benchmark_source): self._leverages = np.full_like(sessions, np.nan, dtype='float64') def end_of_session(self, packet, ledger, dt, session_ix, data_portal): self._leverages[session_ix] = ledger.account.leverage @classmethod def risk_metric_period(cls, start_session, end_session, algorithm_returns, benchmark_returns, algorithm_leverages): """ Creates a dictionary representing the state of the risk report. Parameters ---------- start_session : pd.Timestamp Start of period (inclusive) to produce metrics on end_session : pd.Timestamp End of period (inclusive) to produce metrics on algorithm_returns : pd.Series(pd.Timestamp -> float) Series of algorithm returns as of the end of each session benchmark_returns : pd.Series(pd.Timestamp -> float) Series of benchmark returns as of the end of each session algorithm_leverages : pd.Series(pd.Timestamp -> float) Series of algorithm leverages as of the end of each session Returns ------- risk_metric : dict[str, any] Dict of metrics that with fields like: { 'algorithm_period_return': 0.0, 'benchmark_period_return': 0.0, 'treasury_period_return': 0, 'excess_return': 0.0, 'alpha': 0.0, 'beta': 0.0, 'sharpe': 0.0, 'sortino': 0.0, 'period_label': '1970-01', 'trading_days': 0, 'algo_volatility': 0.0, 'benchmark_volatility': 0.0, 'max_drawdown': 0.0, 'max_leverage': 0.0, } """ algorithm_returns = algorithm_returns[ (algorithm_returns.index >= start_session) & (algorithm_returns.index <= end_session) ] # Benchmark needs to be masked to the same dates as the algo returns benchmark_returns = benchmark_returns[ (benchmark_returns.index >= start_session) & (benchmark_returns.index <= algorithm_returns.index[-1]) ] benchmark_period_returns = ep.cum_returns(benchmark_returns).iloc[-1] algorithm_period_returns = ep.cum_returns(algorithm_returns).iloc[-1] alpha, beta = ep.alpha_beta_aligned( algorithm_returns.values, benchmark_returns.values, ) benchmark_volatility = ep.annual_volatility(benchmark_returns) sharpe = ep.sharpe_ratio(algorithm_returns) # The consumer currently expects a 0.0 value for sharpe in period, # this differs from cumulative which was np.nan. # When factoring out the sharpe_ratio, the different return types # were collapsed into `np.nan`. 
# TODO: Either fix consumer to accept `np.nan` or make the # `sharpe_ratio` return type configurable. # In the meantime, convert nan values to 0.0 if pd.isnull(sharpe): sharpe = 0.0 sortino = ep.sortino_ratio( algorithm_returns.values, _downside_risk=ep.downside_risk(algorithm_returns.values), ) rval = { 'algorithm_period_return': algorithm_period_returns, 'benchmark_period_return': benchmark_period_returns, 'treasury_period_return': 0, 'excess_return': algorithm_period_returns, 'alpha': alpha, 'beta': beta, 'sharpe': sharpe, 'sortino': sortino, 'period_label': end_session.strftime("%Y-%m"), 'trading_days': len(benchmark_returns), 'algo_volatility': ep.annual_volatility(algorithm_returns), 'benchmark_volatility': benchmark_volatility, 'max_drawdown': ep.max_drawdown(algorithm_returns.values), 'max_leverage': algorithm_leverages.max(), } # check if a field in rval is nan or inf, and replace it with None # except period_label which is always a str return { k: ( None if k != 'period_label' and not np.isfinite(v) else v ) for k, v in iteritems(rval) } @classmethod def _periods_in_range(cls, months, end_session, end_date, algorithm_returns, benchmark_returns, algorithm_leverages, months_per): if months.size < months_per: return end_date = end_date for period_timestamp in months: period = period_timestamp.to_period(freq='%dM' % months_per) if period.end_time.tz_localize('utc') > end_date: break yield cls.risk_metric_period( start_session=period.start_time.tz_localize('utc'), end_session=min(period.end_time.tz_localize('utc'), end_session), algorithm_returns=algorithm_returns, benchmark_returns=benchmark_returns, algorithm_leverages=algorithm_leverages, ) @classmethod def risk_report(cls, algorithm_returns, benchmark_returns, algorithm_leverages): start_session = algorithm_returns.index[0] end_session = algorithm_returns.index[-1] end = end_session.replace(day=1) + relativedelta(months=1) months = pd.date_range( start=start_session, # Ensure we have at least one month end=end - datetime.timedelta(days=1), freq='M', tz='utc', ) periods_in_range = partial( cls._periods_in_range, months=months, end_session=end_session, end_date=end, algorithm_returns=algorithm_returns, benchmark_returns=benchmark_returns, algorithm_leverages=algorithm_leverages, ) return { 'one_month': list(periods_in_range(months_per=1)), 'three_month': list(periods_in_range(months_per=3)), 'six_month': list(periods_in_range(months_per=6)), 'twelve_month': list(periods_in_range(months_per=12)), } def end_of_simulation(self, packet, ledger, trading_calendar, sessions, data_portal, benchmark_source): packet.update(self.risk_report( algorithm_returns=ledger.daily_returns_series, benchmark_returns=benchmark_source.daily_returns( sessions[0], sessions[-1], ), algorithm_leverages=self._leverages, ))
zipline-trader
/zipline_trader-1.6.1-cp36-cp36m-win32.whl/zipline/finance/metrics/metric.py
metric.py
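A sketch of a user-defined metric following the same hook protocol as the classes in metric.py above. The built-in metrics only implement the hooks they need, so the tracker is assumed to tolerate missing ones; the class, the 'positions_count' field name, and the registry name 'with_positions_count' are made up for this example.

from zipline.finance.metrics import register, load


class PositionsCount(object):
    """Record how many positions the ledger holds at each emission."""

    def end_of_bar(self, packet, ledger, dt, session_ix, data_portal):
        # Mirrors the built-in Positions metric, which calls ledger.positions(dt).
        packet['minute_perf']['positions_count'] = len(ledger.positions(dt))

    def end_of_session(self, packet, ledger, session, session_ix, data_portal):
        packet['daily_perf']['positions_count'] = len(ledger.positions())


@register('with_positions_count')        # hypothetical registry name
def with_positions_count():
    metrics = load('default')            # fresh copy of the default set
    metrics.add(PositionsCount())
    return metrics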
from textwrap import dedent from functools import partial from numpy import ( bool_, dtype, float32, float64, int32, int64, int16, uint16, ndarray, uint32, uint8, ) from six import iteritems from toolz import merge_with from zipline.errors import ( WindowLengthNotPositive, WindowLengthTooLong, ) from zipline.lib.labelarray import LabelArray from zipline.utils.numpy_utils import ( datetime64ns_dtype, float64_dtype, int64_dtype, uint8_dtype, ) from zipline.utils.memoize import lazyval # These class names are all the same because of our bootleg templating system. from ._float64window import AdjustedArrayWindow as Float64Window from ._int64window import AdjustedArrayWindow as Int64Window from ._labelwindow import AdjustedArrayWindow as LabelWindow from ._uint8window import AdjustedArrayWindow as UInt8Window BOOL_DTYPES = frozenset( map(dtype, [bool_, uint8]), ) FLOAT_DTYPES = frozenset( map(dtype, [float32, float64]), ) INT_DTYPES = frozenset( # NOTE: uint64 not supported because it can't be safely cast to int64. map(dtype, [int16, uint16, int32, int64, uint32]), ) DATETIME_DTYPES = frozenset( map(dtype, ['datetime64[ns]', 'datetime64[D]']), ) # We use object arrays for strings. OBJECT_DTYPES = frozenset(map(dtype, ['O'])) STRING_KINDS = frozenset(['S', 'U']) REPRESENTABLE_DTYPES = BOOL_DTYPES.union( FLOAT_DTYPES, INT_DTYPES, DATETIME_DTYPES, OBJECT_DTYPES, ) def can_represent_dtype(dtype): """ Can we build an AdjustedArray for a baseline of `dtype``? """ return dtype in REPRESENTABLE_DTYPES or dtype.kind in STRING_KINDS def is_categorical(dtype): """ Do we represent this dtype with LabelArrays rather than ndarrays? """ return dtype in OBJECT_DTYPES or dtype.kind in STRING_KINDS CONCRETE_WINDOW_TYPES = { float64_dtype: Float64Window, int64_dtype: Int64Window, uint8_dtype: UInt8Window, } def _normalize_array(data, missing_value): """ Coerce buffer data for an AdjustedArray into a standard scalar representation, returning the coerced array and a dict of argument to pass to np.view to use when providing a user-facing view of the underlying data. - float* data is coerced to float64 with viewtype float64. - int32, int64, and uint32 are converted to int64 with viewtype int64. - datetime[*] data is coerced to int64 with a viewtype of datetime64[ns]. - bool_ data is coerced to uint8 with a viewtype of bool_. Parameters ---------- data : np.ndarray Returns ------- coerced, view_kwargs : (np.ndarray, np.dtype) The input ``data`` array coerced to the appropriate pipeline type. This may return the original array or a view over the same data. """ if isinstance(data, LabelArray): return data, {} data_dtype = data.dtype if data_dtype in BOOL_DTYPES: return data.astype(uint8, copy=False), {'dtype': dtype(bool_)} elif data_dtype in FLOAT_DTYPES: return data.astype(float64, copy=False), {'dtype': dtype(float64)} elif data_dtype in INT_DTYPES: return data.astype(int64, copy=False), {'dtype': dtype(int64)} elif is_categorical(data_dtype): if not isinstance(missing_value, LabelArray.SUPPORTED_SCALAR_TYPES): raise TypeError( "Invalid missing_value for categorical array.\n" "Expected None, bytes or unicode. Got %r." 
% missing_value, ) return LabelArray(data, missing_value), {} elif data_dtype.kind == 'M': try: outarray = data.astype('datetime64[ns]', copy=False).view('int64') return outarray, {'dtype': datetime64ns_dtype} except OverflowError: raise ValueError( "AdjustedArray received a datetime array " "not representable as datetime64[ns].\n" "Min Date: %s\n" "Max Date: %s\n" % (data.min(), data.max()) ) else: raise TypeError( "Don't know how to construct AdjustedArray " "on data of type %s." % data_dtype ) def _merge_simple(adjustment_lists, front_idx, back_idx): """ Merge lists of new and existing adjustments for a given index by appending or prepending new adjustments to existing adjustments. Notes ----- This method is meant to be used with ``toolz.merge_with`` to merge adjustment mappings. In case of a collision ``adjustment_lists`` contains two lists, existing adjustments at index 0 and new adjustments at index 1. When there are no collisions, ``adjustment_lists`` contains a single list. Parameters ---------- adjustment_lists : list[list[Adjustment]] List(s) of new and/or existing adjustments for a given index. front_idx : int Index of list in ``adjustment_lists`` that should be used as baseline in case of a collision. back_idx : int Index of list in ``adjustment_lists`` that should extend baseline list in case of a collision. Returns ------- adjustments : list[Adjustment] List of merged adjustments for a given index. """ if len(adjustment_lists) == 1: return list(adjustment_lists[0]) else: return adjustment_lists[front_idx] + adjustment_lists[back_idx] _merge_methods = { 'append': partial(_merge_simple, front_idx=0, back_idx=1), 'prepend': partial(_merge_simple, front_idx=1, back_idx=0), } class AdjustedArray(object): """ An array that can be iterated with a variable-length window, and which can provide different views on data from different perspectives. Parameters ---------- data : np.ndarray The baseline data values. This array may be mutated by ``traverse(..., copy=False)`` calls. adjustments : dict[int -> list[Adjustment]] A dict mapping row indices to lists of adjustments to apply when we reach that row. missing_value : object A value to use to fill missing data in yielded windows. Should be a value coercible to `data.dtype`. """ __slots__ = ( '_data', '_view_kwargs', 'adjustments', 'missing_value', '_invalidated', '__weakref__', ) def __init__(self, data, adjustments, missing_value): self._data, self._view_kwargs = _normalize_array(data, missing_value) self.adjustments = adjustments self.missing_value = missing_value self._invalidated = False def copy(self): """Copy an adjusted array, deep-copying the ``data`` array. """ if self._invalidated: raise ValueError('cannot copy invalidated AdjustedArray') return type(self)( self.data.copy(order='F'), self.adjustments, self.missing_value, ) def update_adjustments(self, adjustments, method): """ Merge ``adjustments`` with existing adjustments, handling index collisions according to ``method``. Parameters ---------- adjustments : dict[int -> list[Adjustment]] The mapping of row indices to lists of adjustments that should be appended to existing adjustments. method : {'append', 'prepend'} How to handle index collisions. If 'append', new adjustments will be applied after previously-existing adjustments. If 'prepend', new adjustments will be applied before previously-existing adjustments. 
""" try: merge_func = _merge_methods[method] except KeyError: raise ValueError( "Invalid merge method %s\n" "Valid methods are: %s" % (method, ', '.join(_merge_methods)) ) self.adjustments = merge_with( merge_func, self.adjustments, adjustments, ) @property def data(self): """ The data stored in this array. """ return self._data.view(**self._view_kwargs) @lazyval def dtype(self): """ The dtype of the data stored in this array. """ return self._view_kwargs.get('dtype') or self._data.dtype @lazyval def _iterator_type(self): """ The iterator produced when `traverse` is called on this Array. """ if isinstance(self._data, LabelArray): return LabelWindow return CONCRETE_WINDOW_TYPES[self._data.dtype] def traverse(self, window_length, offset=0, perspective_offset=0, copy=True): """ Produce an iterator rolling windows rows over our data. Each emitted window will have `window_length` rows. Parameters ---------- window_length : int The number of rows in each emitted window. offset : int, optional Number of rows to skip before the first window. Default is 0. perspective_offset : int, optional Number of rows past the end of the current window from which to "view" the underlying data. copy : bool, optional Copy the underlying data. If ``copy=False``, the adjusted array will be invalidated and cannot be traversed again. """ if self._invalidated: raise ValueError('cannot traverse invalidated AdjustedArray') data = self._data if copy: data = data.copy(order='F') else: self._invalidated = True _check_window_params(data, window_length) return self._iterator_type( data, self._view_kwargs, self.adjustments, offset, window_length, perspective_offset, rounding_places=None, ) def inspect(self): """ Return a string representation of the data stored in this array. """ return dedent( """\ Adjusted Array ({dtype}): Data: {data!r} Adjustments: {adjustments} """ ).format( dtype=self.dtype.name, data=self.data, adjustments=self.adjustments, ) def update_labels(self, func): """ Map a function over baseline and adjustment values in place. Note that the baseline data values must be a LabelArray. """ if not isinstance(self.data, LabelArray): raise TypeError( 'update_labels only supported if data is of type LabelArray.' ) # Map the baseline values. self._data = self._data.map(func) # Map each of the adjustments. for _, row_adjustments in iteritems(self.adjustments): for adjustment in row_adjustments: adjustment.value = func(adjustment.value) def ensure_adjusted_array(ndarray_or_adjusted_array, missing_value): if isinstance(ndarray_or_adjusted_array, AdjustedArray): return ndarray_or_adjusted_array elif isinstance(ndarray_or_adjusted_array, ndarray): return AdjustedArray( ndarray_or_adjusted_array, {}, missing_value, ) else: raise TypeError( "Can't convert %s to AdjustedArray" % type(ndarray_or_adjusted_array).__name__ ) def ensure_ndarray(ndarray_or_adjusted_array): """ Return the input as a numpy ndarray. This is a no-op if the input is already an ndarray. If the input is an adjusted_array, this extracts a read-only view of its internal data buffer. Parameters ---------- ndarray_or_adjusted_array : numpy.ndarray | zipline.data.adjusted_array Returns ------- out : The input, converted to an ndarray. 
""" if isinstance(ndarray_or_adjusted_array, ndarray): return ndarray_or_adjusted_array elif isinstance(ndarray_or_adjusted_array, AdjustedArray): return ndarray_or_adjusted_array.data else: raise TypeError( "Can't convert %s to ndarray" % type(ndarray_or_adjusted_array).__name__ ) def _check_window_params(data, window_length): """ Check that a window of length `window_length` is well-defined on `data`. Parameters ---------- data : np.ndarray[ndim=2] The array of data to check. window_length : int Length of the desired window. Returns ------- None Raises ------ WindowLengthNotPositive If window_length < 1. WindowLengthTooLong If window_length is greater than the number of rows in `data`. """ if window_length < 1: raise WindowLengthNotPositive(window_length=window_length) if window_length > data.shape[0]: raise WindowLengthTooLong( nrows=data.shape[0], window_length=window_length, )
zipline-trader
/zipline_trader-1.6.1-cp36-cp36m-win32.whl/zipline/lib/adjusted_array.py
adjusted_array.py
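A sketch of constructing and traversing the AdjustedArray defined above. Float64Multiply and its positional (first_row, last_row, first_col, last_col, value) signature are assumed to come from zipline.lib.adjustment; the baseline values and the 2:1-split interpretation are purely illustrative.

import numpy as np

from zipline.lib.adjusted_array import AdjustedArray
from zipline.lib.adjustment import Float64Multiply

baseline = np.arange(12, dtype=np.float64).reshape(4, 3)
adjustments = {
    # When traversal reaches row 2, halve column 0 for rows 0-1,
    # e.g. to reflect a 2:1 split learned about on that date.
    2: [Float64Multiply(0, 1, 0, 0, 0.5)],  # first_row, last_row, first_col, last_col, value
}
arr = AdjustedArray(baseline, adjustments, missing_value=np.nan)

# Each emitted window has window_length rows; rows before index 2 are
# rescaled once the window end reaches the adjustment row.
for window in arr.traverse(window_length=2):
    print(window)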
from functools import partial, total_ordering from operator import eq, ne import re import numpy as np from numpy import ndarray import pandas as pd from toolz import compose from zipline.utils.compat import unicode from zipline.utils.functional import instance from zipline.utils.preprocess import preprocess from zipline.utils.sentinel import sentinel from zipline.utils.input_validation import ( coerce, expect_kinds, expect_types, optional, ) from zipline.utils.numpy_utils import ( bool_dtype, unsigned_int_dtype_with_size_in_bytes, is_object, object_dtype, ) from zipline.utils.pandas_utils import ignore_pandas_nan_categorical_warning from ._factorize import ( factorize_strings, factorize_strings_known_categories, smallest_uint_that_can_hold, ) def compare_arrays(left, right): "Eq check with a short-circuit for identical objects." return ( left is right or ((left.shape == right.shape) and (left == right).all()) ) def _make_unsupported_method(name): def method(*args, **kwargs): raise NotImplementedError( "Method %s is not supported on LabelArrays." % name ) method.__name__ = name method.__doc__ = "Unsupported LabelArray Method: %s" % name return method class MissingValueMismatch(ValueError): """ Error raised on attempt to perform operations between LabelArrays with mismatched missing_values. """ def __init__(self, left, right): super(MissingValueMismatch, self).__init__( "LabelArray missing_values don't match:" " left={}, right={}".format(left, right) ) class CategoryMismatch(ValueError): """ Error raised on attempt to perform operations between LabelArrays with mismatched category arrays. """ def __init__(self, left, right): (mismatches,) = np.where(left != right) assert len(mismatches), "Not actually a mismatch!" super(CategoryMismatch, self).__init__( "LabelArray categories don't match:\n" "Mismatched Indices: {mismatches}\n" "Left: {left}\n" "Right: {right}".format( mismatches=mismatches, left=left[mismatches], right=right[mismatches], ) ) _NotPassed = sentinel('_NotPassed') class LabelArray(ndarray): """ An ndarray subclass for working with arrays of strings. Factorizes the input array into integers, but overloads equality on strings to check against the factor label. Parameters ---------- values : array-like Array of values that can be passed to np.asarray with dtype=object. missing_value : str Scalar value to treat as 'missing' for operations on ``self``. categories : list[str], optional List of values to use as categories. If not supplied, categories will be inferred as the unique set of entries in ``values``. sort : bool, optional Whether to sort categories. If sort is False and categories is supplied, they are left in the order provided. If sort is False and categories is None, categories will be constructed in a random order. Attributes ---------- categories : ndarray[str] An array containing the unique labels of self. reverse_categories : dict[str -> int] Reverse lookup table for ``categories``. Stores the index in ``categories`` at which each entry each unique entry is found. missing_value : str or None A sentinel missing value with NaN semantics for comparisons. Notes ----- Consumers should be cautious when passing instances of LabelArray to numpy functions. We attempt to disallow as many meaningless operations as possible, but since a LabelArray is just an ndarray of ints with some additional metadata, many numpy functions (for example, trigonometric) will happily accept a LabelArray and treat its values as though they were integers. 
In a future change, we may be able to disallow more numerical operations by creating a wrapper dtype which doesn't register an implementation for most numpy ufuncs. Until that change is made, consumers of LabelArray should assume that it is undefined behavior to pass a LabelArray to any numpy ufunc that operates on semantically-numerical data. See Also -------- https://docs.scipy.org/doc/numpy-1.11.0/user/basics.subclassing.html """ SUPPORTED_SCALAR_TYPES = (bytes, unicode, type(None)) SUPPORTED_NON_NONE_SCALAR_TYPES = (bytes, unicode) @preprocess( values=coerce(list, partial(np.asarray, dtype=object)), # Coerce ``list`` to ``list`` to make a copy. Code internally may call # ``categories.insert(0, missing_value)`` which will mutate this list # in place. categories=coerce((list, np.ndarray, set), list), ) @expect_types( values=np.ndarray, missing_value=SUPPORTED_SCALAR_TYPES, categories=optional(list), ) @expect_kinds(values=("O", "S", "U")) def __new__(cls, values, missing_value, categories=None, sort=True): # Numpy's fixed-width string types aren't very efficient. Working with # object arrays is faster than bytes or unicode arrays in almost all # cases. if not is_object(values): values = values.astype(object) if values.flags.f_contiguous: ravel_order = 'F' else: ravel_order = 'C' if categories is None: codes, categories, reverse_categories = factorize_strings( values.ravel(ravel_order), missing_value=missing_value, sort=sort, ) else: codes, categories, reverse_categories = ( factorize_strings_known_categories( values.ravel(ravel_order), categories=categories, missing_value=missing_value, sort=sort, ) ) categories.setflags(write=False) return cls.from_codes_and_metadata( codes=codes.reshape(values.shape, order=ravel_order), categories=categories, reverse_categories=reverse_categories, missing_value=missing_value, ) @classmethod def from_codes_and_metadata(cls, codes, categories, reverse_categories, missing_value): """ Rehydrate a LabelArray from the codes and metadata. Parameters ---------- codes : np.ndarray[integral] The codes for the label array. categories : np.ndarray[object] The unique string categories. reverse_categories : dict[str, int] The mapping from category to its code-index. missing_value : any The value used to represent missing data. """ ret = codes.view(type=cls, dtype=np.void) ret._categories = categories ret._reverse_categories = reverse_categories ret._missing_value = missing_value return ret @classmethod def from_categorical(cls, categorical, missing_value=None): """ Create a LabelArray from a pandas categorical. Parameters ---------- categorical : pd.Categorical The categorical object to convert. missing_value : bytes, unicode, or None, optional The missing value to use for this LabelArray. Returns ------- la : LabelArray The LabelArray representation of this categorical. """ return LabelArray( categorical, missing_value, categorical.categories, ) @property def categories(self): # This is a property because it should be immutable. return self._categories @property def reverse_categories(self): # This is a property because it should be immutable. return self._reverse_categories @property def missing_value(self): # This is a property because it should be immutable. return self._missing_value @property def missing_value_code(self): return self.reverse_categories[self.missing_value] def has_label(self, value): return value in self.reverse_categories def __array_finalize__(self, obj): """ Called by Numpy after array construction. 
There are three cases where this can happen: 1. Someone tries to directly construct a new array by doing:: >>> ndarray.__new__(LabelArray, ...) # doctest: +SKIP In this case, obj will be None. We treat this as an error case and fail. 2. Someone (most likely our own __new__) does:: >>> other_array.view(type=LabelArray) # doctest: +SKIP In this case, `self` will be the new LabelArray instance, and ``obj` will be the array on which ``view`` is being called. The caller of ``obj.view`` is responsible for setting category metadata on ``self`` after we exit. 3. Someone creates a new LabelArray by slicing an existing one. In this case, ``obj`` will be the original LabelArray. We're responsible for copying over the parent array's category metadata. """ if obj is None: raise TypeError( "Direct construction of LabelArrays is not supported." ) # See docstring for an explanation of when these will or will not be # set. self._categories = getattr(obj, 'categories', None) self._reverse_categories = getattr(obj, 'reverse_categories', None) self._missing_value = getattr(obj, 'missing_value', None) def as_int_array(self): """ Convert self into a regular ndarray of ints. This is an O(1) operation. It does not copy the underlying data. """ return self.view( type=ndarray, dtype=unsigned_int_dtype_with_size_in_bytes(self.itemsize), ) def as_string_array(self): """ Convert self back into an array of strings. This is an O(N) operation. """ return self.categories[self.as_int_array()] def as_categorical(self): """ Coerce self into a pandas categorical. This is only defined on 1D arrays, since that's all pandas supports. """ if len(self.shape) > 1: raise ValueError("Can't convert a 2D array to a categorical.") with ignore_pandas_nan_categorical_warning(): return pd.Categorical.from_codes( self.as_int_array(), # We need to make a copy because pandas >= 0.17 fails if this # buffer isn't writeable. self.categories.copy(), ordered=False, ) def as_categorical_frame(self, index, columns, name=None): """ Coerce self into a pandas DataFrame of Categoricals. """ if len(self.shape) != 2: raise ValueError( "Can't convert a non-2D LabelArray into a DataFrame." ) expected_shape = (len(index), len(columns)) if expected_shape != self.shape: raise ValueError( "Can't construct a DataFrame with provided indices:\n\n" "LabelArray shape is {actual}, but index and columns imply " "that shape should be {expected}.".format( actual=self.shape, expected=expected_shape, ) ) return pd.Series( index=pd.MultiIndex.from_product([index, columns]), data=self.ravel().as_categorical(), name=name, ).unstack() def __setitem__(self, indexer, value): self_categories = self.categories if isinstance(value, self.SUPPORTED_SCALAR_TYPES): value_code = self.reverse_categories.get(value, None) if value_code is None: raise ValueError("%r is not in LabelArray categories." 
% value) self.as_int_array()[indexer] = value_code elif isinstance(value, LabelArray): value_categories = value.categories if compare_arrays(self_categories, value_categories): return super(LabelArray, self).__setitem__(indexer, value) elif (self.missing_value == value.missing_value and set(value.categories) <= set(self.categories)): rhs = LabelArray.from_codes_and_metadata( *factorize_strings_known_categories( value.as_string_array().ravel(), list(self.categories), self.missing_value, False, ), missing_value=self.missing_value ).reshape(value.shape) super(LabelArray, self).__setitem__(indexer, rhs) else: raise CategoryMismatch(self_categories, value_categories) else: raise NotImplementedError( "Setting into a LabelArray with a value of " "type {type} is not yet supported.".format( type=type(value).__name__, ), ) def set_scalar(self, indexer, value): """ Set scalar value into the array. Parameters ---------- indexer : any The indexer to set the value at. value : str The value to assign at the given locations. Raises ------ ValueError Raised when ``value`` is not a value element of this this label array. """ try: value_code = self.reverse_categories[value] except KeyError: raise ValueError("%r is not in LabelArray categories." % value) self.as_int_array()[indexer] = value_code def __setslice__(self, i, j, sequence): """ This method was deprecated in Python 2.0. It predates slice objects, but Python 2.7.11 still uses it if you implement it, which ndarray does. In newer Pythons, __setitem__ is always called, but we need to manuallly forward in py2. """ self.__setitem__(slice(i, j), sequence) def __getitem__(self, indexer): result = super(LabelArray, self).__getitem__(indexer) if result.ndim: # Result is still a LabelArray, so we can just return it. return result # Result is a scalar value, which will be an instance of np.void. # Map it back to one of our category entries. index = result.view( unsigned_int_dtype_with_size_in_bytes(self.itemsize), ) return self.categories[index] def is_missing(self): """ Like isnan, but checks for locations where we store missing values. """ return ( self.as_int_array() == self.reverse_categories[self.missing_value] ) def not_missing(self): """ Like ~isnan, but checks for locations where we store missing values. """ return ( self.as_int_array() != self.reverse_categories[self.missing_value] ) def _equality_check(op): """ Shared code for __eq__ and __ne__, parameterized on the actual comparison operator to use. """ def method(self, other): if isinstance(other, LabelArray): self_mv = self.missing_value other_mv = other.missing_value if self_mv != other_mv: raise MissingValueMismatch(self_mv, other_mv) self_categories = self.categories other_categories = other.categories if not compare_arrays(self_categories, other_categories): raise CategoryMismatch(self_categories, other_categories) return ( op(self.as_int_array(), other.as_int_array()) & self.not_missing() & other.not_missing() ) elif isinstance(other, ndarray): # Compare to ndarrays as though we were an array of strings. # This is fairly expensive, and should generally be avoided. 
return op(self.as_string_array(), other) & self.not_missing() elif isinstance(other, self.SUPPORTED_SCALAR_TYPES): i = self._reverse_categories.get(other, -1) return op(self.as_int_array(), i) & self.not_missing() return op(super(LabelArray, self), other) return method __eq__ = _equality_check(eq) __ne__ = _equality_check(ne) del _equality_check def view(self, dtype=_NotPassed, type=_NotPassed): if type is _NotPassed and dtype not in (_NotPassed, self.dtype): raise TypeError("Can't view LabelArray as another dtype.") # The text signature on ndarray.view makes it look like the default # values for dtype and type are `None`, but passing None explicitly has # different semantics than not passing an arg at all, so we reconstruct # the kwargs dict here to simulate the args not being passed at all. kwargs = {} if dtype is not _NotPassed: kwargs['dtype'] = dtype if type is not _NotPassed: kwargs['type'] = type return super(LabelArray, self).view(**kwargs) def astype(self, dtype, order='K', casting='unsafe', subok=True, copy=True): if dtype == self.dtype: if not subok: array = self.view(type=np.ndarray) else: array = self if copy: return array.copy() return array if dtype == object_dtype: return self.as_string_array() if dtype.kind == 'S': return self.as_string_array().astype( dtype, order=order, casting=casting, subok=subok, copy=copy, ) raise TypeError( '%s can only be converted into object, string, or void,' ' got: %r' % ( type(self).__name__, dtype, ), ) # In general, we support resizing, slicing, and reshaping methods, but not # numeric methods. SUPPORTED_NDARRAY_METHODS = frozenset([ 'astype', 'base', 'compress', 'copy', 'data', 'diagonal', 'dtype', 'flat', 'flatten', 'item', 'itemset', 'itemsize', 'nbytes', 'ndim', 'ravel', 'repeat', 'reshape', 'resize', 'setflags', 'shape', 'size', 'squeeze', 'strides', 'swapaxes', 'take', 'trace', 'transpose', 'view' ]) PUBLIC_NDARRAY_METHODS = frozenset([ s for s in dir(ndarray) if not s.startswith('_') ]) # Generate failing wrappers for all unsupported methods. locals().update( { method: _make_unsupported_method(method) for method in PUBLIC_NDARRAY_METHODS - SUPPORTED_NDARRAY_METHODS } ) def __repr__(self): repr_lines = repr(self.as_string_array()).splitlines() repr_lines[0] = repr_lines[0].replace('array(', 'LabelArray(', 1) repr_lines[-1] = repr_lines[-1].rsplit(',', 1)[0] + ')' # The extra spaces here account for the difference in length between # 'array(' and 'LabelArray('. return '\n '.join(repr_lines) def empty_like(self, shape): """ Make an empty LabelArray with the same categories as ``self``, filled with ``self.missing_value``. """ return type(self).from_codes_and_metadata( codes=np.full( shape, self.reverse_categories[self.missing_value], dtype=unsigned_int_dtype_with_size_in_bytes(self.itemsize), ), categories=self.categories, reverse_categories=self.reverse_categories, missing_value=self.missing_value, ) def map_predicate(self, f): """ Map a function from str -> bool element-wise over ``self``. ``f`` will be applied exactly once to each non-missing unique value in ``self``. Missing values will always return False. """ # Functions passed to this are of type str -> bool. Don't ever call # them on None, which is the only non-str value we ever store in # categories. if self.missing_value is None: def f_to_use(x): return False if x is None else f(x) else: f_to_use = f # Call f on each unique value in our categories. 
results = np.vectorize(f_to_use, otypes=[bool_dtype])(self.categories) # missing_value should produce False no matter what results[self.reverse_categories[self.missing_value]] = False # unpack the results form each unique value into their corresponding # locations in our indices. return results[self.as_int_array()] def map(self, f): """ Map a function from str -> str element-wise over ``self``. ``f`` will be applied exactly once to each non-missing unique value in ``self``. Missing values will always map to ``self.missing_value``. """ # f() should only return None if None is our missing value. if self.missing_value is None: allowed_outtypes = self.SUPPORTED_SCALAR_TYPES else: allowed_outtypes = self.SUPPORTED_NON_NONE_SCALAR_TYPES def f_to_use(x, missing_value=self.missing_value, otypes=allowed_outtypes): # Don't call f on the missing value; those locations don't exist # semantically. We return _sortable_sentinel rather than None # because the np.unique call below sorts the categories array, # which raises an error on Python 3 because None and str aren't # comparable. if x == missing_value: return _sortable_sentinel ret = f(x) if not isinstance(ret, otypes): raise TypeError( "LabelArray.map expected function {f} to return a string" " or None, but got {type} instead.\n" "Value was {value}.".format( f=f.__name__, type=type(ret).__name__, value=ret, ) ) if ret == missing_value: return _sortable_sentinel return ret new_categories_with_duplicates = ( np.vectorize(f_to_use, otypes=[object])(self.categories) ) # If f() maps multiple inputs to the same output, then we can end up # with the same code duplicated multiple times. Compress the categories # by running them through np.unique, and then use the reverse lookup # table to compress codes as well. new_categories, bloated_inverse_index = np.unique( new_categories_with_duplicates, return_inverse=True ) if new_categories[0] is _sortable_sentinel: # f_to_use return _sortable_sentinel for locations that should be # missing values in our output. Since np.unique returns the uniques # in sorted order, and since _sortable_sentinel sorts before any # string, we only need to check the first array entry. new_categories[0] = self.missing_value # `reverse_index` will always be a 64 bit integer even if we can hold a # smaller array. reverse_index = bloated_inverse_index.astype( smallest_uint_that_can_hold(len(new_categories)) ) new_codes = np.take(reverse_index, self.as_int_array()) return self.from_codes_and_metadata( new_codes, new_categories, dict(zip(new_categories, range(len(new_categories)))), missing_value=self.missing_value, ) def startswith(self, prefix): """ Element-wise startswith. Parameters ---------- prefix : str Returns ------- matches : np.ndarray[bool] An array with the same shape as self indicating whether each element of self started with ``prefix``. """ return self.map_predicate(lambda elem: elem.startswith(prefix)) def endswith(self, suffix): """ Elementwise endswith. Parameters ---------- suffix : str Returns ------- matches : np.ndarray[bool] An array with the same shape as self indicating whether each element of self ended with ``suffix`` """ return self.map_predicate(lambda elem: elem.endswith(suffix)) def has_substring(self, substring): """ Elementwise contains. Parameters ---------- substring : str Returns ------- matches : np.ndarray[bool] An array with the same shape as self indicating whether each element of self ended with ``suffix``. 
""" return self.map_predicate(lambda elem: substring in elem) @preprocess(pattern=coerce(from_=(bytes, unicode), to=re.compile)) def matches(self, pattern): """ Elementwise regex match. Parameters ---------- pattern : str or compiled regex Returns ------- matches : np.ndarray[bool] An array with the same shape as self indicating whether each element of self was matched by ``pattern``. """ return self.map_predicate(compose(bool, pattern.match)) # These types all implement an O(N) __contains__, so pre-emptively # coerce to `set`. @preprocess(container=coerce((list, tuple, np.ndarray), set)) def element_of(self, container): """ Check if each element of self is an of ``container``. Parameters ---------- container : object An object implementing a __contains__ to call on each element of ``self``. Returns ------- is_contained : np.ndarray[bool] An array with the same shape as self indicating whether each element of self was an element of ``container``. """ return self.map_predicate(container.__contains__) @instance # This makes _sortable_sentinel a singleton instance. @total_ordering class _sortable_sentinel(object): """Dummy object that sorts before any other python object. """ def __eq__(self, other): return self is other def __lt__(self, other): return True @expect_types(trues=LabelArray, falses=LabelArray) def labelarray_where(cond, trues, falses): """LabelArray-aware implementation of np.where. """ if trues.missing_value != falses.missing_value: raise ValueError( "Can't compute where on arrays with different missing values." ) strs = np.where(cond, trues.as_string_array(), falses.as_string_array()) return LabelArray(strs, missing_value=trues.missing_value)
zipline-trader
/zipline_trader-1.6.1-cp36-cp36m-win32.whl/zipline/lib/labelarray.py
labelarray.py
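A short, purely illustrative sketch of the LabelArray behavior described in the docstrings above; the ticker strings are arbitrary.

import numpy as np

from zipline.lib.labelarray import LabelArray

values = np.array([['AAPL', 'MSFT'], ['AAPL', None]], dtype=object)
arr = LabelArray(values, missing_value=None)

arr == 'AAPL'                  # boolean ndarray; missing entries always compare False
arr.is_missing()               # True where the missing_value (None) was stored
arr.startswith('AA')           # element-wise predicate, False for missing entries
arr.map(lambda s: s.lower())   # applied once per unique non-missing label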
import os
import subprocess

import networkx as nx


def debug_mro_failure(name, bases):
    graph = build_linearization_graph(name, bases)
    cycles = sorted(nx.cycles.simple_cycles(graph), key=len)
    cycle = cycles[0]

    if os.environ.get('DRAW_MRO_FAILURES'):
        output_file = name + '.dot'
    else:
        output_file = None

    # Return a nicely formatted error describing the cycle.
    lines = ["Cycle found when trying to compute MRO for {}:\n".format(name)]
    for source, dest in list(zip(cycle, cycle[1:])) + [(cycle[-1], cycle[0])]:
        label = verbosify_label(graph.get_edge_data(source, dest)['label'])
        lines.append("{} comes before {}: cause={}"
                     .format(source, dest, label))

    # Either render a GraphViz graph and tell the user where it went, or tell
    # them how to enable that feature.
    lines.append('')
    if output_file is None:
        lines.append("Set the DRAW_MRO_FAILURES environment variable to"
                     " render a GraphViz graph of this cycle.")
    else:
        try:
            nx.write_dot(graph.subgraph(cycle), output_file)
            subprocess.check_call(['dot', '-T', 'svg', '-O', output_file])
            lines.append(
                "GraphViz rendering written to "
                + output_file + '.svg'
            )
        except Exception as e:
            lines.append(
                "Failed to write GraphViz graph. Error was {}".format(e)
            )

    return '\n'.join(lines)


def build_linearization_graph(child_name, bases):
    g = nx.DiGraph()
    _build_linearization_graph(g, type(child_name, (object,), {}), bases)
    return g


def _build_linearization_graph(g, child, bases):
    add_implicit_edges(g, child, bases)
    add_direct_edges(g, child, bases)


def add_direct_edges(g, child, bases):
    # Enforce that bases are ordered in the order that they appear in child's
    # class declaration.
    g.add_path([b.__name__ for b in bases], label=child.__name__ + '(O)')

    # Add direct edges.
    for base in bases:
        g.add_edge(child.__name__, base.__name__, label=child.__name__ + '(D)')
        add_direct_edges(g, base, base.__bases__)


def add_implicit_edges(g, child, bases):
    # Enforce that bases' previous linearizations are preserved.
    for base in bases:
        g.add_path(
            [b.__name__ for b in base.mro()],
            label=base.__name__ + '(L)',
        )


VERBOSE_LABELS = {
    "(D)": "(Direct Subclass)",
    "(O)": "(Parent Class Order)",
    "(L)": "(Linearization Order)",
}


def verbosify_label(label):
    prefix = label[:-3]
    suffix = label[-3:]
    return " ".join([prefix, VERBOSE_LABELS[suffix]])
zipline-trader
/zipline_trader-1.6.1-cp36-cp36m-win32.whl/zipline/testing/debug.py
debug.py
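A sketch of calling the debug.py helper above by hand when a class definition fails with "Cannot create a consistent method resolution order". The class names A, B, and C are made up for the example.

from zipline.testing.debug import debug_mro_failure


class A(object):
    pass


class B(A):
    pass


try:
    # MRO failure: the bases are ordered so that A precedes its own subclass B.
    type('C', (A, B), {})
except TypeError:
    print(debug_mro_failure('C', (A, B)))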
from collections import OrderedDict from contextlib import contextmanager import datetime from functools import partial import re from nose.tools import ( # noqa assert_almost_equal, assert_almost_equals, assert_dict_contains_subset, assert_false, assert_greater, assert_greater_equal, assert_in, assert_is, assert_is_instance, assert_is_none, assert_is_not, assert_is_not_none, assert_less, assert_less_equal, assert_multi_line_equal, assert_not_almost_equal, assert_not_almost_equals, assert_not_equal, assert_not_equals, assert_not_in, assert_not_is_instance, assert_raises, assert_raises_regexp, assert_regexp_matches, assert_true, assert_tuple_equal, ) import numpy as np import pandas as pd from pandas.testing import ( assert_frame_equal, assert_series_equal, assert_index_equal, ) from six import iteritems, viewkeys, PY2 from six.moves import zip_longest from toolz import dissoc, keyfilter import toolz.curried.operator as op from zipline.assets import Asset from zipline.dispatch import dispatch from zipline.lib.adjustment import Adjustment from zipline.lib.labelarray import LabelArray from zipline.testing.core import ensure_doctest from zipline.utils.compat import getargspec, mappingproxy from zipline.utils.formatting import s from zipline.utils.functional import dzip_exact, instance from zipline.utils.math_utils import tolerant_equals from zipline.utils.numpy_utils import ( assert_array_compare, compare_datetime_arrays, ) @instance @ensure_doctest class wildcard(object): """An object that compares equal to any other object. This is useful when using :func:`~zipline.testing.predicates.assert_equal` with a large recursive structure and some fields to be ignored. Examples -------- >>> wildcard == 5 True >>> wildcard == 'ayy' True # reflected >>> 5 == wildcard True >>> 'ayy' == wildcard True """ @staticmethod def __eq__(other): return True @staticmethod def __ne__(other): return False def __repr__(self): return '<%s>' % type(self).__name__ class instance_of(object): """An object that compares equal to any instance of a given type or types. Parameters ---------- types : type or tuple[type] The types to compare equal to. exact : bool, optional Only compare equal to exact instances, not instances of subclasses? """ def __init__(self, types, exact=False): if not isinstance(types, tuple): types = (types,) for type_ in types: if not isinstance(type_, type): raise TypeError('types must be a type or tuple of types') self.types = types self.exact = exact def __eq__(self, other): if self.exact: return type(other) in self.types return isinstance(other, self.types) def __ne__(self, other): return not self == other def __repr__(self): typenames = tuple(t.__name__ for t in self.types) return '%s(%s%s)' % ( type(self).__name__, ( typenames[0] if len(typenames) == 1 else '(%s)' % ', '.join(typenames) ), ', exact=True' if self.exact else '' ) def keywords(func): """Get the argument names of a function >>> def f(x, y=2): ... pass >>> keywords(f) ['x', 'y'] Notes ----- Taken from odo.utils """ if isinstance(func, type): return keywords(func.__init__) elif isinstance(func, partial): return keywords(func.func) return getargspec(func).args def filter_kwargs(f, kwargs): """Return a dict of valid kwargs for `f` from a subset of `kwargs` Examples -------- >>> def f(a, b=1, c=2): ... return a + b + c ... >>> raw_kwargs = dict(a=1, b=3, d=4) >>> f(**raw_kwargs) Traceback (most recent call last): ... 
TypeError: f() got an unexpected keyword argument 'd' >>> kwargs = filter_kwargs(f, raw_kwargs) >>> f(**kwargs) 6 Notes ----- Taken from odo.utils """ return keyfilter(op.contains(keywords(f)), kwargs) def _fmt_path(path): """Format the path for final display. Parameters ---------- path : iterable of str The path to the values that are not equal. Returns ------- fmtd : str The formatted path to put into the error message. """ if not path: return '' return 'path: _' + ''.join(path) def _fmt_msg(msg): """Format the message for final display. Parameters ---------- msg : str The message to show to the user to provide additional context. returns ------- fmtd : str The formatted message to put into the error message. """ if not msg: return '' return msg + '\n' def _safe_cls_name(cls): try: return cls.__name__ except AttributeError: return repr(cls) def assert_is_subclass(subcls, cls, msg=''): """Assert that ``subcls`` is a subclass of ``cls``. Parameters ---------- subcls : type The type to check. cls : type The type to check ``subcls`` against. msg : str, optional An extra assertion message to print if this fails. """ assert issubclass(subcls, cls), ( '%s is not a subclass of %s\n%s' % ( _safe_cls_name(subcls), _safe_cls_name(cls), msg, ) ) def assert_is_not_subclass(not_subcls, cls, msg=''): """Assert that ``not_subcls`` is not a subclass of ``cls``. Parameters ---------- not_subcls : type The type to check. cls : type The type to check ``not_subcls`` against. msg : str, optional An extra assertion message to print if this fails. """ assert not issubclass(not_subcls, cls), ( '%s is a subclass of %s\n%s' % ( _safe_cls_name(not_subcls), _safe_cls_name(cls), msg, ) ) def assert_regex(result, expected, msg=''): """Assert that ``expected`` matches the result. Parameters ---------- result : str The string to search. expected : str or compiled regex The pattern to search for in ``result``. msg : str, optional An extra assertion message to print if this fails. """ assert re.search(expected, result), ( '%s%r not found in %r' % (_fmt_msg(msg), expected, result) ) @contextmanager def _assert_raises_helper(do_check, exc_type, msg): try: yield except exc_type as e: do_check(e) else: raise AssertionError('%s%s was not raised' % (_fmt_msg(msg), exc_type)) def assert_raises_regex(exc, pattern, msg=''): """Assert that some exception is raised in a context and that the message matches some pattern. Parameters ---------- exc : type or tuple[type] The exception type or types to expect. pattern : str or compiled regex The pattern to search for in the str of the raised exception. msg : str, optional An extra assertion message to print if this fails. """ def check_exception(e): assert re.search(pattern, str(e)), ( '%s%r not found in %r' % (_fmt_msg(msg), pattern, str(e)) ) return _assert_raises_helper( do_check=check_exception, exc_type=exc, msg=msg, ) def assert_raises_str(exc, expected_str, msg=''): """Assert that some exception is raised in a context and that the message exactly matches some string. Parameters ---------- exc : type or tuple[type] The exception type or types to expect. expected_str : str The expected result of ``str(exception)``. msg : str, optional An extra assertion message to print if this fails. 
""" def check_exception(e): result = str(e) assert_messages_equal(result, expected_str, msg=msg) return _assert_raises_helper( check_exception, exc_type=exc, msg=msg, ) def make_assert_equal_assertion_error(assertion_message, path, msg): """Create an assertion error formatted for use in ``assert_equal``. Parameters ---------- assertion_message : str The concrete reason for the failure. path : tuple[str] The path leading up to the failure. msg : str The user supplied message. Returns ------- exception_instance : AssertionError The new exception instance. Notes ----- This doesn't raise the exception, it only returns it. """ return AssertionError( '%s%s\n%s' % ( _fmt_msg(msg), assertion_message, _fmt_path(path), ), ) @dispatch(object, object) def assert_equal(result, expected, path=(), msg='', **kwargs): """Assert that two objects are equal using the ``==`` operator. Parameters ---------- result : object The result that came from the function under test. expected : object The expected result. Raises ------ AssertionError Raised when ``result`` is not equal to ``expected``. """ if result != expected: raise make_assert_equal_assertion_error( '%s != %s' % (result, expected), path, msg, ) @assert_equal.register(float, float) def assert_float_equal(result, expected, path=(), msg='', float_rtol=10e-7, float_atol=10e-7, float_equal_nan=True, **kwargs): assert tolerant_equals( result, expected, rtol=float_rtol, atol=float_atol, equal_nan=float_equal_nan, ), '%s%s != %s with rtol=%s and atol=%s%s\n%s' % ( _fmt_msg(msg), result, expected, float_rtol, float_atol, (' (with nan != nan)' if not float_equal_nan else ''), _fmt_path(path), ) def _check_sets(result, expected, msg, path, type_): """Compare two sets. This is used to check dictionary keys and sets. Parameters ---------- result : set expected : set msg : str path : tuple type : str The type of an element. For dict we use ``'key'`` and for set we use ``'element'``. 
""" if result != expected: if result > expected: diff = result - expected msg = 'extra %s in result: %r' % (s(type_, diff), diff) elif result < expected: diff = expected - result msg = 'result is missing %s: %r' % (s(type_, diff), diff) else: in_result = result - expected in_expected = expected - result msg = '%s only in result: %s\n%s only in expected: %s' % ( s(type_, in_result), in_result, s(type_, in_expected), in_expected, ) raise AssertionError( '%ss do not match\n%s%s' % ( type_, _fmt_msg(msg), _fmt_path(path), ), ) @assert_equal.register(dict, dict) def assert_dict_equal(result, expected, path=(), msg='', **kwargs): _check_sets( viewkeys(result), viewkeys(expected), msg, path + ('.%s()' % ('viewkeys' if PY2 else 'keys'),), 'key', ) failures = [] for k, (resultv, expectedv) in iteritems(dzip_exact(result, expected)): try: assert_equal( resultv, expectedv, path=path + ('[%r]' % (k,),), msg=msg, **kwargs ) except AssertionError as e: failures.append(str(e)) if failures: raise AssertionError('\n===\n'.join(failures)) @assert_equal.register(mappingproxy, mappingproxy) def asssert_mappingproxy_equal(result, expected, path=(), msg='', **kwargs): # mappingproxies compare like dict but shouldn't compare to dicts _check_sets( set(result), set(expected), msg, path + ('.keys()',), 'key', ) failures = [] for k, resultv in iteritems(result): # we know this exists because of the _check_sets call above expectedv = expected[k] try: assert_equal( resultv, expectedv, path=path + ('[%r]' % (k,),), msg=msg, **kwargs ) except AssertionError as e: failures.append(str(e)) if failures: raise AssertionError('\n'.join(failures)) @assert_equal.register(OrderedDict, OrderedDict) def assert_ordereddict_equal(result, expected, path=(), **kwargs): assert_sequence_equal( result.items(), expected.items(), path=path + ('.items()',), **kwargs ) @assert_equal.register(list, list) @assert_equal.register(tuple, tuple) def assert_sequence_equal(result, expected, path=(), msg='', **kwargs): result_len = len(result) expected_len = len(expected) assert result_len == expected_len, ( '%s%s lengths do not match: %d != %d\n%s' % ( _fmt_msg(msg), type(result).__name__, result_len, expected_len, _fmt_path(path), ) ) for n, (resultv, expectedv) in enumerate(zip(result, expected)): assert_equal( resultv, expectedv, path=path + ('[%d]' % n,), msg=msg, **kwargs ) @assert_equal.register(set, set) def assert_set_equal(result, expected, path=(), msg='', **kwargs): _check_sets( result, expected, msg, path, 'element', ) @assert_equal.register(np.ndarray, np.ndarray) def assert_array_equal(result, expected, path=(), msg='', array_verbose=True, array_decimal=None, **kwargs): result_dtype = result.dtype expected_dtype = expected.dtype if result_dtype.kind in 'mM' and expected_dtype.kind in 'mM': assert result_dtype == expected_dtype, ( "\nType mismatch:\n\n" "result dtype: %s\n" "expected dtype: %s\n%s" % (result_dtype, expected_dtype, _fmt_path(path)) ) f = partial( assert_array_compare, compare_datetime_arrays, header='Arrays are not equal', ) elif array_decimal is not None and expected_dtype.kind not in {'O', 'S'}: f = partial( np.testing.assert_array_almost_equal, decimal=array_decimal, ) else: f = np.testing.assert_array_equal try: f( result, expected, verbose=array_verbose, err_msg=msg, ) except AssertionError as e: raise AssertionError('\n'.join((str(e), _fmt_path(path)))) @assert_equal.register(LabelArray, LabelArray) def assert_labelarray_equal(result, expected, path=(), **kwargs): assert_equal( result.categories, 
expected.categories, path=path + ('.categories',), **kwargs ) assert_equal( result.as_int_array(), expected.as_int_array(), path=path + ('.as_int_array()',), **kwargs ) def _register_assert_equal_wrapper(type_, assert_eq): """Register a new check for an ndframe object. Parameters ---------- type_ : type The class to register an ``assert_equal`` dispatch for. assert_eq : callable[type_, type_] The function which checks that if the two ndframes are equal. Returns ------- assert_ndframe_equal : callable[type_, type_] The wrapped function registered with ``assert_equal``. """ @assert_equal.register(type_, type_) def assert_ndframe_equal(result, expected, path=(), msg='', **kwargs): try: assert_eq( result, expected, **filter_kwargs(assert_eq, kwargs) ) except AssertionError as e: raise AssertionError( _fmt_msg(msg) + '\n'.join((str(e), _fmt_path(path))), ) return assert_ndframe_equal assert_frame_equal = _register_assert_equal_wrapper( pd.DataFrame, assert_frame_equal, ) assert_panel_equal = _register_assert_equal_wrapper( pd.Panel, assert_frame_equal, ) assert_series_equal = _register_assert_equal_wrapper( pd.Series, assert_series_equal, ) assert_index_equal = _register_assert_equal_wrapper( pd.Index, assert_index_equal, ) @assert_equal.register(pd.Categorical, pd.Categorical) def assert_categorical_equal(result, expected, path=(), msg='', **kwargs): assert_equal( result.categories, expected.categories, path=path + ('.categories',), msg=msg, **kwargs ) assert_equal( result.codes, expected.codes, path=path + ('.codes',), msg=msg, **kwargs ) @assert_equal.register(Adjustment, Adjustment) def assert_adjustment_equal(result, expected, path=(), **kwargs): for attr in ('first_row', 'last_row', 'first_col', 'last_col', 'value'): assert_equal( getattr(result, attr), getattr(expected, attr), path=path + ('.' + attr,), **kwargs ) @assert_equal.register( (datetime.datetime, np.datetime64), (datetime.datetime, np.datetime64), ) def assert_timestamp_and_datetime_equal(result, expected, path=(), msg='', allow_datetime_coercions=False, compare_nat_equal=True, **kwargs): """ Branch for comparing python datetime (which includes pandas Timestamp) and np.datetime64 as equal. Returns raises unless ``allow_datetime_coercions`` is passed as True. 
""" assert allow_datetime_coercions or type(result) == type(expected), ( "%sdatetime types (%s, %s) don't match and " "allow_datetime_coercions was not set.\n%s" % ( _fmt_msg(msg), type(result), type(expected), _fmt_path(path), ) ) if isinstance(result, pd.Timestamp) and isinstance(expected, pd.Timestamp): assert_equal( result.tz, expected.tz, path=path + ('.tz',), msg=msg, **kwargs ) result = pd.Timestamp(result) expected = pd.Timestamp(expected) if compare_nat_equal and pd.isnull(result) and pd.isnull(expected): return assert_equal.dispatch(object, object)( result, expected, path=path, msg=msg, **kwargs ) @assert_equal.register(slice, slice) def assert_slice_equal(result, expected, path=(), msg=''): diff_start = ( ('starts are not equal: %s != %s' % (result.start, result.stop)) if result.start != expected.start else '' ) diff_stop = ( ('stops are not equal: %s != %s' % (result.stop, result.stop)) if result.stop != expected.stop else '' ) diff_step = ( ('steps are not equal: %s != %s' % (result.step, result.stop)) if result.step != expected.step else '' ) diffs = diff_start, diff_stop, diff_step assert not any(diffs), '%s%s\n%s' % ( _fmt_msg(msg), '\n'.join(filter(None, diffs)), _fmt_path(path), ) @assert_equal.register(Asset, Asset) def assert_asset_equal(result, expected, path=(), msg='', **kwargs): if type(result) is not type(expected): raise AssertionError( '%sresult type differs from expected type: %s is not %s\n%s', _fmt_msg(msg), type(result).__name__, type(expected).__name__, _fmt_path(path), ) assert_equal( result.to_dict(), expected.to_dict(), path=path + ('.to_dict()',), msg=msg, **kwargs ) def assert_isidentical(result, expected, msg=''): assert result.isidentical(expected), ( '%s%s is not identical to %s' % (_fmt_msg(msg), result, expected) ) def assert_messages_equal(result, expected, msg=''): """Assertion helper for comparing very long strings (e.g. error messages). """ # The arg here is "keepends" which keeps trailing newlines (which # matters for checking trailing whitespace). You can't pass keepends by # name :(. left_lines = result.splitlines(True) right_lines = expected.splitlines(True) iter_lines = enumerate(zip_longest(left_lines, right_lines)) for line, (ll, rl) in iter_lines: if ll != rl: col = index_of_first_difference(ll, rl) raise AssertionError( "{msg}Messages differ on line {line}, col {col}:" "\n{ll!r}\n!=\n{rl!r}".format( msg=_fmt_msg(msg), line=line, col=col, ll=ll, rl=rl ) ) def index_of_first_difference(left, right): """Get the index of the first difference between two strings.""" difflocs = (i for (i, (lc, rc)) in enumerate(zip_longest(left, right)) if lc != rc) try: return next(difflocs) except StopIteration: raise ValueError("Left was equal to right!") try: # pull the dshape cases in from datashape.util.testing import assert_dshape_equal except ImportError: pass else: assert_equal.funcs.update( dissoc(assert_dshape_equal.funcs, (object, object)), )
zipline-trader
/zipline_trader-1.6.1-cp36-cp36m-win32.whl/zipline/testing/predicates.py
predicates.py
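A brief, hedged sketch of how the dispatch-based assert_equal and the wildcard / instance_of matchers above might be used (the dict contents are invented for illustration; it assumes the module is importable as zipline.testing.predicates, per the path shown):

import numpy as np
from zipline.testing.predicates import (
    assert_equal,
    assert_raises_regex,
    instance_of,
    wildcard,
)

# dict/dict dispatch checks the key sets first, then recurses into the values;
# wildcard matches any value and instance_of matches by type.
assert_equal(
    {'sid': 1, 'symbol': 'AAPL', 'exchange': 'NYSE'},
    {'sid': 1, 'symbol': instance_of(str), 'exchange': wildcard},
)

# ndarray/ndarray dispatch routes through numpy's array comparison helpers.
assert_equal(np.array([1.0, 2.0]), np.array([1.0, 2.0]))

# Context-manager form: the block must raise and the message must match.
with assert_raises_regex(ValueError, r'expected \d+ items'):
    raise ValueError('expected 5 items, got 3')

On failure, assert_equal raises an AssertionError whose message ends with a path such as "path: _['symbol']" pointing at the first mismatching element, which is what the _fmt_path helper above produces.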
import os import sqlite3 from unittest import TestCase import warnings from logbook import NullHandler, Logger import numpy as np import pandas as pd from pandas.errors import PerformanceWarning from six import with_metaclass, iteritems, itervalues, PY2 import responses from toolz import flip, groupby, merge from trading_calendars import ( get_calendar, register_calendar_alias, ) import h5py import zipline from zipline.algorithm import TradingAlgorithm from zipline.assets import Equity, Future from zipline.assets.continuous_futures import CHAIN_PREDICATES from zipline.data.benchmarks import get_benchmark_returns_from_file from zipline.data.fx import DEFAULT_FX_RATE from zipline.finance.asset_restrictions import NoRestrictions from zipline.utils.memoize import classlazyval from zipline.pipeline import SimplePipelineEngine from zipline.pipeline.data import USEquityPricing from zipline.pipeline.data.testing import TestingDataSet from zipline.pipeline.domain import GENERIC, US_EQUITIES from zipline.pipeline.loaders import USEquityPricingLoader from zipline.pipeline.loaders.testing import make_seeded_random_loader from zipline.protocol import BarData from zipline.utils.compat import ExitStack from zipline.utils.paths import ensure_directory, ensure_directory_containing from .core import ( create_daily_bar_data, create_minute_bar_data, make_simple_equity_info, tmp_asset_finder, tmp_dir, write_hdf5_daily_bars, ) from .debug import debug_mro_failure from ..data.adjustments import ( SQLiteAdjustmentReader, SQLiteAdjustmentWriter, ) from ..data.bcolz_daily_bars import ( BcolzDailyBarReader, BcolzDailyBarWriter, ) from ..data.data_portal import ( DataPortal, DEFAULT_MINUTE_HISTORY_PREFETCH, DEFAULT_DAILY_HISTORY_PREFETCH, ) from ..data.fx import ( InMemoryFXRateReader, HDF5FXRateReader, HDF5FXRateWriter, ) from ..data.hdf5_daily_bars import ( HDF5DailyBarReader, HDF5DailyBarWriter, MultiCountryDailyBarReader, ) from ..data.minute_bars import ( BcolzMinuteBarReader, BcolzMinuteBarWriter, US_EQUITIES_MINUTES_PER_DAY, FUTURES_MINUTES_PER_DAY, ) from ..data.resample import ( minute_frame_to_session_frame, MinuteResampleSessionBarReader ) from ..finance.trading import SimulationParameters from ..utils.classproperty import classproperty from ..utils.final import FinalMeta, final from ..utils.memoize import remember_last zipline_dir = os.path.dirname(zipline.__file__) class DebugMROMeta(FinalMeta): """Metaclass that helps debug MRO resolution errors. """ def __new__(mcls, name, bases, clsdict): try: return super(DebugMROMeta, mcls).__new__( mcls, name, bases, clsdict ) except TypeError as e: if "(MRO)" in str(e): msg = debug_mro_failure(name, bases) raise TypeError(msg) else: raise class ZiplineTestCase(with_metaclass(DebugMROMeta, TestCase)): """ Shared extensions to core unittest.TestCase. Overrides the default unittest setUp/tearDown functions with versions that use ExitStack to correctly clean up resources, even in the face of exceptions that occur during setUp/setUpClass. Subclasses **should not override setUp or setUpClass**! Instead, they should implement `init_instance_fixtures` for per-test-method resources, and `init_class_fixtures` for per-class resources. Resources that need to be cleaned up should be registered using either `enter_{class,instance}_context` or `add_{class,instance}_callback}. """ _in_setup = False @final @classmethod def setUpClass(cls): # Hold a set of all the "static" attributes on the class. 
These are # things that are not populated after the class was created like # methods or other class level attributes. cls._static_class_attributes = set(vars(cls)) cls._class_teardown_stack = ExitStack() try: cls._base_init_fixtures_was_called = False cls.init_class_fixtures() assert cls._base_init_fixtures_was_called, ( "ZiplineTestCase.init_class_fixtures() was not called.\n" "This probably means that you overrode init_class_fixtures" " without calling super()." ) except BaseException: # Clean up even on KeyboardInterrupt cls.tearDownClass() raise @classmethod def init_class_fixtures(cls): """ Override and implement this classmethod to register resources that should be created and/or torn down on a per-class basis. Subclass implementations of this should always invoke this with super() to ensure that fixture mixins work properly. """ if cls._in_setup: raise ValueError( 'Called init_class_fixtures from init_instance_fixtures.' ' Did you write super(..., self).init_class_fixtures() instead' ' of super(..., self).init_instance_fixtures()?', ) cls._base_init_fixtures_was_called = True @final @classmethod def tearDownClass(cls): # We need to get this before it's deleted by the loop. stack = cls._class_teardown_stack for name in set(vars(cls)) - cls._static_class_attributes: # Remove all of the attributes that were added after the class was # constructed. This cleans up any large test data that is class # scoped while still allowing subclasses to access class level # attributes. delattr(cls, name) stack.close() @final @classmethod def enter_class_context(cls, context_manager): """ Enter a context manager to be exited during the tearDownClass """ if cls._in_setup: raise ValueError( 'Attempted to enter a class context in init_instance_fixtures.' '\nDid you mean to call enter_instance_context?', ) return cls._class_teardown_stack.enter_context(context_manager) @final @classmethod def add_class_callback(cls, callback, *args, **kwargs): """ Register a callback to be executed during tearDownClass. Parameters ---------- callback : callable The callback to invoke at the end of the test suite. """ if cls._in_setup: raise ValueError( 'Attempted to add a class callback in init_instance_fixtures.' '\nDid you mean to call add_instance_callback?', ) return cls._class_teardown_stack.callback(callback, *args, **kwargs) @final def setUp(self): type(self)._in_setup = True self._pre_setup_attrs = set(vars(self)) self._instance_teardown_stack = ExitStack() try: self._init_instance_fixtures_was_called = False self.init_instance_fixtures() assert self._init_instance_fixtures_was_called, ( "ZiplineTestCase.init_instance_fixtures() was not" " called.\n" "This probably means that you overrode" " init_instance_fixtures without calling super()." ) except BaseException: # Clean up even on KeyboardInterrupt self.tearDown() raise finally: type(self)._in_setup = False def init_instance_fixtures(self): self._init_instance_fixtures_was_called = True @final def tearDown(self): # We need to get this before it's deleted by the loop. stack = self._instance_teardown_stack for attr in set(vars(self)) - self._pre_setup_attrs: delattr(self, attr) stack.close() @final def enter_instance_context(self, context_manager): """ Enter a context manager that should be exited during tearDown. """ return self._instance_teardown_stack.enter_context(context_manager) @final def add_instance_callback(self, callback): """ Register a callback to be executed during tearDown. 
Parameters ---------- callback : callable The callback to invoke at the end of each test. """ return self._instance_teardown_stack.callback(callback) if PY2: def assertRaisesRegex(self, *args, **kwargs): return self.assertRaisesRegexp(*args, **kwargs) def alias(attr_name): """Make a fixture attribute an alias of another fixture's attribute by default. Parameters ---------- attr_name : str The name of the attribute to alias. Returns ------- p : classproperty A class property that does the property aliasing. Examples -------- >>> class C(object): ... attr = 1 ... >>> class D(C): ... attr_alias = alias('attr') ... >>> D.attr 1 >>> D.attr_alias 1 >>> class E(D): ... attr_alias = 2 ... >>> E.attr 1 >>> E.attr_alias 2 """ return classproperty(flip(getattr, attr_name)) class WithDefaultDateBounds(with_metaclass(DebugMROMeta, object)): """ ZiplineTestCase mixin which makes it possible to synchronize date bounds across fixtures. This fixture should always be the last fixture in bases of any fixture or test case that uses it. Attributes ---------- START_DATE : datetime END_DATE : datetime The date bounds to be used for fixtures that want to have consistent dates. """ START_DATE = pd.Timestamp('2006-01-03', tz='utc') END_DATE = pd.Timestamp('2006-12-29', tz='utc') class WithLogger(object): """ ZiplineTestCase mixin providing cls.log_handler as an instance-level fixture. After init_instance_fixtures has been called `self.log_handler` will be a new ``logbook.NullHandler``. Methods ------- make_log_handler() -> logbook.LogHandler A class method which constructs the new log handler object. By default this will construct a ``NullHandler``. """ make_log_handler = NullHandler @classmethod def init_class_fixtures(cls): super(WithLogger, cls).init_class_fixtures() cls.log = Logger() cls.log_handler = cls.enter_class_context( cls.make_log_handler().applicationbound(), ) class WithAssetFinder(WithDefaultDateBounds): """ ZiplineTestCase mixin providing cls.asset_finder as a class-level fixture. After init_class_fixtures has been called, `cls.asset_finder` is populated with an AssetFinder. Attributes ---------- ASSET_FINDER_EQUITY_SIDS : iterable[int] The default sids to construct equity data for. ASSET_FINDER_EQUITY_SYMBOLS : iterable[str] The default symbols to use for the equities. ASSET_FINDER_EQUITY_START_DATE : datetime The default start date to create equity data for. This defaults to ``START_DATE``. ASSET_FINDER_EQUITY_END_DATE : datetime The default end date to create equity data for. This defaults to ``END_DATE``. ASSET_FINDER_EQUITY_NAMES: iterable[str] The default names to use for the equities. ASSET_FINDER_EQUITY_EXCHANGE : str The default exchange to assign each equity. ASSET_FINDER_COUNTRY_CODE : str The default country code to assign each exchange. Methods ------- make_equity_info() -> pd.DataFrame A class method which constructs the dataframe of equity info to write to the class's asset db. By default this is empty. make_futures_info() -> pd.DataFrame A class method which constructs the dataframe of futures contract info to write to the class's asset db. By default this is empty. make_exchanges_info() -> pd.DataFrame A class method which constructs the dataframe of exchange information to write to the class's assets db. By default this is empty. make_root_symbols_info() -> pd.DataFrame A class method which constructs the dataframe of root symbols information to write to the class's assets db. By default this is empty. 
make_asset_finder_db_url() -> string A class method which returns the URL at which to create the SQLAlchemy engine. By default provides a URL for an in-memory database. make_asset_finder() -> pd.DataFrame A class method which constructs the actual asset finder object to use for the class. If this method is overridden then the ``make_*_info`` methods may not be respected. See Also -------- zipline.testing.make_simple_equity_info zipline.testing.make_jagged_equity_info zipline.testing.make_rotating_equity_info zipline.testing.make_future_info zipline.testing.make_commodity_future_info """ ASSET_FINDER_EQUITY_SIDS = ord('A'), ord('B'), ord('C') ASSET_FINDER_EQUITY_SYMBOLS = None ASSET_FINDER_EQUITY_NAMES = None ASSET_FINDER_EQUITY_EXCHANGE = 'TEST' ASSET_FINDER_EQUITY_START_DATE = alias('START_DATE') ASSET_FINDER_EQUITY_END_DATE = alias('END_DATE') ASSET_FINDER_FUTURE_CHAIN_PREDICATES = CHAIN_PREDICATES ASSET_FINDER_COUNTRY_CODE = '??' @classmethod def _make_info(cls, *args): return None make_futures_info = _make_info make_exchanges_info = _make_info make_root_symbols_info = _make_info make_equity_supplementary_mappings = _make_info del _make_info @classmethod def make_equity_info(cls): return make_simple_equity_info( cls.ASSET_FINDER_EQUITY_SIDS, cls.ASSET_FINDER_EQUITY_START_DATE, cls.ASSET_FINDER_EQUITY_END_DATE, cls.ASSET_FINDER_EQUITY_SYMBOLS, cls.ASSET_FINDER_EQUITY_NAMES, cls.ASSET_FINDER_EQUITY_EXCHANGE, ) @classmethod def make_asset_finder_db_url(cls): return 'sqlite:///:memory:' @classmethod def make_asset_finder(cls): """Returns a new AssetFinder Returns ------- asset_finder : zipline.assets.AssetFinder """ equities = cls.make_equity_info() futures = cls.make_futures_info() root_symbols = cls.make_root_symbols_info() exchanges = cls.make_exchanges_info(equities, futures, root_symbols) if exchanges is None: exchange_names = [ df['exchange'] for df in (equities, futures, root_symbols) if df is not None ] if exchange_names: exchanges = pd.DataFrame({ 'exchange': pd.concat(exchange_names).unique(), 'country_code': cls.ASSET_FINDER_COUNTRY_CODE, }) return cls.enter_class_context(tmp_asset_finder( url=cls.make_asset_finder_db_url(), equities=equities, futures=futures, exchanges=exchanges, root_symbols=root_symbols, equity_supplementary_mappings=( cls.make_equity_supplementary_mappings() ), future_chain_predicates=cls.ASSET_FINDER_FUTURE_CHAIN_PREDICATES, )) @classmethod def init_class_fixtures(cls): super(WithAssetFinder, cls).init_class_fixtures() cls.asset_finder = cls.make_asset_finder() @classlazyval def all_assets(cls): """A list of Assets for all sids in cls.asset_finder. """ return cls.asset_finder.retrieve_all(cls.asset_finder.sids) @classlazyval def exchange_names(cls): """A list of canonical exchange names for all exchanges in this suite. """ infos = itervalues(cls.asset_finder.exchange_info) return sorted(i.canonical_name for i in infos) @classlazyval def assets_by_calendar(cls): """A dict from calendar -> list of assets with that calendar. """ return groupby(lambda a: get_calendar(a.exchange), cls.all_assets) @classlazyval def all_calendars(cls): """A list of all calendars for assets in this test suite. """ return list(cls.assets_by_calendar) # TODO_SS: The API here doesn't make sense in a multi-country test scenario. class WithTradingCalendars(object): """ ZiplineTestCase mixin providing cls.trading_calendar, cls.all_trading_calendars, cls.trading_calendar_for_asset_type as a class-level fixture. 
After ``init_class_fixtures`` has been called: - `cls.trading_calendar` is populated with a default of the nyse trading calendar for compatibility with existing tests - `cls.all_trading_calendars` is populated with the trading calendars keyed by name, - `cls.trading_calendar_for_asset_type` is populated with the trading calendars keyed by the asset type which uses the respective calendar. Attributes ---------- TRADING_CALENDAR_STRS : iterable iterable of identifiers of the calendars to use. TRADING_CALENDAR_FOR_ASSET_TYPE : dict A dictionary which maps asset type names to the calendar associated with that asset type. """ TRADING_CALENDAR_STRS = ('NYSE',) TRADING_CALENDAR_FOR_ASSET_TYPE = {Equity: 'NYSE', Future: 'us_futures'} # For backwards compatibility, exisitng tests and fixtures refer to # `trading_calendar` with the assumption that the value is the NYSE # calendar. TRADING_CALENDAR_PRIMARY_CAL = 'NYSE' @classmethod def init_class_fixtures(cls): super(WithTradingCalendars, cls).init_class_fixtures() cls.trading_calendars = {} # Silence `pandas.errors.PerformanceWarning: Non-vectorized DateOffset # being applied to Series or DatetimeIndex` in trading calendar # construction. This causes nosetest to fail. with warnings.catch_warnings(): warnings.simplefilter("ignore", PerformanceWarning) for cal_str in ( set(cls.TRADING_CALENDAR_STRS) | {cls.TRADING_CALENDAR_PRIMARY_CAL} ): # Set name to allow aliasing. calendar = get_calendar(cal_str) setattr(cls, '{0}_calendar'.format(cal_str.lower()), calendar) cls.trading_calendars[cal_str] = calendar type_to_cal = iteritems(cls.TRADING_CALENDAR_FOR_ASSET_TYPE) for asset_type, cal_str in type_to_cal: calendar = get_calendar(cal_str) cls.trading_calendars[asset_type] = calendar cls.trading_calendar = ( cls.trading_calendars[cls.TRADING_CALENDAR_PRIMARY_CAL] ) STATIC_BENCHMARK_PATH = os.path.join( zipline_dir, 'resources', 'market_data', 'SPY_benchmark.csv', ) @remember_last def read_checked_in_benchmark_data(): return get_benchmark_returns_from_file(STATIC_BENCHMARK_PATH) class WithBenchmarkReturns(WithDefaultDateBounds, WithTradingCalendars): """ ZiplineTestCase mixin providing cls.benchmark_returns as a class-level attribute. """ _default_treasury_curves = None @classproperty def BENCHMARK_RETURNS(cls): benchmark_returns = read_checked_in_benchmark_data() # Zipline ordinarily uses cached benchmark returns data, but when # running the zipline tests this cache is not always updated to include # the appropriate dates required by both the futures and equity # calendars. In order to create more reliable and consistent data # throughout the entirety of the tests, we read static benchmark # returns files from source. If a test using this fixture attempts to # run outside of the static date range of the csv files, raise an # exception warning the user to either update the csv files in source # or to use a date range within the current bounds. static_start_date = benchmark_returns.index[0].date() static_end_date = benchmark_returns.index[-1].date() warning_message = ( 'The WithBenchmarkReturns fixture uses static data between ' '{static_start} and {static_end}. 
To use a start and end date ' 'of {given_start} and {given_end} you will have to update the ' 'file in {benchmark_path} to include the missing dates.'.format( static_start=static_start_date, static_end=static_end_date, given_start=cls.START_DATE.date(), given_end=cls.END_DATE.date(), benchmark_path=STATIC_BENCHMARK_PATH, ) ) if cls.START_DATE.date() < static_start_date or \ cls.END_DATE.date() > static_end_date: raise AssertionError(warning_message) return benchmark_returns class WithSimParams(WithDefaultDateBounds): """ ZiplineTestCase mixin providing cls.sim_params as a class level fixture. Attributes ---------- SIM_PARAMS_CAPITAL_BASE : float SIM_PARAMS_DATA_FREQUENCY : {'daily', 'minute'} SIM_PARAMS_EMISSION_RATE : {'daily', 'minute'} Forwarded to ``SimulationParameters``. SIM_PARAMS_START : datetime SIM_PARAMS_END : datetime Forwarded to ``SimulationParameters``. If not explicitly overridden these will be ``START_DATE`` and ``END_DATE`` Methods ------- make_simparams(**overrides) Construct a ``SimulationParameters`` using the defaults defined by fixture configuration attributes. Any parameters to ``SimulationParameters`` can be overridden by passing them by keyword. See Also -------- zipline.finance.trading.SimulationParameters """ SIM_PARAMS_CAPITAL_BASE = 1.0e5 SIM_PARAMS_DATA_FREQUENCY = 'daily' SIM_PARAMS_EMISSION_RATE = 'daily' SIM_PARAMS_START = alias('START_DATE') SIM_PARAMS_END = alias('END_DATE') @classmethod def make_simparams(cls, **overrides): kwargs = dict( start_session=cls.SIM_PARAMS_START, end_session=cls.SIM_PARAMS_END, capital_base=cls.SIM_PARAMS_CAPITAL_BASE, data_frequency=cls.SIM_PARAMS_DATA_FREQUENCY, emission_rate=cls.SIM_PARAMS_EMISSION_RATE, trading_calendar=cls.trading_calendar, ) kwargs.update(overrides) return SimulationParameters(**kwargs) @classmethod def init_class_fixtures(cls): super(WithSimParams, cls).init_class_fixtures() cls.sim_params = cls.make_simparams() class WithTradingSessions(WithDefaultDateBounds, WithTradingCalendars): """ ZiplineTestCase mixin providing cls.trading_days, cls.all_trading_sessions as a class-level fixture. After init_class_fixtures has been called, `cls.all_trading_sessions` is populated with a dictionary of calendar name to the DatetimeIndex containing the calendar trading days ranging from: (DATA_MAX_DAY - (cls.TRADING_DAY_COUNT) -> DATA_MAX_DAY) `cls.trading_days`, for compatibility with existing tests which make the assumption that trading days are equity only, defaults to the nyse trading sessions. Attributes ---------- DATA_MAX_DAY : datetime The most recent trading day in the calendar. TRADING_DAY_COUNT : int The number of days to put in the calendar. The default value of ``TRADING_DAY_COUNT`` is 126 (half a trading-year). Inheritors can override TRADING_DAY_COUNT to request more or less data. """ DATA_MIN_DAY = alias('START_DATE') DATA_MAX_DAY = alias('END_DATE') # For backwards compatibility, exisitng tests and fixtures refer to # `trading_days` with the assumption that the value is days of the NYSE # calendar. 
trading_days = alias('nyse_sessions') @classmethod def init_class_fixtures(cls): super(WithTradingSessions, cls).init_class_fixtures() cls.trading_sessions = {} for cal_str in cls.TRADING_CALENDAR_STRS: trading_calendar = cls.trading_calendars[cal_str] start_date = cls.DATA_MIN_DAY end_date = cls.DATA_MAX_DAY if not start_date.tzinfo: start_date = start_date.tz_localize('utc') if not end_date.tzinfo: end_date = end_date.tz_localize('utc') sessions = trading_calendar.sessions_in_range( start_date, end_date) # Set name for aliasing. setattr(cls, '{0}_sessions'.format(cal_str.lower()), sessions) cls.trading_sessions[cal_str] = sessions class WithTmpDir(object): """ ZiplineTestCase mixing providing cls.tmpdir as a class-level fixture. After init_class_fixtures has been called, `cls.tmpdir` is populated with a `testfixtures.TempDirectory` object whose path is `cls.TMP_DIR_PATH`. Attributes ---------- TMP_DIR_PATH : str The path to the new directory to create. By default this is None which will create a unique directory in /tmp. """ TMP_DIR_PATH = None @classmethod def init_class_fixtures(cls): super(WithTmpDir, cls).init_class_fixtures() cls.tmpdir = cls.enter_class_context( tmp_dir(path=cls.TMP_DIR_PATH), ) class WithInstanceTmpDir(object): """ ZiplineTestCase mixing providing self.tmpdir as an instance-level fixture. After init_instance_fixtures has been called, `self.tmpdir` is populated with a `testfixtures.TempDirectory` object whose path is `cls.TMP_DIR_PATH`. Attributes ---------- INSTANCE_TMP_DIR_PATH : str The path to the new directory to create. By default this is None which will create a unique directory in /tmp. """ INSTANCE_TMP_DIR_PATH = None def init_instance_fixtures(self): super(WithInstanceTmpDir, self).init_instance_fixtures() self.instance_tmpdir = self.enter_instance_context( tmp_dir(path=self.INSTANCE_TMP_DIR_PATH), ) class WithEquityDailyBarData(WithAssetFinder, WithTradingCalendars): """ ZiplineTestCase mixin providing cls.make_equity_daily_bar_data. Attributes ---------- EQUITY_DAILY_BAR_START_DATE : Timestamp The date at to which to start creating data. This defaults to ``START_DATE``. EQUITY_DAILY_BAR_END_DATE = Timestamp The end date up to which to create data. This defaults to ``END_DATE``. EQUITY_DAILY_BAR_SOURCE_FROM_MINUTE : bool If this flag is set, `make_equity_daily_bar_data` will read data from the minute bars defined by `WithEquityMinuteBarData`. The current default is `False`, but could be `True` in the future. EQUITY_DAILY_BAR_COUNTRY_CODES : tuple The countres to create data for. By default this is populated with all of the countries present in the asset finder. Methods ------- make_equity_daily_bar_data(country_code, sids) make_equity_daily_bar_currency_codes(country_code, sids) See Also -------- WithEquityMinuteBarData zipline.testing.create_daily_bar_data """ # noqa EQUITY_DAILY_BAR_START_DATE = alias('START_DATE') EQUITY_DAILY_BAR_END_DATE = alias('END_DATE') EQUITY_DAILY_BAR_SOURCE_FROM_MINUTE = None @classproperty def EQUITY_DAILY_BAR_LOOKBACK_DAYS(cls): # If we're sourcing from minute data, then we almost certainly want the # minute bar calendar to be aligned with the daily bar calendar, so # re-use the same lookback parameter. 
if cls.EQUITY_DAILY_BAR_SOURCE_FROM_MINUTE: return cls.EQUITY_MINUTE_BAR_LOOKBACK_DAYS else: return 0 @classproperty def EQUITY_DAILY_BAR_COUNTRY_CODES(cls): return cls.asset_finder.country_codes @classmethod def _make_equity_daily_bar_from_minute(cls): assert issubclass(cls, WithEquityMinuteBarData), \ "Can't source daily data from minute without minute data!" assets = cls.asset_finder.retrieve_all(cls.asset_finder.equities_sids) minute_data = dict(cls.make_equity_minute_bar_data()) for asset in assets: yield asset.sid, minute_frame_to_session_frame( minute_data[asset.sid], cls.trading_calendars[Equity]) @classmethod def make_equity_daily_bar_data(cls, country_code, sids): """ Create daily pricing data. Parameters ---------- country_code : str An ISO 3166 alpha-2 country code. Data should be created for this country. sids : tuple[int] The sids to include in the data. Yields ------ (int, pd.DataFrame) A sid, dataframe pair to be passed to a daily bar writer. The dataframe should be indexed by date, with columns of ('open', 'high', 'low', 'close', 'volume', 'day', & 'id'). """ # Requires a WithEquityMinuteBarData to come before in the MRO. # Resample that data so that daily and minute bar data are aligned. if cls.EQUITY_DAILY_BAR_SOURCE_FROM_MINUTE: return cls._make_equity_daily_bar_from_minute() else: return create_daily_bar_data(cls.equity_daily_bar_days, sids) @classmethod def make_equity_daily_bar_currency_codes(cls, country_code, sids): """Create listing currencies. Default is to list all assets in USD. Parameters ---------- country_code : str An ISO 3166 alpha-2 country code. Data should be created for this country. sids : tuple[int] The sids to include in the data. Returns ------- currency_codes : pd.Series[int, str] Map from sids to currency for that sid's prices. """ return pd.Series(index=list(sids), data='USD') @classmethod def init_class_fixtures(cls): super(WithEquityDailyBarData, cls).init_class_fixtures() trading_calendar = cls.trading_calendars[Equity] if trading_calendar.is_session(cls.EQUITY_DAILY_BAR_START_DATE): first_session = cls.EQUITY_DAILY_BAR_START_DATE else: first_session = trading_calendar.minute_to_session_label( pd.Timestamp(cls.EQUITY_DAILY_BAR_START_DATE) ) if cls.EQUITY_DAILY_BAR_LOOKBACK_DAYS > 0: first_session = trading_calendar.sessions_window( first_session, -1 * cls.EQUITY_DAILY_BAR_LOOKBACK_DAYS )[0] days = trading_calendar.sessions_in_range( first_session, cls.EQUITY_DAILY_BAR_END_DATE, ) cls.equity_daily_bar_days = days class WithFutureDailyBarData(WithAssetFinder, WithTradingCalendars): """ ZiplineTestCase mixin providing cls.make_future_daily_bar_data. Attributes ---------- FUTURE_DAILY_BAR_START_DATE : Timestamp The date at to which to start creating data. This defaults to ``START_DATE``. FUTURE_DAILY_BAR_END_DATE = Timestamp The end date up to which to create data. This defaults to ``END_DATE``. FUTURE_DAILY_BAR_SOURCE_FROM_MINUTE : bool If this flag is set, `make_future_daily_bar_data` will read data from the minute bars defined by `WithFutureMinuteBarData`. The current default is `False`, but could be `True` in the future. Methods ------- make_future_daily_bar_data() -> iterable[(int, pd.DataFrame)] A class method that returns an iterator of (sid, dataframe) pairs which will be written to the bcolz files that the class's ``BcolzDailyBarReader`` will read from. 
By default this creates some simple synthetic data with :func:`~zipline.testing.create_daily_bar_data` See Also -------- WithFutureMinuteBarData zipline.testing.create_daily_bar_data """ FUTURE_DAILY_BAR_USE_FULL_CALENDAR = False FUTURE_DAILY_BAR_START_DATE = alias('START_DATE') FUTURE_DAILY_BAR_END_DATE = alias('END_DATE') FUTURE_DAILY_BAR_SOURCE_FROM_MINUTE = None @classproperty def FUTURE_DAILY_BAR_LOOKBACK_DAYS(cls): # If we're sourcing from minute data, then we almost certainly want the # minute bar calendar to be aligned with the daily bar calendar, so # re-use the same lookback parameter. if cls.FUTURE_DAILY_BAR_SOURCE_FROM_MINUTE: return cls.FUTURE_MINUTE_BAR_LOOKBACK_DAYS else: return 0 @classmethod def _make_future_daily_bar_from_minute(cls): assert issubclass(cls, WithFutureMinuteBarData), \ "Can't source daily data from minute without minute data!" assets = cls.asset_finder.retrieve_all(cls.asset_finder.futures_sids) minute_data = dict(cls.make_future_minute_bar_data()) for asset in assets: yield asset.sid, minute_frame_to_session_frame( minute_data[asset.sid], cls.trading_calendars[Future]) @classmethod def make_future_daily_bar_data(cls): # Requires a WithFutureMinuteBarData to come before in the MRO. # Resample that data so that daily and minute bar data are aligned. if cls.FUTURE_DAILY_BAR_SOURCE_FROM_MINUTE: return cls._make_future_daily_bar_from_minute() else: return create_daily_bar_data( cls.future_daily_bar_days, cls.asset_finder.futures_sids, ) @classmethod def init_class_fixtures(cls): super(WithFutureDailyBarData, cls).init_class_fixtures() trading_calendar = cls.trading_calendars[Future] if cls.FUTURE_DAILY_BAR_USE_FULL_CALENDAR: days = trading_calendar.all_sessions else: if trading_calendar.is_session(cls.FUTURE_DAILY_BAR_START_DATE): first_session = cls.FUTURE_DAILY_BAR_START_DATE else: first_session = trading_calendar.minute_to_session_label( pd.Timestamp(cls.FUTURE_DAILY_BAR_START_DATE) ) if cls.FUTURE_DAILY_BAR_LOOKBACK_DAYS > 0: first_session = trading_calendar.sessions_window( first_session, -1 * cls.FUTURE_DAILY_BAR_LOOKBACK_DAYS )[0] days = trading_calendar.sessions_in_range( first_session, cls.FUTURE_DAILY_BAR_END_DATE, ) cls.future_daily_bar_days = days class WithBcolzEquityDailyBarReader(WithEquityDailyBarData, WithTmpDir): """ ZiplineTestCase mixin providing cls.bcolz_daily_bar_path, cls.bcolz_daily_bar_ctable, and cls.bcolz_equity_daily_bar_reader class level fixtures. After init_class_fixtures has been called: - `cls.bcolz_daily_bar_path` is populated with `cls.tmpdir.getpath(cls.BCOLZ_DAILY_BAR_PATH)`. - `cls.bcolz_daily_bar_ctable` is populated with data returned from `cls.make_equity_daily_bar_data`. By default this calls :func:`zipline.pipeline.loaders.synthetic.make_equity_daily_bar_data`. - `cls.bcolz_equity_daily_bar_reader` is a daily bar reader pointing to the directory that was just written to. Attributes ---------- BCOLZ_DAILY_BAR_PATH : str The path inside the tmpdir where this will be written. EQUITY_DAILY_BAR_LOOKBACK_DAYS : int The number of days of data to add before the first day. This is used when a test needs to use history, in which case this should be set to the largest history window that will be requested. EQUITY_DAILY_BAR_USE_FULL_CALENDAR : bool If this flag is set the ``equity_daily_bar_days`` will be the full set of trading days from the trading environment. This flag overrides ``EQUITY_DAILY_BAR_LOOKBACK_DAYS``. 
BCOLZ_DAILY_BAR_READ_ALL_THRESHOLD : int If this flag is set, use the value as the `read_all_threshold` parameter to BcolzDailyBarReader, otherwise use the default value. EQUITY_DAILY_BAR_SOURCE_FROM_MINUTE : bool If this flag is set, `make_equity_daily_bar_data` will read data from the minute bar reader defined by a `WithBcolzEquityMinuteBarReader`. Methods ------- make_bcolz_daily_bar_rootdir_path() -> string A class method that returns the path for the rootdir of the daily bars ctable. By default this is a subdirectory BCOLZ_DAILY_BAR_PATH in the shared temp directory. See Also -------- WithBcolzEquityMinuteBarReader WithDataPortal zipline.testing.create_daily_bar_data """ BCOLZ_DAILY_BAR_PATH = 'daily_equity_pricing.bcolz' BCOLZ_DAILY_BAR_READ_ALL_THRESHOLD = None BCOLZ_DAILY_BAR_COUNTRY_CODE = None EQUITY_DAILY_BAR_SOURCE_FROM_MINUTE = False # allows WithBcolzEquityDailyBarReaderFromCSVs to call the # `write_csvs`method without needing to reimplement `init_class_fixtures` _write_method_name = 'write' # What to do when data being written is invalid, e.g. nan, inf, etc. # options are: 'warn', 'raise', 'ignore' INVALID_DATA_BEHAVIOR = 'warn' @classproperty def BCOLZ_DAILY_BAR_COUNTRY_CODE(cls): return cls.EQUITY_DAILY_BAR_COUNTRY_CODES[0] @classmethod def make_bcolz_daily_bar_rootdir_path(cls): return cls.tmpdir.makedir(cls.BCOLZ_DAILY_BAR_PATH) @classmethod def init_class_fixtures(cls): super(WithBcolzEquityDailyBarReader, cls).init_class_fixtures() cls.bcolz_daily_bar_path = p = cls.make_bcolz_daily_bar_rootdir_path() days = cls.equity_daily_bar_days sids = cls.asset_finder.equities_sids_for_country_code( cls.BCOLZ_DAILY_BAR_COUNTRY_CODE ) trading_calendar = cls.trading_calendars[Equity] cls.bcolz_daily_bar_ctable = t = getattr( BcolzDailyBarWriter(p, trading_calendar, days[0], days[-1]), cls._write_method_name, )( cls.make_equity_daily_bar_data( country_code=cls.BCOLZ_DAILY_BAR_COUNTRY_CODE, sids=sids, ), invalid_data_behavior=cls.INVALID_DATA_BEHAVIOR ) if cls.BCOLZ_DAILY_BAR_READ_ALL_THRESHOLD is not None: cls.bcolz_equity_daily_bar_reader = BcolzDailyBarReader( t, cls.BCOLZ_DAILY_BAR_READ_ALL_THRESHOLD) else: cls.bcolz_equity_daily_bar_reader = BcolzDailyBarReader(t) class WithBcolzFutureDailyBarReader(WithFutureDailyBarData, WithTmpDir): """ ZiplineTestCase mixin providing cls.bcolz_daily_bar_path, cls.bcolz_daily_bar_ctable, and cls.bcolz_future_daily_bar_reader class level fixtures. After init_class_fixtures has been called: - `cls.bcolz_daily_bar_path` is populated with `cls.tmpdir.getpath(cls.BCOLZ_DAILY_BAR_PATH)`. - `cls.bcolz_daily_bar_ctable` is populated with data returned from `cls.make_future_daily_bar_data`. By default this calls :func:`zipline.pipeline.loaders.synthetic.make_future_daily_bar_data`. - `cls.bcolz_future_daily_bar_reader` is a daily bar reader pointing to the directory that was just written to. Attributes ---------- BCOLZ_DAILY_BAR_PATH : str The path inside the tmpdir where this will be written. FUTURE_DAILY_BAR_LOOKBACK_DAYS : int The number of days of data to add before the first day. This is used when a test needs to use history, in which case this should be set to the largest history window that will be requested. FUTURE_DAILY_BAR_USE_FULL_CALENDAR : bool If this flag is set the ``future_daily_bar_days`` will be the full set of trading days from the trading environment. This flag overrides ``FUTURE_DAILY_BAR_LOOKBACK_DAYS``. 
BCOLZ_FUTURE_DAILY_BAR_READ_ALL_THRESHOLD : int If this flag is set, use the value as the `read_all_threshold` parameter to BcolzDailyBarReader, otherwise use the default value. FUTURE_DAILY_BAR_SOURCE_FROM_MINUTE : bool If this flag is set, `make_future_daily_bar_data` will read data from the minute bar reader defined by a `WithBcolzFutureMinuteBarReader`. Methods ------- make_bcolz_daily_bar_rootdir_path() -> string A class method that returns the path for the rootdir of the daily bars ctable. By default this is a subdirectory BCOLZ_DAILY_BAR_PATH in the shared temp directory. See Also -------- WithBcolzFutureMinuteBarReader WithDataPortal zipline.testing.create_daily_bar_data """ BCOLZ_FUTURE_DAILY_BAR_PATH = 'daily_future_pricing.bcolz' BCOLZ_FUTURE_DAILY_BAR_READ_ALL_THRESHOLD = None FUTURE_DAILY_BAR_SOURCE_FROM_MINUTE = False # What to do when data being written is invalid, e.g. nan, inf, etc. # options are: 'warn', 'raise', 'ignore' BCOLZ_FUTURE_DAILY_BAR_INVALID_DATA_BEHAVIOR = 'warn' BCOLZ_FUTURE_DAILY_BAR_WRITE_METHOD_NAME = 'write' @classmethod def make_bcolz_future_daily_bar_rootdir_path(cls): return cls.tmpdir.makedir(cls.BCOLZ_FUTURE_DAILY_BAR_PATH) @classmethod def init_class_fixtures(cls): super(WithBcolzFutureDailyBarReader, cls).init_class_fixtures() p = cls.make_bcolz_future_daily_bar_rootdir_path() cls.future_bcolz_daily_bar_path = p days = cls.future_daily_bar_days trading_calendar = cls.trading_calendars[Future] cls.future_bcolz_daily_bar_ctable = t = getattr( BcolzDailyBarWriter(p, trading_calendar, days[0], days[-1]), cls.BCOLZ_FUTURE_DAILY_BAR_WRITE_METHOD_NAME, )( cls.make_future_daily_bar_data(), invalid_data_behavior=( cls.BCOLZ_FUTURE_DAILY_BAR_INVALID_DATA_BEHAVIOR ) ) if cls.BCOLZ_FUTURE_DAILY_BAR_READ_ALL_THRESHOLD is not None: cls.bcolz_future_daily_bar_reader = BcolzDailyBarReader( t, cls.BCOLZ_FUTURE_DAILY_BAR_READ_ALL_THRESHOLD) else: cls.bcolz_future_daily_bar_reader = BcolzDailyBarReader(t) class WithBcolzEquityDailyBarReaderFromCSVs(WithBcolzEquityDailyBarReader): """ ZiplineTestCase mixin that provides cls.bcolz_equity_daily_bar_reader from a mapping of sids to CSV file paths. """ _write_method_name = 'write_csvs' def _trading_days_for_minute_bars(calendar, start_date, end_date, lookback_days): first_session = calendar.minute_to_session_label(start_date) if lookback_days > 0: first_session = calendar.sessions_window( first_session, -1 * lookback_days )[0] if not first_session.tzinfo: first_session = first_session.tz_localize('utc') if not end_date.tzinfo: end_date = end_date.tz_localize('utc') return calendar.sessions_in_range(first_session, end_date) # TODO_SS: This currently doesn't define any relationship between country_code # and calendar, which would be useful downstream. class WithWriteHDF5DailyBars(WithEquityDailyBarData, WithTmpDir): """ Fixture class defining the capability of writing HDF5 daily bars to disk. Uses cls.make_equity_daily_bar_data (inherited from WithEquityDailyBarData) to determine the data to write. Methods ------- write_hdf5_daily_bars(cls, path, country_codes) Creates an HDF5 file on disk and populates it with pricing data. Attributes ---------- HDF5_DAILY_BAR_CHUNK_SIZE """ HDF5_DAILY_BAR_CHUNK_SIZE = 30 @classmethod def write_hdf5_daily_bars(cls, path, country_codes): """ Write HDF5 pricing data using an HDF5DailyBarWriter. Parameters ---------- path : str Location (relative to cls.tmpdir) at which to write data. country_codes : list[str] List of country codes to write.
Returns ------- written : h5py.File A read-only h5py.File pointing at the written data. The returned file is registered to be closed automatically during class teardown. """ ensure_directory_containing(path) writer = HDF5DailyBarWriter(path, cls.HDF5_DAILY_BAR_CHUNK_SIZE) write_hdf5_daily_bars( writer, cls.asset_finder, country_codes, cls.make_equity_daily_bar_data, cls.make_equity_daily_bar_currency_codes, ) # Open the file and mark it for closure during teardown. return cls.enter_class_context(writer.h5_file(mode='r')) class WithHDF5EquityMultiCountryDailyBarReader(WithWriteHDF5DailyBars): """ Fixture providing cls.hdf5_daily_bar_path and cls.hdf5_equity_daily_bar_reader class level fixtures. After init_class_fixtures has been called: - `cls.hdf5_daily_bar_path` is populated with `cls.tmpdir.getpath(cls.HDF5_DAILY_BAR_PATH)`. - The file at `cls.hdf5_daily_bar_path` is populated with data returned from `cls.make_equity_daily_bar_data`. By default this calls :func:`zipline.pipeline.loaders.synthetic.make_equity_daily_bar_data`. - `cls.hdf5_equity_daily_bar_reader` is a daily bar reader pointing to the file that was just written to. Attributes ---------- HDF5_DAILY_BAR_PATH : str The path inside the tmpdir where this will be written. HDF5_DAILY_BAR_COUNTRY_CODE : str The ISO 3166 alpha-2 country code for the country to write/read. Methods ------- make_hdf5_daily_bar_path() -> string A class method that returns the path for the rootdir of the daily bars ctable. By default this is a subdirectory HDF5_DAILY_BAR_PATH in the shared temp directory. See Also -------- WithDataPortal zipline.testing.create_daily_bar_data """ HDF5_DAILY_BAR_PATH = 'daily_equity_pricing.h5' HDF5_DAILY_BAR_COUNTRY_CODES = alias('EQUITY_DAILY_BAR_COUNTRY_CODES') @classmethod def make_hdf5_daily_bar_path(cls): return cls.tmpdir.getpath(cls.HDF5_DAILY_BAR_PATH) @classmethod def init_class_fixtures(cls): super( WithHDF5EquityMultiCountryDailyBarReader, cls, ).init_class_fixtures() cls.hdf5_daily_bar_path = path = cls.make_hdf5_daily_bar_path() f = cls.write_hdf5_daily_bars(path, cls.HDF5_DAILY_BAR_COUNTRY_CODES) cls.single_country_hdf5_equity_daily_bar_readers = { country_code: HDF5DailyBarReader.from_file(f, country_code) for country_code in f } cls.hdf5_equity_daily_bar_reader = MultiCountryDailyBarReader( cls.single_country_hdf5_equity_daily_bar_readers ) class WithEquityMinuteBarData(WithAssetFinder, WithTradingCalendars): """ ZiplineTestCase mixin providing cls.equity_minute_bar_days. After init_class_fixtures has been called: - `cls.equity_minute_bar_days` has the range over which data has been generated. Attributes ---------- EQUITY_MINUTE_BAR_LOOKBACK_DAYS : int The number of days of data to add before the first day. This is used when a test needs to use history, in which case this should be set to the largest history window that will be requested. EQUITY_MINUTE_BAR_START_DATE : Timestamp The date at to which to start creating data. This defaults to ``START_DATE``. EQUITY_MINUTE_BAR_END_DATE = Timestamp The end date up to which to create data. This defaults to ``END_DATE``. Methods ------- make_equity_minute_bar_data() -> iterable[(int, pd.DataFrame)] Classmethod producing an iterator of (sid, minute_data) pairs. The default implementation invokes zipline.testing.core.create_minute_bar_data. 
See Also -------- WithEquityDailyBarData zipline.testing.create_minute_bar_data """ EQUITY_MINUTE_BAR_LOOKBACK_DAYS = 0 EQUITY_MINUTE_BAR_START_DATE = alias('START_DATE') EQUITY_MINUTE_BAR_END_DATE = alias('END_DATE') @classmethod def make_equity_minute_bar_data(cls): trading_calendar = cls.trading_calendars[Equity] return create_minute_bar_data( trading_calendar.minutes_for_sessions_in_range( cls.equity_minute_bar_days[0], cls.equity_minute_bar_days[-1], ), cls.asset_finder.equities_sids, ) @classmethod def init_class_fixtures(cls): super(WithEquityMinuteBarData, cls).init_class_fixtures() trading_calendar = cls.trading_calendars[Equity] cls.equity_minute_bar_days = _trading_days_for_minute_bars( trading_calendar, cls.EQUITY_MINUTE_BAR_START_DATE, cls.EQUITY_MINUTE_BAR_END_DATE, cls.EQUITY_MINUTE_BAR_LOOKBACK_DAYS ) class WithFutureMinuteBarData(WithAssetFinder, WithTradingCalendars): """ ZiplineTestCase mixin providing cls.future_minute_bar_days. After init_class_fixtures has been called: - `cls.future_minute_bar_days` has the range over which data has been generated. Attributes ---------- FUTURE_MINUTE_BAR_LOOKBACK_DAYS : int The number of days of data to add before the first day. This is used when a test needs to use history, in which case this should be set to the largest history window that will be requested. FUTURE_MINUTE_BAR_START_DATE : Timestamp The date at to which to start creating data. This defaults to ``START_DATE``. FUTURE_MINUTE_BAR_END_DATE = Timestamp The end date up to which to create data. This defaults to ``END_DATE``. Methods ------- make_future_minute_bar_data() -> iterable[(int, pd.DataFrame)] A class method that returns a dict mapping sid to dataframe which will be written to into the the format of the inherited class which writes the minute bar data for use by a reader. By default this creates some simple sythetic data with :func:`~zipline.testing.create_minute_bar_data` See Also -------- zipline.testing.create_minute_bar_data """ FUTURE_MINUTE_BAR_LOOKBACK_DAYS = 0 FUTURE_MINUTE_BAR_START_DATE = alias('START_DATE') FUTURE_MINUTE_BAR_END_DATE = alias('END_DATE') @classmethod def make_future_minute_bar_data(cls): trading_calendar = get_calendar('us_futures') return create_minute_bar_data( trading_calendar.minutes_for_sessions_in_range( cls.future_minute_bar_days[0], cls.future_minute_bar_days[-1], ), cls.asset_finder.futures_sids, ) @classmethod def init_class_fixtures(cls): super(WithFutureMinuteBarData, cls).init_class_fixtures() trading_calendar = get_calendar('us_futures') cls.future_minute_bar_days = _trading_days_for_minute_bars( trading_calendar, cls.FUTURE_MINUTE_BAR_START_DATE, cls.FUTURE_MINUTE_BAR_END_DATE, cls.FUTURE_MINUTE_BAR_LOOKBACK_DAYS ) class WithBcolzEquityMinuteBarReader(WithEquityMinuteBarData, WithTmpDir): """ ZiplineTestCase mixin providing cls.bcolz_minute_bar_path, cls.bcolz_minute_bar_ctable, and cls.bcolz_equity_minute_bar_reader class level fixtures. After init_class_fixtures has been called: - `cls.bcolz_minute_bar_path` is populated with `cls.tmpdir.getpath(cls.BCOLZ_MINUTE_BAR_PATH)`. - `cls.bcolz_minute_bar_ctable` is populated with data returned from `cls.make_equity_minute_bar_data`. By default this calls :func:`zipline.pipeline.loaders.synthetic.make_equity_minute_bar_data`. - `cls.bcolz_equity_minute_bar_reader` is a minute bar reader pointing to the directory that was just written to. Attributes ---------- BCOLZ_MINUTE_BAR_PATH : str The path inside the tmpdir where this will be written. 
    Methods
    -------
    make_bcolz_equity_minute_bar_rootdir_path() -> string
        A class method that returns the path for the directory that contains
        the minute bar ctables. By default this is a subdirectory
        BCOLZ_EQUITY_MINUTE_BAR_PATH in the shared temp directory.

    See Also
    --------
    WithBcolzEquityDailyBarReader
    WithDataPortal
    zipline.testing.create_minute_bar_data
    """
    BCOLZ_EQUITY_MINUTE_BAR_PATH = 'minute_equity_pricing'

    @classmethod
    def make_bcolz_equity_minute_bar_rootdir_path(cls):
        return cls.tmpdir.makedir(cls.BCOLZ_EQUITY_MINUTE_BAR_PATH)

    @classmethod
    def init_class_fixtures(cls):
        super(WithBcolzEquityMinuteBarReader, cls).init_class_fixtures()
        cls.bcolz_equity_minute_bar_path = p = \
            cls.make_bcolz_equity_minute_bar_rootdir_path()
        days = cls.equity_minute_bar_days
        writer = BcolzMinuteBarWriter(
            p,
            cls.trading_calendars[Equity],
            days[0],
            days[-1],
            US_EQUITIES_MINUTES_PER_DAY
        )
        writer.write(cls.make_equity_minute_bar_data())

        cls.bcolz_equity_minute_bar_reader = \
            BcolzMinuteBarReader(p)


class WithBcolzFutureMinuteBarReader(WithFutureMinuteBarData, WithTmpDir):
    """
    ZiplineTestCase mixin providing cls.bcolz_future_minute_bar_path and
    cls.bcolz_future_minute_bar_reader class level fixtures.

    After init_class_fixtures has been called:
    - `cls.bcolz_future_minute_bar_path` is populated with the result of
      `cls.make_bcolz_future_minute_bar_rootdir_path`.
    - The directory at `cls.bcolz_future_minute_bar_path` is populated with
      data returned from `cls.make_future_minute_bar_data`. By default this
      calls :func:`zipline.testing.create_minute_bar_data`.
    - `cls.bcolz_future_minute_bar_reader` is a minute bar reader
      pointing to the directory that was just written to.

    Attributes
    ----------
    BCOLZ_FUTURE_MINUTE_BAR_PATH : str
        The path inside the tmpdir where this will be written.

    Methods
    -------
    make_bcolz_future_minute_bar_rootdir_path() -> string
        A class method that returns the path for the directory that contains
        the minute bar ctables. By default this is a subdirectory
        BCOLZ_FUTURE_MINUTE_BAR_PATH in the shared temp directory.
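    A sketch of how a test might consume this fixture (the class name, the
    sid, and the ``OHLC_RATIOS_PER_SID`` value below are illustrative only)::

        class FutureMinuteCase(WithBcolzFutureMinuteBarReader,
                               ZiplineTestCase):
            OHLC_RATIOS_PER_SID = {1001: 100000}

            def test_first_close(self):
                reader = self.bcolz_future_minute_bar_reader
                cal = get_calendar('us_futures')
                first_minute = cal.minutes_for_session(
                    self.future_minute_bar_days[0],
                )[0]
                close = reader.get_value(1001, first_minute, 'close')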
See Also -------- WithBcolzEquityDailyBarReader WithDataPortal zipline.testing.create_minute_bar_data """ BCOLZ_FUTURE_MINUTE_BAR_PATH = 'minute_future_pricing' OHLC_RATIOS_PER_SID = None @classmethod def make_bcolz_future_minute_bar_rootdir_path(cls): return cls.tmpdir.makedir(cls.BCOLZ_FUTURE_MINUTE_BAR_PATH) @classmethod def init_class_fixtures(cls): super(WithBcolzFutureMinuteBarReader, cls).init_class_fixtures() trading_calendar = get_calendar('us_futures') cls.bcolz_future_minute_bar_path = p = \ cls.make_bcolz_future_minute_bar_rootdir_path() days = cls.future_minute_bar_days writer = BcolzMinuteBarWriter( p, trading_calendar, days[0], days[-1], FUTURES_MINUTES_PER_DAY, ohlc_ratios_per_sid=cls.OHLC_RATIOS_PER_SID, ) writer.write(cls.make_future_minute_bar_data()) cls.bcolz_future_minute_bar_reader = \ BcolzMinuteBarReader(p) class WithConstantEquityMinuteBarData(WithEquityMinuteBarData): EQUITY_MINUTE_CONSTANT_LOW = 3.0 EQUITY_MINUTE_CONSTANT_OPEN = 4.0 EQUITY_MINUTE_CONSTANT_CLOSE = 5.0 EQUITY_MINUTE_CONSTANT_HIGH = 6.0 EQUITY_MINUTE_CONSTANT_VOLUME = 100.0 @classmethod def make_equity_minute_bar_data(cls): trading_calendar = cls.trading_calendars[Equity] sids = cls.asset_finder.equities_sids minutes = trading_calendar.minutes_for_sessions_in_range( cls.equity_minute_bar_days[0], cls.equity_minute_bar_days[-1], ) frame = pd.DataFrame( { 'open': cls.EQUITY_MINUTE_CONSTANT_OPEN, 'high': cls.EQUITY_MINUTE_CONSTANT_HIGH, 'low': cls.EQUITY_MINUTE_CONSTANT_LOW, 'close': cls.EQUITY_MINUTE_CONSTANT_CLOSE, 'volume': cls.EQUITY_MINUTE_CONSTANT_VOLUME, }, index=minutes, ) return ((sid, frame) for sid in sids) class WithConstantFutureMinuteBarData(WithFutureMinuteBarData): FUTURE_MINUTE_CONSTANT_LOW = 3.0 FUTURE_MINUTE_CONSTANT_OPEN = 4.0 FUTURE_MINUTE_CONSTANT_CLOSE = 5.0 FUTURE_MINUTE_CONSTANT_HIGH = 6.0 FUTURE_MINUTE_CONSTANT_VOLUME = 100.0 @classmethod def make_future_minute_bar_data(cls): trading_calendar = cls.trading_calendars[Future] sids = cls.asset_finder.futures_sids minutes = trading_calendar.minutes_for_sessions_in_range( cls.future_minute_bar_days[0], cls.future_minute_bar_days[-1], ) frame = pd.DataFrame( { 'open': cls.FUTURE_MINUTE_CONSTANT_OPEN, 'high': cls.FUTURE_MINUTE_CONSTANT_HIGH, 'low': cls.FUTURE_MINUTE_CONSTANT_LOW, 'close': cls.FUTURE_MINUTE_CONSTANT_CLOSE, 'volume': cls.FUTURE_MINUTE_CONSTANT_VOLUME, }, index=minutes, ) return ((sid, frame) for sid in sids) class WithAdjustmentReader(WithBcolzEquityDailyBarReader): """ ZiplineTestCase mixin providing cls.adjustment_reader as a class level fixture. After init_class_fixtures has been called, `cls.adjustment_reader` will be populated with a new SQLiteAdjustmentReader object. The data that will be written can be passed by overriding `make_{field}_data` where field may be `splits`, `mergers` `dividends`, or `stock_dividends`. The daily bar reader used for this adjustment reader may be customized by overriding `make_adjustment_writer_equity_daily_bar_reader`. This is useful to providing a `MockDailyBarReader`. Methods ------- make_splits_data() -> pd.DataFrame A class method that returns a dataframe of splits data to write to the class's adjustment db. By default this is empty. make_mergers_data() -> pd.DataFrame A class method that returns a dataframe of mergers data to write to the class's adjustment db. By default this is empty. make_dividends_data() -> pd.DataFrame A class method that returns a dataframe of dividends data to write to the class's adjustment db. By default this is empty. 
make_stock_dividends_data() -> pd.DataFrame A class method that returns a dataframe of stock dividends data to write to the class's adjustment db. By default this is empty. make_adjustment_db_conn_str() -> string A class method that returns the sqlite3 connection string for the database in to which the adjustments will be written. By default this is an in-memory database. make_adjustment_writer_equity_daily_bar_reader() -> pd.DataFrame A class method that returns the daily bar reader to use for the class's adjustment writer. By default this is the class's actual ``bcolz_equity_daily_bar_reader`` as inherited from ``WithBcolzEquityDailyBarReader``. This should probably not be overridden; however, some tests used a ``MockDailyBarReader`` for this. make_adjustment_writer(conn: sqlite3.Connection) -> AdjustmentWriter A class method that constructs the adjustment which will be used to write the data into the connection to be used by the class's adjustment reader. See Also -------- zipline.testing.MockDailyBarReader """ @classmethod def _make_data(cls): return None make_splits_data = _make_data make_mergers_data = _make_data make_dividends_data = _make_data make_stock_dividends_data = _make_data del _make_data @classmethod def make_adjustment_writer(cls, conn): return SQLiteAdjustmentWriter( conn, cls.make_adjustment_writer_equity_daily_bar_reader(), ) @classmethod def make_adjustment_writer_equity_daily_bar_reader(cls): return cls.bcolz_equity_daily_bar_reader @classmethod def make_adjustment_db_conn_str(cls): return ':memory:' @classmethod def init_class_fixtures(cls): super(WithAdjustmentReader, cls).init_class_fixtures() conn = sqlite3.connect(cls.make_adjustment_db_conn_str()) # Silence numpy DeprecationWarnings which cause nosetest to fail with warnings.catch_warnings(): warnings.simplefilter("ignore", DeprecationWarning) cls.make_adjustment_writer(conn).write( splits=cls.make_splits_data(), mergers=cls.make_mergers_data(), dividends=cls.make_dividends_data(), stock_dividends=cls.make_stock_dividends_data(), ) cls.adjustment_reader = SQLiteAdjustmentReader(conn) class WithUSEquityPricingPipelineEngine(WithAdjustmentReader, WithTradingSessions): """ Mixin providing the following as a class-level fixtures. - cls.data_root_dir - cls.findata_dir - cls.pipeline_engine - cls.adjustments_db_path """ @classmethod def init_class_fixtures(cls): cls.data_root_dir = cls.enter_class_context(tmp_dir()) cls.findata_dir = cls.data_root_dir.makedir('findata') super(WithUSEquityPricingPipelineEngine, cls).init_class_fixtures() loader = USEquityPricingLoader.without_fx( cls.bcolz_equity_daily_bar_reader, SQLiteAdjustmentReader(cls.adjustments_db_path), ) def get_loader(column): if column in USEquityPricing.columns: return loader else: raise AssertionError("No loader registered for %s" % column) cls.pipeline_engine = SimplePipelineEngine( get_loader=get_loader, asset_finder=cls.asset_finder, default_domain=US_EQUITIES, ) @classmethod def make_adjustment_db_conn_str(cls): cls.adjustments_db_path = os.path.join( cls.findata_dir, 'adjustments', cls.END_DATE.strftime("%Y-%m-%d-adjustments.db") ) ensure_directory(os.path.dirname(cls.adjustments_db_path)) return cls.adjustments_db_path class WithSeededRandomPipelineEngine(WithTradingSessions, WithAssetFinder): """ ZiplineTestCase mixin providing class-level fixtures for running pipelines against deterministically-generated random data. Attributes ---------- SEEDED_RANDOM_PIPELINE_SEED : int Fixture input. Random seed used to initialize the random state loader. 
seeded_random_loader : SeededRandomLoader Fixture output. Loader capable of providing columns for zipline.pipeline.data.testing.TestingDataSet. seeded_random_engine : SimplePipelineEngine Fixture output. A pipeline engine that will use seeded_random_loader as its only data provider. Methods ------- run_pipeline(start_date, end_date) Run a pipeline with self.seeded_random_engine. See Also -------- zipline.pipeline.loaders.synthetic.SeededRandomLoader zipline.pipeline.loaders.testing.make_seeded_random_loader zipline.pipeline.engine.SimplePipelineEngine """ SEEDED_RANDOM_PIPELINE_SEED = 42 SEEDED_RANDOM_PIPELINE_DEFAULT_DOMAIN = GENERIC @classmethod def init_class_fixtures(cls): super(WithSeededRandomPipelineEngine, cls).init_class_fixtures() cls._sids = cls.asset_finder.sids cls.seeded_random_loader = loader = make_seeded_random_loader( cls.SEEDED_RANDOM_PIPELINE_SEED, cls.trading_days, cls._sids, columns=cls.make_seeded_random_loader_columns(), ) cls.seeded_random_engine = SimplePipelineEngine( get_loader=lambda column: loader, asset_finder=cls.asset_finder, default_domain=cls.SEEDED_RANDOM_PIPELINE_DEFAULT_DOMAIN, default_hooks=cls.make_seeded_random_pipeline_engine_hooks(), populate_initial_workspace=( cls.make_seeded_random_populate_initial_workspace() ), ) @classmethod def make_seeded_random_pipeline_engine_hooks(cls): return [] @classmethod def make_seeded_random_populate_initial_workspace(cls): return None @classmethod def make_seeded_random_loader_columns(cls): return TestingDataSet.columns def raw_expected_values(self, column, start_date, end_date): """ Get an array containing the raw values we expect to be produced for the given dates between start_date and end_date, inclusive. """ all_values = self.seeded_random_loader.values( column.dtype, self.trading_days, self._sids, ) row_slice = self.trading_days.slice_indexer(start_date, end_date) return all_values[row_slice] def run_pipeline(self, pipeline, start_date, end_date, hooks=None): """ Run a pipeline with self.seeded_random_engine. """ return self.seeded_random_engine.run_pipeline( pipeline, start_date, end_date, hooks=hooks, ) def run_chunked_pipeline(self, pipeline, start_date, end_date, chunksize, hooks=None): """ Run a chunked pipeline with self.seeded_random_engine. """ return self.seeded_random_engine.run_chunked_pipeline( pipeline, start_date, end_date, chunksize=chunksize, hooks=hooks, ) class WithDataPortal(WithAdjustmentReader, # Ordered so that bcolz minute reader is used first. WithBcolzEquityMinuteBarReader, WithBcolzFutureMinuteBarReader): """ ZiplineTestCase mixin providing self.data_portal as an instance level fixture. After init_instance_fixtures has been called, `self.data_portal` will be populated with a new data portal created by passing in the class's trading env, `cls.bcolz_equity_minute_bar_reader`, `cls.bcolz_equity_daily_bar_reader`, and `cls.adjustment_reader`. Attributes ---------- DATA_PORTAL_USE_DAILY_DATA : bool Should the daily bar reader be used? Defaults to True. DATA_PORTAL_USE_MINUTE_DATA : bool Should the minute bar reader be used? Defaults to True. DATA_PORTAL_USE_ADJUSTMENTS : bool Should the adjustment reader be used? Defaults to True. Methods ------- make_data_portal() -> DataPortal Method which returns the data portal to be used for each test case. If this is overridden, the ``DATA_PORTAL_USE_*`` attributes may not be respected. 
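    Notes
    -----
    A sketch of a subclass that only needs daily pricing (the class name is
    illustrative; sids come from the inherited asset finder)::

        class DailyOnlyCase(WithDataPortal, ZiplineTestCase):
            DATA_PORTAL_USE_MINUTE_DATA = False

            def test_close(self):
                asset = self.asset_finder.retrieve_asset(
                    self.asset_finder.sids[0],
                )
                session = self.trading_calendar.sessions_in_range(
                    self.START_DATE, self.END_DATE,
                )[-1]
                close = self.data_portal.get_spot_value(
                    asset, 'close', session, 'daily',
                )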
""" DATA_PORTAL_USE_DAILY_DATA = True DATA_PORTAL_USE_MINUTE_DATA = True DATA_PORTAL_USE_ADJUSTMENTS = True DATA_PORTAL_FIRST_TRADING_DAY = None DATA_PORTAL_LAST_AVAILABLE_SESSION = None DATA_PORTAL_LAST_AVAILABLE_MINUTE = None DATA_PORTAL_MINUTE_HISTORY_PREFETCH = DEFAULT_MINUTE_HISTORY_PREFETCH DATA_PORTAL_DAILY_HISTORY_PREFETCH = DEFAULT_DAILY_HISTORY_PREFETCH def make_data_portal(self): if self.DATA_PORTAL_FIRST_TRADING_DAY is None: if self.DATA_PORTAL_USE_MINUTE_DATA: self.DATA_PORTAL_FIRST_TRADING_DAY = ( self.bcolz_equity_minute_bar_reader. first_trading_day) elif self.DATA_PORTAL_USE_DAILY_DATA: self.DATA_PORTAL_FIRST_TRADING_DAY = ( self.bcolz_equity_daily_bar_reader. first_trading_day) return DataPortal( self.asset_finder, self.trading_calendar, first_trading_day=self.DATA_PORTAL_FIRST_TRADING_DAY, equity_daily_reader=( self.bcolz_equity_daily_bar_reader if self.DATA_PORTAL_USE_DAILY_DATA else None ), equity_minute_reader=( self.bcolz_equity_minute_bar_reader if self.DATA_PORTAL_USE_MINUTE_DATA else None ), adjustment_reader=( self.adjustment_reader if self.DATA_PORTAL_USE_ADJUSTMENTS else None ), future_minute_reader=( self.bcolz_future_minute_bar_reader if self.DATA_PORTAL_USE_MINUTE_DATA else None ), future_daily_reader=( MinuteResampleSessionBarReader( self.bcolz_future_minute_bar_reader.trading_calendar, self.bcolz_future_minute_bar_reader) if self.DATA_PORTAL_USE_MINUTE_DATA else None ), last_available_session=self.DATA_PORTAL_LAST_AVAILABLE_SESSION, last_available_minute=self.DATA_PORTAL_LAST_AVAILABLE_MINUTE, minute_history_prefetch_length=self. DATA_PORTAL_MINUTE_HISTORY_PREFETCH, daily_history_prefetch_length=self. DATA_PORTAL_DAILY_HISTORY_PREFETCH, ) def init_instance_fixtures(self): super(WithDataPortal, self).init_instance_fixtures() self.data_portal = self.make_data_portal() class WithResponses(object): """ ZiplineTestCase mixin that provides self.responses as an instance fixture. After init_instance_fixtures has been called, `self.responses` will be a new `responses.RequestsMock` object. Users may add new endpoints to this with the `self.responses.add` method. """ def init_instance_fixtures(self): super(WithResponses, self).init_instance_fixtures() self.responses = self.enter_instance_context( responses.RequestsMock(), ) class WithCreateBarData(WithDataPortal): CREATE_BARDATA_DATA_FREQUENCY = 'minute' def create_bardata(self, simulation_dt_func, restrictions=None): return BarData( self.data_portal, simulation_dt_func, self.CREATE_BARDATA_DATA_FREQUENCY, self.trading_calendar, restrictions or NoRestrictions() ) class WithMakeAlgo(WithBenchmarkReturns, WithSimParams, WithLogger, WithDataPortal): """ ZiplineTestCase mixin that provides a ``make_algo`` method. """ START_DATE = pd.Timestamp('2014-12-29', tz='UTC') END_DATE = pd.Timestamp('2015-1-05', tz='UTC') SIM_PARAMS_DATA_FREQUENCY = 'minute' DEFAULT_ALGORITHM_CLASS = TradingAlgorithm @classproperty def BENCHMARK_SID(cls): """The sid to use as a benchmark. Can be overridden to use an alternative benchmark. """ return cls.asset_finder.sids[0] def merge_with_inherited_algo_kwargs(self, overriding_type, suite_overrides, method_overrides): """ Helper for subclasses overriding ``make_algo_kwargs``. A common pattern for tests using `WithMakeAlgoKwargs` is that a particular test suite has a set of default keywords it wants to use everywhere, but also accepts test-specific overrides. 
Test suites that fit that pattern can call this method and pass the test class, suite-specific overrides, and method-specific overrides, and this method takes care of fetching parent class overrides and merging them with the suite- and instance-specific overrides. Parameters ---------- overriding_type : type The type from which we're being called. This is forwarded to super().make_algo_kwargs() suite_overrides : dict Keywords which should take precedence over kwargs returned by super(overriding_type, self).make_algo_kwargs(). These are generally keyword arguments that are constant within a test suite. method_overrides : dict Keywords which should take precedence over `suite_overrides` and superclass kwargs. These are generally keyword arguments that are overridden on a per-test basis. """ # NOTE: This is a weird invocation of super(). # Our goal here is to provide the behavior that the caller would get if # they called super() in the normal way, so that we dispatch to the # make_algo_kwargs() for the parent of the type that's calling # into us. We achieve that goal by requiring the caller to tell us # what type they're calling us from. return super(overriding_type, self).make_algo_kwargs( **merge(suite_overrides, method_overrides) ) def make_algo_kwargs(self, **overrides): if self.BENCHMARK_SID is None: overrides.setdefault('benchmark_returns', self.BENCHMARK_RETURNS) return merge( { 'sim_params': self.sim_params, 'data_portal': self.data_portal, 'benchmark_sid': self.BENCHMARK_SID, }, overrides, ) def make_algo(self, algo_class=None, **overrides): if algo_class is None: algo_class = self.DEFAULT_ALGORITHM_CLASS return algo_class(**self.make_algo_kwargs(**overrides)) def run_algorithm(self, **overrides): """ Create and run an TradingAlgorithm in memory. """ return self.make_algo(**overrides).run() class WithWerror(object): @classmethod def init_class_fixtures(cls): cls.enter_class_context(warnings.catch_warnings()) warnings.simplefilter('error') super(WithWerror, cls).init_class_fixtures() register_calendar_alias("TEST", "NYSE") class WithSeededRandomState(object): RANDOM_SEED = np.array(list('lmao'), dtype='S1').view('i4').item() def init_instance_fixtures(self): super(WithSeededRandomState, self).init_instance_fixtures() self.rand = np.random.RandomState(self.RANDOM_SEED) class WithFXRates(object): """Fixture providing a factory for in-memory exchange rate data. """ # Start date for exchange rates data. FX_RATES_START_DATE = alias('START_DATE') # End date for exchange rates data. FX_RATES_END_DATE = alias('END_DATE') # Calendar to which exchange rates data is aligned. FX_RATES_CALENDAR = '24/5' # Currencies between which exchange rates can be calculated. FX_RATES_CURRENCIES = ["USD", "CAD", "GBP", "EUR"] # Kinds of rates for which exchange rate data is present. FX_RATES_RATE_NAMES = ["mid"] # Default chunk size used for fx artifact compression. HDF5_FX_CHUNK_SIZE = 75 # Rate used by default for Pipeline API queries that don't specify a rate # explicitly. 
@classproperty def FX_RATES_DEFAULT_RATE(cls): return cls.FX_RATES_RATE_NAMES[0] @classmethod def init_class_fixtures(cls): super(WithFXRates, cls).init_class_fixtures() cal = get_calendar(cls.FX_RATES_CALENDAR) cls.fx_rates_sessions = cal.sessions_in_range( cls.FX_RATES_START_DATE, cls.FX_RATES_END_DATE, ) cls.fx_rates = cls.make_fx_rates( cls.FX_RATES_RATE_NAMES, cls.FX_RATES_CURRENCIES, cls.fx_rates_sessions, ) cls.in_memory_fx_rate_reader = InMemoryFXRateReader( cls.fx_rates, cls.FX_RATES_DEFAULT_RATE, ) @classmethod def make_fx_rates_from_reference(cls, reference): """ Helper method for implementing make_fx_rates. Takes a (dates x currencies) DataFrame of "reference" values, which are assumed to be the "true" value of each currency in some unknown external currency. Computes fx rates from A -> B as by dividing the reference value for A by the reference value for B. Parameters ---------- reference : pd.DataFrame DataFrame of "true" values for currencies. Returns ------- rates : dict[str, pd.DataFrame] Map from quote currency to FX rates for that currency. """ out = {} for quote in reference.columns: out[quote] = reference.divide(reference[quote], axis=0) return out @classmethod def make_fx_rates(cls, rate_names, currencies, sessions): rng = np.random.RandomState(42) out = {} for rate_name in rate_names: cols = {} for currency in currencies: start, end = sorted(rng.uniform(0.5, 1.5, (2,))) cols[currency] = np.linspace(start, end, len(sessions)) reference = pd.DataFrame(cols, index=sessions, columns=currencies) out[rate_name] = cls.make_fx_rates_from_reference(reference) return out @classmethod def write_h5_fx_rates(cls, path): """Write cls.fx_rates to disk with an HDF5FXRateWriter. Returns an HDF5FXRateReader that reader from written data. """ sessions = cls.fx_rates_sessions # Write in-memory data to h5 file. with h5py.File(path, 'w') as h5_file: writer = HDF5FXRateWriter(h5_file, cls.HDF5_FX_CHUNK_SIZE) fx_data = ((rate, quote, quote_frame.values) for rate, rate_dict in cls.fx_rates.items() for quote, quote_frame in rate_dict.items()) writer.write( dts=sessions.values, currencies=np.array(cls.FX_RATES_CURRENCIES, dtype=object), data=fx_data, ) h5_file = cls.enter_class_context(h5py.File(path, 'r')) return HDF5FXRateReader( h5_file, default_rate=cls.FX_RATES_DEFAULT_RATE, ) @classmethod def get_expected_fx_rate_scalar(cls, rate, quote, base, dt): """Get the expected FX rate for the given scalar coordinates. """ if base is None: return np.nan if rate == DEFAULT_FX_RATE: rate = cls.FX_RATES_DEFAULT_RATE col = cls.fx_rates[rate][quote][base] if dt < col.index[0]: return np.nan # PERF: We call this function a lot in some suites, and get_loc is # surprisingly expensive, so optimizing it has a meaningful impact on # overall suite performance. See test_fast_get_loc_ffilled_for # assurance that this behaves the same as get_loc. ix = fast_get_loc_ffilled(col.index.values, dt.asm8) return col.values[ix] @classmethod def get_expected_fx_rates(cls, rate, quote, bases, dts): """Get an array of expected FX rates for the given indices. 
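        The result is aligned row-for-row with ``dts`` and column-for-column
        with ``bases``. A sketch of typical use in a test, assuming ``reader``
        is an FX rate reader exposing ``get_rates`` (the reader and its exact
        API are not defined here)::

            expected = cls.get_expected_fx_rates(rate, quote, bases, dts)
            actual = reader.get_rates(rate, quote, bases, dts)
            np.testing.assert_allclose(actual, expected)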
""" out = np.empty((len(dts), len(bases)), dtype='float64') for i, dt in enumerate(dts): for j, base in enumerate(bases): out[i, j] = cls.get_expected_fx_rate_scalar( rate, quote, base, dt, ) return out @classmethod def get_expected_fx_rates_columnar(cls, rate, quote, bases, dts): assert len(bases) == len(dts) rates = [ cls.get_expected_fx_rate_scalar(rate, quote, base, dt) for base, dt in zip(bases, dts) ] return np.array(rates, dtype='float64') def fast_get_loc_ffilled(dts, dt): """ Equivalent to dts.get_loc(dt, method='ffill'), but with reasonable microperformance. """ ix = dts.searchsorted(dt, side='right') - 1 if ix < 0: raise KeyError(dt) return ix
zipline-trader
/zipline_trader-1.6.1-cp36-cp36m-win32.whl/zipline/testing/fixtures.py
fixtures.py
from abc import ABCMeta, abstractmethod, abstractproperty from contextlib import contextmanager import gzip from itertools import ( combinations, count, product, ) import json import operator import os from os.path import abspath, dirname, join, realpath import shutil import sys import tempfile from traceback import format_exception from logbook import TestHandler from mock import patch from nose.tools import nottest from numpy.testing import assert_allclose, assert_array_equal import pandas as pd from six import itervalues, iteritems, with_metaclass from six.moves import filter, map from sqlalchemy import create_engine from testfixtures import TempDirectory from toolz import concat, curry from trading_calendars import get_calendar from zipline.assets import AssetFinder, AssetDBWriter from zipline.assets.synthetic import make_simple_equity_info from zipline.utils.compat import getargspec, wraps from zipline.data.data_portal import DataPortal from zipline.data.minute_bars import ( BcolzMinuteBarReader, BcolzMinuteBarWriter, US_EQUITIES_MINUTES_PER_DAY ) from zipline.data.bcolz_daily_bars import ( BcolzDailyBarReader, BcolzDailyBarWriter, ) from zipline.finance.blotter import SimulationBlotter from zipline.finance.order import ORDER_STATUS from zipline.lib.labelarray import LabelArray from zipline.pipeline.data import EquityPricing from zipline.pipeline.domain import EquitySessionDomain from zipline.pipeline.engine import SimplePipelineEngine from zipline.pipeline.factors import CustomFactor from zipline.pipeline.loaders.testing import make_seeded_random_loader from zipline.utils import security_list from zipline.utils.input_validation import expect_dimensions from zipline.utils.numpy_utils import as_column, isnat from zipline.utils.pandas_utils import timedelta_to_integral_seconds from zipline.utils.sentinel import sentinel import numpy as np from numpy import float64 EPOCH = pd.Timestamp(0, tz='UTC') def seconds_to_timestamp(seconds): return pd.Timestamp(seconds, unit='s', tz='UTC') def to_utc(time_str): """Convert a string in US/Eastern time to UTC""" return pd.Timestamp(time_str, tz='US/Eastern').tz_convert('UTC') def str_to_seconds(s): """ Convert a pandas-intelligible string to (integer) seconds since UTC. 
>>> from pandas import Timestamp >>> (Timestamp('2014-01-01') - Timestamp(0)).total_seconds() 1388534400.0 >>> str_to_seconds('2014-01-01') 1388534400 """ return timedelta_to_integral_seconds(pd.Timestamp(s, tz='UTC') - EPOCH) def drain_zipline(test, zipline): output = [] transaction_count = 0 msg_counter = 0 # start the simulation for update in zipline: msg_counter += 1 output.append(update) if 'daily_perf' in update: transaction_count += \ len(update['daily_perf']['transactions']) return output, transaction_count def check_algo_results(test, results, expected_transactions_count=None, expected_order_count=None, expected_positions_count=None, sid=None): if expected_transactions_count is not None: txns = flatten_list(results["transactions"]) test.assertEqual(expected_transactions_count, len(txns)) if expected_positions_count is not None: raise NotImplementedError if expected_order_count is not None: # de-dup orders on id, because orders are put back into perf packets # whenever they a txn is filled orders = set([order['id'] for order in flatten_list(results["orders"])]) test.assertEqual(expected_order_count, len(orders)) def flatten_list(list): return [item for sublist in list for item in sublist] def assert_single_position(test, zipline): output, transaction_count = drain_zipline(test, zipline) if 'expected_transactions' in test.zipline_test_config: test.assertEqual( test.zipline_test_config['expected_transactions'], transaction_count ) else: test.assertEqual( test.zipline_test_config['order_count'], transaction_count ) # the final message is the risk report, the second to # last is the final day's results. Positions is a list of # dicts. closing_positions = output[-2]['daily_perf']['positions'] # confirm that all orders were filled. # iterate over the output updates, overwriting # orders when they are updated. Then check the status on all. orders_by_id = {} for update in output: if 'daily_perf' in update: if 'orders' in update['daily_perf']: for order in update['daily_perf']['orders']: orders_by_id[order['id']] = order for order in itervalues(orders_by_id): test.assertEqual( order['status'], ORDER_STATUS.FILLED, "") test.assertEqual( len(closing_positions), 1, "Portfolio should have one position." 
) sid = test.zipline_test_config['sid'] test.assertEqual( closing_positions[0]['sid'], sid, "Portfolio should have one position in " + str(sid) ) return output, transaction_count @contextmanager def security_list_copy(): old_dir = security_list.SECURITY_LISTS_DIR new_dir = tempfile.mkdtemp() try: for subdir in os.listdir(old_dir): shutil.copytree(os.path.join(old_dir, subdir), os.path.join(new_dir, subdir)) with patch.object(security_list, 'SECURITY_LISTS_DIR', new_dir), \ patch.object(security_list, 'using_copy', True, create=True): yield finally: shutil.rmtree(new_dir, True) def add_security_data(adds, deletes): if not hasattr(security_list, 'using_copy'): raise Exception('add_security_data must be used within ' 'security_list_copy context') directory = os.path.join( security_list.SECURITY_LISTS_DIR, "leveraged_etf_list/20150127/20150125" ) if not os.path.exists(directory): os.makedirs(directory) del_path = os.path.join(directory, "delete") with open(del_path, 'w') as f: for sym in deletes: f.write(sym) f.write('\n') add_path = os.path.join(directory, "add") with open(add_path, 'w') as f: for sym in adds: f.write(sym) f.write('\n') def all_pairs_matching_predicate(values, pred): """ Return an iterator of all pairs, (v0, v1) from values such that `pred(v0, v1) == True` Parameters ---------- values : iterable pred : function Returns ------- pairs_iterator : generator Generator yielding pairs matching `pred`. Examples -------- >>> from zipline.testing import all_pairs_matching_predicate >>> from operator import eq, lt >>> list(all_pairs_matching_predicate(range(5), eq)) [(0, 0), (1, 1), (2, 2), (3, 3), (4, 4)] >>> list(all_pairs_matching_predicate("abcd", lt)) [('a', 'b'), ('a', 'c'), ('a', 'd'), ('b', 'c'), ('b', 'd'), ('c', 'd')] """ return filter(lambda pair: pred(*pair), product(values, repeat=2)) def product_upper_triangle(values, include_diagonal=False): """ Return an iterator over pairs, (v0, v1), drawn from values. If `include_diagonal` is True, returns all pairs such that v0 <= v1. If `include_diagonal` is False, returns all pairs such that v0 < v1. """ return all_pairs_matching_predicate( values, operator.le if include_diagonal else operator.lt, ) def all_subindices(index): """ Return all valid sub-indices of a pandas Index. """ return ( index[start:stop] for start, stop in product_upper_triangle(range(len(index) + 1)) ) def chrange(start, stop): """ Construct an iterable of length-1 strings beginning with `start` and ending with `stop`. Parameters ---------- start : str The first character. stop : str The last character. Returns ------- chars: iterable[str] Iterable of strings beginning with start and ending with stop. Examples -------- >>> chrange('A', 'C') ['A', 'B', 'C'] """ return list(map(chr, range(ord(start), ord(stop) + 1))) def make_trade_data_for_asset_info(dates, asset_info, price_start, price_step_by_date, price_step_by_sid, volume_start, volume_step_by_date, volume_step_by_sid): """ Convert the asset info dataframe into a dataframe of trade data for each sid, and write to the writer if provided. Write NaNs for locations where assets did not exist. Return a dict of the dataframes, keyed by sid. 
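    For example (a sketch; ``sessions`` and ``asset_info`` stand for a trading
    session index and an asset-info frame such as the ones produced by
    ``make_simple_equity_info``)::

        trade_data = make_trade_data_for_asset_info(
            dates=sessions,
            asset_info=asset_info,
            price_start=10,
            price_step_by_date=1,
            price_step_by_sid=100,
            volume_start=100,
            volume_step_by_date=10,
            volume_step_by_sid=1000,
        )
        # trade_data maps each sid in asset_info.index to an OHLCV frame
        # indexed by ``dates``.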
""" trade_data = {} sids = asset_info.index price_sid_deltas = np.arange(len(sids), dtype=float64) * price_step_by_sid price_date_deltas = (np.arange(len(dates), dtype=float64) * price_step_by_date) prices = (price_sid_deltas + as_column(price_date_deltas)) + price_start volume_sid_deltas = np.arange(len(sids)) * volume_step_by_sid volume_date_deltas = np.arange(len(dates)) * volume_step_by_date volumes = volume_sid_deltas + as_column(volume_date_deltas) + volume_start for j, sid in enumerate(sids): start_date, end_date = asset_info.loc[sid, ['start_date', 'end_date']] # Normalize here so the we still generate non-NaN values on the minutes # for an asset's last trading day. for i, date in enumerate(dates.normalize()): if not (start_date <= date <= end_date): prices[i, j] = 0 volumes[i, j] = 0 df = pd.DataFrame( { "open": prices[:, j], "high": prices[:, j], "low": prices[:, j], "close": prices[:, j], "volume": volumes[:, j], }, index=dates, ) trade_data[sid] = df return trade_data def check_allclose(actual, desired, rtol=1e-07, atol=0, err_msg='', verbose=True): """ Wrapper around np.testing.assert_allclose that also verifies that inputs are ndarrays. See Also -------- np.assert_allclose """ if type(actual) != type(desired): raise AssertionError("%s != %s" % (type(actual), type(desired))) return assert_allclose( actual, desired, atol=atol, rtol=rtol, err_msg=err_msg, verbose=verbose, ) def check_arrays(x, y, err_msg='', verbose=True, check_dtypes=True): """ Wrapper around np.testing.assert_array_equal that also verifies that inputs are ndarrays. See Also -------- np.assert_array_equal """ assert type(x) == type(y), "{x} != {y}".format(x=type(x), y=type(y)) assert x.dtype == y.dtype, "{x.dtype} != {y.dtype}".format(x=x, y=y) if isinstance(x, LabelArray): # Check that both arrays have missing values in the same locations... assert_array_equal( x.is_missing(), y.is_missing(), err_msg=err_msg, verbose=verbose, ) # ...then check the actual values as well. x = x.as_string_array() y = y.as_string_array() elif x.dtype.kind in 'mM': x_isnat = isnat(x) y_isnat = isnat(y) assert_array_equal( x_isnat, y_isnat, err_msg="NaTs not equal", verbose=verbose, ) # Fill NaTs with zero for comparison. x = np.where(x_isnat, np.zeros_like(x), x) y = np.where(y_isnat, np.zeros_like(y), y) return assert_array_equal(x, y, err_msg=err_msg, verbose=verbose) class UnexpectedAttributeAccess(Exception): pass class ExplodingObject(object): """ Object that will raise an exception on any attribute access. Useful for verifying that an object is never touched during a function/method call. 
""" def __getattribute__(self, name): raise UnexpectedAttributeAccess(name) def write_minute_data(trading_calendar, tempdir, minutes, sids): first_session = trading_calendar.minute_to_session_label( minutes[0], direction="none" ) last_session = trading_calendar.minute_to_session_label( minutes[-1], direction="none" ) sessions = trading_calendar.sessions_in_range(first_session, last_session) write_bcolz_minute_data( trading_calendar, sessions, tempdir.path, create_minute_bar_data(minutes, sids), ) return tempdir.path def create_minute_bar_data(minutes, sids): length = len(minutes) for sid_idx, sid in enumerate(sids): yield sid, pd.DataFrame( { 'open': np.arange(length) + 10 + sid_idx, 'high': np.arange(length) + 15 + sid_idx, 'low': np.arange(length) + 8 + sid_idx, 'close': np.arange(length) + 10 + sid_idx, 'volume': 100 + sid_idx, }, index=minutes, ) def create_daily_bar_data(sessions, sids): length = len(sessions) for sid_idx, sid in enumerate(sids): yield sid, pd.DataFrame( { "open": (np.array(range(10, 10 + length)) + sid_idx), "high": (np.array(range(15, 15 + length)) + sid_idx), "low": (np.array(range(8, 8 + length)) + sid_idx), "close": (np.array(range(10, 10 + length)) + sid_idx), "volume": np.array(range(100, 100 + length)) + sid_idx, "day": [session.value for session in sessions] }, index=sessions, ) def write_daily_data(tempdir, sim_params, sids, trading_calendar): path = os.path.join(tempdir.path, "testdaily.bcolz") BcolzDailyBarWriter(path, trading_calendar, sim_params.start_session, sim_params.end_session).write( create_daily_bar_data(sim_params.sessions, sids), ) return path def create_data_portal(asset_finder, tempdir, sim_params, sids, trading_calendar, adjustment_reader=None): if sim_params.data_frequency == "daily": daily_path = write_daily_data(tempdir, sim_params, sids, trading_calendar) equity_daily_reader = BcolzDailyBarReader(daily_path) return DataPortal( asset_finder, trading_calendar, first_trading_day=equity_daily_reader.first_trading_day, equity_daily_reader=equity_daily_reader, adjustment_reader=adjustment_reader ) else: minutes = trading_calendar.minutes_in_range( sim_params.first_open, sim_params.last_close ) minute_path = write_minute_data(trading_calendar, tempdir, minutes, sids) equity_minute_reader = BcolzMinuteBarReader(minute_path) return DataPortal( asset_finder, trading_calendar, first_trading_day=equity_minute_reader.first_trading_day, equity_minute_reader=equity_minute_reader, adjustment_reader=adjustment_reader ) def write_bcolz_minute_data(trading_calendar, days, path, data): BcolzMinuteBarWriter( path, trading_calendar, days[0], days[-1], US_EQUITIES_MINUTES_PER_DAY ).write(data) def create_minute_df_for_asset(trading_calendar, start_dt, end_dt, interval=1, start_val=1, minute_blacklist=None): asset_minutes = trading_calendar.minutes_for_sessions_in_range( start_dt, end_dt ) minutes_count = len(asset_minutes) if interval > 1: minutes_arr = np.zeros(minutes_count) minutes_arr[interval-1::interval] = \ np.arange(start_val+interval-1, start_val+minutes_count, interval) else: minutes_arr = np.arange(start_val, start_val + minutes_count) open_ = minutes_arr.copy() open_[interval-1::interval] += 1 high = minutes_arr.copy() high[interval-1::interval] += 2 low = minutes_arr.copy() low[interval - 1::interval] -= 1 df = pd.DataFrame( { "open": open_, "high": high, "low": low, "close": minutes_arr, "volume": 100 * minutes_arr, }, index=asset_minutes, ) if minute_blacklist is not None: for minute in minute_blacklist: df.loc[minute] = 0 return df def 
create_daily_df_for_asset(trading_calendar, start_day, end_day, interval=1): days = trading_calendar.sessions_in_range(start_day, end_day) days_count = len(days) days_arr = np.arange(days_count) + 2 df = pd.DataFrame( { "open": days_arr + 1, "high": days_arr + 2, "low": days_arr - 1, "close": days_arr, "volume": days_arr * 100, }, index=days, ) if interval > 1: # only keep every 'interval' rows for idx, _ in enumerate(days_arr): if (idx + 1) % interval != 0: df["open"].iloc[idx] = 0 df["high"].iloc[idx] = 0 df["low"].iloc[idx] = 0 df["close"].iloc[idx] = 0 df["volume"].iloc[idx] = 0 return df def trades_by_sid_to_dfs(trades_by_sid, index): for sidint, trades in iteritems(trades_by_sid): opens = [] highs = [] lows = [] closes = [] volumes = [] for trade in trades: opens.append(trade.open_price) highs.append(trade.high) lows.append(trade.low) closes.append(trade.close_price) volumes.append(trade.volume) yield sidint, pd.DataFrame( { "open": opens, "high": highs, "low": lows, "close": closes, "volume": volumes, }, index=index, ) def create_data_portal_from_trade_history(asset_finder, trading_calendar, tempdir, sim_params, trades_by_sid): if sim_params.data_frequency == "daily": path = os.path.join(tempdir.path, "testdaily.bcolz") writer = BcolzDailyBarWriter( path, trading_calendar, sim_params.start_session, sim_params.end_session ) writer.write( trades_by_sid_to_dfs(trades_by_sid, sim_params.sessions), ) equity_daily_reader = BcolzDailyBarReader(path) return DataPortal( asset_finder, trading_calendar, first_trading_day=equity_daily_reader.first_trading_day, equity_daily_reader=equity_daily_reader, ) else: minutes = trading_calendar.minutes_in_range( sim_params.first_open, sim_params.last_close ) length = len(minutes) assets = {} for sidint, trades in iteritems(trades_by_sid): opens = np.zeros(length) highs = np.zeros(length) lows = np.zeros(length) closes = np.zeros(length) volumes = np.zeros(length) for trade in trades: # put them in the right place idx = minutes.searchsorted(trade.dt) opens[idx] = trade.open_price * 1000 highs[idx] = trade.high * 1000 lows[idx] = trade.low * 1000 closes[idx] = trade.close_price * 1000 volumes[idx] = trade.volume assets[sidint] = pd.DataFrame({ "open": opens, "high": highs, "low": lows, "close": closes, "volume": volumes, "dt": minutes }).set_index("dt") write_bcolz_minute_data( trading_calendar, sim_params.sessions, tempdir.path, assets ) equity_minute_reader = BcolzMinuteBarReader(tempdir.path) return DataPortal( asset_finder, trading_calendar, first_trading_day=equity_minute_reader.first_trading_day, equity_minute_reader=equity_minute_reader, ) class FakeDataPortal(DataPortal): def __init__(self, asset_finder, trading_calendar=None, first_trading_day=None): if trading_calendar is None: trading_calendar = get_calendar("NYSE") super(FakeDataPortal, self).__init__(asset_finder, trading_calendar, first_trading_day) def get_spot_value(self, asset, field, dt, data_frequency): if field == "volume": return 100 else: return 1.0 def get_scalar_asset_spot_value(self, asset, field, dt, data_frequency): if field == "volume": return 100 else: return 1.0 def get_history_window(self, assets, end_dt, bar_count, frequency, field, data_frequency, ffill=True): end_idx = self.trading_calendar.all_sessions.searchsorted(end_dt) days = self.trading_calendar.all_sessions[ (end_idx - bar_count + 1):(end_idx + 1) ] df = pd.DataFrame( np.full((bar_count, len(assets)), 100.0), index=days, columns=assets ) if frequency == "1m" and not df.empty: df = df.reindex( 
self.trading_calendar.minutes_for_sessions_in_range( df.index[0], df.index[-1], ), method='ffill', ) return df class FetcherDataPortal(DataPortal): """ Mock dataportal that returns fake data for history and non-fetcher spot value. """ def __init__(self, asset_finder, trading_calendar, first_trading_day=None): super(FetcherDataPortal, self).__init__(asset_finder, trading_calendar, first_trading_day) def get_spot_value(self, asset, field, dt, data_frequency): # if this is a fetcher field, exercise the regular code path if self._is_extra_source(asset, field, self._augmented_sources_map): return super(FetcherDataPortal, self).get_spot_value( asset, field, dt, data_frequency) # otherwise just return a fixed value return int(asset) # XXX: These aren't actually the methods that are used by the superclasses, # so these don't do anything, and this class will likely produce unexpected # results for history(). def _get_daily_window_for_sid(self, asset, field, days_in_window, extra_slot=True): return np.arange(days_in_window, dtype=np.float64) def _get_minute_window_for_asset(self, asset, field, minutes_for_window): return np.arange(minutes_for_window, dtype=np.float64) class tmp_assets_db(object): """Create a temporary assets sqlite database. This is meant to be used as a context manager. Parameters ---------- url : string The URL for the database connection. **frames The frames to pass to the AssetDBWriter. By default this maps equities: ('A', 'B', 'C') -> map(ord, 'ABC') See Also -------- empty_assets_db tmp_asset_finder """ _default_equities = sentinel('_default_equities') def __init__(self, url='sqlite:///:memory:', equities=_default_equities, **frames): self._url = url self._eng = None if equities is self._default_equities: equities = make_simple_equity_info( list(map(ord, 'ABC')), pd.Timestamp(0), pd.Timestamp('2015'), ) frames['equities'] = equities self._frames = frames self._eng = None # set in enter and exit def __enter__(self): self._eng = eng = create_engine(self._url) AssetDBWriter(eng).write(**self._frames) return eng def __exit__(self, *excinfo): assert self._eng is not None, '_eng was not set in __enter__' self._eng.dispose() self._eng = None def empty_assets_db(): """Context manager for creating an empty assets db. See Also -------- tmp_assets_db """ return tmp_assets_db(equities=None) class tmp_asset_finder(tmp_assets_db): """Create a temporary asset finder using an in memory sqlite db. Parameters ---------- url : string The URL for the database connection. finder_cls : type, optional The type of asset finder to create from the assets db. **frames Forwarded to ``tmp_assets_db``. See Also -------- tmp_assets_db """ def __init__(self, url='sqlite:///:memory:', finder_cls=AssetFinder, future_chain_predicates=None, **frames): self._finder_cls = finder_cls self._future_chain_predicates = future_chain_predicates super(tmp_asset_finder, self).__init__(url=url, **frames) def __enter__(self): return self._finder_cls( super(tmp_asset_finder, self).__enter__(), future_chain_predicates=self._future_chain_predicates, ) def empty_asset_finder(): """Context manager for creating an empty asset finder. 
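    For example (a sketch)::

        with empty_asset_finder() as finder:
            assert list(finder.sids) == []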
See Also -------- empty_assets_db tmp_assets_db tmp_asset_finder """ return tmp_asset_finder(equities=None) class SubTestFailures(AssertionError): def __init__(self, *failures): self.failures = failures @staticmethod def _format_exc(exc_info): # we need to do this weird join-split-join to ensure that the full # message is indented by 4 spaces return '\n '.join(''.join(format_exception(*exc_info)).splitlines()) def __str__(self): return 'failures:\n %s' % '\n '.join( '\n '.join(( ', '.join('%s=%r' % item for item in scope.items()), self._format_exc(exc_info), )) for scope, exc_info in self.failures ) @nottest def subtest(iterator, *_names): """ Construct a subtest in a unittest. Consider using ``zipline.testing.parameter_space`` when subtests are constructed over a single input or over the cross-product of multiple inputs. ``subtest`` works by decorating a function as a subtest. The decorated function will be run by iterating over the ``iterator`` and *unpacking the values into the function. If any of the runs fail, the result will be put into a set and the rest of the tests will be run. Finally, if any failed, all of the results will be dumped as one failure. Parameters ---------- iterator : iterable[iterable] The iterator of arguments to pass to the function. *name : iterator[str] The names to use for each element of ``iterator``. These will be used to print the scope when a test fails. If not provided, it will use the integer index of the value as the name. Examples -------- :: class MyTest(TestCase): def test_thing(self): # Example usage inside another test. @subtest(([n] for n in range(100000)), 'n') def subtest(n): self.assertEqual(n % 2, 0, 'n was not even') subtest() @subtest(([n] for n in range(100000)), 'n') def test_decorated_function(self, n): # Example usage to parameterize an entire function. self.assertEqual(n % 2, 1, 'n was not odd') Notes ----- We use this when we: * Will never want to run each parameter individually. * Have a large parameter space we are testing (see tests/utils/test_events.py). ``nose_parameterized.expand`` will create a test for each parameter combination which bloats the test output and makes the travis pages slow. We cannot use ``unittest2.TestCase.subTest`` because nose, pytest, and nose2 do not support ``addSubTest``. 
See Also -------- zipline.testing.parameter_space """ def dec(f): @wraps(f) def wrapped(*args, **kwargs): names = _names failures = [] for scope in iterator: scope = tuple(scope) try: f(*args + scope, **kwargs) except Exception: info = sys.exc_info() if not names: names = count() failures.append((dict(zip(names, scope)), info)) if failures: raise SubTestFailures(*failures) return wrapped return dec class MockDailyBarReader(object): def __init__(self, dates): self.sessions = pd.DatetimeIndex(dates) def load_raw_arrays(self, columns, start, stop, sids): dates = self.sessions if start < dates[0]: raise ValueError('start date is out of bounds for this reader') if stop > dates[-1]: raise ValueError('stop date is out of bounds for this reader') output_dates = dates[(dates >= start) & (dates <= stop)] return [ np.full((len(output_dates), len(sids)), 100.0) for _ in columns ] def get_value(self, col, sid, dt): return 100.0 def create_mock_adjustment_data(splits=None, dividends=None, mergers=None): if splits is None: splits = create_empty_splits_mergers_frame() elif not isinstance(splits, pd.DataFrame): splits = pd.DataFrame(splits) if mergers is None: mergers = create_empty_splits_mergers_frame() elif not isinstance(mergers, pd.DataFrame): mergers = pd.DataFrame(mergers) if dividends is None: dividends = create_empty_dividends_frame() elif not isinstance(dividends, pd.DataFrame): dividends = pd.DataFrame(dividends) return splits, mergers, dividends def assert_timestamp_equal(left, right, compare_nat_equal=True, msg=""): """ Assert that two pandas Timestamp objects are the same. Parameters ---------- left, right : pd.Timestamp The values to compare. compare_nat_equal : bool, optional Whether to consider `NaT` values equal. Defaults to True. msg : str, optional A message to forward to `pd.util.testing.assert_equal`. """ if compare_nat_equal and left is pd.NaT and right is pd.NaT: return return pd.util.testing.assert_equal(left, right, msg=msg) def powerset(values): """ Return the power set (i.e., the set of all subsets) of entries in `values`. """ return concat(combinations(values, i) for i in range(len(values) + 1)) def to_series(knowledge_dates, earning_dates): """ Helper for converting a dict of strings to a Series of datetimes. This is just for making the test cases more readable. """ return pd.Series( index=pd.to_datetime(knowledge_dates), data=pd.to_datetime(earning_dates), ) def gen_calendars(start, stop, critical_dates): """ Generate calendars to use as inputs. """ all_dates = pd.date_range(start, stop, tz='utc') for to_drop in map(list, powerset(critical_dates)): # Have to yield tuples. yield (all_dates.drop(to_drop),) # Also test with the trading calendar. trading_days = get_calendar("NYSE").all_days yield (trading_days[trading_days.slice_indexer(start, stop)],) @contextmanager def temp_pipeline_engine(calendar, sids, random_seed, symbols=None): """ A contextManager that yields a SimplePipelineEngine holding a reference to an AssetFinder generated via tmp_asset_finder. Parameters ---------- calendar : pd.DatetimeIndex Calendar to pass to the constructed PipelineEngine. sids : iterable[int] Sids to use for the temp asset finder. random_seed : int Integer used to seed instances of SeededRandomLoader. symbols : iterable[str], optional Symbols for constructed assets. Forwarded to make_simple_equity_info. 
""" equity_info = make_simple_equity_info( sids=sids, start_date=calendar[0], end_date=calendar[-1], symbols=symbols, ) loader = make_seeded_random_loader(random_seed, calendar, sids) def get_loader(column): return loader with tmp_asset_finder(equities=equity_info) as finder: yield SimplePipelineEngine(get_loader, calendar, finder) def bool_from_envvar(name, default=False, env=None): """ Get a boolean value from the environment, making a reasonable attempt to convert "truthy" values to True and "falsey" values to False. Strings are coerced to bools using ``json.loads(s.lower())``. Parameters ---------- name : str Name of the environment variable. default : bool, optional Value to use if the environment variable isn't set. Default is False env : dict-like, optional Mapping in which to look up ``name``. This is a parameter primarily for testing purposes. Default is os.environ. Returns ------- value : bool ``env[name]`` coerced to a boolean, or ``default`` if ``name`` is not in ``env``. """ if env is None: env = os.environ value = env.get(name) if value is None: return default try: # Try to parse as JSON. This makes strings like "0", "False", and # "null" evaluate as falsey values. value = json.loads(value.lower()) except ValueError: # If the value can't be parsed as json, assume it should be treated as # a string for the purpose of evaluation. pass return bool(value) _FAIL_FAST_DEFAULT = bool_from_envvar('PARAMETER_SPACE_FAIL_FAST') def parameter_space(__fail_fast=_FAIL_FAST_DEFAULT, **params): """ Wrapper around subtest that allows passing keywords mapping names to iterables of values. The decorated test function will be called with the cross-product of all possible inputs Examples -------- >>> from unittest import TestCase >>> class SomeTestCase(TestCase): ... @parameter_space(x=[1, 2], y=[2, 3]) ... def test_some_func(self, x, y): ... # Will be called with every possible combination of x and y. ... self.assertEqual(somefunc(x, y), expected_result(x, y)) See Also -------- zipline.testing.subtest """ def decorator(f): argspec = getargspec(f) if argspec.varargs: raise AssertionError("parameter_space() doesn't support *args") if argspec.keywords: raise AssertionError("parameter_space() doesn't support **kwargs") if argspec.defaults: raise AssertionError("parameter_space() doesn't support defaults.") # Skip over implicit self. argnames = argspec.args if argnames[0] == 'self': argnames = argnames[1:] extra = set(params) - set(argnames) if extra: raise AssertionError( "Keywords %s supplied to parameter_space() are " "not in function signature." % extra ) unspecified = set(argnames) - set(params) if unspecified: raise AssertionError( "Function arguments %s were not " "supplied to parameter_space()." 
% unspecified ) def make_param_sets(): return product(*(params[name] for name in argnames)) def clean_f(self, *args, **kwargs): try: f(self, *args, **kwargs) finally: self.tearDown() self.setUp() if __fail_fast: @wraps(f) def wrapped(self): for args in make_param_sets(): clean_f(self, *args) return wrapped else: @wraps(f) def wrapped(*args, **kwargs): subtest(make_param_sets(), *argnames)(clean_f)(*args, **kwargs) return wrapped return decorator def create_empty_dividends_frame(): return pd.DataFrame( np.array( [], dtype=[ ('ex_date', 'datetime64[ns]'), ('pay_date', 'datetime64[ns]'), ('record_date', 'datetime64[ns]'), ('declared_date', 'datetime64[ns]'), ('amount', 'float64'), ('sid', 'int32'), ], ), index=pd.DatetimeIndex([], tz='UTC'), ) def create_empty_splits_mergers_frame(): return pd.DataFrame( np.array( [], dtype=[ ('effective_date', 'int64'), ('ratio', 'float64'), ('sid', 'int64'), ], ), index=pd.DatetimeIndex([]), ) def make_alternating_boolean_array(shape, first_value=True): """ Create a 2D numpy array with the given shape containing alternating values of False, True, False, True,... along each row and each column. Examples -------- >>> make_alternating_boolean_array((4,4)) array([[ True, False, True, False], [False, True, False, True], [ True, False, True, False], [False, True, False, True]], dtype=bool) >>> make_alternating_boolean_array((4,3), first_value=False) array([[False, True, False], [ True, False, True], [False, True, False], [ True, False, True]], dtype=bool) """ if len(shape) != 2: raise ValueError( 'Shape must be 2-dimensional. Given shape was {}'.format(shape) ) alternating = np.empty(shape, dtype=np.bool) for row in alternating: row[::2] = first_value row[1::2] = not(first_value) first_value = not(first_value) return alternating def make_cascading_boolean_array(shape, first_value=True): """ Create a numpy array with the given shape containing cascading boolean values, with `first_value` being the top-left value. Examples -------- >>> make_cascading_boolean_array((4,4)) array([[ True, True, True, False], [ True, True, False, False], [ True, False, False, False], [False, False, False, False]], dtype=bool) >>> make_cascading_boolean_array((4,2)) array([[ True, False], [False, False], [False, False], [False, False]], dtype=bool) >>> make_cascading_boolean_array((2,4)) array([[ True, True, True, False], [ True, True, False, False]], dtype=bool) """ if len(shape) != 2: raise ValueError( 'Shape must be 2-dimensional. Given shape was {}'.format(shape) ) cascading = np.full(shape, not(first_value), dtype=np.bool) ending_col = shape[1] - 1 for row in cascading: if ending_col > 0: row[:ending_col] = first_value ending_col -= 1 else: break return cascading @expect_dimensions(array=2) def permute_rows(seed, array): """ Shuffle each row in ``array`` based on permutations generated by ``seed``. Parameters ---------- seed : int Seed for numpy.RandomState array : np.ndarray[ndim=2] Array over which to apply permutations. """ rand = np.random.RandomState(seed) return np.apply_along_axis(rand.permutation, 1, array) @nottest def make_test_handler(testcase, *args, **kwargs): """ Returns a TestHandler which will be used by the given testcase. This handler can be used to test log messages. Parameters ---------- testcase: unittest.TestCase The test class in which the log handler will be used. *args, **kwargs Forwarded to the new TestHandler object. Returns ------- handler: logbook.TestHandler The handler to use for the test case. 
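    For example (a sketch; ``run_code_that_logs`` is a stand-in for whatever
    the test exercises, and the logbook handler API is assumed)::

        handler = make_test_handler(self)
        with handler.applicationbound():
            run_code_that_logs()
        self.assertTrue(handler.has_warnings)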
""" handler = TestHandler(*args, **kwargs) testcase.addCleanup(handler.close) return handler def write_compressed(path, content): """ Write a compressed (gzipped) file to `path`. """ with gzip.open(path, 'wb') as f: f.write(content) def read_compressed(path): """ Write a compressed (gzipped) file from `path`. """ with gzip.open(path, 'rb') as f: return f.read() zipline_git_root = abspath( join(realpath(dirname(__file__)), '..', '..'), ) @nottest def test_resource_path(*path_parts): return os.path.join(zipline_git_root, 'tests', 'resources', *path_parts) @contextmanager def patch_os_environment(remove=None, **values): """ Context manager for patching the operating system environment. """ old_values = {} remove = remove or [] for key in remove: old_values[key] = os.environ.pop(key) for key, value in values.iteritems(): old_values[key] = os.getenv(key) os.environ[key] = value try: yield finally: for old_key, old_value in old_values.iteritems(): if old_value is None: # Value was not present when we entered, so del it out if it's # still present. try: del os.environ[key] except KeyError: pass else: # Restore the old value. os.environ[old_key] = old_value class tmp_dir(TempDirectory, object): """New style class that wrapper for TempDirectory in python 2. """ pass class _TmpBarReader(with_metaclass(ABCMeta, tmp_dir)): """A helper for tmp_bcolz_equity_minute_bar_reader and tmp_bcolz_equity_daily_bar_reader. Parameters ---------- days : pd.DatetimeIndex The days to write for. data : dict[int -> pd.DataFrame] The data to write. path : str, optional The path to the directory to write the data into. If not given, this will be a unique name. """ @abstractproperty def _reader_cls(self): raise NotImplementedError('_reader') @abstractmethod def _write(self, cal, days, path, data): raise NotImplementedError('_write') def __init__(self, cal, days, data, path=None): super(_TmpBarReader, self).__init__(path=path) self._cal = cal self._days = days self._data = data def __enter__(self): tmpdir = super(_TmpBarReader, self).__enter__() try: self._write( self._cal, self._days, tmpdir.path, self._data, ) return self._reader_cls(tmpdir.path) except BaseException: # Clean up even on KeyboardInterrupt self.__exit__(None, None, None) raise class tmp_bcolz_equity_minute_bar_reader(_TmpBarReader): """A temporary BcolzMinuteBarReader object. Parameters ---------- cal : TradingCalendar The trading calendar for which we're writing data. days : pd.DatetimeIndex The days to write for. data : iterable[(int, pd.DataFrame)] The data to write. path : str, optional The path to the directory to write the data into. If not given, this will be a unique name. See Also -------- tmp_bcolz_equity_daily_bar_reader """ _reader_cls = BcolzMinuteBarReader _write = staticmethod(write_bcolz_minute_data) class tmp_bcolz_equity_daily_bar_reader(_TmpBarReader): """A temporary BcolzDailyBarReader object. Parameters ---------- cal : TradingCalendar The trading calendar for which we're writing data. days : pd.DatetimeIndex The days to write for. data : dict[int -> pd.DataFrame] The data to write. path : str, optional The path to the directory to write the data into. If not given, this will be a unique name. See Also -------- tmp_bcolz_equity_daily_bar_reader """ _reader_cls = BcolzDailyBarReader @staticmethod def _write(cal, days, path, data): BcolzDailyBarWriter(path, days).write(data) @contextmanager def patch_read_csv(url_map, module=pd, strict=False): """Patch pandas.read_csv to map lookups from url to another. 
    Parameters
    ----------
    url_map : mapping[str or file-like object -> str or file-like object]
        The mapping to use to redirect read_csv calls.
    module : module, optional
        The module to patch ``read_csv`` on. By default this is ``pandas``.
        This should be set to another module if ``read_csv`` is early-bound
        like ``from pandas import read_csv`` instead of late-bound like
        ``import pandas as pd; pd.read_csv``.
    strict : bool, optional
        If true, then this will assert that ``read_csv`` is only called with
        elements in the ``url_map``.
    """
    read_csv = pd.read_csv

    def patched_read_csv(filepath_or_buffer, *args, **kwargs):
        if filepath_or_buffer in url_map:
            return read_csv(url_map[filepath_or_buffer], *args, **kwargs)
        elif not strict:
            return read_csv(filepath_or_buffer, *args, **kwargs)
        else:
            raise AssertionError(
                'attempted to call read_csv on %r which is not in the '
                'url map' % filepath_or_buffer,
            )

    with patch.object(module, 'read_csv', patched_read_csv):
        yield


@curry
def ensure_doctest(f, name=None):
    """Ensure that an object gets doctested. This is useful for instances
    of objects like curry or partial which are not discovered by default.

    Parameters
    ----------
    f : any
        The thing to doctest.
    name : str, optional
        The name to use in the doctest function mapping. If this is None,
        then ``f.__name__`` will be used.

    Returns
    -------
    f : any
        ``f`` unchanged.
    """
    sys._getframe(2).f_globals.setdefault('__test__', {})[
        f.__name__ if name is None else name
    ] = f
    return f


class RecordBatchBlotter(SimulationBlotter):
    """Blotter that tracks how its batch_order method was called.
    """
    def __init__(self):
        super(RecordBatchBlotter, self).__init__()
        self.order_batch_called = []

    def batch_order(self, *args, **kwargs):
        self.order_batch_called.append((args, kwargs))
        return super(RecordBatchBlotter, self).batch_order(*args, **kwargs)


class AssetID(CustomFactor):
    """
    CustomFactor that returns the AssetID of each asset.

    Useful for providing a Factor that produces a different value for each
    asset.
    """
    window_length = 1
    inputs = ()

    def compute(self, today, assets, out):
        out[:] = assets


class AssetIDPlusDay(CustomFactor):
    window_length = 1
    inputs = ()

    def compute(self, today, assets, out):
        out[:] = assets + today.day


class OpenPrice(CustomFactor):
    window_length = 1
    inputs = [EquityPricing.open]

    def compute(self, today, assets, out, open):
        out[:] = open


def prices_generating_returns(returns, starting_price):
    """Construct the time series of prices that produce the given returns.

    Parameters
    ----------
    returns : np.ndarray[float]
        The returns that these prices generate.
    starting_price : float
        The first price of the series.

    Returns
    -------
    prices : np.ndarray[float]
        The prices that generate the given returns. This array will be one
        element longer than ``returns`` and ``prices[0] == starting_price``.
    """
    raw_prices = starting_price * (1 + np.append([0], returns)).cumprod()
    rounded_prices = raw_prices.round(3)

    if not np.allclose(raw_prices, rounded_prices):
        raise ValueError(
            'Prices only have 3 decimal places of precision. There is no '
            'valid price series that generates these returns.',
        )

    return rounded_prices


def random_tick_prices(starting_price,
                       count,
                       tick_size=0.01,
                       tick_range=(-5, 7),
                       seed=42):
    """
    Construct a time series of prices that tick by a random multiple of
    ``tick_size`` every period.

    Parameters
    ----------
    starting_price : float
        The first price of the series.
    count : int
        Number of price observations to return.
    tick_size : float
        Unit of price movement between observations.
    tick_range : (int, int)
        Pair of lower/upper bounds for the difference in the number of ticks
        between price observations.
    seed : int, optional
        Seed to use for random number generation.
    """
    out = np.full(count, starting_price, dtype=float)
    rng = np.random.RandomState(seed)
    diff = rng.randint(tick_range[0], tick_range[1], size=len(out) - 1)
    ticks = starting_price + diff.cumsum() * tick_size
    out[1:] = ticks
    return out


def simulate_minutes_for_day(open_,
                             high,
                             low,
                             close,
                             volume,
                             trading_minutes=390,
                             random_state=None):
    """Generate a random walk of minute returns which meets the given OHLCV
    profile for an asset. The volume will be evenly distributed through the
    day.

    Parameters
    ----------
    open_ : float
        The day's open.
    high : float
        The day's high.
    low : float
        The day's low.
    close : float
        The day's close.
    volume : float
        The day's volume.
    trading_minutes : int, optional
        The number of minutes to simulate.
    random_state : numpy.random.RandomState, optional
        The random state to use. If not provided, the global numpy state is
        used.
    """
    if random_state is None:
        random_state = np.random

    sub_periods = 5

    values = (random_state.rand(trading_minutes * sub_periods) - 0.5).cumsum()
    values *= (high - low) / (values.max() - values.min())
    values += np.linspace(
        open_ - values[0],
        close - values[-1],
        len(values),
    )
    assert np.allclose(open_, values[0])
    assert np.allclose(close, values[-1])

    max_ = max(close, open_)
    where = values > max_
    values[where] = (
        (values[where] - max_) *
        (high - max_) /
        (values.max() - max_) +
        max_
    )

    min_ = min(close, open_)
    where = values < min_
    values[where] = (
        (values[where] - min_) *
        (low - min_) /
        (values.min() - min_) +
        min_
    )

    if not (np.allclose(values.max(), high) and
            np.allclose(values.min(), low)):
        return simulate_minutes_for_day(
            open_,
            high,
            low,
            close,
            volume,
            trading_minutes,
            random_state=random_state,
        )

    prices = pd.Series(values.round(3)).groupby(
        np.arange(trading_minutes).repeat(sub_periods),
    )

    base_volume, remainder = divmod(volume, trading_minutes)
    volume = np.full(trading_minutes, base_volume, dtype='int64')
    volume[:remainder] += 1

    # TODO: add in volume
    return pd.DataFrame({
        'open': prices.first(),
        'close': prices.last(),
        'high': prices.max(),
        'low': prices.min(),
        'volume': volume,
    })


def create_simple_domain(start, end, country_code):
    """Create a new pipeline domain with a simple date_range index.
    """
    return EquitySessionDomain(pd.date_range(start, end), country_code)


def write_hdf5_daily_bars(writer,
                          asset_finder,
                          country_codes,
                          generate_data,
                          generate_currency_codes):
    """Write an HDF5 file of pricing data using an HDF5DailyBarWriter.
    """
    for country_code in country_codes:
        sids = asset_finder.equities_sids_for_country_code(country_code)

        # XXX: The contract for generate_data is that it should return an
        # iterator of (sid, df) pairs with an entry for each sid in `sids`,
        # and the contract for `generate_currency_codes` is that it should
        # return a series indexed by the sids it receives.
        #
        # Unfortunately, some of our tests that were written before the
        # introduction of multiple markets (in particular, the ones that use
        # EQUITY_DAILY_BAR_SOURCE_FROM_MINUTE), provide a function that
        # always returns the same iterator, regardless of the provided
        # `sids`, which means there are cases where the sids in `data` don't
        # match the sids in `currency_codes`, which causes an assertion
        # failure in `write_from_sid_df_pairs`.
        #
        # The correct fix for this is to update those old tests to respect
        # `sids` (most likely by updating `make_equity_minute_bar_sids` to
        # support multiple countries). But that requires updating a lot of
        # tests, so for now, we call `generate_data` and use the sids it
        # produces to determine what to pass to `generate_currency_codes`.
        data = list(generate_data(country_code=country_code, sids=sids))
        data_sids = [p[0] for p in data]
        currency_codes = generate_currency_codes(
            country_code=country_code,
            sids=data_sids,
        )

        writer.write_from_sid_df_pairs(
            country_code,
            iter(data),
            currency_codes=currency_codes,
        )


def exchange_info_for_domains(domains):
    """
    Build an exchange_info suitable for passing to an AssetFinder from a list
    of EquityCalendarDomain.
    """
    return pd.DataFrame.from_records([
        {'exchange': domain.calendar.name, 'country_code': domain.country_code}
        for domain in domains
    ])
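

# ---------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the original zipline module): a
# minimal example of how two of the helpers above are typically combined in
# tests. The URL and the tiny in-memory CSV fixture are hypothetical
# placeholders, and the function is never called at import time.
def _example_core_testing_helpers():
    from io import BytesIO

    # prices_generating_returns: build a deterministic price fixture whose
    # percent changes reproduce a known returns vector.
    returns = np.array([0.01, -0.02, 0.005])
    prices = prices_generating_returns(returns, starting_price=100.0)
    assert len(prices) == len(returns) + 1 and prices[0] == 100.0
    assert np.allclose(pd.Series(prices).pct_change().dropna(), returns,
                       atol=1e-4)

    # patch_read_csv: redirect a remote URL to an in-memory fixture so code
    # under test never touches the network. ``strict=True`` makes any
    # unmapped read_csv call fail loudly.
    fixture = BytesIO(b'sid,close\n1,100.0\n2,101.5\n')
    with patch_read_csv({'https://example.com/prices.csv': fixture},
                        strict=True):
        frame = pd.read_csv('https://example.com/prices.csv')

    return prices, frame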
zipline-trader
/zipline_trader-1.6.1-cp36-cp36m-win32.whl/zipline/testing/core.py
core.py
import numpy as np from zipline.pipeline.factors.factor import CustomFactor from zipline.pipeline.classifiers.classifier import CustomClassifier from zipline.utils.idbox import IDBox from .predicates import assert_equal class CheckWindowsMixin(object): params = ('expected_windows',) def compute(self, today, assets, out, input_, expected_windows): for asset, expected_by_day in expected_windows: expected_by_day = expected_by_day.ob col_ix = np.searchsorted(assets, asset) if assets[col_ix] != asset: raise AssertionError('asset %s is not in the window' % asset) try: expected = expected_by_day[today] except KeyError: pass else: expected = np.asanyarray(expected) actual = input_[:, col_ix] assert_equal(actual, expected, array_decimal=(6 if expected.dtype.kind == 'f' else None)) # output is just latest out[:] = input_[-1] class CheckWindowsClassifier(CheckWindowsMixin, CustomClassifier): """A custom classifier that makes assertions about the lookback windows that it gets passed. Parameters ---------- input_ : Term The input term to the classifier. window_length : int The length of the lookback window. expected_windows : dict[int, dict[pd.Timestamp, np.ndarray]] For each asset, for each day, what the expected lookback window is. Notes ----- The output of this classifier is the same as ``Latest``. Any assets or days not in ``expected_windows`` are not checked. """ def __new__(cls, input_, window_length, expected_windows): if input_.dtype.kind == 'V': dtype = np.dtype('O') else: dtype = input_.dtype return super(CheckWindowsClassifier, cls).__new__( cls, inputs=[input_], dtype=dtype, window_length=window_length, expected_windows=frozenset( (k, IDBox(v)) for k, v in expected_windows.items() ), ) class CheckWindowsFactor(CheckWindowsMixin, CustomFactor): """A custom factor that makes assertions about the lookback windows that it gets passed. Parameters ---------- input_ : Term The input term to the factor. window_length : int The length of the lookback window. expected_windows : dict[int, dict[pd.Timestamp, np.ndarray]] For each asset, for each day, what the expected lookback window is. Notes ----- The output of this factor is the same as ``Latest``. Any assets or days not in ``expected_windows`` are not checked. """ def __new__(cls, input_, window_length, expected_windows): return super(CheckWindowsFactor, cls).__new__( cls, inputs=[input_], dtype=input_.dtype, window_length=window_length, expected_windows=frozenset( (k, IDBox(v)) for k, v in expected_windows.items() ), )
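

# ---------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the original module): one way a
# test might construct CheckWindowsFactor to assert on the exact lookback
# window an input term receives. The sid (1), the session timestamp, and the
# expected window values are hypothetical; a real test would take them from
# its fixtures and run the returned pipeline through an engine.
def _example_check_windows_factor():
    import pandas as pd
    from zipline.pipeline import Pipeline
    from zipline.pipeline.data import EquityPricing

    expected_windows = {
        1: {  # asset id -> {session -> expected 3-day window of closes}
            pd.Timestamp('2014-01-06', tz='UTC'): np.array([10.0, 10.5, 11.0]),
        },
    }
    check = CheckWindowsFactor(
        input_=EquityPricing.close,
        window_length=3,
        expected_windows=expected_windows,
    )
    # The factor's output is just ``Latest`` of the input; the window
    # assertions run as a side effect when the engine calls ``compute``.
    # Assets or days missing from ``expected_windows`` are not checked.
    return Pipeline(columns={'close_latest': check})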
zipline-trader
/zipline_trader-1.6.1-cp36-cp36m-win32.whl/zipline/testing/pipeline_terms.py
pipeline_terms.py
.. image:: https://media.quantopian.com/logos/open_source/zipline-logo-03_.png :target: https://www.zipline.io :width: 212px :align: center :alt: Zipline ============= |Gitter| |pypi version status| |pypi pyversion status| |travis status| |appveyor status| |Coverage Status| Zipline is a Pythonic algorithmic trading library. It is an event-driven system for backtesting. Zipline is currently used in production as the backtesting and live-trading engine powering `Quantopian <https://www.quantopian.com>`_ -- a free, community-centered, hosted platform for building and executing trading strategies. Quantopian also offers a `fully managed service for professionals <https://factset.quantopian.com>`_ that includes Zipline, Alphalens, Pyfolio, FactSet data, and more. - `Join our Community! <https://groups.google.com/forum/#!forum/zipline>`_ - `Documentation <https://www.zipline.io>`_ - Want to Contribute? See our `Development Guidelines <https://www.zipline.io/development-guidelines>`_ Features ======== - **Ease of Use:** Zipline tries to get out of your way so that you can focus on algorithm development. See below for a code example. - **"Batteries Included":** many common statistics like moving average and linear regression can be readily accessed from within a user-written algorithm. - **PyData Integration:** Input of historical data and output of performance statistics are based on Pandas DataFrames to integrate nicely into the existing PyData ecosystem. - **Statistics and Machine Learning Libraries:** You can use libraries like matplotlib, scipy, statsmodels, and sklearn to support development, analysis, and visualization of state-of-the-art trading systems. Installation ============ Zipline currently supports Python 2.7, 3.5, and 3.6, and may be installed via either pip or conda. **Note:** Installing Zipline is slightly more involved than the average Python package. See the full `Zipline Install Documentation`_ for detailed instructions. For a development installation (used to develop Zipline itself), create and activate a virtualenv, then run the ``etc/dev-install`` script. Quickstart ========== See our `getting started tutorial <https://www.zipline.io/beginner-tutorial>`_. The following code implements a simple dual moving average algorithm. .. code:: python from zipline.api import order_target, record, symbol def initialize(context): context.i = 0 context.asset = symbol('AAPL') def handle_data(context, data): # Skip first 300 days to get full windows context.i += 1 if context.i < 300: return # Compute averages # data.history() has to be called with the same params # from above and returns a pandas dataframe. short_mavg = data.history(context.asset, 'price', bar_count=100, frequency="1d").mean() long_mavg = data.history(context.asset, 'price', bar_count=300, frequency="1d").mean() # Trading logic if short_mavg > long_mavg: # order_target orders as many shares as needed to # achieve the desired number of shares. order_target(context.asset, 100) elif short_mavg < long_mavg: order_target(context.asset, 0) # Save values for later inspection record(AAPL=data.current(context.asset, 'price'), short_mavg=short_mavg, long_mavg=long_mavg) You can then run this algorithm using the Zipline CLI. First, you must download some sample pricing and asset data: .. 
code:: bash

    $ zipline ingest
    $ zipline run -f dual_moving_average.py --start 2014-1-1 --end 2018-1-1 -o dma.pickle --no-benchmark

This will download asset pricing data sourced from Quandl, and stream it
through the algorithm over the specified time range. Then, the resulting
performance DataFrame is saved in ``dma.pickle``, which you can load and
analyze from within Python (a short snippet appears at the end of this
README).

You can find other examples in the ``zipline/examples`` directory.

Questions?
==========

If you find a bug, feel free to `open an issue
<https://github.com/quantopian/zipline/issues/new>`_ and fill out the issue
template.

Contributing
============

All contributions, bug reports, bug fixes, documentation improvements,
enhancements, and ideas are welcome. Details on how to set up a development
environment can be found in our `development guidelines
<https://www.zipline.io/development-guidelines>`_.

If you are looking to start working with the Zipline codebase, navigate to the
GitHub `issues` tab and start looking through interesting issues. Sometimes
there are issues labeled as `Beginner Friendly
<https://github.com/quantopian/zipline/issues?q=is%3Aissue+is%3Aopen+label%3A%22Beginner+Friendly%22>`_
or `Help Wanted
<https://github.com/quantopian/zipline/issues?q=is%3Aissue+is%3Aopen+label%3A%22Help+Wanted%22>`_.

Feel free to ask questions on the `mailing list
<https://groups.google.com/forum/#!forum/zipline>`_ or on `Gitter
<https://gitter.im/quantopian/zipline>`_.

.. note::

   Please note that Zipline is not a community-led project. Zipline is
   maintained by the Quantopian engineering team, and we are quite small and
   often busy.

   Because of this, we want to warn you that we may not attend to your pull
   request, issue, or direct mention in months, or even years. We hope you
   understand, and we hope that this note might help reduce any frustration or
   wasted time.

.. |Gitter| image:: https://badges.gitter.im/Join%20Chat.svg
   :target: https://gitter.im/quantopian/zipline?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge
.. |pypi version status| image:: https://img.shields.io/pypi/v/zipline.svg
   :target: https://pypi.python.org/pypi/zipline
.. |pypi pyversion status| image:: https://img.shields.io/pypi/pyversions/zipline.svg
   :target: https://pypi.python.org/pypi/zipline
.. |travis status| image:: https://travis-ci.org/quantopian/zipline.svg?branch=master
   :target: https://travis-ci.org/quantopian/zipline
.. |appveyor status| image:: https://ci.appveyor.com/api/projects/status/3dg18e6227dvstw6/branch/master?svg=true
   :target: https://ci.appveyor.com/project/quantopian/zipline/branch/master
.. |Coverage Status| image:: https://coveralls.io/repos/quantopian/zipline/badge.svg
   :target: https://coveralls.io/r/quantopian/zipline

.. _`Zipline Install Documentation` : https://www.zipline.io/install
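
A minimal sketch of the analysis step mentioned above, assuming the column
names recorded by the dual moving average example (loading the pickle needs
only pandas; plotting additionally requires matplotlib):

.. code:: python

    import pandas as pd

    perf = pd.read_pickle('dma.pickle')  # the -o output from `zipline run`
    perf[['AAPL', 'short_mavg', 'long_mavg']].plot()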
zipline
/zipline-1.4.1.tar.gz/zipline-1.4.1/README.rst
README.rst
Contributing to Zipline ======================= For developers of Zipline, people who want to contribute to the Zipline codebase or documentation, or people who want to install from source and make local changes to their copy of Zipline, please refer to the `Development Guidelines`__ if you would like to contribute. All contributions, bug reports, bug fixes, documentation improvements, enhancements and ideas are welcome. We `track issues`__ on `GitHub`__ and also have a `mailing list`__ where you can ask questions. __ https://www.zipline.io/development-guidelines.html __ https://github.com/quantopian/zipline/issues __ https://github.com/ __ https://groups.google.com/forum/#!forum/zipline
zipline
/zipline-1.4.1.tar.gz/zipline-1.4.1/CONTRIBUTING.rst
CONTRIBUTING.rst